author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-18 05:52:22 +0000
commit     38b7c80217c4e72b1d8988eb1e60bb6e77334114 (patch)
tree       356e9fd3762877d07cde52d21e77070aeff7e789 /ansible_collections/dellemc
parent     Adding upstream version 7.7.0+dfsg. (diff)
download   ansible-38b7c80217c4e72b1d8988eb1e60bb6e77334114.tar.xz
           ansible-38b7c80217c4e72b1d8988eb1e60bb6e77334114.zip

Adding upstream version 9.4.0+dfsg. (upstream/9.4.0+dfsg)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/dellemc')
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/CODEOWNERS14
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/ask_a_question.md11
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/bug_report.yml116
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/config.yml1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/feature_request.md23
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/PULL_REQUEST_TEMPLATE.md53
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml36
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/.github/workflows/code-coverage.yml63
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/CHANGELOG.rst390
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/FILES.json4030
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/MANIFEST.json12
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/README.md59
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/bindep.txt6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml84
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst119
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/106-change-ntp-get-fact.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/107-change-ntp-key-values-in-regression-script.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/113-change-ntp-module-name.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/114-change-ntp-module-name-in-regression-script.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/118-add-ntp-prefer-attribute.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/119-lag_interfaces-port-name-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/124-l2-interfaces-oc-yang-vlan-range-format-config-support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/128-add-several-attributes-to-interface-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/129-ntp-minpoll-maxpoll-config.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/130-vxlans-attribute-restriction-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/141-add-route-distinguisher-target-attributes-to-bgp-af-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/145-mclag-new-attributes.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/146-vrf-mgmt-bug-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/148-dhcp-relay-unit-tests.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/149-l2-interfaces-vlan-trunk-range-support-for-playbooks.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/150-replaced-overridden-for-logging-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/151-replaced-overridden-for-ntp-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/152-copp.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/156-replaced-overridden-for-vrfs-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/157-mac.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/159-replaced-overridden-for-system-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/160-sanity-check-errors-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/172-module-utils-import-remove-empties-from-ansible-lib.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/176-update-netcommon-version.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/177-bfd.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/180-regression-failures-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/182-unit-tests-for-bgp-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/183-unit-tests-for-bgp-af-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/184-unit-tests-for-bgp-as-paths-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/185-unit-tests-for-bgp-communities-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/186-unit-tests-for-bgp-ext-communities-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/187-unit-tests-for-bgp-neighbors-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/188-unit-tests-for-bgp-neighbors-af-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/191-unit-tests-for-bgp-af-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/195-aaa-login-authentication.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/196-replaced-overridden-for-lag-interface-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/197-unit-tests-for-interfaces-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/198-unit-tests-for-aaa-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/199-code-coverage-workflow-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/200-unit-tests-for-l2-interfaces-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/201-bgp-af-modify-vni-advertise-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/202-unit-tests-for-l3-interfaces-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/203-unit-tests-for-lag-interfaces-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/207-unit-tests-for-ntp-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/208-unit-tests-for-tacacs-server-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/209-unit-tests-for-prefix-lists-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/210-unit-tests-for-radius-server-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/212-unit-tests-for-static-routes-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/213-unit-tests-for-users-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/214-unit-tests-vlans-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/215-unit-tests-for-vxlans-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/216-unit-tests-for-vrfs-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/217-replaced-overridden-for-vlans-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/218-unit-tests-for-api-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/219-unit-tests-for-command-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/220-unit-tests-for-config-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/221-l2-interfaces-replaced-overridden-support.yaml3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/222-unit-tests-for-facts-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/223-unit-tests-for-system-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/224-ntp-clear-all-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/225-unit-tests-for-ip-neighbor-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/226-unit-tests-for-logging-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/227-replaced-overridden-for-port-group-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/228-unit-tests-for-port-group-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/229-unit-tests-for-port-breakout-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/230-vrfs-delete-interface-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/231-l3-interfaces-delete-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/233-bgp-neighbors-defaults-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/235-replaced-overridden-for-tacacs-server-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/236-static-routes-replaced-overridden-support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/237-aaa-replaced-overridden-support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/239-replaced-overridden-for-radius-server-resource-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/240-bgp-replaced-overridden-support.yaml3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/242-users-replaced-overridden-support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/244-added-rt-delay-attribute-to-bgp-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/245-enhance-bgp-neighbors-unit-tests.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/246-bgp-af-replaced-overridden-support.yaml3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/247-vxlans-replaced-overridden-support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/249-dhcp-relay-replaced-overridden-support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/250-bgp-as-paths-fix-merged-deleted.yaml3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/253-change-replaced-function-for-ip-neighbor-module.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/254-update-replace-methods.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/255-prefix_lists_replaced_overridden_support.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/257-vrfs-cli-test-case-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/258-change-logging-module-source-interface-naming.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/261-interfaces-timeout-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/262-vlan-mapping-bug-fix.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/v2.1.0_summary.yaml8
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/269-revert-aaa-breaking-changes.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/270-galaxy-yml-netcommon-and-version-fixes.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/v2.2.0_summary.yaml15
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/321-requirements-update-meta-runtime-ansible-version.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/322-docs-README-updates.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/v2.4.0_summary.yaml15
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml630
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/ADDITIONAL_INFORMATION.md15
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/BRANCHING.md1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/COMMITTER_GUIDE.md43
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/CONTRIBUTING.md207
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/ISSUE_TRIAGE.md291
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/MAINTAINER_GUIDE.md1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/docs/SUPPORT.md13
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_config.yaml27
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_off.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_on.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/sonic.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/__init__.py (renamed from ansible_collections/dellemc/os10/plugins/module_utils/network/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/acl_interfaces.py82
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/__init__.py (renamed from ansible_collections/dellemc/os10/plugins/modules/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/bfd.py89
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/bgp.py3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py19
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/__init__.py (renamed from ansible_collections/dellemc/os10/tests/unit/modules/network/os10/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/copp.py59
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/__init__.py (renamed from ansible_collections/dellemc/os6/plugins/action/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/dhcp_relay.py94
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/__init__.py (renamed from ansible_collections/dellemc/os6/plugins/cliconf/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/dhcp_snooping.py81
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py20
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py25
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ip_neighbor/ip_neighbor.py56
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/__init__.py (renamed from ansible_collections/dellemc/os6/plugins/doc_fragments/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/l2_acls.py129
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/__init__.py (renamed from ansible_collections/dellemc/os6/plugins/module_utils/network/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/l3_acls.py223
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/__init__.py (renamed from ansible_collections/dellemc/os6/plugins/modules/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/lldp_global.py81
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/logging/logging.py64
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/__init__.py (renamed from ansible_collections/dellemc/os6/plugins/terminal/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/mac.py66
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py16
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/__init__.py (renamed from ansible_collections/dellemc/os6/tests/unit/modules/network/os6/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/pki.py78
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py8
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/action/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/port_group.py66
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/route_maps/route_maps.py196
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/cliconf/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/stp.py152
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/system.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/users/users.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/doc_fragments/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/vlan_mapping.py64
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/vlans.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/aaa.py132
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/module_utils/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/acl_interfaces.py499
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/module_utils/network/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/bfd.py734
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp/bgp.py228
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py672
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py195
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py290
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py287
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py294
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/modules/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/copp.py393
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/__init__.py (renamed from ansible_collections/dellemc/os9/plugins/terminal/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/dhcp_relay.py695
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/__init__.py (renamed from ansible_collections/dellemc/os9/tests/integration/targets/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/dhcp_snooping.py649
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py556
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ip_neighbor/ip_neighbor.py420
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/__init__.py (renamed from ansible_collections/dellemc/os9/tests/unit/modules/network/os9/__init__.py)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/l2_acls.py602
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py560
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/l3_acls.py763
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py159
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py75
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tests/acl_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/lldp_global.py296
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/logging/logging.py458
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/mac.py431
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mclag/mclag.py328
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ntp/ntp.py188
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/pki.py563
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py71
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_group/port_group.py380
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py63
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py83
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/route_maps/route_maps.py2354
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py187
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/stp.py1404
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py169
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py83
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py94
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tests/interface_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/vlan_mapping.py517
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py114
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vrfs/vrfs.py192
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vxlans/vxlans.py139
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/aaa.py3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tests/lag_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/acl_interfaces.py148
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/bfd.py236
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py44
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py54
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py69
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py8
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tests/logging_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/copp.py127
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/dhcp_relay.py208
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tests/qos_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/dhcp_snooping.py213
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/facts.py34
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/interfaces.py31
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ip_neighbor/ip_neighbor.py126
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/l2_acls.py236
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py26
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/l3_acls.py322
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tests/system_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/lldp_global.py114
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/logging/logging.py128
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/mac.py151
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py40
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py14
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tests/users_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/pki.py144
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py13
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_group/port_group.py116
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/route_maps.py517
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/stp.py364
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py20
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/vlan_mapping.py225
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py8
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py3
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py11
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/formatted_diff_utils.py588
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/interfaces_util.py86
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/utils.py242
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py72
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_acl_interfaces.py385
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bfd.py684
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py275
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py375
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py133
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py169
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py145
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_copp.py295
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_relay.py781
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_snooping.py499
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py18
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py296
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ip_neighbor.py300
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_acls.py582
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py159
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_acls.py1058
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py198
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py107
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lldp_global.py301
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_logging.py274
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mac.py319
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py686
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py163
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_pki.py301
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py209
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_group.py370
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py97
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py108
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_route_maps.py1606
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py83
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_stp.py677
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py85
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py111
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py159
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlan_mapping.py543
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py60
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py84
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py87
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/test-requirements.txt4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml18
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/defaults/main.yml188
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/cleanup_tests.yaml30
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/preparation_tests.yaml50
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/defaults/main.yml236
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/main.yml11
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/preparation_tests.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template_del.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml90
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg4
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml239
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml159
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml151
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml295
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml316
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml18
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml12
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/defaults/main.yml80
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/main.yml11
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/preparation_tests.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/defaults/main.yml350
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/cleanup_tests.yaml30
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/preparation_tests.yaml38
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/defaults/main.yml133
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/cleanup_tests.yaml7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/preparation_tests.yaml12
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml156
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/defaults/main.yml41
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/main.yml10
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/defaults/main.yml214
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/cleanup_tests.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/preparation_tests.yaml18
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml234
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/defaults/main.yml476
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/cleanup_tests.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/preparation_tests.yaml18
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml120
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml40
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/defaults/main.yml47
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/cleanup_tests.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/main.yml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/preparation_tests.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template_del.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/defaults/main.yml127
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/cleanup_tests.yaml28
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/preparation_tests.yaml34
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/defaults/main.yml101
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/cleanup_tests.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/main.yml14
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/preparation_tests.yaml37
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml182
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/cleanup_tests.yaml18
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml105
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/defaults/main.yml122
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/cleanup_tests.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/preparation_tests.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template_del.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml47
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml32
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml8
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/defaults/main.yml82
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/cleanup_tests.yaml7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/preparation_tests.yaml7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml88
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml76
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/defaults/main.yml1060
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/cleanup_tests.yaml42
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/main.yml15
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/preparation_tests.yaml42
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml140
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/cleanup_tests.yaml7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/defaults/main.yml534
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/meta/main.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/cleanup_tests.yaml11
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/main.yml14
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/preparation_tests.yaml47
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template_del.yaml (renamed from ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml32
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml66
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml62
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml14
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/defaults/main.yml195
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/meta/main.yml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/cleanup_tests.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/preparation_tests.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/tasks_template.yaml22
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml51
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml84
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml6
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml29
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml20
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg2
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml60
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2165
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml15
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.15.txt1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.16.txt1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.17.txt1
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/__init__.py (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/mock.py23
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/unittest.py29
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/__init__.py0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/conftest.py31
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/__init__.py0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/__init__.py0
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_aaa.yaml78
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_acl_interfaces.yaml511
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_api.yaml7
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bfd.yaml598
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp.yaml473
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_af.yaml1007
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_as_paths.yaml494
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_communities.yaml390
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_ext_communities.yaml499
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors.yaml1114
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors_af.yaml471
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_command.yaml12
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_config.yaml21
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_copp.yaml203
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_relay.yaml917
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_snooping.yaml1128
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_facts.yaml5
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_interfaces.yaml339
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ip_neighbor.yaml146
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_acls.yaml962
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_interfaces.yaml187
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_acls.yaml1290
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_interfaces.yaml586
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_lag_interfaces.yaml204
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_logging.yaml180
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mac.yaml373
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mclag.yaml987
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ntp.yaml486
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_pki.yaml187
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_breakout.yaml252
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_group.yaml115
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_prefix_lists.yaml153
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_radius_server.yaml207
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_static_routes.yaml340
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_stp.yaml991
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_system.yaml232
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_tacacs_server.yaml172
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_users.yaml93
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vlans.yaml190
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vrfs.yaml249
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vxlans.yaml418
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/sonic_module.py149
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_aaa.py69
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_acl_interfaces.py96
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_api.py38
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bfd.py86
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp.py75
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_af.py82
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_as_paths.py83
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_communities.py104
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_ext_communities.py146
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors.py75
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors_af.py74
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_command.py59
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_config.py78
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_copp.py86
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_relay.py120
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_snooping.py213
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_facts.py45
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_interfaces.py90
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ip_neighbor.py87
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_acls.py105
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_interfaces.py76
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_acls.py105
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_interfaces.py83
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_lag_interfaces.py76
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_logging.py87
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mac.py86
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mclag.py104
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ntp.py96
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_pki.py85
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_breakout.py89
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_group.py105
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_prefix_lists.py69
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_radius_server.py69
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_static_routes.py94
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_stp.py155
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_system.py93
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_tacacs_server.py73
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_users.py73
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vlans.py94
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vrfs.py87
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vxlans.py96
-rw-r--r--ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/utils.py51
-rw-r--r--ansible_collections/dellemc/openmanage/.ansible-lint2
-rw-r--r--ansible_collections/dellemc/openmanage/.ansible-lint-ignore122
-rw-r--r--ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml136
-rw-r--r--ansible_collections/dellemc/openmanage/.gitignore4
-rw-r--r--ansible_collections/dellemc/openmanage/CHANGELOG.rst550
-rw-r--r--ansible_collections/dellemc/openmanage/FILES.json6498
-rw-r--r--ansible_collections/dellemc/openmanage/MANIFEST.json22
-rw-r--r--ansible_collections/dellemc/openmanage/README.md21
-rw-r--r--ansible_collections/dellemc/openmanage/bindep.txt4
-rw-r--r--ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml264
-rw-r--r--ansible_collections/dellemc/openmanage/changelogs/changelog.yaml2198
-rw-r--r--ansible_collections/dellemc/openmanage/changelogs/config.yaml34
-rw-r--r--ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md7
-rw-r--r--ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md154
-rw-r--r--ansible_collections/dellemc/openmanage/docs/README.md18
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst23
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst51
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst107
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst107
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst25
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst15
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst25
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst20
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst98
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst77
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst123
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst23
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_license.rst432
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst25
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst15
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst15
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst19
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst19
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_network_attributes.rst340
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst15
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst141
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst15
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst270
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst47
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst15
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst19
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst20
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_user_info.rst148
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst24
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst26
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies.rst447
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_actions_info.rst121
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_category_info.rst121
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_info.rst137
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_message_id_info.rst121
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst18
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst36
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst13
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst13
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst9
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst33
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst22
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst27
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst35
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst66
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst44
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst67
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst24
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst23
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst14
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst56
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst31
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst28
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_profile_info.rst196
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst20
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_info.rst148
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst12
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink_info.rst175
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst77
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan_info.rst150
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst20
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst10
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst41
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware_rollback.rst163
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst8
-rw-r--r--ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst201
-rw-r--r--ansible_collections/dellemc/openmanage/meta/execution-environment.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/meta/runtime.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml154
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml114
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml78
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml132
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml48
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml206
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml124
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml85
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml67
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml143
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml23
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_license.yml183
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml46
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml23
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_network_attributes.yml139
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml500
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml75
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml521
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml23
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml130
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user_info.yml31
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml196
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies.yml110
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_actions_info.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_category_info.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_info.yml21
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_message_id_info.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml67
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml70
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml95
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml179
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml218
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml113
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml51
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml105
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml26
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml69
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml97
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml102
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml59
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml179
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml113
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml32
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml229
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml277
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml181
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_all_groups.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_group_with_host_vars.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_groups.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_groups_with_host_and_group_vars.yml14
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml33
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml56
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml124
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml92
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml198
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml106
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml95
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml121
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml100
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml63
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml37
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml153
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml69
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml29
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml54
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml19
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml56
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml36
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_info.yml31
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml76
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink_info.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml31
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml90
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml58
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml140
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_info.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml46
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml46
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml227
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml227
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml41
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml71
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml122
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan_info.yml31
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml82
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml105
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml21
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml64
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml64
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_rollback.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml42
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml126
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml58
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml58
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml58
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml58
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/README.md56
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_attributes/idrac_attributes.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_bios/idrac_bios.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_certificate/idrac_certificate.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_export_server_config_profile/idrac_export_server_config_profile.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_firmware/idrac_firmware.yml11
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_gather_facts/idrac_gather_facts.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_import_server_config_profile/idrac_import_server_config_profile.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_job_queue/idrac_job_queue.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_os_deployment/idrac_os_deployment.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_reset/idrac_reset.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_server_powerstate/idrac_server_powerstate.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/idrac_storage_controller/idrac_storage_controller.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/inventory4
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/redfish_firmware/redfish_firmware.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/redfish_storage_volume/redfish_storage_volume.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/attributes.yml8
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/bios.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/certificates.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/credentials.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/export.yml14
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/firmware.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/import.yml11
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/osd.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/reset.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage_controller.yml21
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/README.md18
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py28
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py28
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py28
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py28
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py28
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py208
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py15
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py78
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py20
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py15
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py187
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py18
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py60
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py148
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py141
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py20
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py11
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py20
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py25
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py112
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py20
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py333
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py268
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py24
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py1118
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py23
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py9
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py21
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py13
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py17
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py748
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py15
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py361
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py9
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py696
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py41
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py9
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py14
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py38
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py232
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py20
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py30
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py1114
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py290
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py316
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py201
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py141
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py12
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py10
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py49
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py10
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py14
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py21
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py5
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py27
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py32
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py18
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py113
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py5
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py22
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py138
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py100
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py56
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py118
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py18
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py41
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py54
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py302
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py10
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py10
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py32
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py410
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py4
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py20
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py699
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py346
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py175
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py251
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py18
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py6
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py116
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py358
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py10
-rw-r--r--ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py374
-rw-r--r--ansible_collections/dellemc/openmanage/requirements.txt3
-rw-r--r--ansible_collections/dellemc/openmanage/requirements.yml4
-rw-r--r--ansible_collections/dellemc/openmanage/roles/README.md22
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/README.md308
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/defaults/main.yml5
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/argument_specs.yml71
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/main.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/converge.yml306
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/molecule.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/cleanup.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/converge.yml32
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/molecule.yml (renamed from ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml)0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/cleanup.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/converge.yml32
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/molecule.yml (renamed from ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml)0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/cleanup.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/converge.yml32
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/inventory (renamed from ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory)0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_attributes/vars/main.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/README.md368
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/defaults/main.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/argument_specs.yml122
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/main.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/__get_data.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/prepare.yml62
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/converge.yml135
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/molecule.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/converge.yml159
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/molecule.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/converge.yml32
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/molecule.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/converge.yml35
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/molecule.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/cleanup.yml17
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/prepare.yml30
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/converge.yml54
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/molecule.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/converge.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/molecule.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/converge.yml50
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/molecule.yml14
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/converge.yml74
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/molecule.yml14
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/converge.yml74
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/molecule.yml14
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/tasks/main.yml67
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/inventory (renamed from ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory)0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_bios/vars/main.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/README.md373
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/defaults/main.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/argument_specs.yml133
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/main.yml26
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/converge.yml119
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/converge.yml119
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/converge.yml120
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/converge.yml120
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/converge.yml102
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/converge.yml102
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/converge.yml102
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/converge.yml116
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/converge.yml129
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/converge.yml311
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/molecule.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/tasks/main.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_boot/vars/main.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/README.md386
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/defaults/main.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/argument_specs.yml132
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/main.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/converge.yml90
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/converge.yml168
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/converge.yml90
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/converge.yml207
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/converge.yml90
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/converge.yml94
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__delete_directory.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__extract_firmware_version.yml21
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_helper.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_ssl_key.yml18
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/converge.yml381
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/converge.yml56
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/converge.yml31
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/export.yml14
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/generate_csr.yml24
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/import.yml18
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/main.yml18
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/reset.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/inventory1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_certificate/vars/main.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/README.md361
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/defaults/main.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/argument_specs.yml143
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/main.yml53
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/cleanup.yml96
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/converge.yml100
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/molecule.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/verify.yml115
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/main.yml24
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/pre_req.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_cifs.yml21
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_http.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_https.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_local.yml19
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_nfs.yml19
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/vars/main.yml4
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/README.md342
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/defaults/main.yml11
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/argument_specs.yml114
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/main.yml20
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/converge.yml101
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/converge.yml117
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/converge.yml206
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/converge.yml37
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/tasks/main.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_firmware/vars/main.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/README.md767
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/defaults/main.yml8
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/argument_specs.yml110
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/main.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/converge.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/converge.yml81
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/converge.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/converge.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/converge.yml95
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/converge.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/converge.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/converge.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/converge.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/converge.yml93
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/converge.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/converge.yml92
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/converge.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/converge.yml44
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/converge.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/converge.yml80
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/converge.yml79
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/converge.yml65
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/converge.yml105
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_attributes_info.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_backplane_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_battery_info.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_bios_info.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_controller_info.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_cpu_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_emm_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_fan_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_firmware_info.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_host_nic_info.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_intrusion_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_license_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_memory_info.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_metrics_info.yml93
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_nic_info.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pas_sensor_info.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pcie_device_info.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_physical_info.yml24
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_power_supply_info.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_resource_id.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_secure_boot_info.yml41
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_system_info.yml19
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_virtual_disk_info.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_voltage_info.yml12
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/main.yml81
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/backplane_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/controller_assert.yml70
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/cpu_assert.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosure_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosureemm_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/fan_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/firmware_assert.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/hostnic_assert.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/lc_assert.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/license_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/manager_assert.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/memory_assert.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/mmetrics_assert.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/nic_assert.yml40
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/passensor_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/pciedevice_assert.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/physicaldisk_assert.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/powersupply_assert.yml42
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/psmetrics_assert.yml29
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/secureboot_assert.yml62
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/sensorsvoltage_assert.yml20
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/system_assert.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/tmetrics_assert.yml20
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/virtualdisk_assert.yml45
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/vars/main.yml46
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/README.md393
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/defaults/main.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/argument_specs.yml138
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/main.yml53
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/converge.yml313
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/molecule.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/converge.yml39
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/converge.yml37
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/converge.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/converge.yml36
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/converge.yml35
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/converge.yml41
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/converge.yml38
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/prepare.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/resources/tests/prepare.yml18
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tasks/main.yml31
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/vars/main.yml4
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/README.md181
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/defaults/main.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/argument_specs.yml53
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/main.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/converge.yml112
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/converge.yml90
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/converge.yml91
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/molecule.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_api.yml66
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_wsman.yml70
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/delete_job_with_id.yml41
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/get_idrac_firmware_version.yml20
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/main.yml53
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/validate_input_data.yml73
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue.j225
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue_force.j225
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_job_queue/vars/main.yml30
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/README.md509
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/esxi.yml20
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/main.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/rhel.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/argument_specs.yml191
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_destinations.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_working_directory.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/copy_iso_to_destination.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/create_working_directory_path.yml28
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/download_or_copy_source_files.yml112
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/validate_inputs.yml74
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/esxi/compile_iso.yml37
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/attach_iso_to_virtual_media.yml43
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/clean_up_virtual_media_slot.yml13
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/set_boot_mode_and_restart.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/extract_iso.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/generate_kickstart_file.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/main.yml74
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/rhel/compile_iso.yml73
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/tracking/track_for_os_deployment.yml11
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/ESXI_8.j239
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_8.j231
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_9.j231
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/inventory8
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/vars/main.yml93
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/README.md180
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/defaults/main.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/argument_specs.yml70
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/main.yml25
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/lcstatus_check.yml95
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/main.yml146
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/templates/idrac_lifecycle_controller_status.j223
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_reset/vars/main.yml41
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/README.md217
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/defaults/main.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/argument_specs.yml69
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/main.yml54
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/converge.yml188
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/molecule.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/converge.yml69
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/converge.yml72
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/converge.yml72
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/converge.yml63
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/converge.yml72
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/converge.yml73
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/converge.yml72
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/converge.yml48
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/init.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/main.yml18
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/vars/main.yml3
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/README.md682
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/defaults/main.yml7
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/argument_specs.yml200
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/main.yml26
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tasks/main.yml200
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/molecule.yml37
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/README.md219
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/defaults/main.yml9
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/argument_specs.yml60
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/main.yml55
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/converge.yml74
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/molecule.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/converge.yml174
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/molecule.yml15
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/cleanup.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/prepare.yml17
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_firmware/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/README.md417
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/defaults/main.yml10
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/argument_specs.yml176
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/main.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/converge.yml83
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/converge.yml83
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/converge.yml83
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/converge.yml83
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/converge.yml83
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/converge.yml80
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/converge.yml80
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__delete_virtual_drive.yml33
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__extract_storage.yml27
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__get_helper.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__idrac_reset.yml23
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__lc_status.yml22
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/converge.yml156
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/converge.yml133
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/converge.yml157
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/converge.yml157
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/molecule.yml1
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/converge.yml132
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/converge.yml55
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/molecule.yml0
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tasks/main.yml34
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/inventory2
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/test.yml6
-rw-r--r--ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/vars/main.yml4
-rw-r--r--ansible_collections/dellemc/openmanage/tests/README.md18
-rw-r--r--ansible_collections/dellemc/openmanage/tests/config.yml2
-rw-r--r--ansible_collections/dellemc/openmanage/tests/requirements.txt9
-rw-r--r--ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt3
-rw-r--r--ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt3
-rw-r--r--ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt3
-rw-r--r--ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt7
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_idrac_redfish.py345
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py389
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_redfish.py171
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/common.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py10
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py133
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py128
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py108
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py75
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py122
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py10
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py67
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py325
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py9
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py174
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py231
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py814
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_license.py746
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py12
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py91
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py10
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py24
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network_attributes.py1011
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py13
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py742
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py10
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py499
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py83
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_system_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_timezone_ntp.py59
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user.py174
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user_info.py231
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py70
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py20
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies.py1578
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_actions_info.py93
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_category_info.py2670
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_info.py121
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_message_id_info.py84
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py13
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py50
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py22
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py12
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py22
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py13
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py58
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py9
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py10
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py112
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py25
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py267
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py217
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py38
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py245
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py175
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py4
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py28
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py43
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py56
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py20
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline.py16
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py22
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py10
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py30
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py12
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py34
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py23
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py11
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py18
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py28
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile_info.py1279
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py122
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py44
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_info.py324
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py6
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink_info.py1155
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py294
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py6
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan_info.py346
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py11
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py8
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py60
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py83
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware_rollback.py299
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py15
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py571
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/utils.py49
-rw-r--r--ansible_collections/dellemc/openmanage/tests/unit/requirements.txt9
-rw-r--r--ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml33
-rw-r--r--ansible_collections/dellemc/os10/.gitignore387
-rw-r--r--ansible_collections/dellemc/os10/FILES.json8951
-rw-r--r--ansible_collections/dellemc/os10/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/MANIFEST.json38
-rw-r--r--ansible_collections/dellemc/os10/README.md93
-rw-r--r--ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst116
-rw-r--r--ansible_collections/dellemc/os10/changelogs/changelog.yaml124
-rw-r--r--ansible_collections/dellemc/os10/changelogs/config.yaml30
-rw-r--r--ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst446
-rw-r--r--ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst606
-rw-r--r--ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst511
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_aaa.md136
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_acl.md130
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_bfd.md89
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_bgp.md729
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_copy_config.md131
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_dns.md125
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_ecmp.md78
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_fabric_summary.md119
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_flow_monitor.md152
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_image_upgrade.md73
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_interface.md178
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_lag.md103
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_lldp.md149
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_logging.md97
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_network_validation.md304
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_ntp.md124
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_prefix_list.md104
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_qos.md90
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_raguard.md126
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_route_map.md190
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_snmp.md269
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_system.md126
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_template.md75
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_uplink.md109
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_users.md89
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_vlan.md123
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_vlt.md108
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_vrf.md143
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_vrrp.md139
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_vxlan.md259
-rw-r--r--ansible_collections/dellemc/os10/docs/os10_xstp.md196
-rw-r--r--ansible_collections/dellemc/os10/docs/roles.rst193
-rw-r--r--ansible_collections/dellemc/os10/meta/runtime.yml8
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md37
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml11
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all9
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml85
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml77
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml81
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml81
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml77
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml61
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml60
-rw-r--r--ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md63
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml16
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml210
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml194
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml206
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml200
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml95
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml95
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.pngbin101145 -> 0 bytes
-rw-r--r--ansible_collections/dellemc/os10/plugins/action/os10.py94
-rw-r--r--ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py81
-rw-r--r--ansible_collections/dellemc/os10/plugins/cliconf/os10.py88
-rw-r--r--ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py75
-rw-r--r--ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py42
-rw-r--r--ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py146
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py124
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/bgp_validate.py303
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/mtu_validate.py220
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/os10_command.py229
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/os10_config.py346
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/os10_facts.py505
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py145
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py236
-rw-r--r--ansible_collections/dellemc/os10/plugins/modules/wiring_validate.py246
-rw-r--r--ansible_collections/dellemc/os10/plugins/terminal/os10.py81
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/README.md136
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2148
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml35
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml1
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/README.md130
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2212
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml33
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/README.md89
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j234
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml11
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/README.md729
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j21244
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml384
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/README.md131
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml7
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j23
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/README.md125
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2101
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml43
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/README.md78
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j225
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml7
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md119
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml20
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine112
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine212
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine112
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine212
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml14
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml11
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml7
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md152
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j286
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml33
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md73
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml37
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml9
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/README.md178
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2258
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml72
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/README.md103
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j289
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml15
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/README.md149
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2195
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml48
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/README.md97
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j267
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml15
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/README.md304
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml33
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml9
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml32
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml44
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml24
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all30
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine111
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine211
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine111
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine211
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml14
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml11
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml56
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/README.md124
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml41
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2125
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j218
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml25
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md104
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info1
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j295
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml20
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/README.md90
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j248
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml11
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/README.md126
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2174
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml56
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/README.md190
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2348
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml55
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/README.md269
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2441
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml29
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/README.md126
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2130
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml31
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/README.md75
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml24
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml21
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml20
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml3
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template9
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template7
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template7
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template9
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template6
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template12
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all3
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml9
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/README.md109
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2102
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/README.md89
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j227
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml13
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/README.md123
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2129
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml31
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/README.md108
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2108
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml17
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/README.md143
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2122
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml33
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/README.md139
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2154
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml45
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/README.md259
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml20
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2434
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml112
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/README.md196
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2398
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml22
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml74
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/target-prefixes.network1
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j215
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j224
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j224
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j223
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j220
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j223
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j223
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j231
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j230
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j234
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j27
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j224
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j229
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2191
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2185
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2153
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2147
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2314
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2313
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j213
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j213
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml7
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml16
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml19
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml19
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml27
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml18
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml15
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml38
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml58
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml59
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml33
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml40
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml40
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml35
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j23
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml16
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml45
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j214
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j23
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j223
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j214
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j214
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j245
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j244
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j244
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j244
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j246
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j28
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j211
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j224
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j220
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j214
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j211
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j232
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j210
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j210
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j210
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j29
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j248
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j248
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j248
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j247
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j247
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml5
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2135
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2144
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2147
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2147
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j226
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j226
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j226
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j225
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j232
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j230
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j230
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j25
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j238
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml15
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j29
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j25
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j29
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j219
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j217
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j220
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j23
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j225
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml6
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j215
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j214
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j27
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j223
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml7
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j243
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j241
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j241
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j241
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j23
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j247
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j214
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j212
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j216
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j224
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j224
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j225
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j225
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j24
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j260
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml3
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml20
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml62
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml26
-rw-r--r--ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml14
-rw-r--r--ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt20
-rw-r--r--ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt13
-rw-r--r--ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt3
-rw-r--r--ansible_collections/dellemc/os10/tests/sanity/requirements.txt4
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg13
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg12
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml19467
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml855
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_1
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config252
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml194
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version9
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml27
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py90
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py110
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py150
-rw-r--r--ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py110
-rw-r--r--ansible_collections/dellemc/os6/.ansible-lint2
-rw-r--r--ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml33
-rw-r--r--ansible_collections/dellemc/os6/.gitignore387
-rw-r--r--ansible_collections/dellemc/os6/COPYING675
-rw-r--r--ansible_collections/dellemc/os6/FILES.json2987
-rw-r--r--ansible_collections/dellemc/os6/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/MANIFEST.json37
-rw-r--r--ansible_collections/dellemc/os6/README.md98
-rw-r--r--ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst98
-rw-r--r--ansible_collections/dellemc/os6/changelogs/changelog.yaml112
-rw-r--r--ansible_collections/dellemc/os6/changelogs/config.yaml30
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_aaa.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_acl.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_bgp.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_interface.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_lag.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_lldp.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_logging.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_ntp.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_qos.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_snmp.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_system.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_users.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_vlan.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_vrrp.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/os6_xstp.md1
-rw-r--r--ansible_collections/dellemc/os6/docs/roles.rst94
-rw-r--r--ansible_collections/dellemc/os6/meta/runtime.yml8
-rw-r--r--ansible_collections/dellemc/os6/playbooks/ibgp/README.md21
-rw-r--r--ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all4
-rw-r--r--ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml47
-rw-r--r--ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml47
-rw-r--r--ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml13
-rw-r--r--ansible_collections/dellemc/os6/plugins/action/os6.py95
-rw-r--r--ansible_collections/dellemc/os6/plugins/cliconf/os6.py88
-rw-r--r--ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py73
-rw-r--r--ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py278
-rw-r--r--ansible_collections/dellemc/os6/plugins/modules/os6_command.py225
-rw-r--r--ansible_collections/dellemc/os6/plugins/modules/os6_config.py410
-rw-r--r--ansible_collections/dellemc/os6/plugins/modules/os6_facts.py478
-rw-r--r--ansible_collections/dellemc/os6/plugins/terminal/os6.py95
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/README.md210
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml16
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2437
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml111
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/README.md118
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2202
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml43
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/README.md153
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2255
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml7
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml47
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/README.md110
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j294
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml28
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/README.md96
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j278
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml15
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/README.md114
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2159
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml26
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/README.md89
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j236
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml10
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/README.md82
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j227
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml9
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/README.md102
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j297
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml73
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/README.md108
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j294
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml23
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/README.md83
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j234
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml9
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/README.md93
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j237
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml10
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/README.md104
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2135
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml21
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/README.md92
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j272
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml14
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/README.md117
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2129
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml6
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml24
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os6/tests/.gitignore1
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml13
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml20
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml20
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml28
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml29
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml19
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml13
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml53
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml38
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml43
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml41
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml42
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml62
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml66
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml63
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml37
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml44
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml44
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml39
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml14
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml42
-rw-r--r--ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt4
-rw-r--r--ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt4
-rw-r--r--ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt1
-rw-r--r--ansible_collections/dellemc/os6/tests/sanity/requirements.txt4
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg16
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg7
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces41
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status48
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties6
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int15
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp11
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all10
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu3
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config124
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname3
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version17
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py88
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py108
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py146
-rw-r--r--ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py105
-rw-r--r--ansible_collections/dellemc/os9/.ansible-lint2
-rw-r--r--ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml33
-rw-r--r--ansible_collections/dellemc/os9/.gitignore387
-rw-r--r--ansible_collections/dellemc/os9/COPYING675
-rw-r--r--ansible_collections/dellemc/os9/FILES.json3953
-rw-r--r--ansible_collections/dellemc/os9/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/MANIFEST.json37
-rw-r--r--ansible_collections/dellemc/os9/README.md96
-rw-r--r--ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst76
-rw-r--r--ansible_collections/dellemc/os9/changelogs/changelog.yaml107
-rw-r--r--ansible_collections/dellemc/os9/changelogs/config.yaml30
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_aaa.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_acl.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_bgp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_copy_config.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_dcb.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_dns.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_ecmp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_interface.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_lag.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_lldp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_logging.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_ntp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_prefix_list.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_sflow.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_snmp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_system.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_users.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_vlan.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_vlt.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_vrf.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_vrrp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/os9_xstp.md1
-rw-r--r--ansible_collections/dellemc/os9/docs/roles.rst136
-rw-r--r--ansible_collections/dellemc/os9/meta/runtime.yml8
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md35
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml11
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all10
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml64
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml61
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml65
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml65
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml61
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml61
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml60
-rw-r--r--ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/plugins/action/os9.py95
-rw-r--r--ansible_collections/dellemc/os9/plugins/cliconf/os9.py88
-rw-r--r--ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py73
-rw-r--r--ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py146
-rw-r--r--ansible_collections/dellemc/os9/plugins/modules/os9_command.py232
-rw-r--r--ansible_collections/dellemc/os9/plugins/modules/os9_config.py350
-rw-r--r--ansible_collections/dellemc/os9/plugins/modules/os9_facts.py578
-rw-r--r--ansible_collections/dellemc/os9/plugins/terminal/os9.py83
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/README.md331
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2680
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml133
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/README.md134
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2277
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml88
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/README.md224
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2351
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml97
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/README.md131
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml7
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j23
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml1
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/README.md133
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2216
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml38
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/README.md94
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2111
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml40
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/README.md89
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml17
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j262
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml14
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml3
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/README.md173
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2237
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml50
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/README.md110
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2114
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml21
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/README.md245
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2514
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml94
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml6
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/README.md148
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2198
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml44
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/README.md98
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j241
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml11
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md110
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml17
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j281
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml33
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/README.md120
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2143
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml35
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/README.md192
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2524
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml83
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/README.md223
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2422
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml74
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/README.md109
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2141
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml28
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/README.md105
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j279
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/README.md132
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2217
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml39
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/README.md125
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j268
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml15
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/README.md148
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml19
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2218
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml59
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE674
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/README.md127
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml18
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml16
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2160
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml20
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml34
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml5
-rw-r--r--ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/os9/tests/.gitignore1
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml14
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator20
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains20
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid28
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output29
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml74
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout19
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml13
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml134
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml37
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt9
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml2
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml13
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml2
-rw-r--r--ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml55
-rw-r--r--ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt4
-rw-r--r--ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt4
-rw-r--r--ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt1
-rw-r--r--ansible_collections/dellemc/os9/tests/sanity/requirements.txt4
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg13
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg12
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems10
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces1259
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory19
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface26
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail35
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor4
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config238
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname1
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version18
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py88
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py108
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py148
-rw-r--r--ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py106
-rw-r--r--ansible_collections/dellemc/powerflex/.ansible-lint2
-rw-r--r--ansible_collections/dellemc/powerflex/.ansible-lint-ignore27
-rw-r--r--ansible_collections/dellemc/powerflex/.github/CODEOWNERS3
-rw-r--r--ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml128
-rw-r--r--ansible_collections/dellemc/powerflex/.gitignore3
-rw-r--r--ansible_collections/dellemc/powerflex/CHANGELOG.rst73
-rw-r--r--ansible_collections/dellemc/powerflex/FILES.json2551
-rw-r--r--ansible_collections/dellemc/powerflex/MANIFEST.json13
-rw-r--r--ansible_collections/dellemc/powerflex/README.md54
-rw-r--r--ansible_collections/dellemc/powerflex/ansible.cfg484
-rw-r--r--ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml9
-rw-r--r--ansible_collections/dellemc/powerflex/changelogs/changelog.yaml161
-rw-r--r--ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md10
-rw-r--r--ansible_collections/dellemc/powerflex/docs/INSTALLATION.md6
-rw-r--r--ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md6
-rw-r--r--ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md4
-rw-r--r--ansible_collections/dellemc/powerflex/docs/Release Notes.md30
-rw-r--r--ansible_collections/dellemc/powerflex/docs/SECURITY.md4
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/device.rst60
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst215
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/info.rst209
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst14
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst6
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst100
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst23
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/sdc.rst31
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/sds.rst26
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst28
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst414
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst11
-rw-r--r--ansible_collections/dellemc/powerflex/docs/modules/volume.rst24
-rw-r--r--ansible_collections/dellemc/powerflex/meta/runtime.yml2
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/device.yml90
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/fault_set.yml67
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/info.yml80
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/mdm_cluster.yml148
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/protection_domain.yml124
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/replication_consistency_group.yml234
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/replication_pair.yml82
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/sdc.yml64
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/sds.yml149
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/snapshot.yml130
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/snapshot_policy.yml215
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml61
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/modules/volume.yml133
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/group_vars/all51
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/host_vars/node211
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/inventory41
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/site.yml64
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/site_powerflex45.yml59
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex.yml64
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex45.yml61
-rw-r--r--ansible_collections/dellemc/powerflex/playbooks/roles/vars_files/connection.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py10
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py0
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py121
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py45
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py33
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/device.py85
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py380
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/info.py818
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py42
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py8
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py252
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py21
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/sdc.py114
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/sds.py708
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py22
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py828
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py5
-rw-r--r--ansible_collections/dellemc/powerflex/plugins/modules/volume.py18
-rw-r--r--ansible_collections/dellemc/powerflex/requirements.txt4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/README.md117
-rw-r--r--ansible_collections/dellemc/powerflex/roles/molecule.yml25
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/README.md155
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/defaults/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/argument_specs.yml48
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/main.yml27
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/converge.yml31
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/converge.yml67
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/install_activemq.yml30
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/uninstall_activemq.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_activemq/vars/main.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/README.md3
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/defaults/main.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/meta/main.yml41
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_CentOS.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_RedHat.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Rocky.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_SLES.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Ubuntu.yml10
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_CentOS.yml36
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_RedHat.yml36
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Rocky.yml36
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_SLES.yml36
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Ubuntu.yml51
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_VMkernel.yml50
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_WindowsOS.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_powerflex.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/main.yml14
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/CentOS.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/RedHat.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Rocky.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/SLES.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Ubuntu.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/VMkernel.yml2
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/WindowsOS.yml2
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/README.md165
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/defaults/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/argument_specs.yml50
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/main.yml25
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/converge.yml47
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml79
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_config/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/README.md160
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/defaults/main.yml17
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/handlers/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/argument_specs.yml47
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/main.yml28
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/converge.yml29
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/converge.yml32
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/converge.yml48
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_gateway.yml52
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml28
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/uninstall_gateway.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/templates/keepalived.conf.j218
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/CentOS.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/RedHat.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/SLES.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/Ubuntu.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/README.md170
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/defaults/main.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/meta/main.yml27
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/converge.yml30
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/converge.yml34
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/converge.yml30
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml22
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/uninstall_lia.yml19
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_lia/vars/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/README.md142
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/defaults/main.yml9
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/argument_spec.yml28
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/main.yml29
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/converge.yml63
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/converge.yml53
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/add_certs.yml168
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_mdm.yml32
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex3x_mdm.yml128
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml101
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/mdm_set_facts.yml40
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/remove_mdm.yml58
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/uninstall_mdm.yml19
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/CentOS.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/RedHat.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/SLES.yml3
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/Ubuntu.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/README.md311
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml21
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/handlers/main.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/meta/main.yml25
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/converge.yml118
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/converge.yml62
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/converge.yml54
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/converge.yml57
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/var_values.yml7
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/converge.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/inventory4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/molecule.yml11
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/converge.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/inventory4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/molecule.yml11
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/configure_sdc.yml28
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml75
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml47
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/remove_sdc.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_esxi_sdc.yml30
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_sdc.yml40
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/templates/driver_sync.conf.j231
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdc/vars/main.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/README.md145
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/defaults/main.yml9
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/argument_specs.yml34
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/main.yml21
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/converge.yml30
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/converge.yml34
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/converge.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/converge.yml66
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/var_values.yml3
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml142
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml120
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/sdr_set_facts.yml7
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sdr/vars/main.yml2
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/README.md243
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/defaults/main.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/argument_spec.yml88
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/main.yml24
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/converge.yml55
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/converge.yml50
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/var_values.yml11
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml112
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/uninstall_sds.yml31
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_sds/vars/main.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/README.md210
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/defaults/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_spec.yml85
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_specs.yml65
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/main.yml25
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/converge.yml35
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/converge.yml19
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/var_values.yml7
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/converge.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/inventory4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/molecule.yml11
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb3x.yml81
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml69
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/main.yml31
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml29
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/uninstall_tb.yml97
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_tb/vars/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/README.md165
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/defaults/main.yml6
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/argument_specs.yml52
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/main.yml29
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/converge.yml30
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/converge.yml34
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/molecule.yml4
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/converge.yml48
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/molecule.yml1
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/install_webui.yml23
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/main.yml8
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/uninstall_webui.yml20
-rw-r--r--ansible_collections/dellemc/powerflex/roles/powerflex_webui/vars/main.yml5
-rw-r--r--ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt11
-rw-r--r--ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt11
-rw-r--r--ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt31
-rw-r--r--ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt42
-rw-r--r--ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt28
-rw-r--r--ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.17.txt13
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/__init__.py0
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/fail_json.py21
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/initial_mock.py17
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/powerflex_unit_base.py40
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_device_api.py146
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fail_json.py21
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fault_set_api.py69
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py105
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py124
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py1
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_pair_api.py11
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdc_api.py64
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sds_api.py147
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_snapshot_policy_api.py186
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py166
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py64
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_device.py471
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_fault_set.py215
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py475
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py542
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py125
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_pair.py132
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sdc.py192
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sds.py630
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_snapshot_policy.py502
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py355
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py664
-rw-r--r--ansible_collections/dellemc/powerflex/tests/unit/requirements.txt (renamed from ansible_collections/dellemc/powerflex/tests/requirements.txt)0
-rw-r--r--ansible_collections/dellemc/unity/.ansible-lint4
-rw-r--r--ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml49
-rw-r--r--ansible_collections/dellemc/unity/CHANGELOG.rst25
-rw-r--r--ansible_collections/dellemc/unity/FILES.json318
-rw-r--r--ansible_collections/dellemc/unity/MANIFEST.json10
-rw-r--r--ansible_collections/dellemc/unity/README.md65
-rw-r--r--ansible_collections/dellemc/unity/ansible.cfg484
-rw-r--r--ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml9
-rw-r--r--ansible_collections/dellemc/unity/changelogs/changelog.yaml19
-rw-r--r--ansible_collections/dellemc/unity/docs/CONTRIBUTING.md8
-rw-r--r--ansible_collections/dellemc/unity/docs/INSTALLATION.md4
-rw-r--r--ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md2
-rw-r--r--ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md2
-rw-r--r--ansible_collections/dellemc/unity/docs/Release Notes.md7
-rw-r--r--ansible_collections/dellemc/unity/docs/SECURITY.md2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/cifsserver.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/filesystem.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/host.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/info.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/interface.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/nasserver.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/nfs.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/nfsserver.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/replication_session.rst294
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/smbshare.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/snapshot.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/storagepool.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/tree_quota.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/user_quota.rst2
-rw-r--r--ansible_collections/dellemc/unity/docs/modules/volume.rst2
-rw-r--r--ansible_collections/dellemc/unity/meta/runtime.yml2
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/cifsserver.yml151
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/consistencygroup.yml259
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/filesystem.yml294
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/filesystem_snapshot.yml111
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/host.yml180
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/info.yml220
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/interface.yml91
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/nasserver.yml173
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/nfs.yml551
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/nfsserver.yml104
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/replication_session.yml133
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/smbshare.yml229
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/snapshot.yml194
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/snapshotschedule.yml203
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/storagepool.yml185
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/tree_quota.yml138
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/user_quota.yml255
-rw-r--r--ansible_collections/dellemc/unity/playbooks/modules/volume.yml241
-rw-r--r--ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py2
-rw-r--r--ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py54
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/cifsserver.py4
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py4
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/filesystem.py10
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py6
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/host.py2
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/info.py107
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/interface.py2
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/nasserver.py32
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/nfs.py4
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/nfsserver.py16
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/replication_session.py551
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/smbshare.py36
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/snapshot.py5
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py11
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/storagepool.py13
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/tree_quota.py4
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/user_quota.py4
-rw-r--r--ansible_collections/dellemc/unity/plugins/modules/volume.py17
-rw-r--r--ansible_collections/dellemc/unity/requirements.txt3
-rw-r--r--ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt33
-rw-r--r--ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt5
-rw-r--r--ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt5
-rw-r--r--ansible_collections/dellemc/unity/tests/sanity/ignore-2.15.txt32
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py4
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_info_api.py58
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py1
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py1
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_replication_session_api.py112
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py2
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py2
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py2
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_info.py54
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py2
-rw-r--r--ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_replication_session.py178
3261 files changed, 162083 insertions, 162186 deletions
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/CODEOWNERS b/ansible_collections/dellemc/enterprise_sonic/.github/CODEOWNERS
new file mode 100644
index 000000000..5a1377bdc
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/CODEOWNERS
@@ -0,0 +1,14 @@
+# CODEOWNERS
+#
+# documentation for this file can be found at:
+# https://help.github.com/en/articles/about-code-owners
+
+# These are the default owners for the code and will
+# be requested for review when someone opens a pull request.
+# The order is alphabetical for easier maintenance.
+#
+# Kerry Meyer (kerry_meyer@dell.com)
+# Shade Talabi (Shade_Talabi@Dell.com)
+
+# for all files:
+* @kerry-meyer @stalabi1
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/ask_a_question.md b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/ask_a_question.md
new file mode 100644
index 000000000..aacce06a8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/ask_a_question.md
@@ -0,0 +1,11 @@
+---
+name: 💬 Ask a question
+about: Ask usage questions here
+title: "[QUESTION]:"
+labels: type/question
+assignees: ''
+
+---
+### How can the team help?
+
+**Details: ?** \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/bug_report.yml b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..65ce40653
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,116 @@
+---
+name: 🐛 Bug report
+description: Create a report to help us improve.
+title: "[BUG]: "
+labels: ["type/bug", "needs-triage"]
+body:
+ - type: markdown
+ attributes:
+ value: |
+        Thank you for taking the time to create this bug report. To help us investigate the bug, please provide the following information.
+        Verify first that your issue is not already reported on GitHub.
+        Also test whether the latest release and the devel branch are affected.
+        Complete *all* sections as described; this form is processed automatically.
+ - type: textarea
+ id: Description
+ attributes:
+ label: Bug Description
+      description: Provide a clear and concise summary of the bug.
+ validations:
+ required: true
+ - type: input
+ id: Product
+ attributes:
+ label: Product Name
+ description: Product Name
+ placeholder: |
+ Dell Enterprise SONiC / SONiC-OS etc
+ validations:
+ required: true
+ - type: input
+ id: component
+ attributes:
+ label: Component or Module Name
+      description: Write the short name of the module, plugin, task or feature below; use your best guess if unsure.
+ placeholder: httpapi, cliconf, sonic_bgp, etc.
+ validations:
+ required: true
+ - type: input
+ id: ansible
+ attributes:
+ label: DellEMC Enterprise SONiC Ansible Collection Version
+ description: Provide the output from "ansible-galaxy collection list dellemc.enterprise_sonic"
+ placeholder: |
+ dellemc.enterprise_sonic 2.0.0
+ validations:
+ required: true
+ - type: input
+ id: Productv
+ attributes:
+ label: SONiC Software Version
+ description: Provide the first line of "show version" output.
+ placeholder: |
+ SONiC Software Version
+ validations:
+ required: true
+ - type: textarea
+ id: device
+ attributes:
+ label: Configuration
+ description: Paste verbatim output from "ansible-config dump --only-changed"
+ placeholder: |
+ "ansible-config dump --only-changed"
+ validations:
+ required: true
+ - type: textarea
+ id: STEPS
+ attributes:
+ label: Steps to Reproduce
+ description: Describe exactly how to reproduce the problem, using a minimal test-case
+ validations:
+ required: true
+ - type: textarea
+ id: expected
+ attributes:
+ label: Expected Behavior
+      description: Enter a clear and concise description of the expected behaviour.
+ validations:
+ required: true
+ - type: textarea
+ id: actual
+ attributes:
+ label: Actual Behavior
+      description: Enter a clear and concise description of the actual behaviour.
+ validations:
+ required: true
+ - type: textarea
+ id: files
+ attributes:
+ label: Logs
+ description: Copy any relevant log output here or drop the log files in the text area.
+ Please be sure to remove all sensitive data such as IP addresses, host names, credentials, etc.
+ validations:
+ required: true
+ - type: textarea
+ id: screenshots
+ attributes:
+ label: Screenshots
+ description: Drop any relevant screenshots in the text area.
+ validations:
+ required: false
+ - type: input
+ id: ainfo
+ attributes:
+ label: Additional Information
+ description: Provide any additional information about the bug.
+ validations:
+ required: false
+ - type: markdown
+ attributes:
+ value: |
+ Community Note
+ * Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/)
+ to the original issue to help the community and maintainers prioritize this request
+ * Please do not leave "+1" or other comments that do not add relevant new information or questions,
+ they generate extra noise for issue followers and do not help prioritize the request
+ * If you are interested in working on this issue or have submitted a pull request, please leave a comment \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/config.yml b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..ec4bb386b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1 @@
+blank_issues_enabled: false \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/feature_request.md b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 000000000..c414549ee
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,23 @@
+---
+name: 🚀 Feature Request
+about: Suggest an idea for this project
+title: "[FEATURE]:"
+labels: type/feature-request, needs-triage
+assignees: ''
+---
+**Describe the solution you'd like**
+Provide a clear and concise description of the new feature request.
+
+**Describe alternatives you've considered**
+Provide a clear and concise description of any alternative solutions or features that you have considered.
+
+**Additional context**
+Describe any relevant context or add any relevant screenshots about the feature request.
+
+**Community Note**
+
+* Please vote on this issue by adding a 👍 [reaction](https://blog.github.com/2016-03-10-add-reactions-to-pull-requests-issues-and-comments/)
+ to the original issue to help the community and maintainers prioritize this request
+* Please do not leave "+1" or other comments that do not add relevant new information or questions,
+ they generate extra noise for issue followers and do not help prioritize the request
+* If you are interested in working on this issue or have submitted a pull request, please leave a comment \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/PULL_REQUEST_TEMPLATE.md b/ansible_collections/dellemc/enterprise_sonic/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 000000000..006a4eed2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,53 @@
+##### SUMMARY
+<!--- Describe the change below, including rationale and design decisions -->
+
+##### GitHub Issues
+List the GitHub issues impacted by this PR. If no GitHub issues are affected, please indicate this with "N/A".
+
+| GitHub Issue # |
+| -------------- |
+| |
+
+
+##### ISSUE TYPE
+<!--- Pick one below and delete the rest -->
+- Bugfix Pull Request
+- Docs Pull Request
+- Feature Pull Request
+- Test Pull Request
+
+##### COMPONENT NAME
+<!--- Write the short name of the module, plugin, task or feature below -->
+
+##### OUTPUT
+<!--- Paste the functionality test result below -->
+```paste below
+
+```
+##### ADDITIONAL INFORMATION
+<!--- Include additional information to help people understand the change here -->
+<!--- A step-by-step reproduction of the problem is helpful if there is no related issue -->
+
+<!--- Paste verbatim command output below, e.g. before and after your change -->
+```paste below
+
+```
+<!--- Measure the code coverage before and after the change by running the UT and ensure that the "coverage after the change" is not less than the coverage "before the change". Note that the unit testing coverage can be manually executed using the pytest tool or ansible-test tool. -->
+
+##### Checklist:
+
+- [ ] I have performed a self-review of my own code to ensure there are no formatting, linting, or security issues
+- [ ] I have verified that new and existing unit tests pass locally with my changes
+- [ ] I have not allowed coverage numbers to degrade
+- [ ] I have maintained at least 90% code coverage
+- [ ] I have commented my code, particularly in hard-to-understand areas
+- [ ] I have made corresponding changes to the documentation
+- [ ] I have added tests that prove my fix is effective or that my feature works
+- [ ] I have maintained backward compatibility or have provided any relevant "breaking_changes" descriptions in a "fragment" file in the "changelogs/fragments" directory of this repository.
+- [ ] I have provided a summary for this PR in valid "fragment" file format in the "changelogs/fragments" directory of this repository branch. Reference : [Ansible Change Log Document](https://docs.ansible.com/ansible/devel/community/development_process.html#changelogs-how-to)
+
+##### How Has This Been Tested?
+Please describe the tests that you ran to verify your changes. Please also list any relevant details of your test configuration.
+
+- [ ] Test A
+- [ ] Test B
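The checklist above refers to changelog "fragment" files under changelogs/fragments. As a rough, hypothetical sketch (the file name and wording are invented for illustration and not taken from this patch), such a fragment is a small YAML file keyed by change category:

```yaml
# changelogs/fragments/000-short-description.yaml  -- hypothetical file name
# Keys such as minor_changes, bugfixes, or breaking_changes group one-sentence entries
# that the changelog tooling later collects into changelogs/changelog.yaml and CHANGELOG.rst.
minor_changes:
  - sonic_interfaces - Describe the user-visible change here in one sentence, in the same style as CHANGELOG.rst.
```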
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml
index c2ae1dd09..3a5bbd9f6 100644
--- a/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/workflows/ansible-test.yml
@@ -7,36 +7,10 @@ on:
# This ensures that even if there haven't been commits that we are still testing against latest version of ansible-test for each ansible-base version
schedule:
- cron: '0 6 * * *'
+
jobs:
+ changelog:
+ uses: ansible-network/github_actions/.github/workflows/changelog.yml@main
+ if: (github.event_name != 'schedule' && github.event_name != 'push' && !contains(github.event.pull_request.labels.*.name, 'new_resource_module'))
sanity:
- name: Sanity (${{ matrix.ansible }})
- strategy:
- matrix:
- ansible:
- - stable-2.9
- - stable-2.10
- - stable-2.11
- - stable-2.12
- - stable-2.13
- - devel
- runs-on: ubuntu-latest
- steps:
-
- - name: Check out code
- uses: actions/checkout@v1
- with:
- path: ansible_collections/dellemc/enterprise_sonic
-
- - name: Set up Python 3.9
- uses: actions/setup-python@v1
- with:
- python-version: 3.9
-
- - name: Install ansible-base (${{ matrix.ansible }})
- run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
-
- - name: Install ansible_collections.ansible.netcommon
- run: ansible-galaxy collection install ansible.netcommon -p ../../
-
- - name: Run sanity tests
- run: ansible-test sanity --docker -v --color
+ uses: ansible-network/github_actions/.github/workflows/sanity.yml@main
diff --git a/ansible_collections/dellemc/enterprise_sonic/.github/workflows/code-coverage.yml b/ansible_collections/dellemc/enterprise_sonic/.github/workflows/code-coverage.yml
new file mode 100644
index 000000000..1f23d7a6e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/.github/workflows/code-coverage.yml
@@ -0,0 +1,63 @@
+name: Code coverage
+on:
+ push:
+ pull_request:
+
+jobs:
+ codecoverage:
+ env:
+ PY_COLORS: "1"
+ source_directory: "./source"
+ strategy:
+ fail-fast: false
+ matrix:
+ os:
+ - ubuntu-latest
+ ansible-version:
+ - stable-2.14
+ python-version:
+ - "3.10"
+ runs-on: ${{ matrix.os }}
+
+ name: "Code coverage py${{ matrix.python-version }} / ${{ matrix.os }} / ${{ matrix.ansible-version }}"
+ steps:
+ - name: Checkout the collection repository
+ uses: actions/checkout@v3
+ with:
+ path: ${{ env.source_directory }}
+ ref: ${{ github.event.pull_request.head.sha }}
+ fetch-depth: "0"
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install ansible-core (${{ matrix.ansible-version }})
+ run: python3 -m pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+
+ - name: Read collection metadata from galaxy.yml
+ id: identify
+ uses: ansible-network/github_actions/.github/actions/identify_collection@main
+ with:
+ source_path: ${{ env.source_directory }}
+
+ - name: Build and install the collection
+ uses: ansible-network/github_actions/.github/actions/build_install_collection@main
+ with:
+ install_python_dependencies: true
+ source_path: ${{ env.source_directory }}
+ collection_path: ${{ steps.identify.outputs.collection_path }}
+ tar_file: ${{ steps.identify.outputs.tar_file }}
+
+ - name: Print the ansible version
+ run: ansible --version
+
+ - name: Print the python dependencies
+ run: python3 -m pip list
+
+ - name: Run code coverage tests
+ run: |
+ ansible-test units --python ${{ matrix.python-version }} --coverage --requirements
+ ansible-test coverage report
+ working-directory: ${{ steps.identify.outputs.collection_path }}
diff --git a/ansible_collections/dellemc/enterprise_sonic/CHANGELOG.rst b/ansible_collections/dellemc/enterprise_sonic/CHANGELOG.rst
new file mode 100644
index 000000000..d3ecd6f70
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/CHANGELOG.rst
@@ -0,0 +1,390 @@
+======================================
+Dellemc.Enterprise_Sonic Release Notes
+======================================
+
+.. contents:: Topics
+
+
+v2.4.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2024-01-08
+| This release provides an Ansible compliance change required on top of the changes included in
+| the 2.3.0 release of the enterprise_sonic Ansible network resource module collection.
+| It addresses an issue raised by the Ansible core team with the content of the 2.3.0 release,
+| and provides accompanying documentation changes in the README file. Additional details are
+| described below.
+| 1) Update the "requires_ansible" version in the meta/runtime.yml file for this collection
+| to the oldest supported version of ansible-core. (This was recently changed by Red Hat/Ansible
+| to version "2.14.0".)
+| 2) Update the README file "Recommended version" values for Ansible and Python in accordance
+| with the previous change item to modify the oldest supported version of ansible-core which,
+| in turn, requires a Python version >= "3.9".
+| 3) Update the list of resource modules in the README file to include all currently available
+| resource modules for this collection.
+
+
+Bugfixes
+--------
+
+- requirements - Update requires_ansible version in meta/runtime.yml to the oldest supported version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/321).
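For context on item 1 of the 2.4.0 summary above, the sketch below shows only the kind of meta/runtime.yml key being described; the value is the one named in the release notes, and the rest of the collection's actual file (plugin routing and so on) is omitted.

```yaml
# Minimal sketch of the relevant meta/runtime.yml entry; not the complete file.
requires_ansible: ">=2.14.0"
```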
+
+v2.3.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2024-01-03
+| This release provides the functionality enhancements listed below, along with fixes for
+| problems found in regression testing or reported by users. The main functionality enhancements
+| provided are the following items.
+| 1) Complete the support for "replaced" and "overridden" state handling for all resource modules except for the bgp_neighbors and bgp_neighbors_af modules.
+| With this release, the required support has been added for any resource modules that were not
+| provided with this support for the 2.1.0 release with the two exceptions noted above.
+| 2) Provide initial support for the "--check" and "--diff" mode options for playbook execution. This
+| release provides the common utility support for these options for use by all resource modules.
+| It also provides the specific resource module changes required for implementation of the
+| functionality in many of the existing resource modules. (The "--check" and "--diff" mode support
+| for the remaining resource modules is planned for inclusion in the next release.)
+| 3) New resource modules for "Public Key Infrastructure", STP, and DHCP Snooping.
+| 4) Support for "ranges" of VLANs (e.g., '2-100') in tasks for the mclag resource module.
+| Please refer to the "CHANGELOG.rst" file at the top directory level of this repo for additional
+| details on the contents of this release.
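As a rough illustration of items 2 and 4 above, the hypothetical task below previews an MCLAG change using the standard Ansible check and diff task keywords. The module arguments follow the sonic_mclag documentation and the VLAN-range value assumes the '2-100'-style syntax added in this release, so treat it as a sketch rather than a verified playbook.

```yaml
# Hypothetical task for illustration only.
- name: Preview an MCLAG unique_ip change without applying it
  dellemc.enterprise_sonic.sonic_mclag:
    config:
      domain_id: 1
      unique_ip:
        vlans:
          - vlan: Vlan2-100   # assumed VLAN-range form enabled by this release
    state: merged
  check_mode: true   # same effect as running the play with --check
  diff: true         # same effect as running the play with --diff
```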
+
+
+Minor Changes
+-------------
+
+- sonic_aaa - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/304).
+- sonic_aaa - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_acl_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/306).
+- sonic_acl_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_bgp_as_paths - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/290).
+- sonic_bgp_communities - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/251).
+- sonic_bgp_ext_communities - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/252).
+- sonic_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/301).
+- sonic_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+- sonic_interfaces - Change deleted design for interfaces module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/310).
+- sonic_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_ip_neighbor - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/285).
+- sonic_ip_neighbor - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_l2_acls - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/306).
+- sonic_l2_acls - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_l2_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/303).
+- sonic_l2_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_l3_acls - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/306).
+- sonic_l3_acls - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_l3_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/241).
+- sonic_lag_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/303).
+- sonic_lag_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_logging - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/285).
+- sonic_logging - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_mclag - Add VLAN range support for 'unique_ip' and 'peer_gateway' options (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/288).
+- sonic_mclag - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/288).
+- sonic_ntp - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/281).
+- sonic_ntp - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_port_breakout - Add Ansible support for all port breakout modes now allowed in Enterprise SONiC (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/276).
+- sonic_port_breakout - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/291).
+- sonic_port_group - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/284).
+- sonic_port_group - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_radius_server - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/279).
+- sonic_radius_server - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_static_routes - Add playbook check and diff modes support for static routes resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/313).
+- sonic_static_routes - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_system - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/284).
+- sonic_system - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_tacacs_server - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/281).
+- sonic_tacacs_server - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_users - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/304).
+- sonic_users - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_vlans - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/301).
+- sonic_vlans - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- sonic_vrfs - Add mgmt VRF replaced state handling to sonic_vrfs module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/298).
+- sonic_vrfs - Add mgmt VRF support to sonic_vrfs module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/293).
+- sonic_vrfs - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/285).
+- sonic_vrfs - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+- tests - Add UTs for BFD, COPP, and MAC modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/287).
+- tests - Enable contiguous execution of all regression integration tests on an S5296f (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/277).
+- tests - Fix the bgp CLI test base_cfg_path derivation of the bgp role_path by avoiding relative pathing from the possibly external playbook_dir (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/283).
+
+Bugfixes
+--------
+
+- sonic_bgp_communities - Fix incorrect "facts" handling for parsing of a BGP community list configured with an empty "members" list (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/319).
+- sonic_bgp_neighbors - Fix prefix-limit issue (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/289).
+- sonic_interfaces - Add warnings when speed and auto_negotiate are configured at the same time (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+- sonic_interfaces - Fix support for standard naming interfaces (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+- sonic_interfaces - Prevent configuring speed in port group interfaces (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+- sonic_stp - Correct the commands list for STP delete state (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/302).
+
+New Modules
+-----------
+
+- dellemc.enterprise_sonic.sonic_dhcp_snooping - Manage DHCP Snooping on SONiC
+- dellemc.enterprise_sonic.sonic_pki - Manages PKI attributes of Enterprise SONiC
+- dellemc.enterprise_sonic.sonic_stp - Manage STP configuration on SONiC
+
+v2.2.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2023-06-01
+| This release provides Ansible compliance changes required on top of the changes included in
+| the 2.1.0 release of the enterprise_sonic Ansible network resource module collection.
+| It addresses two issues raised by the Ansible core team with the content of the 2.1.0 release.
+| 1) Back out the "breaking_change" made in the sonic_aaa resource module to fix a functional
+| shortcoming in the enterprise_sonic Ansible collection. Although the change is still needed,
+| it will be deferred to a "major" release.
+| 2) Re-enable installation of new Ansible Netcommon repo instances when installing the
+| enterprise_sonic Ansible collection. The 2.1.0 enterprise_sonic Ansible release included a
+| workaround for a bug introduced in the 5.0.0 version of the Ansible Netcommon repo. This
+| workaround was implemented in the "galaxy.yml" file for the enterprise_sonic
+| 2.1.0 release. New versions of Ansible Netcommon were published after the problematic 5.0.0
+| version and the revised "galaxy.yml" file for this release enables installation of these
+| newer versions. (A hypothetical illustration of such a dependency entry follows below.)
+
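+A hypothetical sketch of the kind of "galaxy.yml" dependency entry described in item 2 is shown
+below; the actual version specifiers used in the 2.1.0 workaround and in this release may differ.
+
+.. code-block:: yaml
+
+   # Hypothetical galaxy.yml dependency entry; the specifiers in the released file may differ.
+   dependencies:
+     # The 2.1.0 release shipped a workaround that excluded the problematic netcommon 5.0.0
+     # version; this release relaxes the constraint so that newer netcommon versions install again.
+     ansible.netcommon: '>=2.0.0'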
+
+Minor Changes
+-------------
+
+- galaxy_yml - Enable installation of Ansible Netcommon versions after 5.0.0 and update the enterprise_sonic release version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/270).
+- sonic_aaa - Revert breaking changes for AAA module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/269).
+
+v2.1.0
+======
+
+Release Summary
+---------------
+
+| Release Date: 2023-05-15
+| This release provides enhanced Dell Enterprise SONiC Ansible Collection support for SONiC 4.x images.
+| In addition to new resource modules to support previously existing functionality, it provides
+| support for the "QinQ" (Vlan Mapping) function introduced with SONiC release 4.1. It also provides
+| bug fixes and enhancements for support of features that were initially introduced in previous
+| Enterprise SONiC Ansible releases. The changelog describes changes made to the modules and plugins
+| included in this collection since release 2.0.0.
+
+
+Minor Changes
+-------------
+
+- module_utils - Change the location for importing remove_empties from the obsolete Netcommon location to the officially required Ansible library location to fix sanity errors (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/172).
+- sonic_aaa - Add replaced and overridden states support for AAA resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/237).
+- sonic_aaa - Add unit tests for AAA resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/198).
+- sonic_api - Add unit tests for api resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/218).
+- sonic_bfd, sonic_copp - Update replaced methods (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/254).
+- sonic_bgp - Add rt_delay attribute to module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/244).
+- sonic_bgp - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/240).
+- sonic_bgp - Add unit tests for BGP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/182).
+- sonic_bgp_af - Add several attributes to support configuration of route distinguisher and route target (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/141).
+- sonic_bgp_af - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/246).
+- sonic_bgp_af - Add unit tests for BGP AF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/183).
+- sonic_bgp_af - Modify BGP AF resource module unit tests to adjust for changes in the resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/191).
+- sonic_bgp_as_paths - Add unit tests for BGP AS paths resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/184).
+- sonic_bgp_communities - Add unit tests for BGP communities resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/185).
+- sonic_bgp_ext_communities - Add unit tests for BGP ext communities resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/186).
+- sonic_bgp_neighbors - Add unit tests for BGP neighbors resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/187).
+- sonic_bgp_neighbors - Enhance unit tests for BGP Neighbors resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/245).
+- sonic_bgp_neighbors_af - Add unit tests for BGP neighbors AF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/188).
+- sonic_command - Add unit tests for command resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/219).
+- sonic_config - Add unit tests for config resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/220).
+- sonic_dhcp_relay - Add a common unit tests module and unit tests for dhcp relay module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/148).
+- sonic_dhcp_relay - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/249).
+- sonic_facts - Add unit tests for facts resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/222).
+- sonic_interfaces - Add speed, auto-negotiate, advertised-speed and FEC to interface resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/128).
+- sonic_interfaces - Add unit tests for interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/197).
+- sonic_ip_neighbor - Add unit tests for IP neighbor resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/225).
+- sonic_ip_neighbor - Change the replaced function in ip_neighbor resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/253).
+- sonic_l2_interfaces - Add support for parsing configuration containing the OC Yang vlan range syntax (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/124).
+- sonic_l2_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/221).
+- sonic_l2_interfaces - Add support for specifying vlan trunk ranges in Ansible playbooks (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/149).
+- sonic_l2_interfaces - Add unit tests for l2_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/200).
+- sonic_l3_interfaces - Add unit tests for l3_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/202).
+- sonic_lag_interfaces - Add replaced and overridden states support for LAG interface resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/196).
+- sonic_lag_interfaces - Add unit tests for lag_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/203).
+- sonic_logging - Add replaced and overridden states support for logging resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/150).
+- sonic_logging - Add unit tests for logging resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/226).
+- sonic_logging - Change logging get facts for source_interface naming (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/258).
+- sonic_mclag - Add delay_restore, gateway_mac, and peer_gateway attributes to module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/145).
+- sonic_ntp - Add prefer attribute to NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/118).
+- sonic_ntp - Add replaced and overridden states support for NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/151).
+- sonic_ntp - Add unit tests for NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/207).
+- sonic_ntp - Change NTP get facts to get default parameters (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/106).
+- sonic_ntp - Change NTP key values in NTP regression test script (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/107).
+- sonic_ntp - Change NTP module name (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/113).
+- sonic_ntp - Change NTP module names in NTP regression test script (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/114).
+- sonic_ntp - Change NTP resource module to make minpoll and maxpoll be configured together (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/129).
+- sonic_port_breakout - Add unit tests for port breakout resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/229).
+- sonic_port_group - Add replaced and overridden states support for port group resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/227).
+- sonic_port_group - Add unit tests for port group resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/228).
+- sonic_prefix_lists - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/255).
+- sonic_prefix_lists - Add unit tests for prefix lists resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/209).
+- sonic_radius_server - Add replaced and overridden states support for RADIUS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/239).
+- sonic_radius_server - Add unit tests for RADIUS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/210).
+- sonic_static_routes - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/236).
+- sonic_static_routes - Add unit tests for static routes resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/212).
+- sonic_system - Add replaced and overridden states support for system resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/159).
+- sonic_system - Add unit tests for system resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/223).
+- sonic_tacacs_server - Add replaced and overridden states support for TACACS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/235).
+- sonic_tacacs_server - Add unit tests for TACACS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/208).
+- sonic_users - Add replaced and overridden states support for users resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/242).
+- sonic_users - Add unit tests for users resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/213).
+- sonic_vlans - Add replaced and overridden states support for VLAN resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/217).
+- sonic_vlans - Add unit tests for Vlans resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/214).
+- sonic_vrfs - Add replaced and overridden states support for VRF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/156).
+- sonic_vrfs - Add unit tests for VRFS resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/216).
+- sonic_vxlans - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/247).
+- sonic_vxlans - Add unit tests for VxLans resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/215).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- sonic_aaa - Add default_auth attribute to the argspec to replace the deleted group and local attributes. This change allows for ordered login authentication. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/195).
+
+Bugfixes
+--------
+
+- Fix regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/180).
+- Fix sanity check errors in the collection caused by Ansible library changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/160).
+- install - Update the required ansible.netcommon version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/176).
+- sonic_bgp_af - Fix issue with vnis and advertise modification for a single BGP AF (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/201).
+- sonic_bgp_as_paths - Fix issues with merged and deleted states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/250).
+- sonic_interfaces - Fix command timeout issue (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/261).
+- sonic_l3_interfaces - Fix IP address deletion issue (GitHub issue #170) (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/231).
+- sonic_lag_interfaces - Fix port name issue (GitHub issue #153) (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/119).
+- sonic_neighbors - Fix handling of default attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/233).
+- sonic_ntp - Fix the issue (GitHub issue #205) with clearing all NTP configuration when no config is given (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/224).
+- sonic_vlan_mapping - Remove platform checks (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/262).
+- sonic_vrfs - Add tasks as a workaround to mgmt VRF bug (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/146).
+- sonic_vrfs - Fix spacing issue in CLI test case (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/257).
+- sonic_vrfs - Fix the issue (GitHub issue #194) with VRFs when deleting an interface (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/230).
+- sonic_vxlans - Remove required_together restriction for evpn_nvo and source_ip attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/130).
+- workflows - Fix dependency installation issue in the code coverage workflow (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/199).
+
+New Modules
+-----------
+
+- dellemc.enterprise_sonic.sonic_acl_interfaces - Manage access control list (ACL) to interface binding on SONiC
+- dellemc.enterprise_sonic.sonic_bfd - Manage BFD configuration on SONiC
+- dellemc.enterprise_sonic.sonic_copp - Manage CoPP configuration on SONiC
+- dellemc.enterprise_sonic.sonic_dhcp_relay - Manage DHCP and DHCPv6 relay configurations on SONiC
+- dellemc.enterprise_sonic.sonic_ip_neighbor - Manage IP neighbor global configuration on SONiC
+- dellemc.enterprise_sonic.sonic_l2_acls - Manage Layer 2 access control lists (ACL) configurations on SONiC
+- dellemc.enterprise_sonic.sonic_l3_acls - Manage Layer 3 access control lists (ACL) configurations on SONiC
+- dellemc.enterprise_sonic.sonic_lldp_global - Manage Global LLDP configurations on SONiC
+- dellemc.enterprise_sonic.sonic_logging - Manage logging configuration on SONiC
+- dellemc.enterprise_sonic.sonic_mac - Manage MAC configuration on SONiC
+- dellemc.enterprise_sonic.sonic_port_group - Manages port group configuration on SONiC
+- dellemc.enterprise_sonic.sonic_route_maps - route map configuration handling for SONiC
+- dellemc.enterprise_sonic.sonic_vlan_mapping - Configure vlan mappings on SONiC
+
+v2.0.0
+======
+
+Release Summary
+---------------
+
+This release provides Dell Enterprise SONiC Ansible Collection support for SONiC 4.x images. It is the first release for the 2.x branch of the collection. Subsequent enhancements for support of SONiC 4.x images will also be provided as needed on the 2.x branch. This release also contains bugfixes and enhancements to supplement the Ansible functionality provided previously for SONiC 3.x images. The changelog describes changes made to the modules and plugins included in this collection since release 1.1.0.
+
+
+Minor Changes
+-------------
+
+- Add an execution-environment.yml file to the "meta" directory to enable use of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
+- bgp_af - Add support for BGP options to configure usage and advertisement of vxlan primary IP address related attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/62).
+- bgp_as_paths - Update module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102).
+- bgp_neighbors - Add BGP peer group support for multiple attributes. The added attributes correspond to the same set of attributes added for BGP neighbors with PR 72 (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
+- bgp_neighbors - Add an auth_pwd dictionary and nbr_description attribute to the argspec (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/67).
+- bgp_neighbors - Add prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
+- bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
+- bgp_neighbors_af - Add prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
+- playbook - Update examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102).
+- sonic_vxlans - Add configuration capability for the primary IP address of a vxlan vtep to facilitate vxlan path redundancy (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/58).
+- vlans - Add support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98).
+- workflow - Add stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- bgp_af - Add the route_advertise_list dictionary to the argspec to replace the deleted, obsolete advertise_prefix attribute used for SONiC 3.x images on the 1.x branch of this collection. This change corresponds to a SONiC 4.0 OC YANG REST compliance change for the BGP AF REST API. It enables specification of a route map in conjunction with each route advertisement prefix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/63).
+- bgp_af - Remove the obsolete 'advertise_prefix' attribute from argspec and config code. This and the co-requisite replacement with the new route advertise list argument structure require corresponding changes in playbooks previously used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60).
+- bgp_neighbors - Replace the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure, as sketched in the example following this list (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
+- bgp_neighbors - Replace, for BGP peer groups, the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
+
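+The following sketch illustrates the revised "bfd" dictionary structure described in the two
+bgp_neighbors items above. The attribute names shown are assumptions drawn from module
+documentation conventions and should be verified against the sonic_bgp_neighbors module docs.
+
+.. code-block:: yaml
+
+   # Illustrative sketch only; verify attribute names against the sonic_bgp_neighbors docs.
+   - name: Enable BFD for a BGP neighbor using the SONiC 4.x argspec
+     dellemc.enterprise_sonic.sonic_bgp_neighbors:
+       config:
+         - bgp_as: 65001
+           neighbors:
+             - neighbor: Ethernet8
+               bfd:
+                 enabled: true
+                 check_failure: true
+                 profile: profile_1
+       state: merged
+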
+Bugfixes
+--------
+
+- Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103).
+- Fixed regression test sequencing and other regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/85).
+- bgp_neighbors - Remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60).
+- port_breakout - Fixed a bug in formulation of port breakout REST APIs (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
+- sonic - Fix a bug in handling of interface names in standard interface naming mode (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103).
+- sonic_aaa - Fix a bug in facts gathering by providing required conditional branching (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
+- sonic_aaa - Modify regression test sequencing to enable correct testing of the functionality for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78).
+- sonic_command - Fix bugs in handling of CLI commands involving a prompt and answer sequence (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/76/files).
+- users - Fixed a bug in facts gathering (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
+- vxlan - update Vxlan test cases to comply with SONiC behavior (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/105).
+
+New Modules
+-----------
+
+- dellemc.enterprise_sonic.sonic_ntp - Manage NTP configuration on SONiC.
+- dellemc.enterprise_sonic.sonic_prefix_lists - prefix list configuration handling for SONiC
+- dellemc.enterprise_sonic.sonic_static_routes - Manage static routes configuration on SONiC
+
+v1.1.0
+======
+
+New Modules
+-----------
+
+- dellemc.enterprise_sonic.sonic_aaa - Manage AAA and its parameters
+- dellemc.enterprise_sonic.sonic_radius_server - Manage RADIUS server and its parameters
+- dellemc.enterprise_sonic.sonic_system - Configure system parameters
+- dellemc.enterprise_sonic.sonic_tacacs_server - Manage TACACS server and its parameters
+
+v1.0.0
+======
+
+New Plugins
+-----------
+
+Cliconf
+~~~~~~~
+
+- dellemc.enterprise_sonic.sonic - Use sonic cliconf to run command on Dell OS10 platform
+
+Httpapi
+~~~~~~~
+
+- dellemc.enterprise_sonic.sonic - HttpApi Plugin for devices supporting Restconf SONIC API
+
+New Modules
+-----------
+
+- dellemc.enterprise_sonic.sonic_api - Manages REST operations on devices running Enterprise SONiC
+- dellemc.enterprise_sonic.sonic_bgp - Manage global BGP and its parameters
+- dellemc.enterprise_sonic.sonic_bgp_af - Manage global BGP address-family and its parameters
+- dellemc.enterprise_sonic.sonic_bgp_as_paths - Manage BGP autonomous system path (or as-path-list) and its parameters
+- dellemc.enterprise_sonic.sonic_bgp_communities - Manage BGP community and its parameters
+- dellemc.enterprise_sonic.sonic_bgp_ext_communities - Manage BGP extended community-list and its parameters
+- dellemc.enterprise_sonic.sonic_bgp_neighbors - Manage a BGP neighbor and its parameters
+- dellemc.enterprise_sonic.sonic_bgp_neighbors_af - Manage the BGP neighbor address-family and its parameters
+- dellemc.enterprise_sonic.sonic_command - Runs commands on devices running Enterprise SONiC
+- dellemc.enterprise_sonic.sonic_config - Manages configuration sections on devices running Enterprise SONiC
+- dellemc.enterprise_sonic.sonic_interfaces - Configure interface attributes on interfaces such as Eth, LAG, VLAN, and loopback (creates a loopback interface if it does not exist)
+- dellemc.enterprise_sonic.sonic_l2_interfaces - Configure interface-to-VLAN association that is based on access or trunk mode
+- dellemc.enterprise_sonic.sonic_l3_interfaces - Configure the IPv4 and IPv6 parameters on interfaces such as Eth, LAG, VLAN, and loopback
+- dellemc.enterprise_sonic.sonic_lag_interfaces - Manage link aggregation group (LAG) interface parameters
+- dellemc.enterprise_sonic.sonic_mclag - Manage multi-chassis link aggregation group (MCLAG) domain and its parameters
+- dellemc.enterprise_sonic.sonic_port_breakout - Configure port breakout settings on physical interfaces
+- dellemc.enterprise_sonic.sonic_users - Manage users and its parameters
+- dellemc.enterprise_sonic.sonic_vlans - Manage VLAN and its parameters
+- dellemc.enterprise_sonic.sonic_vrfs - Manage VRFs and associate VRFs with interfaces such as Eth, LAG, VLAN, and loopback
+- dellemc.enterprise_sonic.sonic_vxlans - Manage VxLAN EVPN and its parameters
diff --git a/ansible_collections/dellemc/enterprise_sonic/FILES.json b/ansible_collections/dellemc/enterprise_sonic/FILES.json
index 39a1d6342..f16263808 100644
--- a/ansible_collections/dellemc/enterprise_sonic/FILES.json
+++ b/ansible_collections/dellemc/enterprise_sonic/FILES.json
@@ -25,14 +25,14 @@
"name": "tests/regression/test.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "47a1db3b7c5d49f80307023f42c78c5efdf5efa0b1f507f1106b96179b64dc4c",
+ "chksum_sha256": "b59c33c40ac775fc7177021955d7dedd56df9564df5ab5c1792ac712e6350064",
"format": 1
},
{
"name": "tests/regression/hosts",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "22f4ec1024a7ff0f9d471735392d71eac3378f5c7ddc7cf1e0c42a958def8e78",
+ "chksum_sha256": "b449f2e1d0b054fa1a08d6bbc746a4b4abbf1f98b76c538d11188abee20a2494",
"format": 1
},
{
@@ -99,28 +99,147 @@
"format": 1
},
{
- "name": "tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml",
+ "name": "tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d53284cc72caddd5bc84a507bb7330af983d44c35427ac7fbe623a29aa7d47f",
+ "chksum_sha256": "96931ead58f28114732ef53d9e1ac7fabeaf40c3993686c5b75732305df07b12",
"format": 1
},
{
- "name": "tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml",
+ "name": "tests/regression/roles/sonic_bgp_neighbors/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b8363d6259e43af603abde938ed159d1a3a083cbef49fa37095bdc779133921",
+ "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd",
"format": 1
},
{
- "name": "tests/regression/roles/sonic_bgp_neighbors/meta",
+ "name": "tests/regression/roles/sonic_bfd",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_bgp_neighbors/meta/main.yaml",
+ "name": "tests/regression/roles/sonic_bfd/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ba37c792a7724e9bec876a003c1951a8d25f366b2978e69c0ea1a329099874a",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "784facd1ffa9792649d0bf05dcc4aed644595590c628537b97459bf0b1ecea3c",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5128608a2f36b2073e023aabb3784b767bd90b3dd69fce9a124c61b9ebbf4ea3",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/tasks/tasks_template_del.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df2ec9b1feb6098f03f1d0dc64ac974197b1d0b0b3a39b87a84f2be87a8f53e2",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c3ac5d4ed7222f6485a3ac746527a4467cfbf966e65284f7ed4a3f6d2403e24",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_bfd/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d130603ee0811b08d8d857272c3e80e5c5d48af76904b3cbebd595e8c46aeaa7",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34113182d1947b1e123ed123acc25e7a86c20a66ee2f248f3467bd9804ccf07a",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81d2c5ac604ced07b4b832048716a51fd8cb2e155c11f52415899d3a05f2e45f",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_ip_neighbor/meta/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd",
@@ -172,7 +291,7 @@
"name": "tests/regression/roles/sonic_lag_interfaces/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7198a5cabb43600d45494873192676174231f617e4bd15e13ba4e8f10b6944dc",
+ "chksum_sha256": "f327aa7319766dab5cf7bea44ec96494908930d205b60d08db4f80957c059085",
"format": 1
},
{
@@ -347,7 +466,7 @@
"name": "tests/regression/roles/sonic_bgp_af/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cf66c8d3e6ab8abfe99759ba204cf9bc33e8108248489ad13dfcbc8b6e8b1484",
+ "chksum_sha256": "b62b37272161b4ddd12dabc2e373e14e479537b8860a73a73d38d82eab449db4",
"format": 1
},
{
@@ -365,6 +484,76 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_dhcp_relay",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c27ff568fed008dea00a2b6949da7c6a755c5a451b50b29b217ab00be056a368",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e06cd628bdd1aa09a2388e9df767eac0bb5844b90a86216270bcf5d3b93db42",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9566df6886312aa83c5888a95d4f5e39d8f3397953fa2e9c0c5a59f6e0de748",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "903c93343b0c14238a167f67e178d767b86fc2c0380b0b046d16f4b2add7cae2",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "94bf41ab768d299355fe3384854cd92ce500a8ecc9b8bbb390cce687d0e454f3",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_relay/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_interfaces",
"ftype": "dir",
"chksum_type": null,
@@ -382,7 +571,7 @@
"name": "tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9d9e4c73c7952f36475c79fbf88ed2e5866588bafe4acec029dfc50071b75e5a",
+ "chksum_sha256": "0a7e039308eca14d5eeb7f856f663aab56621275ecff63fb132260f59397c212",
"format": 1
},
{
@@ -410,7 +599,7 @@
"name": "tests/regression/roles/sonic_interfaces/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5e3c20b43609a7a0a867ef1f51dd5b891e6e8b891f5f07d04aada1883d1b6c96",
+ "chksum_sha256": "0f8a93d53647d2a8a4c91c8fb9d62a39e903ba7bf3d377e186a8666659f41e77",
"format": 1
},
{
@@ -428,6 +617,69 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_copp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9ce82484cecdba7d6d3426c23da85cda523b37cbcd9b853643437a3431b141d",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f8d499c11967160cbab26e5f4508f3c408132a6f75cdbb4f899eec69c9d3cb7",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f9ea6a1efb1752d1b02311b9294762286fb8a394a805c08adc254e6ba293bd0",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7746cee5fb3de5273d9cfb523635aa5ad277f61d21f1557373cf21e009486dfd",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_copp/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_tacacs_server",
"ftype": "dir",
"chksum_type": null,
@@ -501,7 +753,7 @@
"name": "tests/regression/roles/sonic_tacacs_server/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f2800620b593938fecb1b8a7354c680bcc3f1fcbb59381ca25889d2424b82198",
+ "chksum_sha256": "71f69483df452a9a49c621a0e5d1bd6969f94e5f8a904182f0292304af1ce4ba",
"format": 1
},
{
@@ -519,6 +771,153 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_lldp_global",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f7db7fe6a82ee960e18bec865faa1062973cdfa30f9ec0b21c6b81edba890e6",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0f93f0b020d3d1680d2e64b5337b77d643997c8704f39e5f447473298231668",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ad002966c04a93d1cbda87ccb6a304bbb78ae20b4ff2eccbd30be279ee4b116",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "885eb873521b39d983538049ca493bc3f71b6e0f5fa220202bebe4faa356c9a4",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/tasks/tasks_template_del.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "18ae30030b9a4aba7f28bb37e405852600fdce4597f65a155f837b153670bca0",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d2651aa73462357d6a8689016231f1e83d3f90629e0c57ff38a06116c7c7f13",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_lldp_global/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9757a1b1fe3a0bca2fd41e679cea17d540be146945a3ec76409e86ef0d58580d",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "193bd7ccf62c0e7a3a6bd118df9644e87be8f763508afaec9c3f560baa7eb505",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1d91d6f3391c839f0d4ae26f43946678fd58ffbe6247220a408f3b7eaaf231b",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4020500110e677dd3b356d91a7125b7bc2a31273b3649d8f510b12d7fd95fa0b",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd4fc37c2effbf6cb374040a6b05e07aba468c412e06c0a0158a1b2bb2d04881",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l2_acls/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_command",
"ftype": "dir",
"chksum_type": null,
@@ -669,7 +1068,7 @@
"name": "tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9f72f840292265f4c4a9d96c0ec18aec1f73302bcd870b4b351ed319286eddd3",
+ "chksum_sha256": "6fc80ce5ba488d6713de614989f50f39e9bdaf770d42ef564c68daf9c442ee97",
"format": 1
},
{
@@ -732,7 +1131,7 @@
"name": "tests/regression/roles/sonic_vlans/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9f64b5c536fe544240ce9c38f0cb6cd324c5759d627a5c290bf0347fd2740512",
+ "chksum_sha256": "b494021c45cd48e08b01e9ff35ff6544402e846e0aa040f784d00cf791b648e1",
"format": 1
},
{
@@ -802,7 +1201,7 @@
"name": "tests/regression/roles/sonic_prefix_lists/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "04127c5fbb729e1e955d2aeaadb9ec67d2df28afaa7cd9918a836942fbdaed9a",
+ "chksum_sha256": "646f761fa8c6d1fb18d576d19bab65bab0a48a4327ae4d8865c212725309c662",
"format": 1
},
{
@@ -914,7 +1313,7 @@
"name": "tests/regression/roles/common/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3f82f73da84954bb7f19f2dceb7105bb98e34f83943514a99533dcde30707c6c",
+ "chksum_sha256": "2a818cf45804f22099e39cdebe416c89acc29d1177e2ac34c6e7953828514f15",
"format": 1
},
{
@@ -932,6 +1331,76 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_l3_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "45862482bfc8612b782ac80b2011df8e3aef9fe0991c87c31b6d981ea6da1d1a",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3fefb47c57b7eea6e953664778ebb024a47d5ebb131f891c80b50e6b5a9e27e5",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1d91d6f3391c839f0d4ae26f43946678fd58ffbe6247220a408f3b7eaaf231b",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "970a3b340a7ad61f92b8433e145e786ff075d3828821db2b5ffb9d6ffb775207",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aaa7417efbeed43ec3285569e4b5f9eac229a530311250a200b0e249e54653de",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_l3_acls/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_ntp",
"ftype": "dir",
"chksum_type": null,
@@ -984,7 +1453,7 @@
"name": "tests/regression/roles/sonic_ntp/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f86c7979a8ba1918a426d3a04d8eea9ca1d1f0bb0e9470e9be5a1a426b0e1aa4",
+ "chksum_sha256": "dd02c49de6170dd9e5f1b32004c8224b986d1d3a60c071a4ad5fba8b0f53c31a",
"format": 1
},
{
@@ -1075,7 +1544,7 @@
"name": "tests/regression/roles/sonic_radius_server/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5f4b8ef73168fc7cdbf0582a176198e4034c0c8301120a9e65a170c7e394bde6",
+ "chksum_sha256": "8e97b8f8db857938ce1b1d31c02b427ef03e2b9acce6b32de5f09cfe430d0fbd",
"format": 1
},
{
@@ -1159,7 +1628,7 @@
"name": "tests/regression/roles/sonic_vxlan/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ebd7db1ebfdb114eb0280e5ff41adaffff92caa2df2f74cc7dc98a33214d36ad",
+ "chksum_sha256": "98960e6d041d674de21efac8be60319c82f32bd6f62c13349fa65ab2ac9aa01a",
"format": 1
},
{
@@ -1191,6 +1660,13 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_mclag/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0950d1798fd0b6ae3c49dd051c9e6049a8a796f221c14be8d602e2ce015d263",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_mclag/tasks/tasks_template.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1208,7 +1684,7 @@
"name": "tests/regression/roles/sonic_mclag/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "59f11ea9543380e101f9d2d9bd11d47f0ec0225aac74b706647b7bb6babad758",
+ "chksum_sha256": "df17fe35195f652a957121492f407dd281f1759e04250c89e80c8370b1e84fba",
"format": 1
},
{
@@ -1229,7 +1705,7 @@
"name": "tests/regression/roles/sonic_mclag/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1a3fdc2249981d95b911544645bc81439e0bab922c06eb72d9a53457122539ca",
+ "chksum_sha256": "67b8c0dcf572a867ffb049254f897c9980a18951de2fd96ed28cf272f17c3352",
"format": 1
},
{
@@ -1296,17 +1772,10 @@
"format": 1
},
{
- "name": "tests/regression/roles/sonic_users/tasks/cli_tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37f946b76a42a134a1e8bd8bd337760512e44038b418be2d3943a2c05459f6f2",
- "format": 1
- },
- {
"name": "tests/regression/roles/sonic_users/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "895f8900d570fdfc3c187e09c688ceefb21cd13bcd8af8b78beb2dc7b59c5a4b",
+ "chksum_sha256": "4159d2c33b2349ee35482a7af04584f302c6033466e8c65c4c86d5c20c0d92f9",
"format": 1
},
{
@@ -1320,7 +1789,7 @@
"name": "tests/regression/roles/sonic_users/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "29e2bb2399e1576c889e4d1a2c915773ca57c3b0befa939be5c1f30bd422f6c1",
+ "chksum_sha256": "6bd93f27731271ea90d1bac60a29523020ee18ea8873c1488f951f704cb861f6",
"format": 1
},
{
@@ -1338,38 +1807,101 @@
"format": 1
},
{
- "name": "tests/regression/roles/sonic_vrfs",
+ "name": "tests/regression/roles/sonic_dhcp_snooping",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_vrfs/templates",
+ "name": "tests/regression/roles/sonic_dhcp_snooping/tasks",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg",
+ "name": "tests/regression/roles/sonic_dhcp_snooping/tasks/cleanup_tests.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8e05b1c592caa3ffa0aef7d93d7f1e12d42c1eb98ce4f6b5b8736ac698ebd351",
+ "chksum_sha256": "b614a84ae322512ff4f365d3964688f6cc4fec47d0aa39c3699a610234039c2f",
"format": 1
},
{
- "name": "tests/regression/roles/sonic_vrfs/tasks",
+ "name": "tests/regression/roles/sonic_dhcp_snooping/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8e57b2782f58b8f61663fdfc7ba10faa1b169c463a9852e3e36c90f5a89f6db",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_snooping/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b04b49d3e449d0b75e1970e581c970c31e55641b2e798622c29e1917862d3549",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_snooping/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1933de6c2fbf34610af75db33540157c8447dd5aed36cb2fb9a3e7e9f1c20775",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_snooping/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_snooping/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93fe55260d77d598c10fec53353ef25f97fd473a95281670d06a78543803c0dc",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_snooping/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_dhcp_snooping/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vrfs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vrfs/templates",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml",
+ "name": "tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a0a4218ce9437e1a54f2edc499296591ffdf35f861f5d12eee0f53b76292032f",
+ "chksum_sha256": "543ae24304c348bec48a338979d12396b22638cb4064ba5c78fa822c9b5c2b0d",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vrfs/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
@@ -1383,14 +1915,14 @@
"name": "tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a640983377723baab16e6b9a3fd9b5f68fd947abd94282237c8591a396fefc94",
+ "chksum_sha256": "47f9d14be7a62e7c2ce8078162ed5e227ae4f66431a317a49e26de7ac2550914",
"format": 1
},
{
"name": "tests/regression/roles/sonic_vrfs/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f27630400987fa257da883e08c8fc908510f51e2dea4bbba00219d329fe3eae4",
+ "chksum_sha256": "4c66d3dbf3797263c35165a4ee0b1700191fde6756bc9bdb0691fa00b82db906",
"format": 1
},
{
@@ -1404,7 +1936,7 @@
"name": "tests/regression/roles/sonic_vrfs/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "31e02192de52bb9e3299115d5392687e7baf808d0224e168e426bf1fd98a6048",
+ "chksum_sha256": "7d052d943034184c33885c4c5564a83993825026a82fb592722fa75d2f01c7d0",
"format": 1
},
{
@@ -1481,7 +2013,7 @@
"name": "tests/regression/roles/sonic_system/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5d09922e1001ecaedc3e5326bd7f1c4e9133357ba2eba8df4c8fe8e9ffb9eb4b",
+ "chksum_sha256": "923b800c0f4be3ac9074a1ff688ba3b9b725076118fc46355a1024c2af6ad8b9",
"format": 1
},
{
@@ -1625,6 +2157,13 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_static_routes/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da555d0e369c8c0daae5112eb1801f8cf6d53c40f276525b607f7294d1ac92c0",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_static_routes/tasks/tasks_template.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -1642,39 +2181,102 @@
"name": "tests/regression/roles/sonic_static_routes/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "05c972841c89a97abfeffe3ea0ce4df26d90a7ea0e4b12a43e94391dc8f4a8a1",
+ "chksum_sha256": "3d4e79dbd408f0a9143552e55412c5b47f1cb8970c3afc70a9954fb99bf08ac5",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_static_routes/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml",
+ "name": "tests/regression/roles/sonic_static_routes/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "162e1ee53ad29b43871e3319c81a8a897c3fa5e431c3adfa11225c39819c1b68",
+ "chksum_sha256": "26bf5a3a1f242151d5192d191279032cd9de8a19e3501fc5b0496f52bfa0efd0",
"format": 1
},
{
- "name": "tests/regression/roles/sonic_static_routes/defaults",
+ "name": "tests/regression/roles/sonic_static_routes/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_static_routes/defaults/main.yml",
+ "name": "tests/regression/roles/sonic_static_routes/meta/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2df5842ca274f91f4c478ebfa10e895039cffe8e44eff6eb846a0206feb04192",
+ "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
"format": 1
},
{
- "name": "tests/regression/roles/sonic_static_routes/meta",
+ "name": "tests/regression/roles/sonic_mac",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_static_routes/meta/main.yaml",
+ "name": "tests/regression/roles/sonic_mac/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8bfae10085ae270b566d618ec13b499ffd57691d6eb264789959d5b7c4949253",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72432a5fbaaa41b3ffd8494450da257df2b6736654b93fdc72c5c6c20cc97076",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b83563657926ea8b4f3ab71eb6a190f55b05e729ad0a9de24c51dcd5b38ceb2",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca35cf404e80a4326059cc872ae324c80d871f636e59401ee02a822d6dbe8b3d",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e84a948ab216fa7501a30bee677b9c770d56c81e413a95003927e14aa95daa47",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_mac/meta/main.yaml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
@@ -1698,7 +2300,7 @@
"name": "tests/regression/roles/test_reports/templates/regression_html_report.j2",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ba914f5e928e8d590b975bc1392def64bf9724d0a405a17e20c2a40ff297ff34",
+ "chksum_sha256": "b44730b5284330a2308b097eaa9ad10ec3df0d0165b2a2efac6f9c3d777b79bd",
"format": 1
},
{
@@ -1796,7 +2398,7 @@
"name": "tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8c74c59e8e666909f5dc072cafbfb243973612317c3a2aef51c9474ddb84c505",
+ "chksum_sha256": "47566db8562aa54bf86ace18e285524af1c203128332d459a45930a81c7bcda7",
"format": 1
},
{
@@ -1814,6 +2416,153 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_vlan_mapping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe8418d3201dc4e07b63ff53105f743f59567b1cb9c72eafbcfe03d6682d81b3",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a64267f08b28406bd2cc8f472d12e0fc9d43de121d41b15bdd628fe3a228853b",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a784efb397137c55c72693c1e62a4ae2ad95b350a9030e7c90fcf42c5c9c8549",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3f1789877770c3a5f6141cdf26c45ccd43b6c81f1025e48075edb73321c007a",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4fe4d04ceb98beac6327157077ca7f386b30a6cda19662437714c5adff51ed6",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_vlan_mapping/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "07bf7c7c747cbb27b05e02568b2d138b2ee0d1c32929edc785b4809ee5a9f362",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f176a6a888b035d4a7e7db03b38a9c8201547ee35b2ae67a691c360e4dc0a5b",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fbc8b30dea4474a35f9387409ed5aaa3a797d77990cc01a5d63e84b85ca1ccb",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84279b579ec3b6afd9b4e0e80dec9b7f16467cf4cca02827a22a70bd3773b49d",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/tasks/tasks_template_del.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf6880b29e7cb0360e99db6440f732b27de14ff9cdc89435ba6933ff0c088733",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ddd3d69d300a23a64a8dbd01be9d2e75950e6563daaf2856ffab266fa84f78be",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_pki/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_l3_interfaces",
"ftype": "dir",
"chksum_type": null,
@@ -1838,7 +2587,7 @@
"name": "tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9cf66706c721d3eea2222ebb0aba2c6fe1b4cdba36cd8bbdbffb39882f551d51",
+ "chksum_sha256": "b7e1520a9a666a935ab282a4a54f6421717457a50169b3364ad0b6f90921d4ff",
"format": 1
},
{
@@ -1859,7 +2608,7 @@
"name": "tests/regression/roles/sonic_l3_interfaces/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e8ded0a1e8c9fa73380e115371f1cfc30a59fbe4520fddf1fec4c09f8317ad4d",
+ "chksum_sha256": "9f52ca0fcfe84732914aa8fd009c1359f2110c5a761bf5c6f4fb5bf2ee7e4f5a",
"format": 1
},
{
@@ -1929,7 +2678,7 @@
"name": "tests/regression/roles/sonic_aaa/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "696ae45b0e5d370c8b2330355a266ecb910da3d29073a6892ca3f756c18af4c8",
+ "chksum_sha256": "b27248b5ccb69300f21f7d146f203991ed7a21aac4020082d1828ad387d0cf54",
"format": 1
},
{
@@ -1947,6 +2696,146 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_port_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "609ba46dc67d758e013fa3d7b903efe6432896aa257ef6a4810a768e287d437e",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5e0319990e4850b0c26104462b64de5538845f2070687efd65643bd53c2eaeb",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e57db59c6f4143f32fb6ef55ad4365bb6c99bbfc112dfb8c3a88ca82186b740",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bba3732c4a67066dfd85415588a4363774dfe38a767f7529834bf481ab5035f",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9a39599414d552b3da8bafb342d27761a450f34d641a2b8067f372a3b266479",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_group/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d7aeef77926e57734152e5267e2f662ae13feb403edfae40df642550b6f79dc",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a138e8a66245c02dbf7e3f25728f5c9575d9df602cf78e27f82e43556985351",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a0798645ec73fc0c9a19f75acf10ca277a6496e52436cd76c15920a8bea5475",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5bcd1f568fa27b9c1bd1ca8204499df590663028ba59ca3070a7884e63d4f2d",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eac27d4b755873e686133b19a0ddb237fe17c6966865982b4e1ace457bbcd91",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_route_maps/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_l2_interfaces",
"ftype": "dir",
"chksum_type": null,
@@ -1992,7 +2881,7 @@
"name": "tests/regression/roles/sonic_l2_interfaces/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f9f8c4a3db4f35e73942f4a63a109181a274470176e955ab0b9a4403446f743f",
+ "chksum_sha256": "b63a9264a3fbe13aaa02ad4c5a7042d498457104ce603e9c203ebcf6a5f7c5e6",
"format": 1
},
{
@@ -2010,6 +2899,76 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_logging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86eee618148807d4346ebef4e688f2f6469f18482f609177e8dd27fc3903050b",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46b4d92958a8e042e9a8fe87eb8a2f5ce55a47f1853e670a915e28582f4677be",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "574a69598232bc2ef6b98c5f7fd1c526f1dbab925885726791e6bc2188c9a54e",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe9d4e14ffd81f5fde70ad10d78c8476d9bb5b9cad6677fe07fa50de4b29a8c7",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5f0a520ca407ae8525bdeaca7233ffd78a33909b9add21657ee64c26d555c39",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_logging/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4780e46505dc61639a5541c0108b65dadd794ba515d4d102a4b5a791e5f857dd",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_bgp_ext_communities",
"ftype": "dir",
"chksum_type": null,
@@ -2055,7 +3014,7 @@
"name": "tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "af2a290eb466df24bdb922fea12f682d747f282f863e490f20c5e52b04257c87",
+ "chksum_sha256": "d97c8ce284a0ce4462f35fe81910920e9df1b864d1789aeaab1464dc3de293b2",
"format": 1
},
{
@@ -2073,24 +3032,80 @@
"format": 1
},
{
- "name": "tests/regression/roles/sonic_port_breakout",
+ "name": "tests/regression/roles/sonic_acl_interfaces",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6023efafd474483d305c0a00be0cecf00e6b2b1f59597fff6648875ed11d3e23",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a7a9b02d62c55c9de1d8c8c3bf6e73a5a665b87fa4c39c680c22c7432120281",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed1a8509ced1032c4cbcafd67a756ae7149967ceb5c843d4a9259b82bd50b738",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2dc54dcb4c04c0ecfb6c7a8e2f76e31057b3390e767f57fcaed428fff64a2aff",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/defaults",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_port_breakout/templates",
+ "name": "tests/regression/roles/sonic_acl_interfaces/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19b4facf27771fc212fc8a323d019dd9084cad21b50091f3cb75e4a20ee74bdf",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_acl_interfaces/meta",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg",
+ "name": "tests/regression/roles/sonic_acl_interfaces/meta/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7d4f4ca7215bfc805c826f3d98811131a6b0d2a4edd3e7e7ebb98ecac9fbf9ac",
+ "chksum_sha256": "af0aad24caea6100f1eaf28dd660a9deb60f86870295a92c77ba0dc0ee9b92b1",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_port_breakout",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
@@ -2104,14 +3119,14 @@
"name": "tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7fa4dc0183d6c8a679bc13428976c0398b8a87bec0d1032fc6b9d29dc2e4a72a",
+ "chksum_sha256": "57668ca2a2ec12b8936b63a90584ed2e42cf1df540dee221c51c768869f62e33",
"format": 1
},
{
"name": "tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ee8893139926d4625752da4613cdf0615613a69ad298a5adbf58641af1249f1e",
+ "chksum_sha256": "aab1882c51c9960dce77b9cad7b7b248ece221b8579e69f2f686882d4252d12d",
"format": 1
},
{
@@ -2125,7 +3140,7 @@
"name": "tests/regression/roles/sonic_port_breakout/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "547f1a09ce0a20c3f9e1ad4b189618dfc6f260b23ddd9c19d29228d08409f228",
+ "chksum_sha256": "41e7d0e8c33bf1696cc52354a2bcc48ef506f7050b6a565f5fdbff17b7bf2939",
"format": 1
},
{
@@ -2139,7 +3154,7 @@
"name": "tests/regression/roles/sonic_port_breakout/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e9ba4af225692807f8176182acc8f195d083e9be5c9efa46b6dcba30c8e21f04",
+ "chksum_sha256": "d111a1491cb26bbad6bf5dc053cedb04a5da1b9b92c13a13dfc0f6123be2658e",
"format": 1
},
{
@@ -2157,6 +3172,83 @@
"format": 1
},
{
+ "name": "tests/regression/roles/sonic_stp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/tasks/cleanup_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42372bd707f2523bad1f34703d862ece8da9267ccc3e1ba874d610f6b3fb4e06",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/tasks/tasks_template.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d96cb1d38757a269f3e6d76f7bb9738db25b446b299fd023673372a8d8572fd0",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/tasks/preparation_tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be5ca34c31c98943ebfde9441e451e3d54426c8e1754a48b64b923c96cca6bea",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccef7685ab5396e8ede8b998f0729f20af1e9d9c96a4743ebd20d301981b41a0",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/tasks/tasks_template_del.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "162e1ee53ad29b43871e3319c81a8a897c3fa5e431c3adfa11225c39819c1b68",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29f3e4e7b69f48f417552efdef5d8c38df4d8518275287ec09d88f7d4a5eea56",
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/regression/roles/sonic_stp/meta/main.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16570e34d6f129bbec985a4ed0852051e0bb2a4007e8a9a73120f9a1d3ae876",
+ "format": 1
+ },
+ {
"name": "tests/regression/roles/sonic_bgp_communities",
"ftype": "dir",
"chksum_type": null,
@@ -2202,7 +3294,7 @@
"name": "tests/regression/roles/sonic_bgp_communities/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc406ecec44fdd088a078456ed4385c7df08875bbc1b12bc216a8ce5b88da5c5",
+ "chksum_sha256": "9f504a50c935ab1338be04bfa241007828d028dcf233efb52bfc38d52a639b72",
"format": 1
},
{
@@ -2237,7 +3329,7 @@
"name": "tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aa7d7ab0948506ca0f3bf4deba39bc70898dc04b0374a4cce76c8683b4017c53",
+ "chksum_sha256": "53b28c7b273239f77b18c154a7d2e2ae7a0c0e14e45b0461df31a3bf0a2b83c8",
"format": 1
},
{
@@ -2272,7 +3364,7 @@
"name": "tests/regression/roles/sonic_bgp/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a0496cb1c3516588ee23d24020b55400241530bfa142471bcc79d25a07064f11",
+ "chksum_sha256": "3b173415754c18435f3641ca991cf903ae28a1be2ecfd53e1fe25105695258c5",
"format": 1
},
{
@@ -2286,7 +3378,7 @@
"name": "tests/regression/roles/sonic_bgp/defaults/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ba7db7bb0225c59771f2c8809ba3de33ec974de3821518f70faea65cc24cdfd5",
+ "chksum_sha256": "5455f01f6a02095972453d6d292bdafafac6b7cc9b1b56b578ce12c3be65795e",
"format": 1
},
{
@@ -2318,6 +3410,13 @@
"format": 1
},
{
+ "name": "tests/sanity/ignore-2.17.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134",
+ "format": 1
+ },
+ {
"name": "tests/sanity/ignore-2.13.txt",
"ftype": "file",
"chksum_type": "sha256",
@@ -2353,6 +3452,13 @@
"format": 1
},
{
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134",
+ "format": 1
+ },
+ {
"name": "tests/sanity/ignore-2.11.txt",
"ftype": "file",
"chksum_type": "sha256",
@@ -2360,6 +3466,13 @@
"format": 1
},
{
+ "name": "tests/sanity/ignore-2.16.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fcb36eb462773c00b14c9e9f4db400f752ceed07d6ceb2a34833393f0b01134",
+ "format": 1
+ },
+ {
"name": "tests/unit",
"ftype": "dir",
"chksum_type": null,
@@ -2367,6 +3480,34 @@
"format": 1
},
{
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bcc6ed7c3901ca3bd4b6e501f9fc44e7cf0c7c5fafbe067f58cce4e0682f60f0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bc5679c48b6d6440fe6801c44c0447b63b27ef82b85f2f06fc39a342a4a80fb",
+ "format": 1
+ },
+ {
"name": "tests/unit/utils",
"ftype": "dir",
"chksum_type": null,
@@ -2500,10 +3641,661 @@
"format": 1
},
{
+ "name": "tests/unit/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_users.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84846412b1338f7c1acd2e7d2a112886b1170c948e1a8eba14c60499ce85f947",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_ip_neighbor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7812657b2836f8451e22b0e74d4d8a3fd849409b708b4706328df06aac8ed12e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp_as_paths.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3391da894ca7437bdc673ccf934a96b97ddbc8a611ce2940baa3b7bc2f324a73",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_l3_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33b9c2ed0478106edc13fee0311dbfee94e301863e4e00b704081fd8f0cd8c86",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_ntp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "461f62b920d3d9903399327e0236a40d642f3f41de10bad24c065caa11ec0123",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_vxlans.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b780f21666769f5acdaee027504a61b6a7ad3bbd77092b88a94756de7a3fbdc6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_l3_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77e6a2e12d251ed8675c30a637d68495599b04ae95d4fa09188863babced33ec",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_port_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df079336249208347c9a68f6abb9ce5b11aadd8b04cbffd859a7a19733538e3e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_pki.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b16780017d7c8ceaac10253705e1ca6e277dded24a04e1585d7e1386d5b4d4b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11a9890867861ddfa4da07ca2f5fa2a68292805c20980463d17201f2b05f1ea0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_port_breakout.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4c7d62fcf9b4c8ca86eea87afdabe14b8f475daabd2e121c50b8bc7256366af8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_copp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f0f89b4157dfbbac078809763286a6e3f281e6bd915047ae3dfcb1bd127281f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_aaa.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf0a17e40e07834b3502c4e9e2c30cc4e8f2b86b4a3dedb3e29f7fb00b731f26",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_lag_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57ba750e25dd6e8a05a62017741d82737a1b9abc29930c31fc460bb815928510",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_vlans.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3d2475c5a04830284dc3d6dabcd8fb50dbd1a214bdb383e949877b082e2a874",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9cf8726d21fb237aa68c4532b5a53ef1d769951e8bbc8ac50105a422c90a986",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_l2_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a59c951c23ab38b24b21adca3442344171974784c3b4fb4add0d7bacdd6e2a8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_tacacs_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8eeac0b91a39e401a518a11bf081d661b104c632a1c1e14c6e132a51432acf5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9952114bcc557fa8da116d22c23f69d2d4a0f9dc44a504f8eeeb15132cfe7cea",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bfd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "739c57e36a10c62b40c3ce2196229522610a1d2c72875321be62b896d20ed563",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp_communities.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2c97c5b00a38d94308290a62e142953e1cee142badda76d5da189f1e512d629",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_vrfs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "681b666257989b02b334c50d8b26de195ab78466d8743d71f3b70a34eb7de08e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp_neighbors_af.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebf961a4a357a00ec2af0bfa2b531900f77d7d97ff9b9d5d5123a3e0c30ad373",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_dhcp_relay.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b52a319b46255437ebe87481ffb3f1e24f7b21e5c70e8bfecc079fa07ddfb2d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_prefix_lists.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48e8b4857249158f91327b33c70efe2fac2968e2d3f697abe6aec91d1092fa5f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/sonic_module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "216071749200be4782c98d617334e20684f33475aa8a28caf2cffb681c0a0a54",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_acl_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "937d2af485c722c75dbff22f69ce1a225e3bed7868150c34a24d07fab152d2fc",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_mclag.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e99fc4f4929a873f6d0ff316e1e1514e8d5210f3d9db9dc63d5ad3edda67d61",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7618ea5a4d586cd5e28e7cb2b9b730772f5396510f5629bbbb940a92831974a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp_af.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fdeb4639e96d7558cbd7fed66ebf493b1bf13fd4df0f63ddd0526253318aebc4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_system.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c649de8273a577f48d3530293227900073fe8e4af4f4a20a0cda6cae925b520",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "731ffb9a14e7e8da385ea2bd25b198c8f6db21e27556f02ff0b6320eb07031b7",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_ip_neighbor.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3059428bfe7023a47725f581e44f8bcfd5db0cffed22ba734530b9576771ac65",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6675258e48c305b5b25d837decc58afbf8c39e59a814bd5b77b6ba9dd5f4161b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_users.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd9a364fec8cd983f0bec7530969585e92bf8c5fed3260985b3e2d1531b3e17a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp_as_paths.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6d192121712a806565c91392649f9bea0167d3f229011fe85f9f84b35bf7935",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6650884d87f736e09997330bc84209581348183479a30bd95945a78a0714968c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_port_breakout.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "afda0c3a42f25f93d64ef580fc8ddd3d8b0c54e59547407f2f15fb35b7b497de",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp_communities.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8730f1ccb2748ff19bc4913aa73be0805001e6294fcbe14bd1a5e35e493e1389",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_system.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c738fda63cc7b920300bc85f0146f4e2ac4a688ef3cd0ccb339271687e4b408",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_facts.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b86784ebea9530d30a397f672233acf8f17de1a8cabfa1b1ded87c87b14fe34b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_command.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73b43ad9e588b433190b5261a5dfbf78a154a91df67c1c77eae3b7733c5f7b62",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_vrfs.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95f59f0e3adf9657fcdc64495f2317738c597da8d6ec574f50dc722eadf985c4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_l2_acls.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92095adfe22cca87229adfcbe4deabf9a2d871e17f75e244dfd31c3b4828805d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_vlans.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2573b212e84c55195ebfd82647379c500405b798a417b449360b972e52a9d1b2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_ntp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b70ce5dfbde06d04873df25d4908a6c7e36118d48c76538a0801e0e317e36407",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_vxlans.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa577758b7fce46da14c389e9a2ba1a6068c91a4fa5d53060f45db7ba23a645c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_acl_interfaces.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d25cdc82397e9446992b46525f2bc86e523620702a2be242ad25a1945e9f3ba",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_static_routes.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56bb744374ae377a7919fa393791d002c1fbfe7d924aee96f66d4348d7f84e34",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "796d02dc7a6256963db342978cbdab85785548c6a92d7ed4880b06803a92f792",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp_ext_communities.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78df3938f6c6108ae4bbd61eb16e70fc47410895cb087949a1ec84dc2be31da9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_lag_interfaces.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db2efffba2efd2c664b7a11a3443cc59eb56eff0734ff53322700388247acd79",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_mclag.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ea9559192550e7e757b0083eb140fada8343dcb281906a6fa6a7dcd839a3295",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bfd.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1548b86305e7ded6b3c4e2f0122d283e624abf86830b756b006c6fe32083967",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_radius_server.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a10862f910d398696fd2687694e9d28c3f9094c8ff8bfa97f7c7fa9bc49558c8",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_api.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "738de456dc6edd2f20f53e12c86286a2fccf2049a8f05c425ec0cf9ace60d1e9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_copp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39a64c3a8ca40faabe77bf03911facf7ce79568d9a611408a7a2b291f804ec66",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_l2_interfaces.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bb6b1f86b55bfe2999b964bcd29dd9cdf045945645c6786727b230307ceb656",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_stp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9507fe7193f23d26bae4319cbc3c62db5f8d438152b2715f6f6b7a50d6e88526",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_logging.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c78e20bd32dcec9e218e3007d3f29be61e34c0f6fea38ebc397ea619004ce49a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_l3_acls.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc8142a851881703d2405496f55be7100fdce1fa7ac3c17a22a1ff02e9b419e6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_mac.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d96d40d02da28a8565988d54c7390c66e6ea3a0fd8888f60f0758d6de2188b15",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_pki.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec23f706faf66b3693d412cf2efc5fddbe208f4c24e10d74d66de9d18eaa00f4",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp_af.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe4eb640ad6306d6e273651552ac92c206ff3e926953143deae0dfd02d188216",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_tacacs_server.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24f036209532f7068430b1a17c7931645cd68041f67198ab1d541a2552ef9c71",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_interfaces.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05b5d6d6ef66f7fbe851ab890de6f8a47348be74a5eccd0b9af0372994da6996",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors_af.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c831b13f5a6a3fbc814cb08f2d31a8cb8b3d489c62e2822db02becc71a016183",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_dhcp_relay.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e3857d4a1c0bc982eb4c014935a5d45037aeeef489557a731d0684997143aee",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_aaa.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e67e6bfda5ee943c070db0918bb743e0ae2bcce5e552defefb31acb2460c607e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_port_group.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4c10d6c2ad330f39878c385b57318bceff8773787d93368a28ecb7fedad686e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_l3_interfaces.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d87cb1772f8cde12fa511dbfcdd891d65189b223b8fbb7c33aa8d90c34188fa1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_dhcp_snooping.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "265e10a30d2e2a0241874bed5e28eabf83c4472f9463111174305fd57e88716b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/fixtures/sonic_prefix_lists.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14bdd5088a6980f94a3c945095a1f6a72bb1271e9c91f2201c24596dd2dcc450",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_stp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c4fa56134678807051f50ea115676ef2daeb02e120a636e18e7e9317c9b64b6",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_l2_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ccf584d58fdef4f278c730af969b575207d49a0e5817aaad92353dcf5bc9278",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp_ext_communities.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee0111f6cf42d91d8b26d5c5a7d3b1ddf6f8ccc6a1dd35f9f0baabf3b5043335",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50ff06b123aa80391b948ee7c1cbdeeb5fb61ab0260f799f1c40effc474614c9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_dhcp_snooping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e87e4de6f76beb57425202461ae8e2f59150977855f6b27165531a90749f0092",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_radius_server.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b549fb1c24643a5b02c9b9db20d35cf0e3812baa83c964ff0909401cda2a91d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_static_routes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7a16699b24bbfec3c02574a0fd478865a314e1fe064921a57108071f7cc000e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_bgp_neighbors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cfbcb32220e63320fd084a4f5a854c8889d935741a60c8876a350d849966d68a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "070cabd8116eeb8cc6f41ef00bec60fdb775422ffc97963e7eac684f767028db",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/sonic/test_sonic_logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e16524f514cbf5915c6de662ac92d2ae7848f7b5e5ff9e51855a241ec3657de",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/network/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0a1f40b1a4bc36faa044960fe9008d75b4a70f5507c7ed05753c0b439df7c24",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/modules/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "909818cefd5093894a41494d1e43bd625538f57821375a564c52fe4219960967",
+ "format": 1
+ },
+ {
+ "name": "test-requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e648ca0b7462d007b274d16f011185c624c8f9fb67cbee0b4e765efac87f14e8",
+ "format": 1
+ },
+ {
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e9e65f5f05a2180c6335355f0497ff6ec97bc6fe7b58a8e3bd9c3442899cbf8e",
+ "chksum_sha256": "63c52092d1668009f1241e246365442da3db34c636131005008c2fa51c930496",
"format": 1
},
{
@@ -2517,7 +4309,7 @@
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b0c306452265a20c55ab0327c7a69da747859d5073f753fc2be7d3eaa675348f",
+ "chksum_sha256": "ab8b94b3c8e4b7230e515d3319300f6dfde08dbcbf2b985eb0ca9829585fa10a",
"format": 1
},
{
@@ -2528,6 +4320,685 @@
"format": 1
},
{
+ "name": "changelogs/archive_fragments/2.1.0",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/149-l2-interfaces-vlan-trunk-range-support-for-playbooks.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6e4c2fdb9d5c7cdd73282f5ef857f9248157ea1afbf9ac598997ea4b4c79fc2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/188-unit-tests-for-bgp-neighbors-af-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbda753887c058bbffda47d36b052778d193af72b9d22b6c26bfa0c98799f444",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/245-enhance-bgp-neighbors-unit-tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3cbe2950a2a8b40bc72cc1fddbb8c8c84dc9b9865979f246680f0a956436bd4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/200-unit-tests-for-l2-interfaces-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f4b7df2958608897f7c217785b8e19895145355ef521ec33ec70e53cb9e9ead",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/228-unit-tests-for-port-group-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "636a2dbacf8e8931c5c8c45c0ef3b3b93f271b5445b7a687bf1d91f9c9f4172c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/v2.1.0_summary.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad89753a4aa93b1b71edaa6a2f817e2790449f4192fff2c820836c4193e26547",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/235-replaced-overridden-for-tacacs-server-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0b9229139e100ad027db61f523d305be728c1ebe915cb7828658f2acaf800a2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/249-dhcp-relay-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b34cebeaaafefbf834d44d4aa0549480211c982eda28a76be83c4ebf4b4e7070",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/119-lag_interfaces-port-name-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cd47affa17ae646beba22527a09c22668db181e0ea474b5f7abab9b642d64baa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/145-mclag-new-attributes.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e72f9aa77ba6204a04377118c78684e151f23812608dea99df827fb7036a74a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/224-ntp-clear-all-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9721c169efddea3936e05db66f522003b6585ae1bbb0cb0bef6ac0967de0675",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/255-prefix_lists_replaced_overridden_support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a87c4aff2c8d8813ef7c4e0b84dd128ae72cfc3e8426d9d759b10b6273a0de4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/226-unit-tests-for-logging-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665275271a150f45e40d5505b25a5ed3ffec2472c4cc44fd0470aeb42632a096",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/197-unit-tests-for-interfaces-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e322c453439484acaad04090adac3d1b2f90627453a8ef1707d0573bd31f4aa1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/258-change-logging-module-source-interface-naming.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72bd5a409e4cd1fbbbc0f3395dd3cda03b3b3aeae4f22c294cf571682926f182",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/242-users-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fa085731aaa5a0cf449dd73c2afba93ccdb9e4bbcf2f18ede632b409202153f4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/202-unit-tests-for-l3-interfaces-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5448eb086500b75e6d71a640f7c201446aa98a0871563500ee084b28f609253",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/209-unit-tests-for-prefix-lists-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9f1ee74a0be7ed27fdf3ecb79cd1d2201e14bcf904283f19ca1782e73335738",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/199-code-coverage-workflow-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dcde322b4ae9a703af34466f3aa143d6b4a559eff70b1fe200890bab5799eb1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/182-unit-tests-for-bgp-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bb536c5e5bd115cb0094b4be001baba0730b14b0fdfd32d5a31edd88264d74e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/128-add-several-attributes-to-interface-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7bd7c3765cfd44e2dd5557605ca063d2f8142b20fa2caef01557c7ea0f2072a5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/247-vxlans-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74c1ffa7f517c2eb9a12768cfa1ee39b1957c5dd61fdfdc1a7770d0592853a74",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/106-change-ntp-get-fact.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a8c3eed258826c87638ac712bcfd911cab6b662115564d6887d11e9a87ed3014",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/250-bgp-as-paths-fix-merged-deleted.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51bda25ca1035432edb4b60d2b8e803150597702349d20cb10399b392673effa",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/152-copp.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c27ed054e1e351e938d1055126364095b3a43ae67ff814bedcf2377597039a38",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/216-unit-tests-for-vrfs-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7185931f582405c7d74d57f36dd21928e2694e38f3af12a8c54e65c69750ca0",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/257-vrfs-cli-test-case-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d7ca4f37bdec6ce43df458ed8689c5b42a1963d3660255ab2140eb209169fee",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/237-aaa-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dba1f6a9d1573a404ce5fb2490df19da0f842840d36ce58d95cf26f3eef603d2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/124-l2-interfaces-oc-yang-vlan-range-format-config-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c2362d254f162c927a2269b15a65daafb4156d8fb23c6f3ce475ecd35bd4b7b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/186-unit-tests-for-bgp-ext-communities-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1832d0ca05bc879e913a3261b786a38d43c682536b6e423f16c165209ac901b4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/223-unit-tests-for-system-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57a5cc5e12871c831308635c58356df3e38982165bb4d3f017e8bfa15cf9a60b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/196-replaced-overridden-for-lag-interface-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5063fd4d27dd3187f30a0e1a574fc8c298c56e2550afd2e7e915fe2a819f3947",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/176-update-netcommon-version.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9a821830e86a4bf0da137da744b302c8fb9223f04096090c15d6ea9374a0fdc9",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/159-replaced-overridden-for-system-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a8e923f9cbf1c938e45d922fda741356e3275bc2a310d2cd233424eea2023cd",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/146-vrf-mgmt-bug-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c69085bf2db0f7c29b789bfcd52595c2181f1162128551a847b1c8ccc46eb434",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/177-bfd.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4f3e29535d1feccbe044dc95327482cc3cfaf6a4449b1d43e870004066f04d2",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/253-change-replaced-function-for-ip-neighbor-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e61759122b7322f5b7d5b3c0c51f8189cf69c2196e545f66fb3192b2c548227",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/210-unit-tests-for-radius-server-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b8911946d8327f60b69093cda80e84049ebed28fa5c8a34ef29082d7f46ab8d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/129-ntp-minpoll-maxpoll-config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ac8ec33b7da98fc6ffe4cb793c169b78bf9ed8cca4888e54c1781fffc0269df",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/233-bgp-neighbors-defaults-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e71284112a80fbfa412dca48386fbd939f13bce3cca173efb0d8ad2af63b18eb",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/157-mac.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad0c0fd7f4668e5bd0979f2c23fcdc4ad4d107d55e344b99585843978bb82437",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/229-unit-tests-for-port-breakout-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "631fcdb3ee9ec6a0b576af5ff4cb42ece858c0fd5d010082808050f0b51b3b8f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/231-l3-interfaces-delete-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53f9b152c9d62551596480f741a1efc50ec2b9a6d24fb7d16f2b75beb56eb7fe",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/150-replaced-overridden-for-logging-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed910e2599398dc3ccef9de7648dd2e7fa88d6979b15c10380fd2d0ecb87d3e8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/217-replaced-overridden-for-vlans-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c48a9f53c3307d9225eabc2c35641e66f663fb4a2c04846a91991fcece7bf05",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/180-regression-failures-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db62984e703dd22b1eeb4dffc112a1f80396ed3245da82b25f6a18dfb19e3e8b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/261-interfaces-timeout-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98ff91df7bbe6e9f5be04b63e3977eb3a4cc57b82f87658b4837e1eb46fc8234",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/191-unit-tests-for-bgp-af-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a898014ab3470844efb2821bb32ad2c33352cc36ee78887bd29975f6544850b6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/114-change-ntp-module-name-in-regression-script.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a5fc5498935e33885b3903f3b3cfbc405bd582b08db5e70b04b7580982cfa64",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/220-unit-tests-for-config-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d66e028d2bb3e6eed5008e7b987c52960ab41dd0b083b763d904aed9d85cdc6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/214-unit-tests-vlans-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51250ebbb062d1d53b546d5971d4ec07c4540799538c8855ffab9763a3570c86",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/227-replaced-overridden-for-port-group-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8aad90aeecf09994a6fa9b782646a3372f17ad551b2e6ffa25f33d031c1df8bf",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/107-change-ntp-key-values-in-regression-script.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d046f0dae04676361731428d29ceb848553838f78d6282aa3d77d9c39367643",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/156-replaced-overridden-for-vrfs-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e345d0b0c85ec6925ed46e8497a998b5b0f17f35acdff2eb8978917e3c6cb118",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/172-module-utils-import-remove-empties-from-ansible-lib.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc4f42c6c1aa3adecf1486b29c1acd9c8c25840e264834dc4c4fb6c52e05e4db",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/215-unit-tests-for-vxlans-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b220e64d0d4a4024d658176ac83c8042c675ecfaea4dc7a150e641be6a86bcae",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/195-aaa-login-authentication.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "acbf365f955ed27f745684452ee5e0600084ac3ed80a1b107ca38311fc1c5941",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/240-bgp-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5f79d958dd00ed8dc62bec7d0230cdea640aa28619c53163b5a941de10f9255",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/203-unit-tests-for-lag-interfaces-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ed1d97cedf9ab3dafeaa7212c05762d27def6994812013531c5be33b2ac686d",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/184-unit-tests-for-bgp-as-paths-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d158692f06482f8f2da4e9c97711f69dba5459ce9d6e8a66ea1c821b950400dd",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/262-vlan-mapping-bug-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1404acb34e5e7ae4dd6a8db1d30103b387ebf7f73e5a0d4ddef08f914fb79a73",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/130-vxlans-attribute-restriction-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5f19152d3186eea17439f4f5375cc0625a6659372814ac328a280c68a9cb09ea",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/118-add-ntp-prefer-attribute.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "619ab15ba2b2d4a8b88e888c2e30193704cd92e9baa88bb0201ed34e1674ff2b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/246-bgp-af-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e0c00649384d85f08625580adb0256e53366a66d25cc692a1b1d3b417309900",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/198-unit-tests-for-aaa-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89680addb1e277c7b6dbab77959afd5ebcd270cc9eb28c28ba730513ef249aa1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/218-unit-tests-for-api-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3df9e517eb46ad5aa504c77e3930c9a841ac6aae2b2fadf27de76622eb8a4c59",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/213-unit-tests-for-users-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f4910e4e2a46a65242d8c341da84aba1eac100cd8df9854bee4965f50e37147b",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/225-unit-tests-for-ip-neighbor-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a90d443bb531e3ed64fd3ad361603997111b8e5a9fec650287839b0e23fa9e3c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/160-sanity-check-errors-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da281479af1eb6633dc78ab6574ee79cffccfa8b508ebd75d299356a6fc11d80",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/151-replaced-overridden-for-ntp-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24ffff39364be20efcef0aca74e15cfafb67c3e55286384480504829115a8246",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/230-vrfs-delete-interface-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1433f40b00b40f9c4af805e6f40bac1a3df790234c4c10103ef1393211ebeb80",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/212-unit-tests-for-static-routes-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40f906a73d93fd6b9129ee2f2d96be9bb5ae11f43d9431154d963a6ac7e569dc",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/219-unit-tests-for-command-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "441c47066c728308b7abe67abd7a0d9057b54bbfa4dee006122e46745da0001a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/236-static-routes-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9c8cdf4e9973ea0c72401ffba3e98e2893b9c9afa403df105a8f5f530e9e279",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/208-unit-tests-for-tacacs-server-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "731ba989a8324e7b5942791d245a14fbe20c266850f60224167414e394a93332",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/185-unit-tests-for-bgp-communities-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e94508835f3c70ff46f91425b681a7e6f859769cee767551f077294828e33c8e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/207-unit-tests-for-ntp-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3504064c2344478ace44a12fcd4bd8a1d1663dffa94a4cf21b2baec5809071c3",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/141-add-route-distinguisher-target-attributes-to-bgp-af-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84c6e756a913a408c494ea576188c522e0e8854661198522406c35868d86ece6",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/183-unit-tests-for-bgp-af-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05bf36ee34d1e7c42fa5a002d143a276ff906c1e2e23e35a13839af8599accf8",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/187-unit-tests-for-bgp-neighbors-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "370bc1025deed2c8ff1dfb133975d221bba4f40b8881a438bfefdddb472d8b9a",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/254-update-replace-methods.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "314d0fcc5f6ddfd8f5de37b9df220dbf86dcded0ecb7ff9fb7e9e2770bde8cd4",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/244-added-rt-delay-attribute-to-bgp-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b23fc4a7f395f43d295fc60dc2d9203c6d6b6a25b07cc02c6353861456556ad",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/201-bgp-af-modify-vni-advertise-fix.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2342c0d30118bf95257c8b1889120d717ba02d1ac066fac244d117366b8caec5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/222-unit-tests-for-facts-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "560a1f617c149644afb38c2e1cf75b32818907a1392f528073e03569c742e2da",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/148-dhcp-relay-unit-tests.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a631b02aaf412fdd573609670d6c60c3c986c9a79d0dfb9b6e633e4cd51ac17",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/239-replaced-overridden-for-radius-server-resource-module.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "504b9a4723eb0deb6a324610e3abf012bc3c31ab512458dae200f3d346d71cee",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/113-change-ntp-module-name.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "409717d508bef1f049748409a5c72fdf98a2ef2714ef2576a39f008ac5564a1c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.1.0/221-l2-interfaces-replaced-overridden-support.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28203ff8c962f41793cd0db321fafb6aa4b2e8ed38f0f5b207fa1cd36ec9a56e",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.2.0",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.2.0/269-revert-aaa-breaking-changes.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c23c77698cd627620c741612f71b72f13f43dd592b438e1dc883b8b326129c36",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.2.0/270-galaxy-yml-netcommon-and-version-fixes.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfce8e1ec5b130da0ed6354942be85a3532f5bc1d28ac2c284872e3323dfbf9f",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.2.0/v2.2.0_summary.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "20b675a4e33fea199634b3b8b1323bd02766a90d536fa2b201877dbdfd8f841c",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.4.0",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.4.0/v2.4.0_summary.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "164f388ec5778b9cae6a02fc8625e6ddb43813500768aef1b5e12a1222a754a5",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.4.0/322-docs-README-updates.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc73da3eddf77cb1996c46f37f0f413c41015754f84eed2c7713ac3804b24e93",
+ "format": 1
+ },
+ {
+ "name": "changelogs/archive_fragments/2.4.0/321-requirements-update-meta-runtime-ansible-version.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b828ac92681ac287b6c82845e5ad7249e6fc30a12474a856e58f905b8cf4307",
+ "format": 1
+ },
+ {
"name": "changelogs/archive_fragments/2.0.0",
"ftype": "dir",
"chksum_type": null,
@@ -2689,24 +5160,24 @@
"format": 1
},
{
- "name": "changelogs/CHANGELOG.rst",
+ "name": "changelogs/config.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5c07bc10602bb49ad903484c5c58ecd1bedf2197a6f320da391592823b891cfd",
+ "chksum_sha256": "399e761b5051610f04bd2c4f9ebca480a3d6e8a2d82e8e60ef2211a825aaa682",
"format": 1
},
{
- "name": "changelogs/config.yaml",
+ "name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "85862c6553f33777c8fd2f7fe3cfbe2d840deafc202b404052c6c829fb64495e",
+ "chksum_sha256": "aa104b57cecdbf13eafb305fc7abc66551fc3413da9c623effbda9ac2eaaa267",
"format": 1
},
{
- "name": "changelogs/changelog.yaml",
+ "name": "bindep.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2ad2a65538878b7704f2cae149e38beebaf5bc92d3ff7d245b6dbd9ef4762877",
+ "chksum_sha256": "80645079eb025b3a905b4775ac545d080a3d7d35d537c31e04f7197c94315ab5",
"format": 1
},
{
@@ -2717,6 +5188,55 @@
"format": 1
},
{
+ "name": ".github/CODEOWNERS",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6da2c1b1fdb7763d55fe592e9d55af7ac8b87b7ea896b57a4d28e0e66f44d4e",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/feature_request.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ffbee443a3f80c91eb59268069f8d01947cf0f1b8f45342026ad9224d2b998d2",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/bug_report.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11cae2d85e28f56cb5ae8386e3167f75dbb2ea6f09cec0bb87fb6f5a112bf0a3",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05905c8f244d51298bbd1778c286c8a6c9f7adf0d0e5a5f72f764d71ec82cc64",
+ "format": 1
+ },
+ {
+ "name": ".github/ISSUE_TEMPLATE/ask_a_question.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc3938f48861224f03b9f2a71e63f3329e3161506451c14b911aa0ef1abc201a",
+ "format": 1
+ },
+ {
+ "name": ".github/PULL_REQUEST_TEMPLATE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67476fe5a2ebebe213883092cbc25295e0bc1a4f553d6a1805fe4f77d03c763b",
+ "format": 1
+ },
+ {
"name": ".github/workflows",
"ftype": "dir",
"chksum_type": null,
@@ -2727,7 +5247,14 @@
"name": ".github/workflows/ansible-test.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0dc2fd66d9bb583a726caf8281f456be2130992b6f93d3155a7bc1a711b855fc",
+ "chksum_sha256": "14382f9d48e55607f50d5a1935a3dafad3aaca829a7542cda50ea2495e403eda",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/code-coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3838cf11c8377683180dfaba3186e81d47715578f4bfa6f37de779dcc34b10c9",
"format": 1
},
{
@@ -2892,6 +5419,13 @@
"format": 1
},
{
+ "name": "playbooks/common_examples/mgmt_vrf_config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98796ef0a1de9da997b16eaad459d6f5408484928a356062e151fe315247c6e9",
+ "format": 1
+ },
+ {
"name": "playbooks/common_examples/sonic_api.yaml",
"ftype": "file",
"chksum_type": "sha256",
@@ -2906,6 +5440,13 @@
"format": 1
},
{
+ "name": "playbooks/common_examples/mgmt_vrf_off.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84bf6e04674e215b9a7b24e45ddb53f46e1726a7b7c0d5317819251b95d55e0f",
+ "format": 1
+ },
+ {
"name": "playbooks/common_examples/patch.txt",
"ftype": "file",
"chksum_type": "sha256",
@@ -2913,6 +5454,13 @@
"format": 1
},
{
+ "name": "playbooks/common_examples/mgmt_vrf_on.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb6d7f0c12f319a4070d208282d816ca7f58bdecfb8b1ca15a52cb65abaca6bf",
+ "format": 1
+ },
+ {
"name": "plugins",
"ftype": "dir",
"chksum_type": null,
@@ -2972,7 +5520,7 @@
"name": "plugins/module_utils/network/sonic/facts/vxlans/vxlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c0ce8d53537f8d62d93cb6eab94f613269a9abb3fd6089a251344f29ace1581",
+ "chksum_sha256": "9c862191ebab05f72bc18cdfbc4d489873391fd9eaa3a46cfb8c28714b18f8f4",
"format": 1
},
{
@@ -2986,7 +5534,7 @@
"name": "plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "175568e94084854f2f4338d76b8fe62871258d200dd422db6b77998a1b534c6b",
+ "chksum_sha256": "3c85acbaff4badf880e0cdbe5b03d9df7c07e79cb96e337d65eb2f6e813acf7f",
"format": 1
},
{
@@ -3000,7 +5548,7 @@
"name": "plugins/module_utils/network/sonic/facts/facts.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1ce8bd508d419b7a1c80302d56ca3b23b30f0dfaaca99fb5af06b27369077f6d",
+ "chksum_sha256": "6944f26cecc9afdba32f35ea4bdcc09bfd5a74776b1d5eb334e8c9dcd515c22d",
"format": 1
},
{
@@ -3011,6 +5559,48 @@
"format": 1
},
{
+ "name": "plugins/module_utils/network/sonic/facts/mac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/mac/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/mac/mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c15d6e9c7984e7611729fbc059217440e98685e890fb8fe0401c599a01014864",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/dhcp_relay",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/dhcp_relay/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/dhcp_relay/dhcp_relay.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e1375b8462f7377f5f0092672377c8e6f5599fb5f367730f60e39a9f3c5f927",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/network/sonic/facts/users",
"ftype": "dir",
"chksum_type": null,
@@ -3021,7 +5611,21 @@
"name": "plugins/module_utils/network/sonic/facts/users/users.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aa4a5a009e2bd6b77a97c254e0f7f462fe556d08028902740e9707f8804bbd13",
+ "chksum_sha256": "2bbf09a24dff32142bc9f77aba5d71f7dd3f306708398af3b75d9a4cb1b62e9a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/port_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/port_group/port_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4366c3540cdf6cd7aa5caf8ff2eed4b74d4c5c63be01bdb6829ebead0216c0ba",
"format": 1
},
{
@@ -3042,7 +5646,7 @@
"name": "plugins/module_utils/network/sonic/facts/aaa/aaa.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ef4b054752c855757ba2727a9838b5aa61cf88699765c9b842eecc9727048ff5",
+ "chksum_sha256": "d64144e0152dcc788ea4cb146f9c1bba788db714b6c89d6a40cb6a36edfdc1ab",
"format": 1
},
{
@@ -3063,7 +5667,28 @@
"name": "plugins/module_utils/network/sonic/facts/static_routes/static_routes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "78d654467abcbf68095d79cb59c29ba6470ca4e5ac426a9595b7e3f37fc931ae",
+ "chksum_sha256": "7deeafd0ac091d6bfe972837242d884f6e3b867b0c67931011eb432dfa740c22",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/bfd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/bfd/bfd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3cb8e8dbe2178894bf4fba9a857d01f645ee441f855d8fbda9b8315c2afafcd",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/bfd/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
@@ -3077,7 +5702,7 @@
"name": "plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "494bdf331a5806f341f0125b6ba46ff888a935c4aff4d5b896f405764fc59a8d",
+ "chksum_sha256": "8cb702778fb2dac4785484630f9a02a5951fc6c75031669104a9dc50e5fddb3a",
"format": 1
},
{
@@ -3091,7 +5716,7 @@
"name": "plugins/module_utils/network/sonic/facts/vrfs/vrfs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7ed445c1fa06bfb70ed56605171012c5b5fd9c13b1f0cd3fb81af6214ddc3ae0",
+ "chksum_sha256": "17f84db9762528614e53caf903b7ca82ef0ac0a3e565a5539ba4aaf20030da38",
"format": 1
},
{
@@ -3112,7 +5737,28 @@
"name": "plugins/module_utils/network/sonic/facts/interfaces/interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d2eccbe6d74967f1198f8d1b5858a259a09850226ecc4d940d016be5e44b6e81",
+ "chksum_sha256": "37b7058bce770d9c628946a2133a340cea934ca5c2012cbd10ba17856cb68735",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/acl_interfaces",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/acl_interfaces/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/acl_interfaces/acl_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4ffb0de7a16618e7963992795aa35cbb4a5a0c1822eb7d2384aefaa433caf1d",
"format": 1
},
{
@@ -3133,7 +5779,7 @@
"name": "plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "41c01d93815ca5cbb62cd6ac2918041e5a67b792d11dfe291932f0f64b2ce4e1",
+ "chksum_sha256": "969f2079fc54fba0dc76eb226b29b1b2c1b18c5ff59520fd16cbb48463e474ac",
"format": 1
},
{
@@ -3154,7 +5800,28 @@
"name": "plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6322ce72c60c1098c7b04ba2c05dc2be5c70ff1760373f131005369995efa440",
+ "chksum_sha256": "d7e93b3c900c29ad02a24d359296ccbfc34fdc6b9e83a985e4f9b0eec049733b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/route_maps",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/route_maps/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/route_maps/route_maps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55aa98fa6e781d27bc08360923649f9e26e3df85382dd93d21ad2e64752f5d98",
"format": 1
},
{
@@ -3175,7 +5842,7 @@
"name": "plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "abc6c91a997dd3a42dcac331eb3d7b9b8ffc91fd2d80dc389752ef8e5d7e9d37",
+ "chksum_sha256": "feea671befcaccf560df99f816f6e4b71e507bf8b9d69417bbc4f0c61128b356",
"format": 1
},
{
@@ -3217,7 +5884,7 @@
"name": "plugins/module_utils/network/sonic/facts/bgp/bgp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "48d0915a4d169eb7accb4d921fa2956c2019e7e757fd4747bea093d2c6637b4f",
+ "chksum_sha256": "cead6cef11f16a576d894cf9dd7f349bda5760ddb3edae7d9e9b3221315cf69b",
"format": 1
},
{
@@ -3238,7 +5905,28 @@
"name": "plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d7daff29f60266f474a5ecb39569367146eee672c8bc7b5882cedd713d7c32ef",
+ "chksum_sha256": "2a9b1228cee27b451e6b4658e7412468550a3cae76687b4bcf1218ec32be6bcb",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/vlan_mapping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/vlan_mapping/vlan_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "daf715775a622cfb63723d94c7693fb1c8113dbe6c2a4b0cd317737d1c0bcc40",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/vlan_mapping/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
@@ -3259,7 +5947,21 @@
"name": "plugins/module_utils/network/sonic/facts/ntp/ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c8142bfefd6b7392123b7785a0c90ed43e6bf24ad4881fbd6d1df42c0c43fa27",
+ "chksum_sha256": "4ce5751969c185b75ee3060b20529d0c75c09ea03383f8db670fbc98153d9d93",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/logging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/logging/logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "310fb9044fd61b3269adfabc4d95d510a362c69348bd39a571c048f1d71863a7",
"format": 1
},
{
@@ -3280,7 +5982,49 @@
"name": "plugins/module_utils/network/sonic/facts/system/system.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7716880b9ab1847ad3a9a4c5f70fdfd7176c33853d4ee9b4711d94bcbf51cc29",
+ "chksum_sha256": "6e6988fabfec74823e24990bc41eaf2c2e6659967c5876d04c4a44e962e891d6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/copp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/copp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/copp/copp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47dc1005584c5da56b289d8d85e00db4198eebf640596986f40eeaad0092538d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/l3_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/l3_acls/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/l3_acls/l3_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ccbe0a9aef7b0f3fa0e09cca865cab8e49933a3e7ea9005ea6e7ee5bed042d9",
"format": 1
},
{
@@ -3301,7 +6045,28 @@
"name": "plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7f2c4b27f8142c32eaa6219d00b261145ba5990b73ee96f2113ce3b8714b44c2",
+ "chksum_sha256": "69777f5af2956b007c936f2a89cdea350d91686af68c5e9a15da6f51248de7ef",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/dhcp_snooping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/dhcp_snooping/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/dhcp_snooping/dhcp_snooping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aecc4b601e9501b5a5f6a041ec61f668d6a6fb3debfcb9f2023282c2f3c150e2",
"format": 1
},
{
@@ -3322,7 +6087,28 @@
"name": "plugins/module_utils/network/sonic/facts/vlans/vlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "571ecaf2bef5d15c91f39a0b22922282757d9b32305d4221e9967815bc195a39",
+ "chksum_sha256": "9757329e83f21738a279cbdb34b04deecc29a2372186d82d88ecf70948613612",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/pki",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/pki/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/pki/pki.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65e4c4aab7c87ada7baf0603092cbc0ce66a7bee5b7921c8a182dff9b4b1207f",
"format": 1
},
{
@@ -3336,7 +6122,7 @@
"name": "plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f8c0dc71948a994b45b2ab55601891f26665cc538b23a01e55f8f947afc859bd",
+ "chksum_sha256": "9580d7d41d7edcb682e053789f2e2f47f9c9cbf6c9f25a93c67f03e2cb3130a6",
"format": 1
},
{
@@ -3350,7 +6136,7 @@
"name": "plugins/module_utils/network/sonic/facts/mclag/mclag.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cd027811ad981ae9b946f7247b648baccab1cbc9b877784ddc9cdb578fe68cc3",
+ "chksum_sha256": "94c2015c4b30bf3d07617687419e189eccda35c5919454806f872eb15986a0e1",
"format": 1
},
{
@@ -3371,7 +6157,21 @@
"name": "plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58292688b6f354c7e19bfc59bdcc91f76fc68cdf7ce4e0968c36ba0fde3e7334",
+ "chksum_sha256": "e67c382ea7b79cdb4204d2b5b6febea030b8d00951a571d1ba7e539d7a84af4d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/ip_neighbor",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/ip_neighbor/ip_neighbor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75c71dc60e69d40cace6257acdcf91c713e9d69f1a9d8721609dc8a739ed9ff7",
"format": 1
},
{
@@ -3385,7 +6185,7 @@
"name": "plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58fb25ed857a13d6af43a2609b90f09ac135694ce44997fd428242815e145b5f",
+ "chksum_sha256": "1484ae89f8a19134c1c397d6281d9d0e9765a7013b3ac9eb64bc74cfb5acdfd6",
"format": 1
},
{
@@ -3399,7 +6199,49 @@
"name": "plugins/module_utils/network/sonic/facts/radius_server/radius_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a3f05a591387d9c24cb7dc066621ac75229b9fb5f871e165546daea9fa2d6a87",
+ "chksum_sha256": "d87aba748530b8484b855bb3022d6d0bdc94e02c83ed546ef339631f1c63989d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/lldp_global",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/lldp_global/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/lldp_global/lldp_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c489b523367dc46bc3dccfc9a44effbca57b4781714c57f823d1efcef8139411",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/l2_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/l2_acls/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/l2_acls/l2_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4def1d2f7d9301f2eee96f109cdbbfee2cdf9d12c7c18bc6f1856f2a8f576ff",
"format": 1
},
{
@@ -3420,7 +6262,28 @@
"name": "plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "35667a83682027b3b6051ab7729a43847de0092318b8645a4910b9b2ac003d39",
+ "chksum_sha256": "4d2b35a3c7694851868e776b829113d94c9e807ce393e3ab4ec41bba82d47bbb",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/stp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/stp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/facts/stp/stp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34c1e2f946f6486080bd39e48598a1173215f7766eea1d1dcd5dcd5f18ef5131",
"format": 1
},
{
@@ -3448,7 +6311,7 @@
"name": "plugins/module_utils/network/sonic/argspec/facts/facts.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a4e95f45024b8de1d0898d87a33fa01311168b82f0de6ec7efa012b927f3fe08",
+ "chksum_sha256": "8340af43f5a7bef0aa242a2bc355c9352110450146acc6db9db89484f22556ae",
"format": 1
},
{
@@ -3476,7 +6339,7 @@
"name": "plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "de5a9c713994b11f97572c67af335fef9ef883c5ae28bbe8b3d29509f1479708",
+ "chksum_sha256": "9955ea45cd45213636339519cc74f807f10dbeb0f6a9191750a7ef4a684fe992",
"format": 1
},
{
@@ -3490,7 +6353,7 @@
"name": "plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea892c0cb638d806819a81b730c8bf272d1d22f5a785feab6bb7899d5d8290f5",
+ "chksum_sha256": "c1282954f966ff09c49902e13f4bb40020baa893605b8b80e88ef1a9cbee2b0c",
"format": 1
},
{
@@ -3508,6 +6371,48 @@
"format": 1
},
{
+ "name": "plugins/module_utils/network/sonic/argspec/mac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/mac/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/mac/mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5dc85318a649550e94af47943ae3256b6243f1291feb47f41c809c26dd47e9f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/dhcp_relay",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/dhcp_relay/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/dhcp_relay/dhcp_relay.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90249eaea49f761822bb5aea2fa18fbfb6da80111c220d1715597dc6ca72e814",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/network/sonic/argspec/users",
"ftype": "dir",
"chksum_type": null,
@@ -3518,7 +6423,28 @@
"name": "plugins/module_utils/network/sonic/argspec/users/users.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e7016795dc961f47c1363b62c733419ed2f45bb51083b8c13a9cdcdc78393b80",
+ "chksum_sha256": "a85cc07e6862626bbcce6606eadce7d06cda92451c04de84099aa7cbf3464f2d",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/port_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/port_group/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/port_group/port_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f80a2047cd7e0f4d1f25768caafc926fe70c75dc0ab703743705bed4aa19ec2d",
"format": 1
},
{
@@ -3539,7 +6465,7 @@
"name": "plugins/module_utils/network/sonic/argspec/aaa/aaa.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "099cb25564ace85aff3f9b665f0e8291cf30cc566393aa73fe7fd7495adb75b5",
+ "chksum_sha256": "61219ac1fa1c852ed1662fcea00fd733097880ae437479ec83fa38b702fcb21e",
"format": 1
},
{
@@ -3560,7 +6486,28 @@
"name": "plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "664fa1bfe5e35df445395c8d97c3664d61b3616e46a1e566dfae8dffe5d3cd51",
+ "chksum_sha256": "527115990932ccc7e3d8e52491340b29d092ba91833b82a173dd812d58ee292a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/bfd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/bfd/bfd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a03413305d8eb78e576fbff71f7aad402c9138ec077d29510d0b006b527b9af2",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/bfd/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
@@ -3574,7 +6521,7 @@
"name": "plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ed14043c2d3655852bddd40ee00d07bb3731e109d738a8df15cdfe8c859dfcc3",
+ "chksum_sha256": "386ae329105705ac5076412c975ef753154cdfc4176344f583d2b2623f0b72a3",
"format": 1
},
{
@@ -3588,7 +6535,7 @@
"name": "plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ac08fa8db8afd66c4a6856700ca38615bcf8b8fba6d712981493f5452ea61c62",
+ "chksum_sha256": "e7397f371379fa70a1f68e0a9de6875a894264b3bfc48bc04a0eb41d908c2214",
"format": 1
},
{
@@ -3609,7 +6556,28 @@
"name": "plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9858adae2a8083270cc1d513b77fce9fe93c9789c5968c8e843b843ed940bfab",
+ "chksum_sha256": "04e92b4c1ceb5c318e76a933dd5c08a7c05a009d4a40038d076e43169c70d56e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/acl_interfaces",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/acl_interfaces/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/acl_interfaces/acl_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b5050121018f557f0d1b8f6bc81a99005be8dbec77c27fc8258bc82d57023a4",
"format": 1
},
{
@@ -3630,7 +6598,7 @@
"name": "plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "64e16da1a2cc3af929d265a73536e9ce637861ecd8723c0043bc7536ca9acbba",
+ "chksum_sha256": "c1c69b72962c5030db0e623ffe66d51db1b0456cb87b8c1dd1815b011c58f650",
"format": 1
},
{
@@ -3651,7 +6619,21 @@
"name": "plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "880c709b98414d38453eadce51b2771ed5d6c8bdd3fb6cbde45b60191e4568f9",
+ "chksum_sha256": "e6ac6392af89a494fffb2c9af04d5f1441fbf3e0f2d15bb9caeadea480ada9ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/route_maps",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/route_maps/route_maps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d8538cc2bd9211274678c40cfc2f86a39e41fbd5710d1a89af8124bacdf1e04",
"format": 1
},
{
@@ -3672,7 +6654,7 @@
"name": "plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "980ce16a72689da30138007e45e755e9f32b020769e884b8b856505acf8d9863",
+ "chksum_sha256": "fa023f3538e7a1238bbd9fb04648264040b6581a70f4e6b842e6a63724a799c9",
"format": 1
},
{
@@ -3693,7 +6675,7 @@
"name": "plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3047173ed7946102928cd2b23bfd7117ad4574d86f2a6330bb5a74ffaccd6368",
+ "chksum_sha256": "1fd06fe051d5e2a5adf9385c8fe1333cbd9cb953734017b435b0b5a98f48493c",
"format": 1
},
{
@@ -3714,7 +6696,7 @@
"name": "plugins/module_utils/network/sonic/argspec/bgp/bgp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "28692ff70358779f277a4bb9f5c2dc2b9b53ab4288b2102e01ae5394c4a12cc9",
+ "chksum_sha256": "e04590a9c664009fb627e95b787f245b873db928bb14ae9556579c8f60a53422",
"format": 1
},
{
@@ -3739,6 +6721,27 @@
"format": 1
},
{
+ "name": "plugins/module_utils/network/sonic/argspec/vlan_mapping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/vlan_mapping/vlan_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd0e8cb34175ac5eeede2e26a1f4fe186da14e0881c0a7c40865a45c63c67756",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/vlan_mapping/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/network/sonic/argspec/ntp",
"ftype": "dir",
"chksum_type": null,
@@ -3749,7 +6752,21 @@
"name": "plugins/module_utils/network/sonic/argspec/ntp/ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "027ba50a79f0347df9702030f8849258711e516df33da4b8949b51cad88d3c86",
+ "chksum_sha256": "bbed7abf20c5f65bb31829cce51784ef04e7049a59ceb7f1236fc00065e033b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/logging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/logging/logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6058f4857729da1dacd9312b9fa2b494961d1cd6b5866b6760097194c4eaebb5",
"format": 1
},
{
@@ -3770,7 +6787,49 @@
"name": "plugins/module_utils/network/sonic/argspec/system/system.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "53707e1f9e6d035be3514ea21d61a0fec4a0dd75ebeb4e44a803f63a3020be7d",
+ "chksum_sha256": "e4a2559f9e125a53cbef71dac79bfa891122493192933d1c84087f3fdbe86450",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/copp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/copp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/copp/copp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d95c938992425142ee78d568dda0a91eb349364dd807e5b2facc379af81401f6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/l3_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/l3_acls/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/l3_acls/l3_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1525d60212ad72a57ce568d4ef13a274a471fbaf9649decd7f85f7b096e6b8dd",
"format": 1
},
{
@@ -3791,7 +6850,28 @@
"name": "plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d9ccf45561ffa483c765a8c0ce00a2818e271c43134f9c46b3dfb3aee80f12c7",
+ "chksum_sha256": "ef546a5a09f39e7f062bcee63e92dc5e410d8a9ee5ef26daa89d2ff95c3aa9af",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/dhcp_snooping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/dhcp_snooping/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/dhcp_snooping/dhcp_snooping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c9d94c9f0341098db28a0bf7a2c9de85ff0ab5b291dc07c3be0e74700cfcd87",
"format": 1
},
{
@@ -3812,7 +6892,28 @@
"name": "plugins/module_utils/network/sonic/argspec/vlans/vlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c0f599c09eaf915361ca18d5898de652abea87835ababe1ac726f8367a6fabd5",
+ "chksum_sha256": "dd437ac0c25fcb80fe78be467e598879a88fa150e5ac0c582c5db3116bce0220",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/pki",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/pki/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/pki/pki.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2803b45f839e95fae01b43efa67977bd0516fc47153e7026375de2451cb254dd",
"format": 1
},
{
@@ -3826,7 +6927,7 @@
"name": "plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4dcd58e947b120be713fa3f571f6c5485cab6bdfe7efc75a4e90cec626a46a71",
+ "chksum_sha256": "a23ba33a273db878d89e3d2d10f406843358ec206abc9a15e5aed34db2b5f5c7",
"format": 1
},
{
@@ -3840,7 +6941,7 @@
"name": "plugins/module_utils/network/sonic/argspec/mclag/mclag.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "df57beee6331242c7c1cced8fea388ec78291d8ce2b32c20c5e0e50738bfb9f2",
+ "chksum_sha256": "ad32353a4503f116dd8ff78a666d65f5070d453f1828f583e78112e9d9aa3fab",
"format": 1
},
{
@@ -3861,7 +6962,21 @@
"name": "plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cb90a1d43f0acc3ab981e9579533505e4c23fc7d5c045b3266e13ad41b4606fa",
+ "chksum_sha256": "908f8cb10b2c81c70b45dd29dfb339410324fae2814b6a9a051bcbcbaa05ba42",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/ip_neighbor",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/ip_neighbor/ip_neighbor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc1ba714079549b79bac9d0551fc24f8376384f404e83d9d4ee05ab8e1ed174e",
"format": 1
},
{
@@ -3875,7 +6990,7 @@
"name": "plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3762f248b6f01066931467b74ffbe37b8d8c535fc97fe571f6d01acb1669f74c",
+ "chksum_sha256": "84ed10ad236c601747b93503e435619cbd429de6d2a6ab85401c5003c91146a6",
"format": 1
},
{
@@ -3889,7 +7004,49 @@
"name": "plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "00a2640dd2eacd73a6ee61c7f00f913964e9a3072f0550682f6d4732ff119c58",
+ "chksum_sha256": "4786969ad3843ff6b0844b3ea0410cb91dcd8eb8da3c25355afc857d21b75652",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/lldp_global",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/lldp_global/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/lldp_global/lldp_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "899b1767cd75963775ab4d216182ed85ac8cb538308e20bfcf988edc4cc4e120",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/l2_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/l2_acls/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/l2_acls/l2_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91cdd1aae4371f0ea68895b40028e46b3c65f1fa97d61b1ef7c0532a33789350",
"format": 1
},
{
@@ -3914,6 +7071,27 @@
"format": 1
},
{
+ "name": "plugins/module_utils/network/sonic/argspec/stp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/stp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/argspec/stp/stp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d058343ea2c3cce3fe58078701f66cebbe91944cc9402d37ca7bc31b98542da",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/network/sonic/utils",
"ftype": "dir",
"chksum_type": null,
@@ -3924,7 +7102,7 @@
"name": "plugins/module_utils/network/sonic/utils/bgp_utils.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "efbf8c72fe520e860020fceef08d8e3a9c3aaa10fb3601999d96d604aceb09a7",
+ "chksum_sha256": "9ee1ba4d06a035bbbd92dc54ba63342bd0173ef2af71a7a285255477c7869001",
"format": 1
},
{
@@ -3938,21 +7116,28 @@
"name": "plugins/module_utils/network/sonic/utils/interfaces_util.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "356468c4f1fdb40153c2b220ddb2c5441feecd4a177067aef748edc824feaa1a",
+ "chksum_sha256": "50ede8649f0a1bc79a6fcb45eab44c4d1f5bd5c69940a16b770de53057829005",
"format": 1
},
{
"name": "plugins/module_utils/network/sonic/utils/utils.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6b6f8897c68aaff47fd0bc497e5d99bbf71d5a78837c000f2b0d4ae719c44300",
+ "chksum_sha256": "c45e4e5b1e4c04a993faacb702adb5ee9e64aecc6ce55714b0233d9b9620e5ff",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/utils/formatted_diff_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "158be0d32a29c7cd89f0643774252798f9890a90871e05183c1b8d0f235ccab1",
"format": 1
},
{
"name": "plugins/module_utils/network/sonic/sonic.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2852d2baa75d3b3a9260f156c500738eb2d37ef2861a6d63e20290c93bc7c8c1",
+ "chksum_sha256": "4a709417d10692943de02955526e60f2171c92d13005ed7612848881ff933e67",
"format": 1
},
{
@@ -3973,7 +7158,7 @@
"name": "plugins/module_utils/network/sonic/config/vxlans/vxlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "751480b64c2188fb97cc3eabde60a7d328fc780bb82dac9dbb215ce75da58b3e",
+ "chksum_sha256": "5acba99c206f0873f313c05eb3a27c346c11a766457b4aef26eefaff9f1ab78e",
"format": 1
},
{
@@ -3987,7 +7172,7 @@
"name": "plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "79d2b8680dc3f023e536ab624ad00c3b36c2d592a22f9cde98ffa66bc95fa2c8",
+ "chksum_sha256": "28a9de1ab8d6aba8d77836348ebb01c13ab61a95d72986736586118b49703275",
"format": 1
},
{
@@ -3998,6 +7183,48 @@
"format": 1
},
{
+ "name": "plugins/module_utils/network/sonic/config/mac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/mac/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/mac/mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22484d36e29561f186b66c6b957ff8cfc6a7200026ee1cb04252d1ad41baf8ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/dhcp_relay",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/dhcp_relay/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/dhcp_relay/dhcp_relay.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c89f8fc65d0bae28e0d3d384ad0a73cfbbdca5865d8fde6cd241b5617bb818ac",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/network/sonic/config/users",
"ftype": "dir",
"chksum_type": null,
@@ -4008,7 +7235,21 @@
"name": "plugins/module_utils/network/sonic/config/users/users.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "45344f3b1a07ad06af3e0cda474f78bebde079b4a929d27a9e99a888aa3231fc",
+ "chksum_sha256": "73568a1acf800cb2ac37b78efac670468252e90dfb27a473ada8988e9dad8afa",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/port_group",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/port_group/port_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1800f3d4804abbb6af1e4ee6fa85eacce80c67f6f512f13f8a3eba8c466381b0",
"format": 1
},
{
@@ -4029,7 +7270,7 @@
"name": "plugins/module_utils/network/sonic/config/aaa/aaa.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eed3f6afdc941372f55212449b300bdfc5fa0150eb5068dd98848133ae8eb7ff",
+ "chksum_sha256": "0c62cfcf15301239d3606d505b5cdebb06480becb236a2cfa42d18e67857141d",
"format": 1
},
{
@@ -4050,7 +7291,28 @@
"name": "plugins/module_utils/network/sonic/config/static_routes/static_routes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "146d536b99180f3f99e0c0e9c0d0b97487b2e1d6b736c85e448bb17b6dbe96f5",
+ "chksum_sha256": "57e15a7aa859388fdaec5d19f511cfddc791dc7aeae397c3fef392930d6a2062",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/bfd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/bfd/bfd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "752c743958b770fb88377181d4ef2eead55047f8bd6c4909067d968a693a5aa8",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/bfd/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
@@ -4064,7 +7326,7 @@
"name": "plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1b925177c3c5e66b32066d1c16d45c73878da5e19f7f14d309aeabebe90ef8c8",
+ "chksum_sha256": "6edbaeae363836fc6bc19fff0623260e7f1b288461bcea41c462717247d412d2",
"format": 1
},
{
@@ -4078,7 +7340,7 @@
"name": "plugins/module_utils/network/sonic/config/vrfs/vrfs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "121221ad6e55fffad0b00ca0a5e7cd58b35a4e7a575481dfd074c6b41411ad31",
+ "chksum_sha256": "19dacdae548ee992bf9cd87a2e2c77cba13461c338b547bb79e1fe2fa4cce433",
"format": 1
},
{
@@ -4092,7 +7354,28 @@
"name": "plugins/module_utils/network/sonic/config/interfaces/interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e4289ea78fef1e900174cf355a7093f5ea144573abdf7bcab63879430d8a1d3d",
+ "chksum_sha256": "7b4be292490ab0b82514f040e6cec06914304ba87d0341a67b7f006faa719e8f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/acl_interfaces",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/acl_interfaces/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/acl_interfaces/acl_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "882b2ebf820225a6f40112d845d5bedc2b83997ad284f9eae66552f2d846f149",
"format": 1
},
{
@@ -4113,7 +7396,7 @@
"name": "plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "873dc9442732042397ac3346598dbfc8040eff5b388a6a6c5462d156ea6c999e",
+ "chksum_sha256": "25bb8b114f354df51dadddec84f09ba4c1b7bfa3bd02d22bb98d7e397098ffb2",
"format": 1
},
{
@@ -4134,7 +7417,21 @@
"name": "plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a66f6fa13607ce0aae48f7ec944cc7410fd2443efa696fc59d254803402aea99",
+ "chksum_sha256": "47674b96bcd45f570cd925d51dc814dd08b95ccc7b43c3e7498bd2d9450398d7",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/route_maps",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/route_maps/route_maps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a2b4dfd291ab4349cc77ab7e54f508aff0ded37ea1a95d11ff57c92ab345400",
"format": 1
},
{
@@ -4148,7 +7445,7 @@
"name": "plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "42ea3ce2e2c15ec958f3072986def9bb6bbeee7e523a54e0cde7923ab7e855cb",
+ "chksum_sha256": "c7b84fd62451f70102d4d0d19bb6f555246fc9533899e2778ceb363c24f79834",
"format": 1
},
{
@@ -4169,7 +7466,7 @@
"name": "plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fad647ed365fd0991abcb3f70efb94c443dc8ab0fe19751690cfb5b529467763",
+ "chksum_sha256": "a9c9ca5d1921e85c5cb443950d75c5d5fa9089e4624a23205fd35b5d205bf689",
"format": 1
},
{
@@ -4183,7 +7480,7 @@
"name": "plugins/module_utils/network/sonic/config/bgp/bgp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "294427bd5c538c82eca5212bcfb57967230aa64892c5e2073ad12bf889381a97",
+ "chksum_sha256": "66a2fd56a5dd1d1793ba1b8c3c8da0e945680565911c957227a77a3532aa6e87",
"format": 1
},
{
@@ -4197,7 +7494,28 @@
"name": "plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f94c033887e9c1065c0a8155bb7e6f36888e2e66c7e475d86400dc87660a6489",
+ "chksum_sha256": "3fe330259fd9ea780794cdae8b6c5fa7505416d3909c22b0efc27f5e4ed3498e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/vlan_mapping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/vlan_mapping/vlan_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ba00006e25c72c156c1d6c0276f592550e5361d4924175abb5ba6be6980bf6b",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/vlan_mapping/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
@@ -4211,7 +7529,21 @@
"name": "plugins/module_utils/network/sonic/config/ntp/ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a19fdad43d518a26619fc9fb39a2d6ef3362cd1cc89252d74f69dc760ed815b3",
+ "chksum_sha256": "957a4e72b376a79d51305ad90fae0166e40fff23bdca5ec45ab26b04c8b7bc24",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/logging",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/logging/logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0580847184f421c94d6e2fdcbe5465959961dfbe78f71df74d7bce993d962043",
"format": 1
},
{
@@ -4232,7 +7564,49 @@
"name": "plugins/module_utils/network/sonic/config/system/system.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4a59efae25b1106a79087ecdfcb268fe2c6566ea61af795cce9d806fbe9f89e8",
+ "chksum_sha256": "e363bca18f96b07836ecd9d8790978da2c766235f11f713732348871b968dffc",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/copp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/copp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/copp/copp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a13cc33ec860d16e53cf73aadec658c8d07a430134d038112d2f195c2f7cd83",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/l3_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/l3_acls/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/l3_acls/l3_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2441d50c4fe032a7ba192f808da64aae49bdf2aa17ce88289dd15e9c4f5f6f2",
"format": 1
},
{
@@ -4246,7 +7620,28 @@
"name": "plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7fff98321324ac64feb8edc5f8042f16bd4fa145c36781d53e86978d6edd9637",
+ "chksum_sha256": "c0b8e152a86919f3d014b9df29b11ef358c1a5fd7bd6b19e33f3de91a82278de",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/dhcp_snooping",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/dhcp_snooping/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/dhcp_snooping/dhcp_snooping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0aad3f0c466a28f653bbd75cbbaee5d18fc61b75da7eb2039fb1fe956d005f2",
"format": 1
},
{
@@ -4260,7 +7655,28 @@
"name": "plugins/module_utils/network/sonic/config/vlans/vlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0f26c8eeb4cd0b4de7fd058ef2a7d1e7f728cd5d4acb0f78f19d5ea729569255",
+ "chksum_sha256": "b8744022a7dd6c9d05577421b758d0ecc40710e9ba11611139b7a7b4903e8836",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/pki",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/pki/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/pki/pki.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f86f3ddf782c183cbacece171fe026aaf2b2c46ba32b428424f3b81cc584fd63",
"format": 1
},
{
@@ -4274,7 +7690,7 @@
"name": "plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6019047cfa54495d68c7ddca3b1b39bdb90d0e8854dd7835d8a0ad31fe55226a",
+ "chksum_sha256": "7770859ec20edd871b4c60ddde0cf2f2e2a1085e3ac67d119bddcd53f0a88468",
"format": 1
},
{
@@ -4288,7 +7704,7 @@
"name": "plugins/module_utils/network/sonic/config/mclag/mclag.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8cb43549024ce183291b1dbfd8512ee25d9401a9413e631457cdc1852dbdf98e",
+ "chksum_sha256": "4c39a47929eebf90ad31a8376fa5b15575a7c41a9a9d6360ffaa420637b26780",
"format": 1
},
{
@@ -4302,7 +7718,21 @@
"name": "plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bd007d97137f9f87e82a2e3b3e545cdc811908ff7bf3ae142d304b0732ddc8db",
+ "chksum_sha256": "70c75fe20a4b0bb23e6e91b9faac5651e36aec58810916e3e174c9d98e367798",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/ip_neighbor",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/ip_neighbor/ip_neighbor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "38e8afceb0c3efbe724a8961cc4e4ebdc28e5cac0c63dad450aa3243f1bae6d8",
"format": 1
},
{
@@ -4316,7 +7746,7 @@
"name": "plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cbb277af89e7ebe04824f8c6c31cbfba3882f67a084861507153a2ef3c50c01c",
+ "chksum_sha256": "fe8e9eea28fb241490c3cdc4847e2f72c800d170a1f298e8611fd8d948127467",
"format": 1
},
{
@@ -4330,7 +7760,49 @@
"name": "plugins/module_utils/network/sonic/config/radius_server/radius_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "76ad9bce200ded760e69515a4694b1dbbd32347a2257f5b2841cd64644d48724",
+ "chksum_sha256": "302d4e7347100ab38525b96cd18381999c38f88d0c4c36edf60dc22f04cc84fe",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/lldp_global",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/lldp_global/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/lldp_global/lldp_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de1f768f1b6e97d90a9a4e24d1d1778aa108266875e817ab232abad7d2d90bb5",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/l2_acls",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/l2_acls/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/l2_acls/l2_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9031167a9874efa11bee2d7fe0c108e9f4834793a29ab60740f7978946d52e2d",
"format": 1
},
{
@@ -4344,7 +7816,28 @@
"name": "plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b34c814844873084b563d07af9334c6ee2d5cb11ae805a05c4a039c08c1d03dc",
+ "chksum_sha256": "bbd063fb2329be4503a1beb8dde1dbd0ea07c3fbb6a4c7a18abe56597985f268",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/stp",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/stp/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/network/sonic/config/stp/stp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e7ff773040559ebfcd0f5b73220238a7e186398d53f97c112aa6b962251a84ea",
"format": 1
},
{
@@ -4414,7 +7907,7 @@
"name": "plugins/cliconf/sonic.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4fbeb05a4a01583778cad49752c4fa8f3562926b4f7cbb399700aa0f59b1168b",
+ "chksum_sha256": "447bdc617826b39f3d11e2852897c79ae4db64648bfe509e45a9fd87b53af23d",
"format": 1
},
{
@@ -4439,10 +7932,17 @@
"format": 1
},
{
+ "name": "plugins/modules/sonic_l3_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1801d73137632f113b989fc101c18b29e5a60af4f99c52316c0aebca52cd3d11",
+ "format": 1
+ },
+ {
"name": "plugins/modules/sonic_vrfs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f65a8f6e26c21af62aca63db39f12c28226e173706e2475a336f5774c9d0fd6e",
+ "chksum_sha256": "d09b615234f23dd67d53ee486a99943c0d81b357c253456704c5b647742ca5c6",
"format": 1
},
{
@@ -4456,175 +7956,280 @@
"name": "plugins/modules/sonic_tacacs_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a610ddc06cd5a8088ed2eae495f4e99f2adbbf3242aac9503b82317ee7dc1212",
+ "chksum_sha256": "da42242527f9dab3237e657359546769bf3eb7cb83264c660a261bdb7026a113",
"format": 1
},
{
"name": "plugins/modules/sonic_radius_server.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a64fe7ce6837f85d5ebdc5772ab8649ff4e07ee79c2ff56bf29dec4286cc260a",
+ "chksum_sha256": "a11a15204f50cb1710ebaf49d4025e8bcd3c8d5a4a3962942b840609ee1a304e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_mac.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfc9845da37d8a01bd27175ad93de2ec01b8a1d05dba6997ba29d3d13d95a43b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_port_group.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb803b34c21eeacde6c3fe314b83fddba8610926da654de45240fb25a109be4e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_ip_neighbor.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "accf797e421239f33b701a2d2ae7818f46c26a7e4428198ea187cc8cca15f385",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_lldp_global.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19b5edffcc6d176407e74f3d8ea281a1acd267e468c52439e9f14585b2f56358",
"format": 1
},
{
"name": "plugins/modules/sonic_vlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0820305b7619cfc6080ec69893ea8093bef0224f7078833a9da4ee139e901257",
+ "chksum_sha256": "837f8d60d3141bb8a7489cec95401b04111e246bf1502d3900c1951c6ea25154",
"format": 1
},
{
"name": "plugins/modules/sonic_aaa.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c2a2be0a9b65b42e19f099adf045b875961c5876d7ef17457828bf1c7ffd2833",
+ "chksum_sha256": "5e0367da89779b1fd77ede17f153e486059653537afc129399c4633b78bc5301",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_copp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd729289ef0441c7a6709f1daf3f8d0a9a76a28fdec8ceaa3b582b2bb7c370ae",
"format": 1
},
{
"name": "plugins/modules/sonic_l2_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d1ee00011322c29a0936114c42775a91932c6b46605109a0c7f53911018f3e85",
+ "chksum_sha256": "329e5654e34c2febab2bf3624345eff6a310e1d9fae1d5e849dc0d5c2695bf68",
"format": 1
},
{
"name": "plugins/modules/sonic_ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "92cfdb9b5e31bddf0ef61f01b255e1f4754c2fe6a1ce47571202ef3155b69d51",
+ "chksum_sha256": "9eee6712594bb51a088afd81b0cad244909b6bb78110e1f462ac2357f55392fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_dhcp_relay.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0823a8ddd0955e548a6d0bec452ef041d2611bc20f7a22d3e2be5327dd6f4894",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_dhcp_snooping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77bb3d3e35a46aaf35980a2ca27d30832de6ba7543f5f71212a461518654eb02",
"format": 1
},
{
"name": "plugins/modules/sonic_mclag.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e55c254e12c77ea06035f9cdd87a4248e89f7cd9d0df068266e7226c78d5fd20",
+ "chksum_sha256": "2923c5a306e5251584038af5c0a58e8bcf8ef5230e4ec02e944b9c04902b729c",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5e531d9a1f281831911dba58b4e5568bea27ae13b5a46119fd0a50c234d80a4f",
+ "chksum_sha256": "0c7da2ba338a8be6b4512f9a9fbcc9443da6f55b8322433682ed37b07c905028",
"format": 1
},
{
"name": "plugins/modules/sonic_l3_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b9c8607ca719658fa1a5f4bf20e1bf0b71f06618664b56aa3f3f771c0416d535",
+ "chksum_sha256": "87a3d765bb9407e43b633b1deeef5c4c3634cc249ee2235e14b16f01a100aefa",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_acl_interfaces.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ff78ada1d10b818c3ae8e9c83823fa0fa42e05faae2ec8c3929244b78397390",
"format": 1
},
{
"name": "plugins/modules/sonic_system.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "daf0edd6622a20743c5c9f51d4c4a7732389719390f512885cd3af012e65ad92",
+ "chksum_sha256": "0266c47a530ddcc434cc10d31b1692eb9b874e4dd77d60366d2d632fc996359c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_vlan_mapping.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e818b7fa043182c6f0052f95db41547ffa02be6fa8aaabb998a09126cdb87ed0",
"format": 1
},
{
"name": "plugins/modules/sonic_users.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1104943f68ff99954e8481b915fefb206af930a9c6d6edc79f43aee233ea1db5",
+ "chksum_sha256": "ebe26def6331fe488be1c7431f398c804cd75c473426a19c6e73a01ef4b16091",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_l2_acls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cee67cd1d9445e86ebc2462a00d2011511e9267958949b392403da720c3fc18",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp_neighbors.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "417b1a3e3b325ffbef1b84db13dedef88c9e935d441d3b2f1cd8ed79eaa933bd",
+ "chksum_sha256": "63681f4185bd7aa6954bf4b8aa8c87d1d2824276c27f1ee5a7ff2e296178a8df",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "869e043591404007ed592618bcb282ed59c64c88e02e77b8fca083323bce30dd",
+ "chksum_sha256": "c3aacc4b881fa318b8037a8e2c5fd48b683f36dc8635071193b428d4113264f1",
"format": 1
},
{
"name": "plugins/modules/sonic_port_breakout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1896147e09a083d1fbce1900b21c29572b6dd992884947c2ffb4fb9379148e2f",
+ "chksum_sha256": "bd7c03e4ab8a0c16f8da318725e8ae69e6ba8eb93a782329e90e9e5dabf35bf7",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp_ext_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a0949a6e579a7eaef3d4b355ecaf328c8357c8b4d1ca7f8cf5624ed95582b81e",
+ "chksum_sha256": "e8e0ae69099a79826f1732110f2aa01751d91530630bbdc0559487ff7bc569b1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_stp.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f9f5bf5d9003a22f6b89cdb5a57edff7931a764c232ebac9a4401071296e026",
"format": 1
},
{
"name": "plugins/modules/sonic_lag_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "77d851e7f2915eb77407dccabfb7e9a84a02c740e3bde4067ae22ddd11932088",
+ "chksum_sha256": "f20a7e72cc89eb39b43bf5d04929da070a62a1b57fbdca27f3b10f9c69dfd27c",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp_as_paths.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2c98cdd58166716bd27b7c658dd75849959877c0f987a1ad5f189d33762ee8e",
+ "chksum_sha256": "fc279ab490d4c025eeaef174e4ad2e582e88e4f978d6b87063eda38faad2e8e4",
"format": 1
},
{
"name": "plugins/modules/sonic_config.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2f596c49a79cc793a11a9f045602ae9dc226dd49b0e6ed5ebebb901814db2c3a",
+ "chksum_sha256": "419e86557940d2e1ddeeb46cfa27463ae108f25b6dc9f2e39ea62fd54831460d",
"format": 1
},
{
"name": "plugins/modules/sonic_facts.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "66e57eab9be3a92fabd25d58d4b50ad4989b5ed82930b9fa26005a778d004aaa",
+ "chksum_sha256": "3384a94f121e500936d58c39d7b9c64dd0a48933a722f0215f6d954dbd6f5dca",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp_communities.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3053900903a2b5998e14d9a055cd0c907e9d4a1fcbcc76aff3efb616977ce038",
+ "chksum_sha256": "bad602f180c3f3b632e7f7179e2f8a74c4f531e4db782b47aec1262c26b5832b",
"format": 1
},
{
"name": "plugins/modules/sonic_bgp_neighbors_af.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "06e2639cc7ce2b03bd29e3909bf26525cefbd47035d9cb28927a96ccb91a16ad",
+ "chksum_sha256": "5cca331d24851606c9fa2cc6ed514c8ce1d614fa2fcd891ae0afed30c83a95fa",
"format": 1
},
{
"name": "plugins/modules/sonic_prefix_lists.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b7226bb99d6699bf1f974bafb6659d23949b94afb80cac714c374a2ffb126ad7",
+ "chksum_sha256": "b1fc8df7bb0e4b6b9462b947dd340ec00304c885f97c6812226016b8d291d74d",
"format": 1
},
{
"name": "plugins/modules/sonic_static_routes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d0a90d0134fdcbb81da9195eadbd91821ba400199c32c6f8750c549a6653b58a",
+ "chksum_sha256": "588edb1a3b552ed38657b7bceda79dd80ef876b237ff743b505d69a63a264ee4",
"format": 1
},
{
"name": "plugins/modules/sonic_interfaces.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d602d47c8e1bc50ba18d8929117ba98da1dade7d01b4099f7ddec558eeafba0",
+ "chksum_sha256": "9939b603d8eb85216a84f5cea1107aa826f966a0eeff705e30d4f8ee7a65045c",
"format": 1
},
{
"name": "plugins/modules/sonic_vxlans.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3c10ca81fd5969960f897c93b2159f6ac6c9ecd05084272433da32ab156f176a",
+ "chksum_sha256": "6f37906c6e578fefce8d697197b330b83df8777dd65f67372fa6c1e51396aeee",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_bfd.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "929e559af34df300a09096f470599512cdbd21c99500d8dc1fe3a6cb8a7ebb93",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_logging.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e57448b8cfe6b311c4bab0a2f7d27f1de7b7737ebca01f299bedf51cae528c7",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_pki.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a62144d8151e36ef9fceb79a798f45c5b89d35bfe4e2d472b8764035146c144d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/sonic_route_maps.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8dbb8bbb55defa96b6421dc379994627ef8c814d962a9ee74288fca502847c60",
"format": 1
},
{
@@ -4663,6 +8268,13 @@
"format": 1
},
{
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f56ba07cd40eeadaf3d0a40b370cbf8c3863ea967e8abf3e1df7b086126dbe0e",
+ "format": 1
+ },
+ {
"name": "LICENSE",
"ftype": "file",
"chksum_type": "sha256",
@@ -4677,6 +8289,62 @@
"format": 1
},
{
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/BRANCHING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03626549a59abf648ee59163b3b8acbf66c36513cb1e76d6e277bc044c926e30",
+ "format": 1
+ },
+ {
+ "name": "docs/SUPPORT.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "489515b299c57466195c73f4f779e074d8ccd881d1c40ffe8ebdfc1460df558a",
+ "format": 1
+ },
+ {
+ "name": "docs/ADDITIONAL_INFORMATION.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ad23c71f8cb335215a86959658c76614e40a609698610948cd03e466ea08b72",
+ "format": 1
+ },
+ {
+ "name": "docs/ISSUE_TRIAGE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc9e301d33c99bd57c412a9b93e8e8c7e7a6d9a1a24cafd0c7391b7bbddb2501",
+ "format": 1
+ },
+ {
+ "name": "docs/COMMITTER_GUIDE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f6ce7fe985d513fed1f142c1ab7643f2e7a47f6379885b97cb28fcd2f4124c7",
+ "format": 1
+ },
+ {
+ "name": "docs/MAINTAINER_GUIDE.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1ffd49773af4fc0f200fb01495ce566b911d7cc3b3d6d11287d41b6521b9545",
+ "format": 1
+ },
+ {
+ "name": "docs/CONTRIBUTING.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "729aed78631d8be8920e96c3a6f49b2efaeb7a64ce4abc846c6ba1bfe0c0e32f",
+ "format": 1
+ },
+ {
"name": "meta",
"ftype": "dir",
"chksum_type": null,
@@ -4687,7 +8355,7 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d6e301db0d0e6b8d8df2dac7fe249f0d21f5deadb47edb23f31fc25a87ea704f",
+ "chksum_sha256": "be750c50a25d2ffa34b8ba4734bd36edc8e4d3ad6c2dc46bf0d6fd9b5a9da4ec",
"format": 1
},
{
diff --git a/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json b/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json
index 06fd002e1..3d6e88fc2 100644
--- a/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json
+++ b/ansible_collections/dellemc/enterprise_sonic/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "dellemc",
"name": "enterprise_sonic",
- "version": "2.0.0",
+ "version": "2.4.0",
"authors": [
"Senthil Kumar Ganesan <Senthil_Kumar_Ganesa@Dell.com>",
"Abirami <Abirami_N@Dell.com>",
@@ -11,7 +11,11 @@
"Nirai Madai <Niraimadaiselvam_Mar@Dell.com>",
"Shade Talabi <Shade_Talabi@Dell.com>",
"Kerry Meyer <Kerry_Meyer@Dell.com>",
- "Mingjun Zhang <Mingjun_Zhang@Dell.com>"
+ "Mingjun Zhang <Mingjun_Zhang@Dell.com>",
+ "Arun Saravanan Balachandran (arun_saravanan_balac@dell.com)",
+ "Santhosh Kumar T (santhosh_kumar_t@dell.com)",
+ "Divya Balasubramanian <Divya_Balasubramania@DELL.com>",
+ "Cypher Miller <Cypher.Miller@Dell.com>"
],
"readme": "README.md",
"tags": [
@@ -25,7 +29,7 @@
"license": [],
"license_file": "LICENSE",
"dependencies": {
- "ansible.netcommon": ">=2.0.0"
+ "ansible.netcommon": ">5.0.0"
},
"repository": "https://github.com/ansible-collections/dellemc.enterprise_sonic",
"documentation": null,
@@ -36,7 +40,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f419780826940a762216126025170e01d39987da0d8fb273560a2488307493f5",
+ "chksum_sha256": "0d2bd086845a8c77b0eb30ed113910a41421b8f0410012a6b68954aa500cc208",
"format": 1
},
"format": 1
diff --git a/ansible_collections/dellemc/enterprise_sonic/README.md b/ansible_collections/dellemc/enterprise_sonic/README.md
index 4c17c9e9c..d4a698d73 100644
--- a/ansible_collections/dellemc/enterprise_sonic/README.md
+++ b/ansible_collections/dellemc/enterprise_sonic/README.md
@@ -31,17 +31,50 @@ Name | Description | Connection type
Collection network resource modules
-----------------------------------
-Listed are the SONiC Ansible network resource modules which need ***httpapi*** as the connection type. Supported operations are ***merged*** and ***deleted***.
-
-| **Interfaces** | **BGP** | **VRF** | **Users** |
-| -------------- | ------- | ------- | ------- |
-| [**sonic_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-interfaces-module)|[**sonic_bgp**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-module)| [**sonic_vrfs**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vrfs_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vrfs-module)|[**sonic_users**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_users_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-users-module)|
-| [**sonic_l2_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l2_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l2-interfaces-module)| [**sonic_bgp_af**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_af_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-af-module)| **MCLAG** | **AAA** |
-| [**sonic_l3_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l3_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l3-interfaces-module) |[**sonic_bgp_as_paths**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_as_paths_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-as-paths-module)| [**sonic_mclag**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_mclag_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-mclag-module)| [**sonic_aaa**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_aaa_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-aaa-module)|
-|**Port channel**|[**sonic_bgp_communities**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_communities_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-communities-module)| **VxLANs** |[**sonic_tacacs_server**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_tacacs_server_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-tacacs-server-module)|
-|[**sonic_lag_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_lag_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-lag-interfaces-module)|[**sonic_bgp_ext_communities**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_ext_communities_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-ext-communities-module)| [**sonic_vxlans**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vxlans_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vxlans-module)|[**sonic_radius_server**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_radius_server_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-radius-server-module)|
-|**VLANs**|[**sonic_bgp_neighbors**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_neighbors_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-neighbors-module)| **Port breakout** | **System** |
-|[**sonic_vlans**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vlans_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vlans-module)|[**sonic_bgp_neighbors_af**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_neighbors_af_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-neighbors-af-module)|[**sonic_port_breakout**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_port_breakout_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-port-breakout-module) |[**sonic_system**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_system_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-system-module) |
+Listed below are the SONiC Ansible network resource modules, all of which require ***httpapi*** as the connection type. Supported operations are ***merged***, ***deleted***, ***replaced***, and ***overridden*** (see the illustrative task after the table).
+
+Name | Description
+--- | ---
+[**sonic_aaa**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_aaa_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-aaa-module)| Manage AAA and its parameters
+[**sonic_acl_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_acl_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-acl-interfaces-module)| Manage access control list (ACL) to interface binding
+[**sonic_bfd**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bfd_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bfd-module)| Manage BFD configuration
+[**sonic_bgp**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-module)| Manage global BGP and its parameters
+[**sonic_bgp_af**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_af_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-af-module)| Manage global BGP address-family and its parameters
+[**sonic_bgp_as_paths**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_as_paths_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-as-paths-module)| Manage BGP autonomous system path (or as-path-list) and its parameters
+[**sonic_bgp_communities**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_communities_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-communities-module)| Manage BGP community and its parameters
+[**sonic_bgp_ext_communities**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_ext_communities_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-ext-communities-module)| Manage BGP extended community-list and its parameters
+[**sonic_bgp_neighbors**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_neighbors_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-neighbors-module)| Manage a BGP neighbor and its parameters
+[**sonic_bgp_neighbors_af**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_bgp_neighbors_af_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-bgp-neighbors-af-module)| Manage the BGP neighbor address-family and its parameters
+[**sonic_copp**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_copp_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-copp-module)| Manage CoPP configuration
+[**sonic_dhcp_relay**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_dhcp_relay_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-dhcp-relay-module)| Manage DHCP and DHCPv6 relay configurations
+[**sonic_dhcp_snooping**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_dhcp_snooping_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-dhcp-snooping-module)| Manage DHCP Snooping
+[**sonic_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-interfaces-module)| Configure Interface attributes
+[**sonic_ip_neighbor**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_ip_neighbor_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-ip-neighbor-module)| Manage IP neighbor global configuration
+[**sonic_l2_acls**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l2_acls_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l2-acls-module)| Manage Layer 2 access control lists (ACL) configurations
+[**sonic_l2_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l2_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l2-interfaces-module)| Configure interface-to-VLAN association
+[**sonic_l3_acls**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l3_acls_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l3-acls-module)| Manage Layer 3 access control lists (ACL) configurations
+[**sonic_l3_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_l3_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-l3-interfaces-module)| Configure the IPv4 and IPv6 parameters on Interfaces
+[**sonic_lag_interfaces**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_lag_interfaces_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-lag-interfaces-module)| Manage link aggregation group (LAG) interface parameters
+[**sonic_lldp_global**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_lldp_global_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-lldp-global-module)| Manage Global LLDP configurations
+[**sonic_logging**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_logging_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-logging-module)| Manage logging configuration
+[**sonic_mac**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_mac_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-mac-module)| Manage MAC configuration
+[**sonic_mclag**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_mclag_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-mclag-module)| Manage multi chassis link aggregation groups domain (MCLAG) and its parameters
+[**sonic_ntp**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_ntp_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-ntp-module)| Manage NTP configuration
+[**sonic_pki**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_pki_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-pki-module)| Manages PKI attributes
+[**sonic_port_breakout**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_port_breakout_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-port-breakout-module)| Configure port breakout settings on physical interfaces
+[**sonic_port_group**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_port_group_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-port-group-module)| Manage port group configuration
+[**sonic_prefix_lists**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_prefix_lists_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-prefix-lists-module)| Manage prefix list configuration
+[**sonic_radius_server**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_radius_server_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-radius-server-module)| Manage RADIUS server and its parameters
+[**sonic_route_maps**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_route_maps_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-route-maps-module)| Manage route map configuration
+[**sonic_static_routes**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_static_routes_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-static-routes-module)| Manage static routes configuration
+[**sonic_stp**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_stp_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-stp-module)| Manage STP configuration
+[**sonic_system**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_system_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-system-module)| Configure system parameters
+[**sonic_tacacs_server**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_tacacs_server_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-tacacs-server-module)| Manage TACACS server and its parameters
+[**sonic_users**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_users_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-users-module)| Manage users and its parameters
+[**sonic_vlan_mapping**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vlan_mapping_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vlan-mapping-module)| Configure vlan mappings
+[**sonic_vlans**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vlans_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vlans-module)| Manage VLAN and its parameters
+[**sonic_vrfs**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vrfs_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vrfs-module)| Manage VRFs and associate VRFs to interfaces
+[**sonic_vxlans**](https://docs.ansible.com/ansible/latest/collections/dellemc/enterprise_sonic/sonic_vxlans_module.html#ansible-collections-dellemc-enterprise-sonic-sonic-vxlans-module)| Manage VxLAN EVPN and its parameters
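
As a minimal illustration of how these resource modules are used, the sketch below drives `sonic_vlans` over the ***httpapi*** connection. It is not taken from the collection documentation: the `sonic` host group, credential handling, and the VLAN IDs and descriptions are placeholder assumptions, while the `ansible_connection` and `ansible_network_os` values follow the collection's standard httpapi setup.

```yaml
# Illustrative sketch only; the host group, VLAN IDs, and descriptions are assumptions.
---
- name: Merge VLAN configuration on Enterprise SONiC over httpapi
  hosts: sonic                     # assumed inventory group of SONiC devices
  gather_facts: false
  vars:
    ansible_connection: ansible.netcommon.httpapi
    ansible_network_os: dellemc.enterprise_sonic.sonic
    # Credentials, ansible_httpapi_use_ssl, etc. are assumed to be set in inventory.
  tasks:
    - name: Ensure VLAN 100 and VLAN 200 exist with descriptions
      dellemc.enterprise_sonic.sonic_vlans:
        config:
          - vlan_id: 100
            description: Servers
          - vlan_id: 200
            description: Storage
        state: merged
```

The same task structure applies to the other modules in the table; swapping `state: merged` for `deleted`, `replaced`, or `overridden` selects the corresponding operation.
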
Sample use case playbooks
-------------------------
@@ -53,9 +86,9 @@ Name | Description
Version compatibility
----------------------
-* Recommended Ansible version 2.10 or higher
+* Recommended Ansible version 2.14 or higher (This is required for enterprise_sonic collection version >= 2.4.0).
* Enterprise SONiC Distribution by Dell Technologies version 3.1 or higher
-* Recommended Python 3.5 or higher, or Python 2.7
+* Recommended Python 3.9 or higher (This is required for enterprise_sonic collection version >= 2.4.0).
* Dell Enterprise SONiC images for releases 3.1 - 3.5: Use Ansible Enterprise SONiC collection version 1.1.0 or later 1.m.n versions (from the 1.x branch of this repo)
* Dell Enterprise SONiC images for release 4.0 and later 4.x.y releases: Use Ansible Enterprise SONiC collection version 2.0.0 or later 2.m.n releases (from the "2.x" branch of this repo).
* In general: Dell Enterprise SONiC release versions "R.x.y" are supported by Ansible Enterprise SONiC collection versions "R-2.m.n" on branch "R-2.x".
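
One way to follow the branch guidance above is to pin the collection in a `requirements.yml` and install it with `ansible-galaxy collection install -r requirements.yml`. The sketch below is an example, not part of the README; the version range assumes a SONiC 4.x image and should be adjusted to match the image actually in use.

```yaml
# requirements.yml -- example pin; the range shown is an assumption for SONiC 4.x images.
collections:
  - name: dellemc.enterprise_sonic
    version: ">=2.0.0,<3.0.0"   # 2.x branch of the collection
```
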
diff --git a/ansible_collections/dellemc/enterprise_sonic/bindep.txt b/ansible_collections/dellemc/enterprise_sonic/bindep.txt
new file mode 100644
index 000000000..ba9c980fb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/bindep.txt
@@ -0,0 +1,6 @@
+# This is a cross-platform list tracking distribution packages needed by tests;
+# see https://docs.openstack.org/infra/bindep/ for additional information.
+
+gcc-c++ [doc test platform:rpm]
+python3-devel [test platform:rpm]
+python3 [test platform:rpm]
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml
index 9d3d51d00..8b0d92009 100644
--- a/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/.plugin-cache.yaml
@@ -10,6 +10,7 @@ plugins:
name: sonic
version_added: null
connection: {}
+ filter: {}
httpapi:
sonic:
description: HttpApi Plugin for devices supporting Restconf SONIC API
@@ -23,11 +24,21 @@ plugins:
name: sonic_aaa
namespace: ''
version_added: 1.1.0
+ sonic_acl_interfaces:
+ description: Manage access control list (ACL) to interface binding on SONiC
+ name: sonic_acl_interfaces
+ namespace: ''
+ version_added: 2.1.0
sonic_api:
description: Manages REST operations on devices running Enterprise SONiC
name: sonic_api
namespace: ''
version_added: 1.0.0
+ sonic_bfd:
+ description: Manage BFD configuration on SONiC
+ name: sonic_bfd
+ namespace: ''
+ version_added: 2.1.0
sonic_bgp:
description: Manage global BGP and its parameters
name: sonic_bgp
@@ -73,6 +84,21 @@ plugins:
name: sonic_config
namespace: ''
version_added: 1.0.0
+ sonic_copp:
+ description: Manage CoPP configuration on SONiC
+ name: sonic_copp
+ namespace: ''
+ version_added: 2.1.0
+ sonic_dhcp_relay:
+ description: Manage DHCP and DHCPv6 relay configurations on SONiC
+ name: sonic_dhcp_relay
+ namespace: ''
+ version_added: 2.1.0
+ sonic_dhcp_snooping:
+ description: Manage DHCP Snooping on SONiC
+ name: sonic_dhcp_snooping
+ namespace: ''
+ version_added: 2.3.0
sonic_facts:
description: Collects facts on devices running Enterprise SONiC
name: sonic_facts
@@ -84,12 +110,27 @@ plugins:
name: sonic_interfaces
namespace: ''
version_added: 1.0.0
+ sonic_ip_neighbor:
+ description: Manage IP neighbor global configuration on SONiC.
+ name: sonic_ip_neighbor
+ namespace: ''
+ version_added: 2.1.0
+ sonic_l2_acls:
+ description: Manage Layer 2 access control lists (ACL) configurations on SONiC
+ name: sonic_l2_acls
+ namespace: ''
+ version_added: 2.1.0
sonic_l2_interfaces:
description: Configure interface-to-VLAN association that is based on access
or trunk mode
name: sonic_l2_interfaces
namespace: ''
version_added: 1.0.0
+ sonic_l3_acls:
+ description: Manage Layer 3 access control lists (ACL) configurations on SONiC
+ name: sonic_l3_acls
+ namespace: ''
+ version_added: 2.1.0
sonic_l3_interfaces:
description: Configure the IPv4 and IPv6 parameters on Interfaces such as, Eth,
LAG, VLAN, and loopback
@@ -101,6 +142,21 @@ plugins:
name: sonic_lag_interfaces
namespace: ''
version_added: 1.0.0
+ sonic_lldp_global:
+ description: Manage Global LLDP configurations on SONiC
+ name: sonic_lldp_global
+ namespace: ''
+ version_added: 2.1.0
+ sonic_logging:
+ description: Manage logging configuration on SONiC.
+ name: sonic_logging
+ namespace: ''
+ version_added: 2.1.0
+ sonic_mac:
+ description: Manage MAC configuration on SONiC
+ name: sonic_mac
+ namespace: ''
+ version_added: 2.1.0
sonic_mclag:
description: Manage multi chassis link aggregation groups domain (MCLAG) and
its parameters
@@ -112,11 +168,21 @@ plugins:
name: sonic_ntp
namespace: ''
version_added: 2.0.0
+ sonic_pki:
+ description: Manages PKI attributes of Enterprise Sonic
+ name: sonic_pki
+ namespace: ''
+ version_added: 2.3.0
sonic_port_breakout:
description: Configure port breakout settings on physical interfaces
name: sonic_port_breakout
namespace: ''
version_added: 1.0.0
+ sonic_port_group:
+ description: Manages port group configuration on SONiC.
+ name: sonic_port_group
+ namespace: ''
+ version_added: 2.1.0
sonic_prefix_lists:
description: prefix list configuration handling for SONiC
name: sonic_prefix_lists
@@ -127,11 +193,21 @@ plugins:
name: sonic_radius_server
namespace: ''
version_added: 1.0.0
+ sonic_route_maps:
+ description: route map configuration handling for SONiC
+ name: sonic_route_maps
+ namespace: ''
+ version_added: 2.1.0
sonic_static_routes:
description: Manage static routes configuration on SONiC
name: sonic_static_routes
namespace: ''
version_added: 2.0.0
+ sonic_stp:
+ description: Manage STP configuration on SONiC
+ name: sonic_stp
+ namespace: ''
+ version_added: 2.3.0
sonic_system:
description: Configure system parameters
name: sonic_system
@@ -147,6 +223,11 @@ plugins:
name: sonic_users
namespace: ''
version_added: 1.1.0
+ sonic_vlan_mapping:
+ description: Configure vlan mappings on SONiC.
+ name: sonic_vlan_mapping
+ namespace: ''
+ version_added: 2.1.0
sonic_vlans:
description: Manage VLAN and its parameters
name: sonic_vlans
@@ -166,5 +247,6 @@ plugins:
netconf: {}
shell: {}
strategy: {}
+ test: {}
vars: {}
-version: 2.0.0
+version: 2.4.0
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst
deleted file mode 100644
index c9a571cd8..000000000
--- a/ansible_collections/dellemc/enterprise_sonic/changelogs/CHANGELOG.rst
+++ /dev/null
@@ -1,119 +0,0 @@
-======================================
-Dellemc.Enterprise_Sonic Release Notes
-======================================
-
-.. contents:: Topics
-
-
-v2.0.0
-======
-
-Release Summary
----------------
-
-This release provides Dell SONiC Enterprise Ansible Collection support for SONiC 4.x images. It is the first release for the 2.x branch of the collection. Subsequent enhancements for support of SONiC 4.x images will also be provided as needed on the 2.x branch. This release also contains bugfixes and enhancements to supplement the Ansible functionality provided previously for SONiC 3.x images. The changelog describes changes made to the modules and plugins included in this collection since release 1.1.0.
-
-
-Major Changes
--------------
-
-- Added 'static_routes' module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/82).
-- Added a resource module for NTP support (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/99).
-- Added a resource module for support of prefix lists (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/100).
-- Updated backend REST API request formats in all applicable modules for compatibility with SONiC 4.x openconfig YANG compliant REST APIs. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/53)
-
-Minor Changes
--------------
-
-- Added an execution-environment.yml file to the "meta" directory to enable use of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
-- bgp_af - Added support for BGP options to configure usage and advertisement of vxlan primary IP address related attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/62).
-- bgp_as_paths - updated module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102)
-- bgp_neighbors - Add BGP peer group support for multiple attributes. The added attributes correspond to the same set of attributes added for BGP neighbors with PR 72 (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
-- bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
-- bgp_neighbors - add an auth_pwd dictionary and nbr_description attribute to the argspec (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/67).
-- bgp_neighbors - added prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
-- bgp_neighbors_af - added prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
-- playbook - updated examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102)
-- sonic_vxlans - Add configuration capability for the primary IP address of a vxlan vtep to facilitate vxlan path redundancy (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/58).
-- vlans - Added support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98).
-- workflow - Added stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
-
-Breaking Changes / Porting Guide
---------------------------------
-
-- bgp_af - Add the route_advertise_list dictionary to the argspec to replace the deleted, obsolete advertise_prefix attribute used for SONiC 3.x images on the 1.x branch of this collection. This change corresponds to a SONiC 4.0 OC YANG REST compliance change for the BGP AF REST API. It enables specification of a route map in conjunction with each route advertisement prefix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/63).
-- bgp_af - remove the obsolete 'advertise_prefix' attribute from argspec and config code. This and subsequent co-req replacement with the new route advertise list argument structure require corresponding changes in playbooks previously used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60)
-- bgp_neighbors - Replace the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
-- bgp_neighbors - Replace, for BGP peer groups, the previously defined standalone "bfd" attribute with a bfd dictionary containing multiple attributes. This change corresponds to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images must be modified for use on SONiC 4.0 images to use the new definition for the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
-
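A rough sketch of the revised bfd argspec shape described in the two entries above (the dictionary attribute names enabled, check_failure, and profile are assumptions based on typical SONiC BFD options, not taken from this changelog) might look like the following in a playbook::

    - name: Configure BFD for a BGP neighbor using the dictionary form (illustrative)
      dellemc.enterprise_sonic.sonic_bgp_neighbors:
        config:
          - bgp_as: 65001
            neighbors:
              - neighbor: Ethernet20
                bfd:                  # replaces the former standalone "bfd" attribute
                  enabled: true       # assumed attribute names
                  check_failure: true
                  profile: profile_1
        state: merged
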
-Bugfixes
---------
-
-- Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103).
-- Fixed regression test sequencing and other regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/85).
-- aaa - Fixed a bug in facts gathering by providing required conditional branching (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90)
-- aaa - Modify regression test sequencing to enable correct testing of the functionality for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78).
-- bgp_neighbors - remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60)
-- port_breakout - Fixed a bug in formulation of port breakout REST APIs (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
-- sonic - Fix a bug in handling of interface names in standard interface naming mode (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103).
-- sonic_command - Fix bugs in handling of CLI commands involving a prompt and answer sequence (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/76/files).
-- users - Fixed a bug in facts gathering (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
-- vxlan - update Vxlan test cases to comply with SONiC behavior (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/105).
-
-New Modules
------------
-
-- dellemc.enterprise_sonic.sonic_ntp - Manage NTP configuration on SONiC.
-- dellemc.enterprise_sonic.sonic_prefix_lists - prefix list configuration handling for SONiC
-- dellemc.enterprise_sonic.sonic_static_routes - Manage static routes configuration on SONiC
-
-v1.1.0
-======
-
-New Modules
------------
-
-- dellemc.enterprise_sonic.sonic_aaa - AAA resource module.
-- dellemc.enterprise_sonic.sonic_radius_server - RADIUS resource module.
-- dellemc.enterprise_sonic.sonic_system - SYSTEM resource module.
-- dellemc.enterprise_sonic.sonic_tacacs_server - TACACS Server resource module.
-
-v1.0.0
-======
-
-New Plugins
------------
-
-Cliconf
-~~~~~~~
-
-- dellemc.enterprise_sonic.sonic - Use Ansible CLICONF to run commands on Enterprise SONiC.
-
-Httpapi
-~~~~~~~
-
-- dellemc.enterprise_sonic.sonic - Use Ansible HTTPAPI to run commands on Enterprise SONiC.
-
-New Modules
------------
-
-- dellemc.enterprise_sonic.sonic_api - Perform REST operations through the Management Framework REST API.
-- dellemc.enterprise_sonic.sonic_bgp - BGP resource module.
-- dellemc.enterprise_sonic.sonic_bgp_af - BGP AF resource module.
-- dellemc.enterprise_sonic.sonic_bgp_as_paths - BGP AS path resource module.
-- dellemc.enterprise_sonic.sonic_bgp_communities - BGP communities resource module.
-- dellemc.enterprise_sonic.sonic_bgp_ext_communities - BGP Ext communities resource module.
-- dellemc.enterprise_sonic.sonic_bgp_neighbors - BGP neighbors resource module.
-- dellemc.enterprise_sonic.sonic_bgp_neighbors_af - BGP neighbors AF resource module.
-- dellemc.enterprise_sonic.sonic_command - Run commands through Management Framework CLI.
-- dellemc.enterprise_sonic.sonic_config - Manage configuration through the Management Framework CLI.
-- dellemc.enterprise_sonic.sonic_interfaces - Interface resource module.
-- dellemc.enterprise_sonic.sonic_l2_interfaces - Layer 2 interface resource module.
-- dellemc.enterprise_sonic.sonic_l3_interfaces - Layer 3 interface resource module.
-- dellemc.enterprise_sonic.sonic_lag_interfaces - Link aggregation (LAG) resource module.
-- dellemc.enterprise_sonic.sonic_mclag - MCLAG resource module.
-- dellemc.enterprise_sonic.sonic_port_breakout - port breakout resource module.
-- dellemc.enterprise_sonic.sonic_users - USERS resource module.
-- dellemc.enterprise_sonic.sonic_vlans - VLAN resource module.
-- dellemc.enterprise_sonic.sonic_vrfs - VRF resource module.
-- dellemc.enterprise_sonic.sonic_vxlans - VxLAN EVPN resource module.
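The modules listed above share Ansible's standard resource-module pattern: a config section describing the desired settings plus a state controlling how they are applied. A minimal usage sketch (assuming the sonic_vlans vlan_id parameter; not an example from this changelog)::

    - name: Ensure VLAN 100 exists
      dellemc.enterprise_sonic.sonic_vlans:
        config:
          - vlan_id: 100
        state: merged
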
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/106-change-ntp-get-fact.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/106-change-ntp-get-fact.yaml
new file mode 100644
index 000000000..6232a4619
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/106-change-ntp-get-fact.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - change NTP facts gathering to get default parameters (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/106).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/107-change-ntp-key-values-in-regression-script.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/107-change-ntp-key-values-in-regression-script.yaml
new file mode 100644
index 000000000..83371c643
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/107-change-ntp-key-values-in-regression-script.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - change NTP key values in NTP regression test script (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/107).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/113-change-ntp-module-name.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/113-change-ntp-module-name.yaml
new file mode 100644
index 000000000..2760de791
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/113-change-ntp-module-name.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - change NTP module name (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/113).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/114-change-ntp-module-name-in-regression-script.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/114-change-ntp-module-name-in-regression-script.yaml
new file mode 100644
index 000000000..4db143be3
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/114-change-ntp-module-name-in-regression-script.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - change NTP module names in NTP regression test script (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/114).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/118-add-ntp-prefer-attribute.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/118-add-ntp-prefer-attribute.yaml
new file mode 100644
index 000000000..3f263712f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/118-add-ntp-prefer-attribute.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - Added prefer attribute to NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/118)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/119-lag_interfaces-port-name-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/119-lag_interfaces-port-name-fix.yaml
new file mode 100644
index 000000000..6db3bf2fa
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/119-lag_interfaces-port-name-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_lag_interfaces - Fixed port name issue (GitHub issue #153) (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/119)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/124-l2-interfaces-oc-yang-vlan-range-format-config-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/124-l2-interfaces-oc-yang-vlan-range-format-config-support.yaml
new file mode 100644
index 000000000..6713e8948
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/124-l2-interfaces-oc-yang-vlan-range-format-config-support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_l2_interfaces - Add support for parsing configuration containing the OC Yang vlan range syntax (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/124).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/128-add-several-attributes-to-interface-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/128-add-several-attributes-to-interface-resource-module.yaml
new file mode 100644
index 000000000..ee45fa81a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/128-add-several-attributes-to-interface-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_interfaces - Added speed, auto-negotiate, advertised-speed and FEC to interface resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/128)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/129-ntp-minpoll-maxpoll-config.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/129-ntp-minpoll-maxpoll-config.yaml
new file mode 100644
index 000000000..3d66d5bfe
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/129-ntp-minpoll-maxpoll-config.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - change NTP resource module to require that minpoll and maxpoll be configured together (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/129).
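As a hedged illustration of the "configured together" requirement above (the servers/address/minpoll/maxpoll attribute layout is an assumption about the sonic_ntp argspec, not taken from this fragment)::

    - name: Configure an NTP server with minpoll and maxpoll supplied as a pair
      dellemc.enterprise_sonic.sonic_ntp:
        config:
          servers:
            - address: 10.11.0.1
              minpoll: 5   # assumed attribute names; expected to be
              maxpoll: 9   # specified together per the entry above
        state: merged
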
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/130-vxlans-attribute-restriction-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/130-vxlans-attribute-restriction-fix.yaml
new file mode 100644
index 000000000..d0e1bb1d3
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/130-vxlans-attribute-restriction-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_vxlans - Removed required_together restriction for evpn_nvo and source_ip attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/130)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/141-add-route-distinguisher-target-attributes-to-bgp-af-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/141-add-route-distinguisher-target-attributes-to-bgp-af-module.yaml
new file mode 100644
index 000000000..09ed96844
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/141-add-route-distinguisher-target-attributes-to-bgp-af-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_af - Added several attributes to support configuration of route distinguisher and route target (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/141).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/145-mclag-new-attributes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/145-mclag-new-attributes.yaml
new file mode 100644
index 000000000..eb4649894
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/145-mclag-new-attributes.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_mclag - Added delay_restore, gateway_mac, and peer_gateway attributes to module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/145)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/146-vrf-mgmt-bug-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/146-vrf-mgmt-bug-fix.yaml
new file mode 100644
index 000000000..c7e95a1cd
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/146-vrf-mgmt-bug-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_vrfs - Added tasks as a workaround for the mgmt VRF bug (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/146)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/148-dhcp-relay-unit-tests.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/148-dhcp-relay-unit-tests.yaml
new file mode 100644
index 000000000..cf62e4b32
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/148-dhcp-relay-unit-tests.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_dhcp_relay - Added a common unit tests module and unit tests for dhcp relay module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/148)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/149-l2-interfaces-vlan-trunk-range-support-for-playbooks.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/149-l2-interfaces-vlan-trunk-range-support-for-playbooks.yaml
new file mode 100644
index 000000000..515cf9975
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/149-l2-interfaces-vlan-trunk-range-support-for-playbooks.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_l2_interfaces - Add support for specifying vlan trunk ranges in Ansible playbooks (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/149).
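A hedged sketch of the playbook-side vlan trunk range syntax referenced above (the name/trunk/allowed_vlans/vlan key layout is an assumption about the sonic_l2_interfaces argspec)::

    - name: Allow a VLAN range on a trunk interface (illustrative only)
      dellemc.enterprise_sonic.sonic_l2_interfaces:
        config:
          - name: Ethernet20
            trunk:
              allowed_vlans:
                - vlan: "10-20"   # range form assumed per the entry above
        state: merged
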
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/150-replaced-overridden-for-logging-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/150-replaced-overridden-for-logging-resource-module.yaml
new file mode 100644
index 000000000..7f151d860
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/150-replaced-overridden-for-logging-resource-module.yaml
@@ -0,0 +1,2 @@
+major_changes:
+ - sonic_logging - Added replaced and overridden states support for logging resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/150)
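To illustrate what the new overridden state implies for this module (the remote_servers/host parameter names are assumptions, not taken from this fragment), a task of roughly this shape would make the device's logging configuration match the play exactly, removing anything not listed::

    - name: Override remote syslog servers (illustrative sketch)
      dellemc.enterprise_sonic.sonic_logging:
        config:
          remote_servers:
            - host: 10.11.0.2   # assumed parameter name
        state: overridden
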
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/151-replaced-overridden-for-ntp-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/151-replaced-overridden-for-ntp-resource-module.yaml
new file mode 100644
index 000000000..96a38748a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/151-replaced-overridden-for-ntp-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - Added replaced and overridden states support for NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/151)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/152-copp.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/152-copp.yaml
new file mode 100644
index 000000000..d05370fb7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/152-copp.yaml
@@ -0,0 +1,2 @@
+major_changes:
+ - Added copp module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/152).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/156-replaced-overridden-for-vrfs-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/156-replaced-overridden-for-vrfs-resource-module.yaml
new file mode 100644
index 000000000..e35488b32
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/156-replaced-overridden-for-vrfs-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_vrfs - Added replaced and overridden states support for VRF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/156)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/157-mac.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/157-mac.yaml
new file mode 100644
index 000000000..56b2919ae
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/157-mac.yaml
@@ -0,0 +1,2 @@
+major_changes:
+ - Added mac module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/157).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/159-replaced-overridden-for-system-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/159-replaced-overridden-for-system-resource-module.yaml
new file mode 100644
index 000000000..e0a239933
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/159-replaced-overridden-for-system-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_system - Added replaced and overridden states support for system resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/159)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/160-sanity-check-errors-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/160-sanity-check-errors-fix.yaml
new file mode 100644
index 000000000..dc874ed24
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/160-sanity-check-errors-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fixed sanity check errors in the collection caused by Ansible library changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/160).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/172-module-utils-import-remove-empties-from-ansible-lib.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/172-module-utils-import-remove-empties-from-ansible-lib.yaml
new file mode 100644
index 000000000..136cfaf93
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/172-module-utils-import-remove-empties-from-ansible-lib.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - module_utils - Change the location for importing remove_empties from the obsolete Netcommon location to the officially required Ansible library location to fix sanity errors (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/172).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/176-update-netcommon-version.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/176-update-netcommon-version.yaml
new file mode 100644
index 000000000..083ad868e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/176-update-netcommon-version.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - install - Updated the required ansible.netcommon version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/176)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/177-bfd.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/177-bfd.yaml
new file mode 100644
index 000000000..e18973959
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/177-bfd.yaml
@@ -0,0 +1,2 @@
+major_changes:
+ - Added bfd module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/177).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/180-regression-failures-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/180-regression-failures-fix.yaml
new file mode 100644
index 000000000..6511f8878
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/180-regression-failures-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/180).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/182-unit-tests-for-bgp-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/182-unit-tests-for-bgp-resource-module.yaml
new file mode 100644
index 000000000..f6627f6ed
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/182-unit-tests-for-bgp-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp - Add unit tests for BGP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/182)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/183-unit-tests-for-bgp-af-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/183-unit-tests-for-bgp-af-resource-module.yaml
new file mode 100644
index 000000000..032bb3308
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/183-unit-tests-for-bgp-af-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_af - Add unit tests for BGP AF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/183)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/184-unit-tests-for-bgp-as-paths-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/184-unit-tests-for-bgp-as-paths-resource-module.yaml
new file mode 100644
index 000000000..5e008055f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/184-unit-tests-for-bgp-as-paths-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_as_paths - Add unit tests for BGP AS paths resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/184)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/185-unit-tests-for-bgp-communities-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/185-unit-tests-for-bgp-communities-resource-module.yaml
new file mode 100644
index 000000000..1bac125de
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/185-unit-tests-for-bgp-communities-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_communities - Add unit tests for BGP communities resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/185)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/186-unit-tests-for-bgp-ext-communities-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/186-unit-tests-for-bgp-ext-communities-resource-module.yaml
new file mode 100644
index 000000000..531e29e18
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/186-unit-tests-for-bgp-ext-communities-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_ext_communities - Add unit tests for BGP ext communities resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/186)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/187-unit-tests-for-bgp-neighbors-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/187-unit-tests-for-bgp-neighbors-resource-module.yaml
new file mode 100644
index 000000000..4c72e6f0e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/187-unit-tests-for-bgp-neighbors-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_neighbors - Add unit tests for BGP neighbors resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/187)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/188-unit-tests-for-bgp-neighbors-af-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/188-unit-tests-for-bgp-neighbors-af-resource-module.yaml
new file mode 100644
index 000000000..aa63260d9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/188-unit-tests-for-bgp-neighbors-af-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_neighbors_af - Add unit tests for BGP neighbors AF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/188)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/191-unit-tests-for-bgp-af-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/191-unit-tests-for-bgp-af-resource-module.yaml
new file mode 100644
index 000000000..d0dfb74d0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/191-unit-tests-for-bgp-af-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_af - Add unit tests for BGP AF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/191)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/195-aaa-login-authentication.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/195-aaa-login-authentication.yaml
new file mode 100644
index 000000000..45a8ad91d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/195-aaa-login-authentication.yaml
@@ -0,0 +1,2 @@
+breaking_changes:
+ - aaa - Added default_auth attribute to the argspec to replace the deleted group and local attributes. This change allows for ordered login authentication (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/195).
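A hedged sketch of ordered login authentication with the new default_auth list (the authentication/data nesting and the method names are assumptions about the sonic_aaa argspec of this era)::

    - name: Try RADIUS first, then fall back to local login (illustrative)
      dellemc.enterprise_sonic.sonic_aaa:
        config:
          authentication:
            data:
              default_auth:
                - radius
                - local
        state: merged
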
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/196-replaced-overridden-for-lag-interface-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/196-replaced-overridden-for-lag-interface-resource-module.yaml
new file mode 100644
index 000000000..7fcd9bbc0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/196-replaced-overridden-for-lag-interface-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_lag_interface - Added replaced and overridden states support for LAG interface resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/196)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/197-unit-tests-for-interfaces-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/197-unit-tests-for-interfaces-resource-module.yaml
new file mode 100644
index 000000000..d89d54eca
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/197-unit-tests-for-interfaces-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_interfaces - Add unit tests for interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/197)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/198-unit-tests-for-aaa-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/198-unit-tests-for-aaa-resource-module.yaml
new file mode 100644
index 000000000..6661f4eea
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/198-unit-tests-for-aaa-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_aaa - Add unit tests for AAA resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/198)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/199-code-coverage-workflow-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/199-code-coverage-workflow-fix.yaml
new file mode 100644
index 000000000..0e32dd4a8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/199-code-coverage-workflow-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - workflows - Fixed dependency installation issue in the code coverage workflow (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/199)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/200-unit-tests-for-l2-interfaces-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/200-unit-tests-for-l2-interfaces-resource-module.yaml
new file mode 100644
index 000000000..7ffb3592e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/200-unit-tests-for-l2-interfaces-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_l2_interfaces - Add unit tests for l2_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/200)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/201-bgp-af-modify-vni-advertise-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/201-bgp-af-modify-vni-advertise-fix.yaml
new file mode 100644
index 000000000..f59fa5991
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/201-bgp-af-modify-vni-advertise-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_bgp_af - Fixed issue with vnis and advertise modification for a single BGP AF (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/201)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/202-unit-tests-for-l3-interfaces-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/202-unit-tests-for-l3-interfaces-resource-module.yaml
new file mode 100644
index 000000000..7ffd76899
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/202-unit-tests-for-l3-interfaces-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_l3_interfaces - Add unit tests for l3_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/202)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/203-unit-tests-for-lag-interfaces-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/203-unit-tests-for-lag-interfaces-resource-module.yaml
new file mode 100644
index 000000000..8a8b99a36
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/203-unit-tests-for-lag-interfaces-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_lag_interfaces - Add unit tests for lag_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/203)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/207-unit-tests-for-ntp-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/207-unit-tests-for-ntp-resource-module.yaml
new file mode 100644
index 000000000..c84312e11
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/207-unit-tests-for-ntp-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ntp - Add unit tests for NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/207)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/208-unit-tests-for-tacacs-server-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/208-unit-tests-for-tacacs-server-resource-module.yaml
new file mode 100644
index 000000000..a1241cf55
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/208-unit-tests-for-tacacs-server-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_tacacs_server - Add unit tests for TACACS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/208)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/209-unit-tests-for-prefix-lists-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/209-unit-tests-for-prefix-lists-resource-module.yaml
new file mode 100644
index 000000000..bd4dd0cc7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/209-unit-tests-for-prefix-lists-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_prefix_lists - Add unit tests for prefix lists resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/209)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/210-unit-tests-for-radius-server-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/210-unit-tests-for-radius-server-resource-module.yaml
new file mode 100644
index 000000000..db3f82dc1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/210-unit-tests-for-radius-server-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_radius_server - Add unit tests for RADIUS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/210)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/212-unit-tests-for-static-routes-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/212-unit-tests-for-static-routes-resource-module.yaml
new file mode 100644
index 000000000..9fa01a76f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/212-unit-tests-for-static-routes-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_static_routes - Add unit tests for static routes resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/212)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/213-unit-tests-for-users-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/213-unit-tests-for-users-resource-module.yaml
new file mode 100644
index 000000000..45e2b5221
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/213-unit-tests-for-users-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_users - Add unit tests for users resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/213)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/214-unit-tests-vlans-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/214-unit-tests-vlans-resource-module.yaml
new file mode 100644
index 000000000..71e4b0014
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/214-unit-tests-vlans-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_vlans - Add unit tests for Vlans resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/214)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/215-unit-tests-for-vxlans-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/215-unit-tests-for-vxlans-resource-module.yaml
new file mode 100644
index 000000000..70927073e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/215-unit-tests-for-vxlans-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_vxlans - Add unit tests for VxLans resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/215)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/216-unit-tests-for-vrfs-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/216-unit-tests-for-vrfs-resource-module.yaml
new file mode 100644
index 000000000..bdd6f6314
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/216-unit-tests-for-vrfs-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_vrfs - Add unit tests for VRFS resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/216)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/217-replaced-overridden-for-vlans-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/217-replaced-overridden-for-vlans-resource-module.yaml
new file mode 100644
index 000000000..6627b44b1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/217-replaced-overridden-for-vlans-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_vlans - Added replaced and overridden states support for VLAN resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/217)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/218-unit-tests-for-api-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/218-unit-tests-for-api-resource-module.yaml
new file mode 100644
index 000000000..31ca08e44
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/218-unit-tests-for-api-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_api - Add unit tests for api resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/218)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/219-unit-tests-for-command-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/219-unit-tests-for-command-resource-module.yaml
new file mode 100644
index 000000000..2eaafc1e8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/219-unit-tests-for-command-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_command - Add unit tests for command resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/219)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/220-unit-tests-for-config-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/220-unit-tests-for-config-resource-module.yaml
new file mode 100644
index 000000000..46210e167
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/220-unit-tests-for-config-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_config - Add unit tests for config resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/220)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/221-l2-interfaces-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/221-l2-interfaces-replaced-overridden-support.yaml
new file mode 100644
index 000000000..25b8da767
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/221-l2-interfaces-replaced-overridden-support.yaml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - sonic_l2_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/221).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/222-unit-tests-for-facts-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/222-unit-tests-for-facts-resource-module.yaml
new file mode 100644
index 000000000..0f03fc697
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/222-unit-tests-for-facts-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_facts - Add unit tests for facts resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/222)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/223-unit-tests-for-system-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/223-unit-tests-for-system-resource-module.yaml
new file mode 100644
index 000000000..03759ac63
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/223-unit-tests-for-system-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_system - Add unit tests for system resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/223)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/224-ntp-clear-all-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/224-ntp-clear-all-fix.yaml
new file mode 100644
index 000000000..fcc0e16fe
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/224-ntp-clear-all-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_ntp - Fixed the issue (GitHub issue #205) with NTP 'clear all' when no config is given (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/224)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/225-unit-tests-for-ip-neighbor-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/225-unit-tests-for-ip-neighbor-resource-module.yaml
new file mode 100644
index 000000000..32f04ca0c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/225-unit-tests-for-ip-neighbor-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ip_neighbor - Add unit tests for IP neighbor resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/225)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/226-unit-tests-for-logging-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/226-unit-tests-for-logging-resource-module.yaml
new file mode 100644
index 000000000..575bbc6f1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/226-unit-tests-for-logging-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_logging - Add unit tests for logging resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/226)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/227-replaced-overridden-for-port-group-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/227-replaced-overridden-for-port-group-resource-module.yaml
new file mode 100644
index 000000000..dab5dce82
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/227-replaced-overridden-for-port-group-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_port_group - Added replaced and overridden states support for port group resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/227)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/228-unit-tests-for-port-group-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/228-unit-tests-for-port-group-resource-module.yaml
new file mode 100644
index 000000000..140d4ef59
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/228-unit-tests-for-port-group-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_port_group - Add unit tests for port group resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/228)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/229-unit-tests-for-port-breakout-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/229-unit-tests-for-port-breakout-resource-module.yaml
new file mode 100644
index 000000000..bcb668c7d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/229-unit-tests-for-port-breakout-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_port_breakout - Add unit tests for port breakout resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/229)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/230-vrfs-delete-interface-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/230-vrfs-delete-interface-fix.yaml
new file mode 100644
index 000000000..0a45fdb39
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/230-vrfs-delete-interface-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_vrfs - Fixed the issue (GitHub issue #194) with VRFs when deleting an interface (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/230)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/231-l3-interfaces-delete-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/231-l3-interfaces-delete-fix.yaml
new file mode 100644
index 000000000..4a824d92b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/231-l3-interfaces-delete-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_l3_interfaces - Fixed IP address deletion issue (GitHub issue #170) (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/231)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/233-bgp-neighbors-defaults-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/233-bgp-neighbors-defaults-fix.yaml
new file mode 100644
index 000000000..a446deeef
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/233-bgp-neighbors-defaults-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_neighbors - Fixed handling of default attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/233)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/235-replaced-overridden-for-tacacs-server-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/235-replaced-overridden-for-tacacs-server-resource-module.yaml
new file mode 100644
index 000000000..299c9ddc5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/235-replaced-overridden-for-tacacs-server-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_tacacs_server - Add replaced and overridden states support for TACACS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/235)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/236-static-routes-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/236-static-routes-replaced-overridden-support.yaml
new file mode 100644
index 000000000..520745a72
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/236-static-routes-replaced-overridden-support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_static_routes - Added support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/236)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/237-aaa-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/237-aaa-replaced-overridden-support.yaml
new file mode 100644
index 000000000..99adf5771
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/237-aaa-replaced-overridden-support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_aaa - Added replaced and overridden states support for AAA resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/237)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/239-replaced-overridden-for-radius-server-resource-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/239-replaced-overridden-for-radius-server-resource-module.yaml
new file mode 100644
index 000000000..5a33f41f6
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/239-replaced-overridden-for-radius-server-resource-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_radius_server - Added replaced and overridden states support for RADIUS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/239)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/240-bgp-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/240-bgp-replaced-overridden-support.yaml
new file mode 100644
index 000000000..391feb478
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/240-bgp-replaced-overridden-support.yaml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - sonic_bgp - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/240).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/242-users-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/242-users-replaced-overridden-support.yaml
new file mode 100644
index 000000000..41d7490de
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/242-users-replaced-overridden-support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_users - Add replaced and overridden states support for users resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/242)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/244-added-rt-delay-attribute-to-bgp-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/244-added-rt-delay-attribute-to-bgp-module.yaml
new file mode 100644
index 000000000..002bb57ef
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/244-added-rt-delay-attribute-to-bgp-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp - Added rt_delay attribute to module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/244)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/245-enhance-bgp-neighbors-unit-tests.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/245-enhance-bgp-neighbors-unit-tests.yaml
new file mode 100644
index 000000000..0898bf4b5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/245-enhance-bgp-neighbors-unit-tests.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bgp_neighbors - Enhance unit tests for BGP Neighbors resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/245)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/246-bgp-af-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/246-bgp-af-replaced-overridden-support.yaml
new file mode 100644
index 000000000..c749422fb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/246-bgp-af-replaced-overridden-support.yaml
@@ -0,0 +1,3 @@
+---
+minor_changes:
+ - sonic_bgp_af - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/246).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/247-vxlans-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/247-vxlans-replaced-overridden-support.yaml
new file mode 100644
index 000000000..3e395a184
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/247-vxlans-replaced-overridden-support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_vxlans - Added support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/247)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/249-dhcp-relay-replaced-overridden-support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/249-dhcp-relay-replaced-overridden-support.yaml
new file mode 100644
index 000000000..72912650a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/249-dhcp-relay-replaced-overridden-support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_dhcp_relay - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/249).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/250-bgp-as-paths-fix-merged-deleted.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/250-bgp-as-paths-fix-merged-deleted.yaml
new file mode 100644
index 000000000..df69660e5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/250-bgp-as-paths-fix-merged-deleted.yaml
@@ -0,0 +1,3 @@
+---
+bugfixes:
+ - sonic_bgp_as_paths - Fix issues with merged and deleted states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/250)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/253-change-replaced-function-for-ip-neighbor-module.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/253-change-replaced-function-for-ip-neighbor-module.yaml
new file mode 100644
index 000000000..996c5ba80
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/253-change-replaced-function-for-ip-neighbor-module.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_ip_neighbor - Change the replaced function in ip_neighbor resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/253)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/254-update-replace-methods.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/254-update-replace-methods.yaml
new file mode 100644
index 000000000..0e20c7ecf
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/254-update-replace-methods.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_bfd, sonic_copp - Updated replaced methods (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/254)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/255-prefix_lists_replaced_overridden_support.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/255-prefix_lists_replaced_overridden_support.yaml
new file mode 100644
index 000000000..a6d8731ed
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/255-prefix_lists_replaced_overridden_support.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_prefix_lists - Added support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/255)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/257-vrfs-cli-test-case-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/257-vrfs-cli-test-case-fix.yaml
new file mode 100644
index 000000000..629a1973b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/257-vrfs-cli-test-case-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_vrfs - Fixed spacing issue in CLI test case (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/257)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/258-change-logging-module-source-interface-naming.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/258-change-logging-module-source-interface-naming.yaml
new file mode 100644
index 000000000..bddc22b99
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/258-change-logging-module-source-interface-naming.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_logging - change logging get facts for source_interface naming (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/258).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/261-interfaces-timeout-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/261-interfaces-timeout-fix.yaml
new file mode 100644
index 000000000..73711a3d8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/261-interfaces-timeout-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_interfaces - Fixed command timeout issue (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/261)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/262-vlan-mapping-bug-fix.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/262-vlan-mapping-bug-fix.yaml
new file mode 100644
index 000000000..2a27ef3a7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/262-vlan-mapping-bug-fix.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - sonic_vlan_mapping - Removed platform checks (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/262)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/v2.1.0_summary.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/v2.1.0_summary.yaml
new file mode 100644
index 000000000..01ff50fbd
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.1.0/v2.1.0_summary.yaml
@@ -0,0 +1,8 @@
+release_summary: |
+ | Release Date: 2023-05-15
+ | This release provides enhanced Dell Enterprise SONiC Ansible Collection support for SONiC 4.x images.
+ | In addition to new resource modules to support previously existing functionality, it provides
+ | support for the "QinQ" (Vlan Mapping) function introduced with SONiC release 4.1. It also provides
+ | bug fixes and enhancements for support of features that were initially introduced in previous
+ | Enterprise SONiC Ansible releases. The changelog describes changes made to the modules and plugins
+ | included in this collection since release 2.0.0.
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/269-revert-aaa-breaking-changes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/269-revert-aaa-breaking-changes.yaml
new file mode 100644
index 000000000..9cd346678
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/269-revert-aaa-breaking-changes.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - sonic_aaa - Reverted breaking changes for AAA module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/269)
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/270-galaxy-yml-netcommon-and-version-fixes.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/270-galaxy-yml-netcommon-and-version-fixes.yaml
new file mode 100644
index 000000000..733f96f5a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/270-galaxy-yml-netcommon-and-version-fixes.yaml
@@ -0,0 +1,2 @@
+minor_changes:
+ - galaxy_yml - Enable installation of Ansible Netcommon versions after 5.0.0 and update the enterprise_sonic release version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/270).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/v2.2.0_summary.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/v2.2.0_summary.yaml
new file mode 100644
index 000000000..32016bf0e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.2.0/v2.2.0_summary.yaml
@@ -0,0 +1,15 @@
+release_summary: |
+ | Release Date: 2023-05-31
+ | This release provides Ansible compliance changes required on top of the changes included in
+ | the 2.1.0 release of the enterprise_sonic Ansible network resource module collection.
+ | It addresses two issues raised by the Ansible core team with the content of the 2.1.0 release.
+ | 1) Back out the "breaking_change" made in the sonic_aaa resource module to fix a functional
+ | shortcoming in the enterprise_sonic Ansible collection. Although the change is still needed,
+ | it will be deferred to a "major" release.
+ | 2) Re-enable installation of new Ansible Netcommon repo instances when installing the
+ | enterprise_sonic Ansible collection. The 2.1.0 enterprise_sonic Ansible release included a
+ | workaround for a bug introduced in the 5.0.0 version of the Ansible Netcommon repo. This
+ | workaround was implemented in the "galaxy.yml" file for the enterprise_sonic
+ | 2.1.0 release. New versions of Ansible Netcommon were published after the problematic 5.0.0
+ | version and the revised "galaxy.yml" file for this release enables installation of these
+ | newer versions.
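The netcommon change described in this summary lives in the collection's galaxy.yml dependency specifier rather than in any module code. The exact specifier shipped in the release is not shown in this diff; the lines below are only a hypothetical sketch of the kind of constraint involved, with the version range chosen purely for illustration:

dependencies:
  # Hypothetical specifier: rely on Galaxy serving ansible.netcommon releases
  # newer than the problematic 5.0.0 instead of pinning below it.
  ansible.netcommon: '>=2.0.0'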
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/321-requirements-update-meta-runtime-ansible-version.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/321-requirements-update-meta-runtime-ansible-version.yaml
new file mode 100644
index 000000000..f12ddb105
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/321-requirements-update-meta-runtime-ansible-version.yaml
@@ -0,0 +1,2 @@
+bugfixes:
+ - requirements - Update requires_ansible version in meta/runtime.yml to the oldest supported version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/321).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/322-docs-README-updates.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/322-docs-README-updates.yaml
new file mode 100644
index 000000000..6404045ed
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/322-docs-README-updates.yaml
@@ -0,0 +1,2 @@
+trivial:
+ - requirements - Update the README file to document required Ansible and Python updates for this release (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/322).
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/v2.4.0_summary.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/v2.4.0_summary.yaml
new file mode 100644
index 000000000..54bb92870
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/archive_fragments/2.4.0/v2.4.0_summary.yaml
@@ -0,0 +1,15 @@
+release_summary: |
+ | Release Date: 2024-01-08
+ | This release provides an Ansible compliance change required on top of the changes included in
+ | the 2.3.0 release of the enterprise_sonic Ansible network resource module collection.
+ | It addresses an issue raised by the Ansible core team with the content of the 2.3.0 release,
+ | and provides accompanying documentation changes in the README file. Additional details are
+ | described below.
+ | 1) Update the "requires_ansible" version in the meta/runtime.yml file for this collection
+ | to the oldest supported version of ansible-core. (This was recently changed by Redhat/Ansible
+ | to version "2.14.0".)
+ | 2) Update the README file "Recommended version" values for Ansible and Python in accordance
+ | with the previous change item to modify the oldest supported version of ansible-core which,
+ | in turn, requires a Python version >= "3.9".
+ | 3) Update the list of resource modules in the README file to include all currently available
+ | resource modules for this collection.
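Item 1 of this summary amounts to a single setting in the collection's meta/runtime.yml. A minimal sketch of that setting, using the ansible-core version quoted above (the rest of the file is omitted):

# meta/runtime.yml (excerpt)
requires_ansible: '>=2.14.0'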
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml
index 0ce34f5e4..6c6dd1b91 100644
--- a/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/changelog.yaml
@@ -2,88 +2,93 @@ ancestor: null
releases:
1.0.0:
modules:
- - description: Perform REST operations through the Management Framework REST API.
+ - description: Manages REST operations on devices running Enterprise SONiC
name: sonic_api
namespace: ''
- - description: BGP resource module.
+ - description: Manage global BGP and its parameters
name: sonic_bgp
namespace: ''
- - description: BGP AF resource module.
+ - description: Manage global BGP address-family and its parameters
name: sonic_bgp_af
namespace: ''
- - description: BGP AS path resource module.
+ - description: Manage BGP autonomous system path (or as-path-list) and its parameters
name: sonic_bgp_as_paths
namespace: ''
- - description: BGP communities resource module.
+ - description: Manage BGP community and its parameters
name: sonic_bgp_communities
namespace: ''
- - description: BGP Ext communities resource module.
+ - description: Manage BGP extended community-list and its parameters
name: sonic_bgp_ext_communities
namespace: ''
- - description: BGP neighbors resource module.
+ - description: Manage a BGP neighbor and its parameters
name: sonic_bgp_neighbors
namespace: ''
- - description: BGP neighbors AF resource module.
+ - description: Manage the BGP neighbor address-family and its parameters
name: sonic_bgp_neighbors_af
namespace: ''
- - description: Run commands through Management Framework CLI.
+ - description: Runs commands on devices running Enterprise SONiC
name: sonic_command
namespace: ''
- - description: Manage configuration through the Management Framework CLI.
+ - description: Manages configuration sections on devices running Enterprise SONiC
name: sonic_config
namespace: ''
- - description: Interface resource module.
+ - description: Configure Interface attributes on interfaces such as, Eth, LAG,
+ VLAN, and loopback. (create a loopback interface if it does not exist.)
name: sonic_interfaces
namespace: ''
- - description: Layer 2 interface resource module.
+ - description: Configure interface-to-VLAN association that is based on access
+ or trunk mode
name: sonic_l2_interfaces
namespace: ''
- - description: Layer 3 interface resource module.
+ - description: Configure the IPv4 and IPv6 parameters on Interfaces such as, Eth,
+ LAG, VLAN, and loopback
name: sonic_l3_interfaces
namespace: ''
- - description: Link aggregation (LAG) resource module.
+ - description: Manage link aggregation group (LAG) interface parameters
name: sonic_lag_interfaces
namespace: ''
- - description: MCLAG resource module.
+ - description: Manage multi chassis link aggregation groups domain (MCLAG) and
+ its parameters
name: sonic_mclag
namespace: ''
- - description: port breakout resource module.
+ - description: Configure port breakout settings on physical interfaces
name: sonic_port_breakout
namespace: ''
- - description: USERS resource module.
+ - description: Manage users and its parameters
name: sonic_users
namespace: ''
- - description: VLAN resource module.
+ - description: Manage VLAN and its parameters
name: sonic_vlans
namespace: ''
- - description: VRF resource module.
+ - description: Manage VRFs and associate VRFs to interfaces such as, Eth, LAG,
+ VLAN, and loopback
name: sonic_vrfs
namespace: ''
- - description: VxLAN EVPN resource module.
+ - description: Manage VxLAN EVPN and its parameters
name: sonic_vxlans
namespace: ''
plugins:
cliconf:
- - description: Use Ansible CLICONF to run commands on Enterprise SONiC.
+ - description: Use sonic cliconf to run command on Dell OS10 platform
name: sonic
namespace: null
httpapi:
- - description: Use Ansible HTTPAPI to run commands on Enterprise SONiC.
+ - description: HttpApi Plugin for devices supporting Restconf SONIC API
name: sonic
namespace: null
release_date: '2020-12-07'
1.1.0:
modules:
- - description: AAA resource module.
+ - description: Manage AAA and its parameters
name: sonic_aaa
namespace: ''
- - description: RADIUS resource module.
+ - description: Manage RADIUS server and its parameters
name: sonic_radius_server
namespace: ''
- - description: SYSTEM resource module.
+ - description: Configure system parameters
name: sonic_system
namespace: ''
- - description: TACACS Server resource module.
+ - description: Manage TACACS server and its parameters
name: sonic_tacacs_server
namespace: ''
release_date: '2021-05-28'
@@ -95,63 +100,57 @@ releases:
on the 1.x branch of this collection. This change corresponds to a SONiC 4.0
OC YANG REST compliance change for the BGP AF REST API. It enables specification
of a route map in conjunction with each route advertisement prefix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/63).
- - bgp_af - remove the obsolete 'advertise_prefix' attribute from argspec and
+ - bgp_af - Remove the obsolete 'advertise_prefix' attribute from argspec and
config code. This and subsequent co-req replacement with the new route advertise
    list argument structure require corresponding changes in playbooks previously
- used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60)
+ used for configuring route advertise prefixes for SONiC 3.x images. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60).
- bgp_neighbors - Replace the previously defined standalone "bfd" attribute
with a bfd dictionary containing multiple attributes. This change corresponds
to the revised SONiC 4.x implementation of OC YANG compatible REST APIs. Playbooks
previously using the bfd attributes for SONiC 3.x images must be modified
- for useon SONiC 4.0 images to use the new definition for the bfd attribute
+ for use on SONiC 4.0 images to use the new definition for the bfd attribute
argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
- bgp_neighbors - Replace, for BGP peer groups, the previously defined standalone
"bfd" attribute with a bfd dictionary containing multiple attributes. This
change corresponds to the revised SONiC 4.x implementation of OC YANG compatible
REST APIs. Playbooks previously using the bfd attributes for SONiC 3.x images
- must be modified for useon SONiC 4.0 images to use the new definition for
+ must be modified for use on SONiC 4.0 images to use the new definition for
the bfd attribute argspec structure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
bugfixes:
- Fixed regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103).
- Fixed regression test sequencing and other regression test bugs in multiple
modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/85).
- - aaa - Fixed a bug in facts gathering by providing required conditional branching
- (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90)
- - aaa - Modify regression test sequencing to enable correct testing of the functionality
- for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78).
- - bgp_neighbors - remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60)
+ - bgp_neighbors - Remove string conversion of timer attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/60).
- port_breakout - Fixed a bug in formulation of port breakout REST APIs (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
- sonic - Fix a bug in handling of interface names in standard interface naming
mode (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/103).
+ - sonic_aaa - Fix a bug in facts gathering by providing required conditional
+ branching (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
+ - sonic_aaa - Modify regression test sequencing to enable correct testing of
+ the functionality for this module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/78).
- sonic_command - Fix bugs in handling of CLI commands involving a prompt and
answer sequence (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/76/files).
- users - Fixed a bug in facts gathering (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
- vxlan - update Vxlan test cases to comply with SONiC behavior (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/105).
- major_changes:
- - Added 'static_routes' module to collection (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/82).
- - Added a resource module for NTP support (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/99).
- - Added a resource module for support of prefix lists (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/100).
- - Updated backend REST API request formats in all applicable modules for compatibility
- with SONiC 4.x openconfig YANG compliant REST APIs. (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/53)
minor_changes:
- - Added an execution-environment.yml file to the "meta" directory to enable
- use of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
- - bgp_af - Added support for BGP options to configure usage and advertisement
+ - Add an execution-environment.yml file to the "meta" directory to enable use
+ of Ansible execution environment infrastructure (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/88).
+ - bgp_af - Add support for BGP options to configure usage and advertisement
of vxlan primary IP address related attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/62).
- - bgp_as_paths - updated module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102)
+ - bgp_as_paths - Update module examples with 'permit' attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102).
- bgp_neighbors - Add BGP peer group support for multiple attributes. The added
attributes correspond to the same set of attributes added for BGP neighbors
with PR 72 (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/81).
- - bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
- - bgp_neighbors - add an auth_pwd dictionary and nbr_description attribute to
+ - bgp_neighbors - Add an auth_pwd dictionary and nbr_description attribute to
the argspec (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/67).
- - bgp_neighbors - added prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
- - bgp_neighbors_af - added prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
- - playbook - updated examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102)
+ - bgp_neighbors - Add prefix-list related peer-group attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
+ - bgp_neighbors - Add support for multiple attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/72).
+ - bgp_neighbors_af - Add prefix-list related neighbor attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/101).
+ - playbook - Update examples to reflect module changes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/102).
- sonic_vxlans - Add configuration capability for the primary IP address of
    a vxlan vtep to facilitate vxlan path redundancy (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/58).
- - vlans - Added support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98).
- - workflow - Added stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
+ - vlans - Add support for the vlan "description" attribute (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/98).
+ - workflow - Add stable-2.13 to the sanity test matrix (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/90).
release_summary: 'This release provides Dell SONiC Enterprise Ansible Collection
support for SONiC 4.x images. It is the first release for the 2.x branch of
the collection. Subsequent enhancements for support of SONiC 4.x images will
@@ -195,3 +194,530 @@ releases:
name: sonic_static_routes
namespace: ''
release_date: '2022-09-02'
+ 2.1.0:
+ changes:
+ breaking_changes:
+ - sonic_aaa - Add default_auth attribute to the argspec to replace the deleted
+ group and local attributes. This change allows for ordered login authentication.
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/195).
+ bugfixes:
+ - Fix regression test bugs in multiple modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/180).
+ - Fix sanity check errors in the collection caused by Ansible library changes
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/160).
+ - install - Update the required ansible.netcommon version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/176).
+ - sonic_bgp_af - Fix issue with vnis and advertise modification for a single
+ BGP AF (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/201).
+ - sonic_bgp_as_paths - Fix issues with merged and deleted states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/250).
+ - sonic_interfaces - Fix command timeout issue (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/261).
+ - sonic_l3_interfaces - Fix IP address deletion issue (GitHub issue#170) (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/231).
+ - sonic_lag_interfaces - Fix port name issue (GitHub issue#153) (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/119).
+ - sonic_neighbors - Fix handling of default attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/233).
+ - sonic_ntp - Fix the issue (GitHub issue#205) with NTP clear all without config
+ given (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/224).
+ - sonic_vlan_mapping - Remove platform checks (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/262).
+ - sonic_vrfs - Add tasks as a workaround to mgmt VRF bug (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/146).
+ - sonic_vrfs - Fix spacing issue in CLI test case (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/257).
+ - sonic_vrfs - Fix the issue (GitHub issue#194) with VRF when deleting interface (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/230).
+ - sonic_vxlans - Remove required_together restriction for evpn_nvo and source_ip
+ attributes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/130).
+ - workflows - Fix dependency installation issue in the code coverage workflow
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/199).
+ minor_changes:
+ - module_utils - Change the location for importing remove_empties from the obsolete
+ Netcommon location to the officially required Ansible library location to fix
+ sanity errors (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/172).
+ - sonic_aaa - Add replaced and overridden states support for AAA resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/237).
+ - sonic_aaa - Add unit tests for AAA resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/198).
+ - sonic_api - Add unit tests for api resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/218).
+ - sonic_bfd, sonic_copp - Update replaced methods (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/254).
+ - sonic_bgp - Add rt_delay attribute to module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/244).
+ - sonic_bgp - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/240).
+ - sonic_bgp - Add unit tests for BGP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/182).
+ - sonic_bgp_af - Add several attributes to support configuration of route distinguisher
+ and route target (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/141).
+ - sonic_bgp_af - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/246).
+ - sonic_bgp_af - Add unit tests for BGP AF resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/183).
+ - sonic_bgp_af - Modify BGP AF resource module unit tests to adjust for changes
+ in the resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/191).
+ - sonic_bgp_as_paths - Add unit tests for BGP AS paths resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/184).
+ - sonic_bgp_communities - Add unit tests for BGP communities resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/185).
+ - sonic_bgp_ext_communities - Add unit tests for BGP ext communities resource
+ module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/186).
+ - sonic_bgp_neighbors - Add unit tests for BGP neighbors resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/187).
+ - sonic_bgp_neighbors - Enhance unit tests for BGP Neighbors resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/245).
+ - sonic_bgp_neighbors_af - Add unit tests for BGP neighbors AF resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/188).
+ - sonic_command - Add unit tests for command resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/219).
+ - sonic_config - Add unit tests for config resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/220).
+ - sonic_dhcp_relay - Add a common unit tests module and unit tests for dhcp
+ relay module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/148).
+ - sonic_dhcp_relay - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/249).
+ - sonic_facts - Add unit tests for facts resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/222).
+ - sonic_interfaces - Add speed, auto-negotiate, advertised-speed and FEC to
+ interface resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/128).
+ - sonic_interfaces - Add unit tests for interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/197).
+ - sonic_ip_neighbor - Add unit tests for IP neighbor resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/225).
+ - sonic_ip_neighbor - Change the replaced function in ip_neighbor resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/253).
+ - sonic_l2_interfaces - Add support for parsing configuration containing the
+ OC Yang vlan range syntax (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/124).
+ - sonic_l2_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/221).
+ - sonic_l2_interfaces - Add support for specifying vlan trunk ranges in Ansible
+ playbooks (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/149).
+ - sonic_l2_interfaces - Add unit tests for l2_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/200).
+ - sonic_l3_interfaces - Add unit tests for l3_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/202).
+ - sonic_lag_interface - Add replaced and overridden states support for LAG interface
+ resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/196).
+ - sonic_lag_interfaces - Add unit tests for lag_interfaces resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/203).
+ - sonic_logging - Add replaced and overridden states support for logging resource
+ module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/150).
+ - sonic_logging - Add unit tests for logging resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/226).
+ - sonic_logging - Change logging get facts for source_interface naming (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/258).
+ - sonic_mclag - Add delay_restore, gateway_mac, and peer_gateway attributes
+ to module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/145).
+ - sonic_ntp - Add prefer attribute to NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/118).
+ - sonic_ntp - Add replaced and overridden states support for NTP resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/151).
+ - sonic_ntp - Add unit tests for NTP resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/207).
+ - sonic_ntp - Change NTP get facts to get default parameters (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/106).
+ - sonic_ntp - Change NTP key values in NTP regression test script (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/107).
+ - sonic_ntp - Change NTP module name (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/113).
+ - sonic_ntp - Change NTP module names in NTP regression test script (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/114).
+ - sonic_ntp - Change NTP resource module to make minpoll and maxpoll be configured
+ together (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/129).
+ - sonic_port_breakout - Add unit tests for port breakout resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/229).
+ - sonic_port_group - Add replaced and overridden states support for port group
+ resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/227).
+ - sonic_port_group - Add unit tests for port group resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/228).
+ - sonic_prefix_lists - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/255).
+ - sonic_prefix_lists - Add unit tests for prefix lists resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/209).
+ - sonic_radius_server - Add replaced and overridden states support for RADIUS
+ server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/239).
+ - sonic_radius_server - Add unit tests for RADIUS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/210).
+ - sonic_static_routes - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/236).
+ - sonic_static_routes - Add unit tests for static routes resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/212).
+ - sonic_system - Add replaced and overridden states support for system resource
+ module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/159).
+ - sonic_system - Add unit tests for system resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/223).
+ - sonic_tacacs_server - Add replaced and overridden states support for TACACS
+ server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/235).
+ - sonic_tacacs_server - Add unit tests for TACACS server resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/208).
+ - sonic_users - Add replaced and overridden states support for users resource
+ module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/242).
+ - sonic_users - Add unit tests for users resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/213).
+ - sonic_vlans - Add replaced and overridden states support for VLAN resource
+ module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/217).
+ - sonic_vlans - Add unit tests for Vlans resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/214).
+ - sonic_vrfs - Add replaced and overridden states support for VRF resource module
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/156).
+ - sonic_vrfs - Add unit tests for VRFS resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/216).
+ - sonic_vxlans - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/247).
+ - sonic_vxlans - Add unit tests for VxLans resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/215).
+ release_summary: '| Release Date: 2023-05-15
+
+ | This release provides enhanced Dell Enterprise SONiC Ansible Collection
+ support for SONiC 4.x images.
+
+ | In addition to new resource modules to support previously existing functionality,
+ it provides
+
+ | support for the "QinQ" (Vlan Mapping) function introduced with SONiC release
+ 4.1. It also provides
+
+ | bug fixes and enhancements for support of features that were initially introduced
+ in previous
+
+ | Enterprise SONiC Ansible releases. The changelog describes changes made
+ to the modules and plugins
+
+ | included in this collection since release 2.0.0.
+
+ '
+ fragments:
+ - 106-change-ntp-get-fact.yaml
+ - 107-change-ntp-key-values-in-regression-script.yaml
+ - 113-change-ntp-module-name.yaml
+ - 114-change-ntp-module-name-in-regression-script.yaml
+ - 118-add-ntp-prefer-attribute.yaml
+ - 119-lag_interfaces-port-name-fix.yaml
+ - 124-sonic_l2_interfaces-oc-yang-vlan-range-format-config-support.yaml
+ - 128-add-several-attributes-to-interface-resource-module.yaml
+ - 129-ntp-minpoll-maxpoll-config.yaml
+ - 130-vxlans-attribute-restriction-fix.yaml
+ - 141-add-route-distinguisher-target-attributes-to-bgp-af-module.yaml
+ - 145-mclag-new-attributes.yaml
+ - 146-vrf-mgmt-bug-fix.yaml
+ - 148-dhcp-relay-unit-tests.yaml
+ - 149-l2-interfaces-vlan-trunk-range-support-for-playbooks.yaml
+ - 150-replaced-overridden-for-logging-resource-module.yaml
+ - 151-replaced-overridden-for-ntp-resource-module.yaml
+ - 152-copp.yaml
+ - 156-replaced-overridden-for-vrfs-resource-module.yaml
+ - 157-mac.yaml
+ - 159-replaced-overridden-for-system-resource-module.yaml
+ - 160-sanity-check-errors-fix.yaml
+ - 172-module-utils-import-remove-empties-from-ansible-lib.yaml
+ - 176-update-netcommon-version.yaml
+ - 177-bfd.yaml
+ - 180-regression-failures-fix.yaml
+ - 182-unit-tests-for-bgp-resource-module.yaml
+ - 183-unit-tests-for-bgp-af-resource-module.yaml
+ - 184-unit-tests-for-bgp-as-paths-resource-module.yaml
+ - 185-unit-tests-for-bgp-communities-resource-module.yaml
+ - 186-unit-tests-for-bgp-ext-communities-resource-module.yaml
+ - 187-unit-tests-for-bgp-neighbors-resource-module.yaml
+ - 188-unit-tests-for-bgp-neighbors-af-resource-module.yaml
+ - 191-unit-tests-for-bgp-af-resource-module.yaml
+ - 195-aaa-login-authentication.yaml
+ - 196-replaced-overridden-for-lag-interface-resource-module.yaml
+ - 197-unit-tests-for-interfaces-resource-module.yaml
+ - 198-unit-tests-for-aaa-resource-module.yaml
+ - 199-code-coverage-workflow-fix.yaml
+ - 200-unit-tests-for-l2-interfaces-resource-module.yaml
+ - 201-bgp-af-modify-vni-advertise-fix.yaml
+ - 202-unit-tests-for-l3-interfaces-resource-module.yaml
+ - 203-unit-tests-for-lag-interfaces-resource-module.yaml
+ - 207-unit-tests-for-ntp-resource-module.yaml
+ - 208-unit-tests-for-tacacs-server-resource-module.yaml
+ - 209-unit-tests-for-prefix-lists-resource-module.yaml
+ - 210-unit-tests-for-radius-server-resource-module.yaml
+ - 212-unit-tests-for-static-routes-resource-module.yaml
+ - 213-unit-tests-for-users-resource-module.yaml
+ - 214-unit-tests-vlans-resource-module.yaml
+ - 215-unit-tests-for-vxlans-resource-module.yaml
+ - 216-unit-tests-for-vrfs-resource-module.yaml
+ - 217-replaced-overridden-for-vlans-resource-module.yaml
+ - 218-unit-tests-for-api-resource-module.yaml
+ - 219-unit-tests-for-command-resource-module.yaml
+ - 220-unit-tests-for-config-resource-module.yaml
+ - 221-l2-interfaces-replaced-overridden-support.yaml
+ - 222-unit-tests-for-facts-resource-module.yaml
+ - 223-unit-tests-for-system-resource-module.yaml
+ - 224-ntp-clear-all-fix.yaml
+ - 225-unit-tests-for-ip-neighbor-resource-module.yaml
+ - 226-unit-tests-for-logging-resource-module.yaml
+ - 227-replaced-overridden-for-port-group-resource-module.yaml
+ - 228-unit-tests-for-port-group-resource-module.yaml
+ - 229-unit-tests-for-port-breakout-resource-module.yaml
+ - 230-vrfs-delete-interface-fix.yaml
+ - 231-l3-interfaces-delete-fix.yaml
+ - 233-bgp-neighbors-defaults-fix.yaml
+ - 235-replaced-overridden-for-tacacs-server-resource-module.yaml
+ - 236-static-routes-replaced-overridden-support.yaml
+ - 237-aaa-replaced-overridden-support.yaml
+ - 239-replaced-overridden-for-radius-server-resource-module.yaml
+ - 240-bgp-replaced-overridden-support.yaml
+ - 242-users-replaced-overridden-support.yaml
+ - 244-added-rt-delay-attribute-to-bgp-module.yaml
+ - 245-enhance-bgp-neighbors-unit-tests.yaml
+ - 246-bgp-af-replaced-overridden-support.yaml
+ - 247-vxlans-replaced-overridden-support.yaml
+ - 249-dhcp-relay-replaced-overridden-support.yaml
+ - 250-bgp-as-paths-fix-merged-deleted.yaml
+ - 253-change-replaced-function-for-ip-neighbor-module.yaml
+ - 254-update-replace-methods.yaml
+ - 255-prefix_lists_replaced_overridden_support.yaml
+ - 257-vrfs-cli-test-case-fix.yaml
+ - 258-change-logging-module-source-interface-naming.yaml
+ - 261-interfaces-timeout-fix.yaml
+ - 262-vlan-mapping-bug-fix.yaml
+ - v2.1.0_summary.yaml
+ modules:
+ - description: Manage access control list (ACL) to interface binding on SONiC
+ name: sonic_acl_interfaces
+ namespace: ''
+ - description: Manage BFD configuration on SONiC
+ name: sonic_bfd
+ namespace: ''
+ - description: Manage CoPP configuration on SONiC
+ name: sonic_copp
+ namespace: ''
+ - description: Manage DHCP and DHCPv6 relay configurations on SONiC
+ name: sonic_dhcp_relay
+ namespace: ''
+ - description: Manage IP neighbor global configuration on SONiC
+ name: sonic_ip_neighbor
+ namespace: ''
+ - description: Manage Layer 2 access control lists (ACL) configurations on SONiC
+ name: sonic_l2_acls
+ namespace: ''
+ - description: Manage Layer 3 access control lists (ACL) configurations on SONiC
+ name: sonic_l3_acls
+ namespace: ''
+ - description: Manage Global LLDP configurations on SONiC
+ name: sonic_lldp_global
+ namespace: ''
+ - description: Manage logging configuration on SONiC
+ name: sonic_logging
+ namespace: ''
+ - description: Manage MAC configuration on SONiC
+ name: sonic_mac
+ namespace: ''
+ - description: Manages port group configuration on SONiC
+ name: sonic_port_group
+ namespace: ''
+ - description: route map configuration handling for SONiC
+ name: sonic_route_maps
+ namespace: ''
+ - description: Configure vlan mappings on SONiC
+ name: sonic_vlan_mapping
+ namespace: ''
+ release_date: '2023-05-15'
+ 2.2.0:
+ changes:
+ minor_changes:
+ - galaxy_yml - Enable installation of Ansible Netcommon versions after 5.0.0
+ and update the enterprise_sonic release version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/270).
+ - sonic_aaa - Revert breaking changes for AAA module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/269).
+ release_summary: '| Release Date: 2023-06-01
+
+ | This release provides Ansible compliance changes required on top of the
+ changes included in
+
+ | the 2.1.0 release of the enterprise_sonic Ansible network resource module
+ collection.
+
+ | It addresses two issues raised by the Ansible core team with the content
+ of the 2.1.0 release.
+
+ | 1) Back out the "breaking_change" made in the sonic_aaa resource module
+ to fix a functional
+
+ | shortcoming in the enterprise_sonic Ansible collection. Although the change
+ is still needed,
+
+ | it will be deferred to a "major" release.
+
+ | 2) Re-enable installation of new Ansible Netcommon repo instances when installing
+ the
+
+ | enterprise_sonic Ansible collection. The 2.1.0 enterprise_sonic Ansible
+ release included a
+
+ | workaround for a bug introduced in the 5.0.0 version of the Ansible Netcommon
+ repo. This
+
+ | workaround was implemented in the "galaxy.yml" file for the enterprise_sonic
+
+ | 2.1.0 release. New versions of Ansible Netcommon were published after the
+ problematic 5.0.0
+
+ | version and the revised "galaxy.yml" file for this release enables installation
+ of these
+
+ | newer versions.
+
+ '
+ fragments:
+ - 269-revert-aaa-breaking-changes.yaml
+ - 270-galaxy-yml-netcommon-and-version-fixes.yaml
+ - v2.2.0_summary.yaml
+ release_date: '2023-05-31'
+ 2.3.0:
+ changes:
+ bugfixes:
+ - sonic_bgp_communities - Fix incorrect "facts" handling for parsing of a BGP
+ community list configured with an empty "members" list (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/319).
+ - sonic_bgp_neighbors - Fix prefix-limit issue (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/289).
+ - sonic_interfaces - Add warnings when speed and auto_negotiate are configured
+ at the same time (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+ - sonic_interfaces - Fix support for standard naming interfaces (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+ - sonic_interfaces - Prevent configuring speed in port group interfaces (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+ - sonic_stp - Correct the commands list for STP delete state (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/302).
+ minor_changes:
+ - sonic_aaa - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/304).
+ - sonic_aaa - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_acl_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/306).
+ - sonic_acl_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_bgp_as_paths - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/290).
+ - sonic_bgp_communities - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/251).
+ - sonic_bgp_ext_communities - Add support for replaced and overridden states
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/252).
+ - sonic_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/301).
+ - sonic_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/314).
+ - sonic_interfaces - Change deleted design for interfaces module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/310).
+ - sonic_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_ip_neighbor - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/285).
+ - sonic_ip_neighbor - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_l2_acls - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/306).
+ - sonic_l2_acls - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_l2_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/303).
+ - sonic_l2_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_l3_acls - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/306).
+ - sonic_l3_acls - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_l3_interfaces - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/241).
+ - sonic_lag_interfaces - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/303).
+ - sonic_lag_interfaces - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_logging - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/285).
+ - sonic_logging - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_mclag - Add VLAN range support for 'unique_ip' and 'peer_gateway' options
+ (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/288).
+ - sonic_mclag - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/288).
+ - sonic_ntp - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/281).
+ - sonic_ntp - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_port_breakout - Add Ansible support for all port breakout modes now
+ allowed in Enterprise SONiC (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/276).
+ - sonic_port_breakout - Add support for replaced and overridden states (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/291).
+ - sonic_port_group - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/284).
+ - sonic_port_group - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_radius_server - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/279).
+ - sonic_radius_server - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_static_routes - Add playbook check and diff modes support for static
+ routes resource module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/313).
+ - sonic_static_routes - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_system - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/284).
+ - sonic_system - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_tacacs_server - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/281).
+ - sonic_tacacs_server - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_users - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/304).
+ - sonic_users - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_vlans - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/301).
+ - sonic_vlans - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - sonic_vrfs - Add mgmt VRF replaced state handling to sonic_vrfs module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/298).
+ - sonic_vrfs - Add mgmt VRF support to sonic_vrfs module (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/293).
+ - sonic_vrfs - Add support for playbook check and diff modes (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/285).
+ - sonic_vrfs - Enhance config diff generation function (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/318).
+ - tests - Add UTs for BFD, COPP, and MAC modules (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/287).
+ - tests - Enable contiguous execution of all regression integration tests on
+ an S5296f (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/277).
+ - tests - Fix the bgp CLI test base_cfg_path derivation of the bgp role_path
+ by avoiding relative pathing from the possibly external playbook_dir (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/283).
+ release_summary: '| Release Date: 2024-01-03
+
+ | This release provides the functionality enhancements listed below, along
+ with fixes for
+
+ | problems found in regression testing or reported by users. The main functionality
+ enhancements
+
+ | provided are the following items.
+
+ | 1) Complete the support for "replaced" and "overridden" state handling for
+ all resource modules except for the bgp_neighbors and bgp_neighbors_af modules.
+
+ | With this release, the required support has been added for any resource
+ modules that were not
+
+ | provided with this support for the 2.1.0 release with the two exceptions
+ noted above.
+
+ | 2) Provide initial support for the "--check" and "--diff" mode options for
+ playbook execution. This
+
+ | release provides the common utility support for these options for use by
+ all resource modules.
+
+ | It also provides the specific resource module changes required for implementation
+ of the
+
+ | functionality in many of the existing resource modules. (The "--check" and
+ "--diff" mode support
+
+ | for the remaining resource modules is planned for inclusion in the next
+ release.)
+
+ | 3) New resource modules for "Public Key Infrastructure", STP, and DHCP Snooping.
+
+ | 4) Support for "ranges" of vlans (e.g. ''2-100'') in tasks for the mclag
+ resource module.
+
+ | Please refer to the "CHANGELOG.rst" file at the top directory level of this
+ repo for additional
+
+ | details on the contents of this release.
+
+ '
+ fragments:
+ - 241-replaced-overridden-for-l3-interfaces-module.yaml
+ - 251-replaced-overridden-for-bgp-communities-module.yaml
+ - 252-replaced-overridden-for-bgp-ext-communities-module.yaml
+ - 274-add-changelog-workflow.yaml
+ - 276-add-new-port-breakout-modes.yaml
+ - 277-enable-running-all-regression-tests-on-s5296.yaml
+ - 279-playbook-check-diff-modes-and-radius-server-implement.yaml
+ - 281-playbook-check-diff-modes-for-ntp-module.yaml
+ - 282-update-changelog-workflow.yaml
+ - 283-bgp-cli-auto-regression-fix.yaml
+ - 284-playbook-check-diff-modes-for-system-and-port-group-modules.yaml
+ - 285-playbook-check-diff-modes-for-vrfs-logging-ip-neighbor.yaml
+ - 287-add-uts-for-bfd-copp-mac.yaml
+ - 288-mclag-replaced-overridden-vlan-range-support.yaml
+ - 289-bgp-neighbors-prefix-limit-fix.yaml
+ - 290-bgp-as-paths-replaced-overridden-support.yaml
+ - 291-port-breakout-replaced-overridden-support.yaml
+ - 293-add-mgmt-vrf-handling-in-vrfs-module.yaml
+ - 294-verbose-regression-report-template.yaml
+ - 298-add-mgmt-vrf-replaced-state-handling-in-vrfs-module.yaml
+ - 301-playbook-check-diff-modes-for-vlans-interfaces.yaml
+ - 302-stp-commands-delete-state-fix.yaml
+ - 303-playbook-check-diff-modes-for-l2-lag-interfaces.yaml
+ - 304-playbook-check-diff-modes-for-users-aaa.yaml
+ - 306-playbook-check-diff-modes-for-acls.yaml
+ - 310-change-deleted-design-for-interfaces-module.yaml
+ - 313-laybook-check-diff-modes-for-static-route.yaml
+ - 314-replaced-overridden-for-interfaces-module.yaml
+ - 318-playbook-diff-mode-enhancement.yaml
+ - 319-bgp-communities-no-member-facts-fix.yaml
+ - v2.3.0_summary.yaml
+ modules:
+ - description: Manage DHCP Snooping on SONiC
+ name: sonic_dhcp_snooping
+ namespace: ''
+ - description: Manages PKI attributes of Enterprise Sonic
+ name: sonic_pki
+ namespace: ''
+ - description: Manage STP configuration on SONiC
+ name: sonic_stp
+ namespace: ''
+ release_date: '2024-01-03'
+ 2.4.0:
+ changes:
+ bugfixes:
+ - requirements - Update requires_ansible version in meta/runtime.yml to the
+ oldest supported version (https://github.com/ansible-collections/dellemc.enterprise_sonic/pull/321).
+ release_summary: '| Release Date: 2024-01-08
+
+ | This release provides an Ansible compliance change required on top of the
+ changes included in
+
+ | the 2.3.0 release of the enterprise_sonic Ansible network resource module
+ collection.
+
+ | It addresses an issue raised by the Ansible core team with the content of
+ the 2.3.0 release,
+
+ | and provides accompanying documentation changes in the README file. Additional
+ details are
+
+ | described below.
+
+ | 1) Update the "requires_ansible" version in the meta/runtime.yml file for
+ this collection
+
+ | to the oldest supported version of ansible-core. (This was recently changed
+ by Redhat/Ansible
+
+ | to version "2.14.0".)
+
+ | 2) Update the README file "Recommended version" values for Ansible and Python
+ in accordance
+
+ | with the previous change item to modify the oldest supported version of
+ ansible-core which,
+
+ | in turn, requires a Python version >= "3.9".
+
+ | 3) Update the list of resource modules in the README file to include all
+ currently available
+
+ | resource modules for this collection.
+
+ '
+ fragments:
+ - 321-requirements-update-meta-runtime-ansible-version.yaml
+ - 322-docs-README-updates.yaml
+ - v2.4.0_summary.yaml
+ release_date: '2024-01-08'
diff --git a/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml b/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml
index b857f936f..158fea109 100644
--- a/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/changelogs/config.yaml
@@ -1,4 +1,4 @@
-changelog_filename_template: CHANGELOG.rst
+changelog_filename_template: ../CHANGELOG.rst
changelog_filename_version_depth: 0
changes_file: changelog.yaml
changes_format: combined
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/ADDITIONAL_INFORMATION.md b/ansible_collections/dellemc/enterprise_sonic/docs/ADDITIONAL_INFORMATION.md
new file mode 100644
index 000000000..6b2a34808
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/ADDITIONAL_INFORMATION.md
@@ -0,0 +1,15 @@
+# Additional Information
+## Release cadence
+<release cadence>
+
+<strong>TBD </strong>
+
+## Versioning
+* This product's releases follow [semantic versioning](https://semver.org/).
+
+## Deprecation
+* <Product> deprecation cycle is aligned with [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
+
+<Any other additional information,
+E.g.
+Enabling/Ignoring SSL certificate validation> \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/BRANCHING.md b/ansible_collections/dellemc/enterprise_sonic/docs/BRANCHING.md
new file mode 100644
index 000000000..2fd9f9570
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/BRANCHING.md
@@ -0,0 +1 @@
+TBD \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/COMMITTER_GUIDE.md b/ansible_collections/dellemc/enterprise_sonic/docs/COMMITTER_GUIDE.md
new file mode 100644
index 000000000..5ee9da4b1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/COMMITTER_GUIDE.md
@@ -0,0 +1,43 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the GPL, Version 3.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.gnu.org/licenses/gpl-3.0.txt
+-->
+
+# Committer Guidelines
+
+These are the guidelines for people with commit privileges on the GitHub repository. Committers act as members of the Core Team and are not necessarily employees of Dell.
+
+These guidelines apply to everyone. As Committers, you have been given access to commit changes because you exhibit good judgment and have demonstrated your commitment to the vision of the project. We trust that you will use these privileges wisely and not abuse them.
+
+If these privileges are abused in any way and the quality of the project is compromised, our trust will be diminished and you may be asked to stop committing or may lose these privileges altogether.
+
+## General Rules
+
+### Don't
+
+* Introduce Ansible sanity failures.
+* Introduce enterprise_sonic regression test failures.
+* Commit directly.
+* Compromise backward compatibility.
+* Disrespect your Community Team members. Help them grow.
+* Think it is someone else's job to test your code. Write tests for all the code you produce.
+* Forget to keep things simple.
+* Create technical debt. Fix issues in place and make doing so the highest priority above everything else.
+
+
+### Do
+
+* Always follow the defined coding guidelines.
+* Keep the design of your software clean and maintainable.
+* Squash your commits, avoid merges.
+* Write tests for all your deliverables.
+* Automate everything.
+* Maintain a high code coverage, equal to or greater than 90%.
+* Keep an open communication with other Committers.
+* Ask questions.
+* Document your contributions and remember to keep it simple.
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/CONTRIBUTING.md b/ansible_collections/dellemc/enterprise_sonic/docs/CONTRIBUTING.md
new file mode 100644
index 000000000..3109bbcbc
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/CONTRIBUTING.md
@@ -0,0 +1,207 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the GPL, Version 3.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.gnu.org/licenses/gpl-3.0.txt
+-->
+
+# How to Contribute
+
+Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](CODE_OF_CONDUCT.md).
+
+## Table of Contents
+
+* [Become a contributor](#Become-a-contributor)
+* [Contributor agreement](#Contributor-Agreement)
+* [Submitting issues](#Submitting-issues)
+* [Triage issues](#Triage-issues)
+* [Your first contribution](#Your-first-contribution)
+* [Branching](#Branching)
+* [Signing your commits](#Signing-your-commits)
+* [Pull requests](#Pull-requests)
+* [Code reviews](#Code-reviews)
+* [Code Style](#Code-Style)
+
+## Become a contributor
+
+You can contribute to this project in several ways. Here are some examples:
+
+* Contribute to the project documentation and codebase.
+* Report and triage bugs.
+* Submit feature requests.
+* Write technical documentation and blog posts for users and contributors.
+* Help others by answering questions about this project.
+
+## Contributor Agreement
+All contributions shall be made under the Developer Certificate of Origin ("DCO") (see http://elinux.org/Developer_Certificate_Of_Origin), which is reproduced below. Specifically, the Git commit message for the contribution should contain the following tag information signifying use of the DCO:
+
+
+"Signed-off-by: [Your Name] [youremail@company.com]"
+
+
+________________________________________
+### Developer's Certificate of Origin 1.1
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I have the right to submit it under the open source license indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right under that license to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license), as indicated in the file; or
+
+(c) The contribution was provided directly to me by some other person who certified (a), (b) or (c) and I have not modified it.
+
+(d) I understand and agree that this project and the contribution are public and that a record of the contribution (including all personal information I submit with it, including my sign-off) is maintained indefinitely and may be redistributed consistent with this project or the open source license(s) involved.
+
+## Submitting issues
+
+All issues related to the associated Ansible modules, roles, and playbooks, regardless of the repository the issue belongs to, should be submitted [here](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues). Issues will be triaged, and labels will be used to indicate the type of issue. This section outlines the types of issues that can be submitted.
+
+### Report bugs
+
+We aim to track and document everything related to the repository via the Issues page. The code and documentation are released with no warranties or SLAs and are intended to be supported through a community-driven process.
+
+Before submitting a new issue, make sure someone hasn't already reported the problem. Look through the [existing issues](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues) for similar issues.
+
+Report a bug by submitting a [bug report](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues/new?labels=type%2Fbug%2C+needs-triage&template=bug_report.md&title=%5BBUG%5D%3A). Make sure that you provide as much information as possible on how to reproduce the bug.
+
+When opening a bug, please include the following information to help with debugging:
+
+1. Version of relevant software: this software, Python version, Dell Server/Storage Platform, etc.
+2. Details of the issue explaining the problem: what, when, where
+3. The expected outcome that was not met (if any)
+4. Supporting troubleshooting information. __Note: Do not provide private company information that could compromise your company's security.__
+
+An Issue __must__ be created before submitting any pull request. Any pull request that is created should be linked to an Issue.
+
+### Feature request
+
+If you have an idea of how to improve this project, submit a [feature request](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues/new?labels=type%2Ffeature-request%2C+needs-triage&template=feature_request.md&title=%5BFEATURE%5D%3A).
+
+### Answering questions
+
+If you have a question and you can't find the answer in the documentation or issues, the next step is to submit a [question.](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues/new?labels=type%2Fquestion&template=ask-a-question.md&title=%5BQUESTION%5D%3A)
+
+We'd love your help in answering pending questions asked by other users of this collection.
+
+## Triage issues
+
+Triage helps ensure that issues are resolved quickly by:
+
+* Ensuring the issue's intent and purpose is conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took.
+* Giving a contributor the information they need before they commit to resolving an issue.
+* Streamlining the bug fixing process by eliminating duplicate issues.
+
+
+If you don't have the knowledge or time to code, consider helping with _issue triage_. The Dell dellemc.enterprise_sonic community will thank you for saving them time by spending some of yours.
+
+Read more about the ways you can [Triage issues](ISSUE_TRIAGE.md).
+
+## Testing
+
+See [here](https://github.com/ansible-collections/dellemc.enterprise_sonic/tree/collections/README.md) for further information on testing.
+
+## Debugging
+To debug <product> using an IDE, see [here](https://docs.ansible.com/ansible/latest/dev_guide/debugging.html).
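+
+For quick troubleshooting without an IDE, increasing playbook verbosity is often enough to see the requests sent to and responses received from the device. A minimal sketch, assuming an inventory file named `inventory` and a playbook named `playbook.yaml` (both hypothetical names):
+
+```console
+$ ansible-playbook -i inventory playbook.yaml -vvvv
+```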
+
+
+## Your first contribution
+
+Unsure where to begin contributing? Start by browsing issues labeled `beginner friendly` or `help wanted`.
+
+* [Beginner-friendly](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues?q=is%3Aopen+is%3Aissue+label%3A%22beginner+friendly%22) issues are generally straightforward to complete.
+* [Help wanted](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) issues are problems we would like the community to help us with regardless of complexity.
+
+When you're ready to contribute, it's time to create a pull request.
+
+## Branching
+
+* [Branching Strategy](BRANCHING.md)
+
+## Signing your commits
+
+We require that developers sign off their commits to certify that they have permission to contribute the code in a pull request. This way of certifying is commonly known as the [Developer Certificate of Origin (DCO)](https://developercertificate.org/). We encourage all contributors to read the DCO text before signing a commit and making contributions.
+
+GitHub will prevent a pull request from being merged if there are any unsigned commits.
+
+### Signing a commit
+
+GPG (GNU Privacy Guard) will be used to sign commits. Follow the instructions [here](https://docs.github.com/en/free-pro-team@latest/github/authenticating-to-github/signing-commits) to create a GPG key and configure your GitHub account to use that key.
+
+Make sure you have your username and e-mail set. This will be required for your signed commit to be properly verified. Check the following references:
+
+* Setting up your Git username [reference](https://help.github.com/articles/setting-your-username-in-git/)
+* Setting up your e-mail address [reference](https://help.github.com/articles/setting-your-commit-email-address-in-git/)
+
+Once Git and your GitHub account have been properly configured, you can add the `-S` flag to your git commits:
+
+```console
+$ git commit -S -m "your commit message"
+# Creates a signed commit
+```
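+
+If you prefer not to pass `-S` on every commit, Git can be configured to sign all commits in this repository by default. A sketch, assuming `<KEY_ID>` is a placeholder for the ID of the GPG key created in the steps above:
+
+```console
+$ git config user.signingkey <KEY_ID>   # use the GPG key created above
+$ git config commit.gpgsign true        # sign every commit in this repository
+```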
+
+### Commit message format
+
+This repository uses the guidelines for commit messages outlined in [How to Write a Git Commit Message](https://chris.beams.io/posts/git-commit/).
+
+## Pull Requests
+
+If this is your first time contributing to an open-source project on GitHub, make sure you read about [Creating a pull request](https://help.github.com/en/articles/creating-a-pull-request).
+
+A pull request must always link to at least one GitHub issue. If that is not the case, create a GitHub issue and link it.
+
+To increase the chance of having your pull request accepted, make sure your pull request follows these guidelines:
+
+* Title and description match the implementation.
+* Commits within the pull request follow the formatting guidelines.
+* The pull request closes one related issue.
+* The pull request contains necessary tests that verify the intended behavior.
+* If your pull request has conflicts, rebase your branch onto the main branch, as shown in the sketch below.
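+
+A typical rebase flow is sketched below, assuming `upstream` points to this repository, `origin` points to your fork, and `<your-feature-branch>` is a placeholder for your branch name:
+
+```console
+$ git fetch upstream
+$ git rebase upstream/main
+# resolve any conflicts, then continue the rebase
+$ git push --force-with-lease origin <your-feature-branch>
+```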
+
+If the pull request fixes a bug:
+
+* The pull request description must include `Fixes #<issue number>`.
+* To avoid regressions, the pull request should include tests that replicate the fixed bug.
+
+The owning team _squashes_ all commits into one when we accept a pull request. The title of the pull request becomes the subject line of the squashed commit message. We still encourage contributors to write informative commit messages, as they become a part of the Git commit body.
+
+We use the pull request title when we generate change logs for releases. As such, we strive to make the title as informative as possible.
+
+Make sure that the title for your pull request uses the same format as the subject line in the commit message.
+
+### Quality Gates for pull requests
+
+GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the code repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/ansible-collections/dellemc.enterprise_sonic/blob/main/docs/SUPPORT.md).
+
+#### Code build/test/coverage
+
+The [GitHub action](https://github.com/ansible-collections/dellemc.enterprise_sonic/actions) defined in [code-coverage.yml](https://github.com/ansible-collections/dellemc.enterprise_sonic/blob/main/.github/workflows/code-coverage.yml) runs unit tests automatically and checks that the code coverage of each package meets a configured threshold (currently 90%). An error is flagged if a given pull request does not meet the test coverage threshold, which blocks the pull request from being merged. When it fails, it is expected that the contributor will look into the log, understand the problem and resolve the issue.
+
+Alternatively, users can manually run the unit tests and check the coverage using the `ansible-test` command as shown in the following example. These commands must be run from the Ansible collection "install" directory for the enterprise_sonic collection (e.g. "cd ~/.ansible/collections/ansible_collections/dellemc/enterprise_sonic").
+
+```
+ansible-test units --color --python 3.10 --coverage -vvvv tests/unit/modules/network/sonic/test_sonic_bgp_neighbors.py
+ansible-test coverage report
+```
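+
+For closer inspection, ansible-test can also generate an HTML coverage report; the output location is printed by the command:
+
+```
+ansible-test coverage html
+```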
+
+## Code Reviews
+
+All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help](https://help.github.com/articles/about-pull-requests/) for more information on using pull requests.
+
+A pull request must satisfy the following for it to be merged:
+
+* A pull request will require at least 2 maintainer approvals.
+* Maintainers must perform a review to ensure the changes adhere to guidelines laid out in this document.
+* If any commits are made after the PR has been approved, the PR approval will automatically be removed and the above process must happen again.
+
+## Code Style
+
+For the Python code in the repository, we expect the code styling outlined in the [Ansible Python guide](https://docs.ansible.com/ansible/latest/dev_guide/developing_python_3.html). In addition to this, we have the following supplements:
+
+* Contributions should adhere to the Ansible coding standard guidelines, which this project follows.
+* Include [test](https://github.com/ansible-collections/dellemc.enterprise_sonic/tree/main/tests) scripts for your changes.
+* Do not submit contribution requests for our deprecated modules; they are maintained only for backward compatibility.
+
+
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/enterprise_sonic/docs/ISSUE_TRIAGE.md
new file mode 100644
index 000000000..09cd93ee0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/ISSUE_TRIAGE.md
@@ -0,0 +1,291 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the GPL, Version 3.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.gnu.org/licenses/gpl-3.0.txt
+-->
+
+# Triage issues
+
+The main goals of an issue triage are to categorize open issues based on priority and to ensure that these issues contain adequate information for contributors to work on them.
+
+> **Note:** This information is for project Maintainers, Owners, and Admins. If you are a Contributor, then you will not be able to perform most of the tasks in this topic.
+
+The core maintainers of this project are responsible for categorizing all incoming issues and delegating any critical or important issue to other maintainers. Triage provides an important way to contribute to an open source project.
+
+Triage helps resolve issues quickly by:
+
+- Ensuring the intent of an issue is clear. The issue should clearly describe the problem the end user is experiencing as well as the steps to reproduce the problem.
+- Giving a contributor the information they need before they commit to resolving an issue.
+- Streamlining the development process by identifying and closing duplicate issues.
+
+If you don't have the knowledge or time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours.
+
+## 1. Find issues that need triage
+
+The easiest way to find issues that haven't been triaged is to search for issues with the `needs-triage` label.
+
+## 2. Ensure the issue contains basic information
+
+When creating a new issue, the author must provide all of the information required by the project's GitHub issue template.
+
+### Standard issue information that must be included
+
+The following section describes the various issue templates and the expected content.
+
+#### Bug reports
+
+A bug report should contain an explanation of what the problem is, what the expected outcome is, and how to reproduce the problem. Additionally, any other supporting information such as screenshots, console logs, and environment details should be included in the bug report.
+
+ - Product Name/Version
+ - Component/Module Name
+ - Ansible Collection Version
+ - Configuration
+ - Steps to Reproduce
+ - Expected/Actual Behavior
+ - Logs/Screenshots
+ - Any other additional information...
+
+#### Feature requests
+
+A feature request should explain what feature the author wants added and why it is needed.
+
+#### Ask a Question requests
+
+In general, if the issue description and title are perceived as questions, no more information is needed.
+
+### Good practices
+
+To make issue handling easier for everyone, it is suggested to:
+
+- Make sure that issue titles are named to explain the subject of the issue, have correct spelling, and don't include irrelevant information and/or sensitive information.
+- Make sure that issue descriptions don't include irrelevant information.
+- Make sure that issues do not contain sensitive information.
+- Make sure that issues have all relevant fields filled in.
+- If an issue is unclear, then try to edit the title and description for more clarity or leave a comment requesting that edits to the issue be made.
+
+### Dealing with missing information
+
+Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. Label the issue with `triage/needs-information`.
+
+If the author provides the standard information but you are still unable to triage the issue, then request additional information. Do this kindly and politely because you are asking for more of the author's time. Label the issue with `triage/needs-information`.
+
+If the author does not respond to the requested information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided.
+
+If you receive a notification that additional information was provided but you are no longer working on issue triage, then you should delegate the issue to the current person working on issue triage.
+
+## 3. Categorizing an issue
+
+### Duplicate issues
+
+Make sure it's not a duplicate by searching existing issues using related terms from the issue title and description. If you think the issue may be a potential duplicate and can't find the original existing issue, then please reach out to one of the maintainers and ask for help. If you identify that the issue is a duplicate of an existing issue:
+
+1. Add a comment `duplicate of #<issue number>`
+2. Add the `triage/duplicate` label
+
+### Bug reports
+
+If it's not perfectly clear that it's an actual bug, quickly try to reproduce it.
+
+**It's a bug/it can be reproduced:**
+
+1. Add a comment describing detailed steps for how to reproduce it, if applicable.
+2. If maintainers aren't able to address the issue in a timely manner, then label the issue with `help wanted` and an optional `beginner friendly`. Also, include pointers to the problematic source code. Doing this allows us to solicit help from the community to get the issue resolved.
+3. Move on to [prioritizing the issue](#4-prioritization-of-issues).
+
+**It can't be reproduced:**
+
+1. Request the [more information](#2-ensure-the-issue-contains-basic-information) needed to investigate the issue more thoroughly, and provide details in a comment, <br>
+or <br>
+2. [Delegate further investigation](#investigation-of-issues) to someone else, and provide details in a comment.
+
+**It works as intended/by design:**
+
+1. Kindly and politely add a comment explaining briefly why we think it works as intended and close the issue.
+2. Label the issue `triage/works-as-intended`.
+3. Remove the `needs-triage` label.
+
+### Feature requests
+
+1. If the feature request does not align with the product vision, add a comment indicating so, remove the `needs-triage` label, and close the issue.
+2. Otherwise, move on to [prioritizing the issue](#4-prioritization-of-issues). Assign the appropriate priority label to the issue, add the appropriate comments to the issue, and remove the `needs-triage` label.
+
+## 4. Prioritization of issues
+In general, bugs and feature request issues should be labeled with a priority.
+
+This can be the most difficult task when triaging issues since it requires a lot of knowledge, context, and experience before being able to start feeling comfortable adding a certain priority label.
+
+In order to gain comfort with prioritizing issues, consulting with experienced project members on issues is highly encouraged.
+
+If there is uncertainty around the prioritization of an issue, please ask the maintainers for help.
+
+| Label | Description |
+| --------------------------------- | ------------------------------------------------------------------------------------------------------------------------ |
+| `priority/critical` | Highest priority. Must be actively worked on as someone's immediate top priority. |
+| `priority/high` | Must be worked on soon, ideally in time for the next release. |
+| `priority/low` | Lowest priority. Possibly useful, but not critical to functionality. |
+
+### Critical priority
+
+1. If an issue has been categorized and any of the following criteria apply to it, then the issue should be labeled as critical and must be actively worked on as someone's immediate top priority.
+
+ - Results in any data loss
+ - Critical security or performance issues
+ - Problem that makes a feature unusable
+ - Multiple users experience a severe problem affecting their business, users, etc.
+
+2. Label the issue `priority/critical`.
+3. Escalate the problem to the maintainers.
+4. Assign the issue or ask a maintainer for help assigning the issue to someone as their immediate top priority.
+5. Add the issue to the next upcoming release milestone.
+
+### High priority
+
+1. Label the issue `priority/high`.
+2. Add the issue to the next upcoming release milestone.
+3. Prioritize it or assign someone to work on it now or very soon.
+4. Consider requesting [help from the community](#5-requesting-help-from-the-community).
+
+### Low priority
+
+1. If the issue is deemed possibly useful but is a low priority, then label the issue `priority/low`.
+2. The functional impact of an issue determines how high a priority the issue is.
+3. Consider requesting [help from the community](#5-requesting-help-from-the-community).
+
+## 5. Requesting help from the community
+
+Due to constraints, maintainers aren't always able to resolve issues in a timely manner, especially those of lower priority. For such issues, consider requesting help from the community. Use your best judgement. In general, requesting help from the community means that a contribution has a good chance of getting accepted and merged.
+
+In many cases, the issue author or the community is most suitable to contribute changes since they're experts in their domain. Also, it's very common for someone to seek help from the community on a problem.
+
+1. Kindly and politely add a comment which will notify the users subscribed to the issue of any updates.
+ - Explain that the issue would be nice to get resolved, but it isn't prioritized to be worked on by maintainers for the foreseeable future.
+ - If possible or applicable, try to help contributors get started by adding pointers to relevant source code or files as well as adding any helpful input on how to resolve the issue.
+2. Label the issue with `help wanted`.
+3. If applicable, label the issue with `beginner friendly` to denote that the issue is suitable for a beginner to work on.
+
+## Investigation of issues
+
+When an issue has all basic information provided, but the reported problem cannot be reproduced at a first glance, label the issue `triage/needs-information` and post a comment explaining why this label is being applied. Depending on the perceived severity and/or number of upvotes, the investigation will either be delegated to another maintainer for further investigation or put on hold until someone else (maintainer or contributor) picks it up and eventually starts investigating it.
+
+
+Even if you don't have the time or the knowledge to investigate an issue, we highly recommend that you upvote the issue if you happen to have the same problem. If you have further details that may help with investigating the issue, please provide as much information as possible.
+
+## External pull requests
+
+Part of issue triage should also be triaging of external PRs. The main goal should be to make sure PRs from external contributors have an owner/reviewer and are not forgotten.
+
+1. Check new external PRs which do not have a reviewer.
+1. Check if there is a link to an existing issue.
+1. If not and you know which issue it is solving, add the link yourself, otherwise ask the author to link the issue or create one.
+1. Assign a reviewer based on who was handling the linked issue or what code or feature the PR touches (look at who was the last to make changes there if all else fails).
+
+## GitHub issue management workflow
+
+The following section describes the triage workflow for new GitHub issues that get created.
+
+### GitHub Issue: Bug
+
+This workflow starts off with a GitHub issue of type bug being created.
+
+1. A collaborator or maintainer creates a GitHub bug using the appropriate GitHub issue template.
+2. By default, a bug will be created with the `type/bug` and `needs-triage` labels.
+
+The following flowchart defines the workflow:
+
+
+```
+ +--------------------------+
+ | New bug issue opened/more|
+ | information added |
+ +-------------|------------+
+ |
+ |
+ +----------------------------------+ NO +--------------|-------------+
+ | label: triage/needs-information --------- All required information |
+ | | | contained in issue? |
+ +-----------------------------|----+ +--------------|-------------+
+ | | YES
+ | |
+ +--------------------------+ | +---------------------+ YES +---------------------------------------+
+ |label: | | | Duplicate Issue? ------- Comment `Duplicate of #<issue number>`
+ |triage/needs-investigation| | NO | | | Remove needs-triage label |
+ +------|-------------------+ | +----------|----------+ | label: triage/duplicate |
+ | | | NO +-----------------|---------------------+
+ YES | | | |
+ | +---------------|----+ NO +------------|------------+ |
+ | |Needs investigation?|---------- Can it be reproduced? | |
+ |------- | +------------|------------+ |
+ +--------------------+ | YES |
+ | +----------|----------+
+ +-------------------------+ +------------|------------+ | Close Issue |
+ | Add release-found label |------------------ Works as intended? | | |
+ | label: release-found/* | NO | | +----------|----------+
+ +------------|------------+ +------------|------------+ |
+ | | |
+ | | YES |
+ | +----------------|----------------+ |
+ +--------------------------+ | Add comment | |
+ | Add area label | | Remove needs-triage label ------------------|
+ | label: area/* | | label: triage/works-as-intended |
+ +-------------|------------+ +---------------------------------+
+ |
+ | +----------+
+ | | Done ----------------------------------------
+ | +----|-----+ |
+ | |NO |
+ | | +------------------|------------------+
+ +------------|-------------+ +----|----------------+ YES | Add details to issue |
+ | ------------ Signal Community? ---------- label: help wanted |
+ |Remove needs-triage label | | | | label: beginner friendly (optional)|
+ +--------------------------+ +---------------------+ +-------------------------------------+
+
+```
+
+### GitHub Issue: Feature request
+
+```
+ +---------------------------------+
+ |New feature request issue opened/|
+ |more information added |
+ +----------------|----------------+
+ |
+ |
+ +---------------------------------+ NO +-------------|------------+
+ | label: triage/needs-information ---------- All required information |
+ | | | contained in issue? |
+ +---------------------------------+ +-------------|------------+
+ |
+ |
+ +---------------------------------------+ |
+ |Comment `Duplicate of #<issue number>` | YES +----------|----------+
+ |Remove needs-triage label ------- Duplicate issue? |
+ |label: triage/duplicate | | |
+ +-----|---------------------------------+ +-----------|---------+
+ | |NO
+ | +-------------------------+ NO +-----------------------------+
+ | |Add comment |-------- Is this a valid feature? |
+ | |Remove needs-triage label| | |
+ | +------|------------------+ +--------------|--------------+
+ | | | YES
+ | | |
+ | | +---------------|--------------+
+ | | | label: type/feature |
+ +-|---------|---+ +--------+ | Remove needs-triage label |
+ | Close issue | | Done ------ Remove type/feature-request |
+ | | | | | milestone? |
+ +---------------+ +--------+ +------------------------------+
+```
+If the author does not respond to a request for more information within the timespan of a week, close the issue with a kind note stating that the author can request for the issue to be reopened when the necessary information is provided.
+
+In some cases you may receive a request you do not wish to accept.  Perhaps the request doesn't align with the project scope or vision.  It is important to tactfully handle requests that don't meet the project standards.
+
+1. Acknowledge the person behind the request and thank them for their interest and request.
+2. Explain why it didn't fit into the scope of the project or vision.
+3. Don't leave an unwanted request open.  Immediately close the request you do not wish to accept.
+
+
+
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/enterprise_sonic/docs/MAINTAINER_GUIDE.md
new file mode 100644
index 000000000..4652b0ef6
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/MAINTAINER_GUIDE.md
@@ -0,0 +1 @@
+At this point, only the Dell team is contributing. A guide shall be provided when it is opened for external maintainers. \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/docs/SUPPORT.md b/ansible_collections/dellemc/enterprise_sonic/docs/SUPPORT.md
new file mode 100644
index 000000000..8c429321a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/docs/SUPPORT.md
@@ -0,0 +1,13 @@
+<!--
+Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+
+Licensed under the GPL, Version 3.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ https://www.gnu.org/licenses/gpl-3.0.txt
+-->
+
+# Support
+
+ * To report any defects, request functionality, or provide general feedback to the development team, please open an Issue using the [GitHub issue tracker](https://github.com/ansible-collections/dellemc.enterprise_sonic/issues). \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml b/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml
index d4f511c1f..877679f24 100644
--- a/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/meta/runtime.yml
@@ -1,4 +1,4 @@
-requires_ansible: '>=2.9.10'
+requires_ansible: '>=2.14.0'
plugin_routing:
action:
sonic_config:
@@ -51,3 +51,5 @@ plugin_routing:
redirect: dellemc.enterprise_sonic.sonic
sonic_system:
redirect: dellemc.enterprise_sonic.sonic
+ sonic_pki:
+ redirect: dellemc.enterprise_sonic.sonic
diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_config.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_config.yaml
new file mode 100644
index 000000000..24794ad76
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_config.yaml
@@ -0,0 +1,27 @@
+---
+# Execution of a task that adds or deletes the configuration of the
+# management VRF ("mgmt") causes a disruption of the
+# management interface connection on which playbook configuration
+# commands are executing. As a result, playbook execution is aborted
+# unless the management VRF configuration task is constructed to
+# ignore errors.
+#
+# This example demonstrates how to combine tasks that configure other
+# resource modules ("sonic_vrfs", in this case) with tasks that add or remove
+# "management VRF" configuration.
+
+- name: Example playbook of configuring with mgmt_vrf_on and mgmt_vrf_off tasks.
+ hosts: datacenter
+ gather_facts: False
+ connection: httpapi
+ collections:
+ - dellemc.enterprise_sonic
+ tasks:
+ - include_tasks: mgmt_vrf_on.yaml
+ - name: Add VLANs
+ sonic_vlans:
+ config:
+ - vlan_id: 11
+ - vlan_id: 12
+ state: merged
+ - include_tasks: mgmt_vrf_off.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_off.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_off.yaml
new file mode 100644
index 000000000..c1c72f9ee
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_off.yaml
@@ -0,0 +1,6 @@
+- name: Delete mgmt VRF configuration
+ sonic_vrfs:
+ config:
+ - name: mgmt
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_on.yaml b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_on.yaml
new file mode 100644
index 000000000..109b14fc0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/playbooks/common_examples/mgmt_vrf_on.yaml
@@ -0,0 +1,6 @@
+- name: Create mgmt VRF configuration
+ sonic_vrfs:
+ config:
+ - name: mgmt
+ state: merged
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/sonic.py b/ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/sonic.py
index 37f1d872a..e5cc7630f 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/sonic.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/cliconf/sonic.py
@@ -32,8 +32,6 @@ description:
import json
-from itertools import chain
-
from ansible.errors import AnsibleConnectionFailure
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.common._collections_compat import Mapping
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py
index 86040892a..a61b7307b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/aaa/aaa.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -60,7 +60,7 @@ class AaaArgs(object): # pylint: disable=R0903
'type': 'dict'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'overridden', 'replaced'],
'default': 'merged', 'type': 'str'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os10/plugins/module_utils/network/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/plugins/module_utils/network/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/acl_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/acl_interfaces.py
new file mode 100644
index 000000000..45f7bf480
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/acl_interfaces/acl_interfaces.py
@@ -0,0 +1,82 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_acl_interfaces module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Acl_interfacesArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_acl_interfaces module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'access_groups': {
+ 'elements': 'dict',
+ 'options': {
+ 'acls': {
+ 'elements': 'dict',
+ 'options': {
+ 'direction': {
+ 'choices': ['in', 'out'],
+ 'required': True,
+ 'type': 'str'
+ },
+ 'name': {
+ 'required': True,
+ 'type': 'str'
+ }
+ },
+ 'type': 'list'
+ },
+ 'type': {
+ 'choices': ['mac', 'ipv4', 'ipv6'],
+ 'required': True,
+ 'type': 'str'
+ }
+ },
+ 'type': 'list'
+ },
+ 'name': {
+ 'required': True,
+ 'type': 'str'
+ }
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os10/plugins/modules/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/plugins/modules/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/bfd.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/bfd.py
new file mode 100644
index 000000000..57532795e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bfd/bfd.py
@@ -0,0 +1,89 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_bfd module
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class BfdArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_bfd module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'multi_hops': {
+ 'elements': 'dict',
+ 'options': {
+ 'detect_multiplier': {'default': 3, 'type': 'int'},
+ 'enabled': {'default': True, 'type': 'bool'},
+ 'local_address': {'required': True, 'type': 'str'},
+ 'min_ttl': {'default': 254, 'type': 'int'},
+ 'passive_mode': {'default': False, 'type': 'bool'},
+ 'profile_name': {'type': 'str'},
+ 'receive_interval': {'default': 300, 'type': 'int'},
+ 'remote_address': {'required': True, 'type': 'str'},
+ 'transmit_interval': {'default': 300, 'type': 'int'},
+ 'vrf': {'required': True, 'type': 'str'}},
+ 'type': 'list'},
+ 'profiles': {
+ 'elements': 'dict',
+ 'options': {
+ 'detect_multiplier': {'default': 3, 'type': 'int'},
+ 'echo_interval': {'default': 300, 'type': 'int'},
+ 'echo_mode': {'default': False, 'type': 'bool'},
+ 'enabled': {'default': True, 'type': 'bool'},
+ 'min_ttl': {'default': 254, 'type': 'int'},
+ 'passive_mode': {'default': False, 'type': 'bool'},
+ 'profile_name': {'required': True, 'type': 'str'},
+ 'receive_interval': {'default': 300, 'type': 'int'},
+ 'transmit_interval': {'default': 300, 'type': 'int'}},
+ 'type': 'list'},
+ 'single_hops': {
+ 'elements': 'dict',
+ 'options': {
+ 'detect_multiplier': {'default': 3, 'type': 'int'},
+ 'echo_interval': {'default': 300, 'type': 'int'},
+ 'echo_mode': {'default': False, 'type': 'bool'},
+ 'enabled': {'default': True, 'type': 'bool'},
+ 'interface': {'required': True, 'type': 'str'},
+ 'local_address': {'required': True, 'type': 'str'},
+ 'passive_mode': {'default': False, 'type': 'bool'},
+ 'profile_name': {'type': 'str'},
+ 'receive_interval': {'default': 300, 'type': 'int'},
+ 'remote_address': {'required': True, 'type': 'str'},
+ 'transmit_interval': {'default': 300, 'type': 'int'},
+ 'vrf': {'required': True, 'type': 'str'}},
+ 'type': 'list'}
+ },
+ 'type': 'dict'
+ },
+ 'state': {'choices': ['merged', 'deleted', 'replaced', 'overridden'], 'default': 'merged', 'type': 'str'}
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/bgp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/bgp.py
index fb7618133..8d494dddd 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/bgp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp/bgp.py
@@ -79,6 +79,7 @@ class BgpArgs(object): # pylint: disable=R0903
},
"type": "dict"
},
+ 'rt_delay': {'type': 'int'},
'timers': {
'options': {
'holdtime': {'type': 'int'},
@@ -91,7 +92,7 @@ class BgpArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py
index ac22210ee..336d49b47 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_af/bgp_af.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -71,6 +71,21 @@ class Bgp_afArgs(object): # pylint: disable=R0903
'required': True,
'type': 'str'
},
+ 'rd': {'type': 'str'},
+ 'rt_in': {'type': 'list', 'elements': 'str'},
+ 'rt_out': {'type': 'list', 'elements': 'str'},
+ 'vnis': {
+ 'elements': 'dict',
+ 'options': {
+ 'advertise_default_gw': {'type': 'bool'},
+ 'advertise_svi_ip': {'type': 'bool'},
+ 'rd': {'type': 'str'},
+ 'rt_in': {'type': 'list', 'elements': 'str'},
+ 'rt_out': {'type': 'list', 'elements': 'str'},
+ 'vni_number': {'required': True, 'type': 'int'}
+ },
+ 'type': 'list'
+ },
'max_path': {
'options': {
'ebgp': {'type': 'int'},
@@ -111,7 +126,7 @@ class Bgp_afArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'overridden', 'replaced'],
'default': 'merged'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py
index dec9b930e..d9d4ed766 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_as_paths/bgp_as_paths.py
@@ -43,6 +43,6 @@ class Bgp_as_pathsArgs(object): # pylint: disable=R0903
'type': 'list'},
'name': {'required': True, 'type': 'str'}},
'type': 'list'},
- 'state': {'choices': ['merged', 'deleted'],
+ 'state': {'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'}} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py
index 867e55204..c90fab8e9 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_communities/bgp_communities.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -54,6 +54,6 @@ class Bgp_communitiesArgs(object): # pylint: disable=R0903
'default': 'standard',
'type': 'str'}},
'type': 'list'},
- 'state': {'choices': ['merged', 'deleted'],
+ 'state': {'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'}} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py
index aec0f364a..4cee6182b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/bgp_ext_communities/bgp_ext_communities.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -68,7 +68,7 @@ class Bgp_ext_communitiesArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/copp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/copp.py
new file mode 100644
index 000000000..889c614c6
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/copp/copp.py
@@ -0,0 +1,59 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_copp module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class CoppArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_copp module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'copp_groups': {
+ 'elements': 'dict',
+ 'options': {
+ 'cbs': {'type': 'str'},
+ 'cir': {'type': 'str'},
+ 'copp_name': {'required': True, 'type': 'str'},
+ 'queue': {'type': 'int'},
+ 'trap_action': {'type': 'str'},
+ 'trap_priority': {'type': 'int'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'state': {'choices': ['merged', 'deleted', 'replaced', 'overridden'], 'default': 'merged', 'type': 'str'}
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os6/plugins/action/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/plugins/action/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/dhcp_relay.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/dhcp_relay.py
new file mode 100644
index 000000000..0ca834487
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_relay/dhcp_relay.py
@@ -0,0 +1,94 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_dhcp_relay module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Dhcp_relayArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_dhcp_relay module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'ipv4': {
+ 'options': {
+ 'circuit_id': {
+ 'choices': ['%h:%p', '%i', '%p'],
+ 'type': 'str'
+ },
+ 'link_select': {'type': 'bool'},
+ 'max_hop_count': {'type': 'int'},
+ 'policy_action': {
+ 'choices': ['append', 'discard', 'replace'],
+ 'type': 'str'
+ },
+ 'server_addresses': {
+ 'elements': 'dict',
+ 'options': {
+ 'address': {'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'source_interface': {'type': 'str'},
+ 'vrf_name': {'type': 'str'},
+ 'vrf_select': {'type': 'bool'}
+ },
+ 'type': 'dict'
+ },
+ 'ipv6': {
+ 'options': {
+ 'max_hop_count': {'type': 'int'},
+ 'server_addresses': {
+ 'elements': 'dict',
+ 'options': {
+ 'address': {'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'source_interface': {'type': 'str'},
+ 'vrf_name': {'type': 'str'},
+ 'vrf_select': {'type': 'bool'}
+ },
+ 'type': 'dict'
+ },
+ 'name': {'required': True, 'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os6/plugins/cliconf/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/plugins/cliconf/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/dhcp_snooping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/dhcp_snooping.py
new file mode 100644
index 000000000..6cf2ecacd
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/dhcp_snooping/dhcp_snooping.py
@@ -0,0 +1,81 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_dhcp_snooping module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Dhcp_snoopingArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_dhcp_snooping module"""
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'afis': {
+ 'elements': 'dict',
+ 'options': {
+ 'afi': {
+ 'choices': ['ipv4', 'ipv6'],
+ 'required': True,
+ 'type': 'str',
+ },
+ 'enabled': {'type': 'bool'},
+ 'source_bindings': {
+ 'elements': 'dict',
+ 'options': {
+ 'mac_addr': {'required': True, 'type': 'str'},
+ 'ip_addr': {'type': 'str'},
+ 'intf_name': {'type': 'str'},
+ 'vlan_id': {'type': 'int'},
+ },
+ 'type': 'list',
+ },
+ 'trusted': {
+ 'elements': 'dict',
+ 'options': {
+ 'intf_name': {'required': True, 'type': 'str'},
+ },
+ 'type': 'list',
+ },
+ 'verify_mac': {'type': 'bool'},
+ 'vlans': {'elements': 'str', 'type': 'list'},
+ },
+ 'type': 'list',
+ }
+ },
+ 'type': 'dict',
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted', 'overridden', 'replaced'],
+ 'default': 'merged',
+ 'type': 'str',
+ },
+ } # pylint: disable=C0301
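
A similar illustrative sketch for Dhcp_snoopingArgs.argument_spec, again with made-up addresses and VLANs; the loop restates the 'choices' and 'required' constraints on the per-entry 'afi' key rather than reproducing Ansible's own validation.

    # Hypothetical dhcp_snooping 'config' value matching the spec above.
    dhcp_snooping_config = {
        'afis': [
            {
                'afi': 'ipv4',                         # required, must be 'ipv4' or 'ipv6'
                'enabled': True,
                'verify_mac': True,
                'vlans': ['10', '20'],
                'source_bindings': [
                    {'mac_addr': '00:11:22:33:44:55', 'ip_addr': '192.0.2.10',
                     'intf_name': 'Eth1/2', 'vlan_id': 10},
                ],
            }
        ]
    }

    for entry in dhcp_snooping_config['afis']:
        assert entry['afi'] in ('ipv4', 'ipv6')        # mirrors the 'choices' constraint
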
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py
index 3a4d02989..6b27194eb 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/facts/facts.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -35,6 +35,7 @@ class FactsArgs(object): # pylint: disable=R0903
'bgp_ext_communities',
'mclag',
'prefix_lists',
+ 'vlan_mapping',
'vrfs',
'vxlans',
'users',
@@ -44,7 +45,22 @@ class FactsArgs(object): # pylint: disable=R0903
'tacacs_server',
'radius_server',
'static_routes',
- 'ntp'
+ 'ntp',
+ 'logging',
+ 'pki',
+ 'ip_neighbor',
+ 'port_group',
+ 'dhcp_relay',
+ 'dhcp_snooping',
+ 'acl_interfaces',
+ 'l2_acls',
+ 'l3_acls',
+ 'lldp_global',
+ 'mac',
+ 'bfd',
+ 'copp',
+ 'route_maps',
+ 'stp'
]
argument_spec = {
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py
index 76c36a90b..407dbde6b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/interfaces/interfaces.py
@@ -44,12 +44,33 @@ class InterfacesArgs(object): # pylint: disable=R0903
"description": {"type": "str"},
"enabled": {"type": "bool"},
"mtu": {"type": "int"},
- "name": {"required": True, "type": "str"}
+ "name": {"required": True, "type": "str"},
+ "speed": {"type": "str",
+ "choices": ["SPEED_10MB",
+ "SPEED_100MB",
+ "SPEED_1GB",
+ "SPEED_2500MB",
+ "SPEED_5GB",
+ "SPEED_10GB",
+ "SPEED_20GB",
+ "SPEED_25GB",
+ "SPEED_40GB",
+ "SPEED_50GB",
+ "SPEED_100GB",
+ "SPEED_400GB"]},
+ "auto_negotiate": {"type": "bool"},
+ "advertised_speed": {"type": "list", "elements": "str"},
+ "fec": {"type": "str",
+ "choices": ["FEC_RS",
+ "FEC_FC",
+ "FEC_DISABLED",
+ "FEC_DEFAULT",
+ "FEC_AUTO"]}
},
"type": "list"
},
"state": {
- "choices": ["merged", "deleted"],
+ "choices": ["merged", "replaced", "overridden", "deleted"],
"default": "merged",
"type": "str"
}
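
The newly added 'speed', 'auto_negotiate', 'advertised_speed', and 'fec' attributes can be pictured with a small sketch like the one below (hypothetical values; the advertised_speed strings are illustrative only and are not taken from the module documentation).

    # Hypothetical interface entry using the newly added attributes.
    SPEED_CHOICES = {
        'SPEED_10MB', 'SPEED_100MB', 'SPEED_1GB', 'SPEED_2500MB', 'SPEED_5GB',
        'SPEED_10GB', 'SPEED_20GB', 'SPEED_25GB', 'SPEED_40GB', 'SPEED_50GB',
        'SPEED_100GB', 'SPEED_400GB',
    }

    interface = {
        'name': 'Eth1/5',
        'enabled': True,
        'mtu': 9100,
        'speed': 'SPEED_100GB',
        'auto_negotiate': False,
        'advertised_speed': ['100000', '40000'],   # illustrative strings only
        'fec': 'FEC_RS',
    }

    # Restates the 'choices' constraint on speed.
    assert interface['speed'] in SPEED_CHOICES
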
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ip_neighbor/ip_neighbor.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ip_neighbor/ip_neighbor.py
new file mode 100644
index 000000000..fef1c67c0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ip_neighbor/ip_neighbor.py
@@ -0,0 +1,56 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_ip_neighbor module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Ip_neighborArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_ip_neighbor module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'ipv4_arp_timeout': {'type': 'int'},
+ 'ipv4_drop_neighbor_aging_time': {'type': 'int'},
+ 'ipv6_drop_neighbor_aging_time': {'type': 'int'},
+ 'ipv6_nd_cache_expiry': {'type': 'int'},
+ 'num_local_neigh': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'state': {
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
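
For completeness, a hypothetical full parameter set for sonic_ip_neighbor built from this spec; all timers are plain integers and 'state' falls back to 'merged' when omitted.

    # Hypothetical full set of module parameters for sonic_ip_neighbor.
    module_params = {
        'config': {
            'ipv4_arp_timeout': 1800,
            'ipv4_drop_neighbor_aging_time': 300,
            'ipv6_drop_neighbor_aging_time': 300,
            'ipv6_nd_cache_expiry': 1800,
            'num_local_neigh': 1000,
        },
        'state': 'merged',   # default when omitted
    }
    assert module_params['state'] in ('merged', 'replaced', 'overridden', 'deleted')
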
diff --git a/ansible_collections/dellemc/os6/plugins/doc_fragments/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/plugins/doc_fragments/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/l2_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/l2_acls.py
new file mode 100644
index 000000000..5b8ba4f87
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_acls/l2_acls.py
@@ -0,0 +1,129 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_l2_acls module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class L2_aclsArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_l2_acls module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'name': {'required': True, 'type': 'str'},
+ 'remark': {'type': 'str'},
+ 'rules': {
+ 'elements': 'dict',
+ 'mutually_exclusive': [['ethertype', 'vlan_tag_format']],
+ 'options': {
+ 'action': {
+ 'choices': ['deny', 'discard', 'do-not-nat', 'permit', 'transit'],
+ 'type': 'str'
+ },
+ 'dei': {
+ 'choices': [0, 1],
+ 'type': 'int'
+ },
+ 'destination': {
+ 'mutually_exclusive': [['any', 'host', 'address']],
+ 'options': {
+ 'address': {'type': 'str'},
+ 'address_mask': {'type': 'str'},
+ 'any': {'type': 'bool'},
+ 'host': {'type': 'str'}
+ },
+ 'required_one_of': [['any', 'host', 'address']],
+ 'required_together': [['address', 'address_mask']],
+ 'type': 'dict'
+ },
+ 'ethertype': {
+ 'mutually_exclusive': [['value', 'arp', 'ipv4', 'ipv6']],
+ 'options': {
+ 'arp': {'type': 'bool'},
+ 'ipv4': {'type': 'bool'},
+ 'ipv6': {'type': 'bool'},
+ 'value': {'type': 'str'}
+ },
+ 'type': 'dict'
+ },
+ 'pcp': {
+ 'mutually_exclusive': [
+ ['value', 'traffic_type'],
+ ['mask', 'traffic_type']
+ ],
+ 'options': {
+ 'mask': {'type': 'int'},
+ 'traffic_type': {
+ 'choices': ['be', 'bk', 'ee', 'ca', 'vi', 'vo', 'ic', 'nc'],
+ 'type': 'str'
+ },
+ 'value': {'type': 'int'}
+ },
+ 'required_by': {'mask': ['value']},
+ 'type': 'dict'
+ },
+ 'remark': {'type': 'str'},
+ 'sequence_num': {'required': True, 'type': 'int'},
+ 'source': {
+ 'mutually_exclusive': [['any', 'host', 'address']],
+ 'options': {
+ 'address': {'type': 'str'},
+ 'address_mask': {'type': 'str'},
+ 'any': {'type': 'bool'},
+ 'host': {'type': 'str'}
+ },
+ 'required_one_of': [['any', 'host', 'address']],
+ 'required_together': [['address', 'address_mask']],
+ 'type': 'dict'
+ },
+ 'vlan_id': {'type': 'int'},
+ 'vlan_tag_format': {
+ 'options': {
+ 'multi_tagged': {'type': 'bool'}
+ },
+ 'type': 'dict'
+ }
+ },
+ 'required_together': [['action', 'source', 'destination']],
+ 'type': 'list'
+ }
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
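
The 'mutually_exclusive' plus 'required_one_of' pairing on the 'source' and 'destination' suboptions amounts to "exactly one of any/host/address"; the sketch below (plain Python, not the Ansible validator itself) restates that rule together with the address/address_mask 'required_together' constraint.

    # Hypothetical check mirroring the constraints declared for the
    # 'source' and 'destination' suboptions above.
    def check_endpoint(endpoint):
        keys = [k for k in ('any', 'host', 'address') if endpoint.get(k)]
        if len(keys) != 1:
            raise ValueError('exactly one of any/host/address must be provided')
        if endpoint.get('address') and not endpoint.get('address_mask'):
            raise ValueError('address and address_mask are required together')

    check_endpoint({'host': '00:11:22:33:44:55'})                      # ok
    check_endpoint({'address': '00:11:22:00:00:00',
                    'address_mask': 'ff:ff:ff:00:00:00'})              # ok
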
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py
index bbebe2d54..b5ce4ad8e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l2_interfaces/l2_interfaces.py
@@ -53,7 +53,7 @@ class L2_interfacesArgs(object): # pylint: disable=R0903
'allowed_vlans': {
'elements': 'dict',
'options': {
- 'vlan': {'type': 'int'}
+ 'vlan': {'type': 'str'}
},
'type': 'list'
}
@@ -64,7 +64,7 @@ class L2_interfacesArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/os6/plugins/module_utils/network/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/plugins/module_utils/network/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/l3_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/l3_acls.py
new file mode 100644
index 000000000..7339201ef
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_acls/l3_acls.py
@@ -0,0 +1,223 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_l3_acls module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class L3_aclsArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_l3_acls module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'acls': {
+ 'elements': 'dict',
+ 'options': {
+ 'name': {'required': True, 'type': 'str'},
+ 'remark': {'type': 'str'},
+ 'rules': {
+ 'elements': 'dict',
+ 'options': {
+ 'action': {
+ 'choices': ['deny', 'discard', 'do-not-nat', 'permit', 'transit'],
+ 'type': 'str'
+ },
+ 'destination': {
+ 'mutually_exclusive': [['any', 'host', 'prefix']],
+ 'options': {
+ 'any': {'type': 'bool'},
+ 'host': {'type': 'str'},
+ 'port_number': {
+ 'mutually_exclusive': [['eq', 'gt', 'lt', 'range']],
+ 'options': {
+ 'eq': {'type': 'int'},
+ 'gt': {'type': 'int'},
+ 'lt': {'type': 'int'},
+ 'range': {
+ 'options': {
+ 'begin': {'type': 'int'},
+ 'end': {'type': 'int'}
+ },
+ 'required_together': [['begin', 'end']],
+ 'type': 'dict'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'prefix': {'type': 'str'}
+ },
+ 'required_one_of': [['any', 'host', 'prefix']],
+ 'type': 'dict'
+ },
+ 'dscp': {
+ 'mutually_exclusive': [[
+ 'value', 'af11', 'af12', 'af13', 'af21', 'af22', 'af23', 'af31', 'af32', 'af33',
+ 'cs1', 'cs2', 'cs3', 'cs4', 'cs5', 'cs6', 'cs7', 'default', 'ef', 'voice_admit'
+ ]],
+ 'options': {
+ 'af11': {'type': 'bool'},
+ 'af12': {'type': 'bool'},
+ 'af13': {'type': 'bool'},
+ 'af21': {'type': 'bool'},
+ 'af22': {'type': 'bool'},
+ 'af23': {'type': 'bool'},
+ 'af31': {'type': 'bool'},
+ 'af32': {'type': 'bool'},
+ 'af33': {'type': 'bool'},
+ 'af41': {'type': 'bool'},
+ 'af42': {'type': 'bool'},
+ 'af43': {'type': 'bool'},
+ 'cs1': {'type': 'bool'},
+ 'cs2': {'type': 'bool'},
+ 'cs3': {'type': 'bool'},
+ 'cs4': {'type': 'bool'},
+ 'cs5': {'type': 'bool'},
+ 'cs6': {'type': 'bool'},
+ 'cs7': {'type': 'bool'},
+ 'default': {'type': 'bool'},
+ 'ef': {'type': 'bool'},
+ 'value': {'type': 'int'},
+ 'voice_admit': {'type': 'bool'}
+ },
+ 'type': 'dict'
+ },
+ 'protocol': {
+ 'mutually_exclusive': [['name', 'number']],
+ 'options': {
+ 'name': {
+ 'choices': ['ip', 'ipv6', 'icmp', 'icmpv6', 'tcp', 'udp'],
+ 'type': 'str'
+ },
+ 'number': {'type': 'int'}
+ },
+ 'required_one_of': [['name', 'number']],
+ 'type': 'dict'
+ },
+ 'protocol_options': {
+ 'mutually_exclusive': [['icmp', 'icmpv6', 'tcp']],
+ 'options': {
+ 'icmp': {
+ 'options': {
+ 'code': {'type': 'int'},
+ 'type': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'icmpv6': {
+ 'options': {
+ 'code': {'type': 'int'},
+ 'type': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'tcp': {
+ 'mutually_exclusive': [
+ ['established', 'ack', 'not_ack'],
+ ['established', 'fin', 'not_fin'],
+ ['established', 'psh', 'not_psh'],
+ ['established', 'rst', 'not_rst'],
+ ['established', 'syn', 'not_syn'],
+ ['established', 'urg', 'not_urg']
+ ],
+ 'options': {
+ 'ack': {'type': 'bool'},
+ 'established': {'type': 'bool'},
+ 'fin': {'type': 'bool'},
+ 'not_ack': {'type': 'bool'},
+ 'not_fin': {'type': 'bool'},
+ 'not_psh': {'type': 'bool'},
+ 'not_rst': {'type': 'bool'},
+ 'not_syn': {'type': 'bool'},
+ 'not_urg': {'type': 'bool'},
+ 'psh': {'type': 'bool'},
+ 'rst': {'type': 'bool'},
+ 'syn': {'type': 'bool'},
+ 'urg': {'type': 'bool'}
+ },
+ 'type': 'dict'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'remark': {'type': 'str'},
+ 'sequence_num': {'required': True, 'type': 'int'},
+ 'source': {
+ 'mutually_exclusive': [['any', 'host', 'prefix']],
+ 'options': {
+ 'any': {'type': 'bool'},
+ 'host': {'type': 'str'},
+ 'port_number': {
+ 'mutually_exclusive': [['eq', 'gt', 'lt', 'range']],
+ 'options': {
+ 'eq': {'type': 'int'},
+ 'gt': {'type': 'int'},
+ 'lt': {'type': 'int'},
+ 'range': {
+ 'options': {
+ 'begin': {'type': 'int'},
+ 'end': {'type': 'int'}
+ },
+ 'required_together': [['begin', 'end']],
+ 'type': 'dict'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'prefix': {'type': 'str'}
+ },
+ 'required_one_of': [['any', 'host', 'prefix']],
+ 'type': 'dict'
+ },
+ 'vlan_id': {'type': 'int'}
+ },
+ 'required_together': [['action', 'protocol', 'source', 'destination']],
+ 'type': 'list'
+ }
+ },
+ 'type': 'list'
+ },
+ 'address_family': {
+ 'choices': ['ipv4', 'ipv6'],
+ 'required': True,
+ 'type': 'str'
+ }
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
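
A hypothetical IPv4 rule shaped to this spec, showing how 'protocol_options' nests under a rule and how the 'established' TCP flag excludes the individual flag options; all addresses and ports are placeholders.

    # Hypothetical IPv4 ACL rule shaped to the spec above.
    rule = {
        'sequence_num': 10,                      # required
        'action': 'permit',
        'protocol': {'name': 'tcp'},
        'source': {'prefix': '192.0.2.0/24'},
        'destination': {'any': True,
                        'port_number': {'eq': 443}},
        'protocol_options': {'tcp': {'established': True}},
    }

    # 'established' excludes the individual ack/syn/... flags
    # (see the mutually_exclusive lists above).
    tcp_opts = rule['protocol_options']['tcp']
    assert not (tcp_opts.get('established') and tcp_opts.get('syn'))
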
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py
index 6e83289cc..b32d7f92e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/l3_interfaces/l3_interfaces.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -74,7 +74,7 @@ class L3_interfacesArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py
index 867d61a27..fc349232f 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lag_interfaces/lag_interfaces.py
@@ -60,7 +60,7 @@ class Lag_interfacesArgs(object): # pylint: disable=R0903
"type": "list"
},
"state": {
- "choices": ["merged", "deleted"],
+ "choices": ["merged", "replaced", "overridden", "deleted"],
"default": "merged",
"type": "str"
}
diff --git a/ansible_collections/dellemc/os6/plugins/modules/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/plugins/modules/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/lldp_global.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/lldp_global.py
new file mode 100644
index 000000000..8f9cd9af0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/lldp_global/lldp_global.py
@@ -0,0 +1,81 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_lldp_global module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Lldp_globalArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_lldp_global module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'enable': {
+ 'type': 'bool'
+ },
+ 'hello_time': {
+ 'type': 'int'
+ },
+ 'mode': {
+ 'choices': ['receive', 'transmit'],
+ 'type': 'str'
+ },
+ 'multiplier': {
+ 'type': 'int'
+ },
+ 'system_description': {
+ 'type': 'str'
+ },
+ 'system_name': {
+ 'type': 'str'
+ },
+ 'tlv_select': {
+ 'options': {
+ 'management_address': {
+ 'type': 'bool'
+ },
+ 'system_capabilities': {
+ 'type': 'bool'
+ }
+ },
+ 'type': 'dict'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/logging/logging.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/logging/logging.py
new file mode 100644
index 000000000..a83d9eef4
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/logging/logging.py
@@ -0,0 +1,64 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_logging module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class LoggingArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_logging module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'remote_servers': {
+ 'elements': 'dict',
+ 'options': {
+ 'host': {'required': True,
+ 'type': 'str'},
+ 'message_type': {'choices': ['log', 'event'],
+ 'type': 'str'},
+ 'remote_port': {'type': 'int'},
+ 'source_interface': {'type': 'str'},
+ 'vrf': {'type': 'str'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'state': {
+ 'choices': ['merged', "replaced", "overridden", 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os6/plugins/terminal/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/plugins/terminal/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/mac.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/mac.py
new file mode 100644
index 000000000..d46155377
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mac/mac.py
@@ -0,0 +1,66 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_mac module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class MacArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_mac module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'mac': {
+ 'options': {
+ 'aging_time': {'default': '600', 'type': 'int'},
+ 'dampening_interval': {'default': '5', 'type': 'int'},
+ 'dampening_threshold': {'default': '5', 'type': 'int'},
+ 'mac_table_entries': {
+ 'elements': 'dict',
+ 'options': {
+ 'interface': {'type': 'str'},
+ 'mac_address': {'required': True, 'type': 'str'},
+ 'vlan_id': {'required': True, 'type': 'int'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'vrf_name': {'default': 'default', 'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'state': {'choices': ['merged', 'deleted', 'replaced', 'overridden'], 'default': 'merged', 'type': 'str'}
+ } # pylint: disable=C0301
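
The 'mac' suboptions declare defaults (aging_time 600, dampening_interval 5, dampening_threshold 5); the small hypothetical helper below shows the effect of filling those defaults for a partially specified entry. It mimics, but is not, Ansible's own default handling.

    # Hypothetical helper mirroring the declared defaults for the 'mac' suboptions.
    MAC_DEFAULTS = {'aging_time': 600, 'dampening_interval': 5, 'dampening_threshold': 5}

    def with_defaults(mac_cfg):
        merged = dict(MAC_DEFAULTS)
        merged.update({k: v for k, v in mac_cfg.items() if v is not None})
        return merged

    print(with_defaults({'aging_time': 1200}))
    # {'aging_time': 1200, 'dampening_interval': 5, 'dampening_threshold': 5}
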
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py
index be3c38ca2..0d9b45eb0 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/mclag/mclag.py
@@ -41,6 +41,8 @@ class MclagArgs(object): # pylint: disable=R0903
'config': {
'options': {
'domain_id': {'required': True, 'type': 'int'},
+ 'gateway_mac': {'type': 'str'},
+ 'delay_restore': {'type': 'int'},
'keepalive': {'type': 'int'},
'peer_address': {'type': 'str'},
'peer_link': {'type': 'str'},
@@ -56,6 +58,18 @@ class MclagArgs(object): # pylint: disable=R0903
},
'type': 'dict'
},
+ 'peer_gateway': {
+ 'options': {
+ 'vlans': {
+ 'elements': 'dict',
+ 'options': {
+ 'vlan': {'type': 'str'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'dict'
+ },
'session_timeout': {'type': 'int'},
'source_address': {'type': 'str'},
'system_mac': {'type': 'str'},
@@ -75,7 +89,7 @@ class MclagArgs(object): # pylint: disable=R0903
'type': 'dict'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py
index 062520af9..0d357cc2e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/ntp/ntp.py
@@ -64,8 +64,10 @@ class NtpArgs(object): # pylint: disable=R0903
'type': 'str'},
'key_id': {'type': 'int', 'no_log': True},
'maxpoll': {'type': 'int'},
- 'minpoll': {'type': 'int'}
+ 'minpoll': {'type': 'int'},
+ 'prefer': {'type': 'bool'}
},
+ 'required_together': [['minpoll', 'maxpoll']],
'type': 'list'
},
'source_interfaces': {
@@ -82,7 +84,7 @@ class NtpArgs(object): # pylint: disable=R0903
'type': 'dict'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', "replaced", "overridden", 'deleted'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/pki.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/pki.py
new file mode 100644
index 000000000..5f4aa32af
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/pki/pki.py
@@ -0,0 +1,78 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell EMC
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_pki module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class PkiArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_pki module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'options': {
+ 'security_profiles': {
+ 'elements': 'dict',
+ 'options':
+ {
+ 'cdp_list': {'elements': 'str', 'type': 'list'},
+ 'certificate_name': {'type': 'str'},
+ 'key_usage_check': {'type': 'bool'},
+ 'ocsp_responder_list':
+ {
+ 'elements': 'str',
+ 'type': 'list'
+ },
+ 'peer_name_check': {'type': 'bool'},
+ 'profile_name': {'required': True, 'type': 'str'},
+ 'revocation_check': {'type': 'bool'},
+ 'trust_store': {'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'trust_stores': {
+ 'elements': 'dict',
+ 'options': {
+ 'ca_name': {'elements': 'str', 'type': 'list'},
+ 'name': {'required': True, 'type': 'str'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
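
A hypothetical pki 'config' value tying a security profile to a trust store; the names are invented, and the final check (that each profile references a defined trust store) is an illustration of the intended relationship rather than a constraint enforced by the argspec itself.

    # Hypothetical pki 'config' value.
    pki_config = {
        'trust_stores': [{'name': 'default-ts', 'ca_name': ['CA1', 'CA2']}],
        'security_profiles': [
            {'profile_name': 'rest', 'certificate_name': 'host-cert',
             'trust_store': 'default-ts', 'peer_name_check': True},
        ],
    }

    # Illustrative consistency check, not part of the argument spec.
    defined = {ts['name'] for ts in pki_config['trust_stores']}
    assert all(p['trust_store'] in defined for p in pki_config['security_profiles'])
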
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py
index 3b8f4a5a3..90d736ff3 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_breakout/port_breakout.py
@@ -42,8 +42,10 @@ class Port_breakoutArgs(object): # pylint: disable=R0903
'elements': 'dict',
'options': {
'mode': {
- 'choices': ['1x100G', '1x400G', '1x40G', '2x100G', '2x200G',
- '2x50G', '4x100G', '4x10G', '4x25G', '4x50G'],
+ 'choices': ['1x10G', '1x25G', '1x40G', '1x50G', '1x100G',
+ '1x200G', '1x400G', '2x10G', '2x25G', '2x40G',
+ '2x50G', '2x100G', '2x200G', '4x10G', '4x25G',
+ '4x50G', '4x100G', '8x10G', '8x25G', '8x50G'],
'type': 'str'
},
'name': {'required': True, 'type': 'str'}
@@ -51,7 +53,7 @@ class Port_breakoutArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os9/plugins/action/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/action/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/port_group.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/port_group.py
new file mode 100644
index 000000000..9db29de2e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/port_group/port_group.py
@@ -0,0 +1,66 @@
+#
+# -*- coding: utf-8 -*-
+# © Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_port_group module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Port_groupArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_port_group module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'id': {'required': True, 'type': 'str'},
+ 'speed': {'choices': ['SPEED_10MB',
+ 'SPEED_100MB',
+ 'SPEED_1GB',
+ 'SPEED_2500MB',
+ 'SPEED_5GB',
+ 'SPEED_10GB',
+ 'SPEED_20GB',
+ 'SPEED_25GB',
+ 'SPEED_40GB',
+ 'SPEED_50GB',
+ 'SPEED_100GB',
+ 'SPEED_400GB'],
+ 'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py
index d043ae6f8..17de2eeae 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/prefix_lists/prefix_lists.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -64,7 +64,7 @@ class Prefix_listsArgs: # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py
index a56147a5b..0ef029d7b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/radius_server/radius_server.py
@@ -59,7 +59,7 @@ class Radius_serverArgs(object): # pylint: disable=R0903
},
'key': {'type': 'str', 'no_log': True},
'name': {'type': 'str'},
- 'port': {'type': 'int'},
+ 'port': {'type': 'int', 'default': 1812},
'priority': {'type': 'int'},
'retransmit': {'type': 'int'},
'source_interface': {'type': 'str'},
@@ -72,12 +72,12 @@ class Radius_serverArgs(object): # pylint: disable=R0903
'type': 'dict'
},
'statistics': {'type': 'bool'},
- 'timeout': {'type': 'int'}
+ 'timeout': {'type': 'int', 'default': 5}
},
'type': 'dict'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/route_maps/route_maps.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/route_maps/route_maps.py
new file mode 100644
index 000000000..f36fbbcb0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/route_maps/route_maps.py
@@ -0,0 +1,196 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_route_maps module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Route_mapsArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_route_maps module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'map_name': {'required': True, 'type': 'str'},
+ 'action': {
+ 'choices': ['permit', 'deny'],
+ 'type': 'str'
+ },
+ 'sequence_num': {
+ 'type': 'int'
+ },
+ 'match': {
+ 'options': {
+ 'as_path': {'type': 'str'},
+ 'community': {'type': 'str'},
+ 'evpn': {
+ 'options': {
+ 'default_route': {'type': 'bool'},
+ 'route_type': {
+ 'choices': ['macip', 'multicast', 'prefix'],
+ 'type': 'str'
+ },
+ 'vni': {'type': 'int'}
+ },
+ 'required_one_of': [['default_route', 'route_type', 'vni']],
+ 'type': 'dict'
+ },
+ 'ext_comm': {'type': 'str'},
+ 'interface': {'type': 'str'},
+ 'ip': {
+ 'options': {
+ 'address': {'type': 'str'},
+ 'next_hop': {'type': 'str'}
+ },
+ 'required_one_of': [['address', 'next_hop']],
+ 'type': 'dict'
+ },
+ 'ipv6': {
+ 'options': {
+ 'address': {
+ 'required': True,
+ 'type': 'str'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'local_preference': {'type': 'int'},
+ 'metric': {'type': 'int'},
+ 'origin': {
+ 'choices': ['egp', 'igp', 'incomplete'],
+ 'type': 'str'
+ },
+ 'peer': {
+ 'mutually_exclusive': [['ip', 'ipv6', 'interface']],
+ 'options': {
+ 'interface': {'type': 'str'},
+ 'ip': {'type': 'str'},
+ 'ipv6': {'type': 'str'}
+ },
+ 'required_one_of': [['ip', 'ipv6', 'interface']],
+ 'type': 'dict'
+ },
+ 'source_protocol': {
+ 'choices': ['bgp', 'connected', 'ospf', 'static'],
+ 'type': 'str'
+ },
+ 'source_vrf': {'type': 'str'},
+ 'tag': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'set': {
+ 'options': {
+ 'as_path_prepend': {'type': 'str'},
+ 'comm_list_delete': {'type': 'str'},
+ 'community': {
+ 'options': {
+ 'community_number': {
+ 'elements': 'str',
+ 'type': 'list'
+ },
+ 'community_attributes': {
+ 'elements': 'str',
+ 'type': 'list',
+ 'mutually_exclusive': [
+ ['none', 'local_as'],
+ ['none', 'no_advertise'],
+ ['none', 'no_export'],
+ ['none', 'no_peer'],
+ ['none', 'additive']
+ ],
+ 'choices': [
+ 'local_as',
+ 'no_advertise',
+ 'no_export',
+ 'no_peer',
+ 'additive',
+ 'none'
+ ]
+ },
+ },
+ 'type': 'dict'
+ },
+ 'extcommunity': {
+ 'options': {
+ 'rt': {
+ 'elements': 'str',
+ 'type': 'list'
+ },
+ 'soo': {
+ 'elements': 'str',
+ 'type': 'list'
+ }
+ },
+ 'required_one_of': [['rt', 'soo']],
+ 'type': 'dict'
+ },
+ 'ip_next_hop': {'type': 'str'},
+ 'ipv6_next_hop': {
+ 'options': {
+ 'global_addr': {'type': 'str'},
+ 'prefer_global': {'type': 'bool'}
+ },
+ 'required_one_of': [['global_addr', 'prefer_global']],
+ 'type': 'dict'},
+ 'local_preference': {'type': 'int'},
+ 'metric': {
+ 'mutually_exclusive': [['value', 'rtt_action']],
+ 'required_one_of': [['value', 'rtt_action']],
+ 'options': {
+ 'rtt_action': {
+ 'choices': ['set', 'add', 'subtract'],
+ 'type': 'str'
+ },
+ 'value': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'origin': {
+ 'choices': ['egp', 'igp', 'incomplete'],
+ 'type': 'str'
+ },
+ 'weight': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'call': {'type': 'str'},
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
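
A hypothetical route-map entry shaped to this spec; the prefix-list name and numeric values are placeholders, and the comment notes the 'rtt_action'/'value' mutual exclusion under set.metric.

    # Hypothetical route map entry shaped to the spec above.
    route_map = {
        'map_name': 'rm_demo',
        'action': 'permit',
        'sequence_num': 10,
        'match': {'ip': {'address': 'prefix_list_1'}, 'metric': 100},
        'set': {
            'local_preference': 200,
            'community': {'community_attributes': ['no_export']},
            'metric': {'rtt_action': 'set'},   # 'rtt_action' and 'value' are mutually exclusive
        },
    }
    assert route_map['action'] in ('permit', 'deny')
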
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py
index a146f1ecd..3dbaf3045 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/static_routes/static_routes.py
@@ -72,7 +72,7 @@ class Static_routesArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'overridden', 'replaced'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/os9/plugins/cliconf/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/cliconf/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/stp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/stp.py
new file mode 100644
index 000000000..145632051
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/stp/stp.py
@@ -0,0 +1,152 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_stp module
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class StpArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_stp module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'mutually_exclusive': [['mstp', 'pvst', 'rapid_pvst']],
+ 'options': {
+ 'global': {
+ 'options': {
+ 'bpdu_filter': {'default': False, 'type': 'bool'},
+ 'bridge_priority': {'default': 32768, 'type': 'int'},
+ 'disabled_vlans': {'elements': 'str', 'type': 'list'},
+ 'enabled_protocol': {'choices': ['mst', 'pvst', 'rapid_pvst'], 'type': 'str'},
+ 'fwd_delay': {'default': 15, 'type': 'int'},
+ 'hello_time': {'default': 2, 'type': 'int'},
+ 'loop_guard': {'default': False, 'type': 'bool'},
+ 'max_age': {'default': 20, 'type': 'int'},
+ 'portfast': {'default': False, 'type': 'bool'},
+ 'root_guard_timeout': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'interfaces': {
+ 'elements': 'dict',
+ 'options': {
+ 'bpdu_filter': {'default': False, 'type': 'bool'},
+ 'bpdu_guard': {'default': False, 'type': 'bool'},
+ 'cost': {'type': 'int'},
+ 'edge_port': {'default': False, 'type': 'bool'},
+ 'guard': {'choices': ['loop', 'root', 'none'], 'type': 'str'},
+ 'intf_name': {'required': True, 'type': 'str'},
+ 'link_type': {'choices': ['point-to-point', 'shared'], 'type': 'str'},
+ 'port_priority': {'type': 'int'},
+ 'portfast': {'default': False, 'type': 'bool'},
+ 'shutdown': {'default': False, 'type': 'bool'},
+ 'stp_enable': {'default': True, 'type': 'bool'},
+ 'uplink_fast': {'default': False, 'type': 'bool'}
+ },
+ 'type': 'list'
+ },
+ 'mstp': {
+ 'options': {
+ 'fwd_delay': {'type': 'int'},
+ 'hello_time': {'type': 'int'},
+ 'max_age': {'type': 'int'},
+ 'max_hop': {'type': 'int'},
+ 'mst_instances': {
+ 'elements': 'dict',
+ 'options': {
+ 'bridge_priority': {'type': 'int'},
+ 'mst_id': {'required': True, 'type': 'int'},
+ 'vlans': {'elements': 'str', 'type': 'list'},
+ 'interfaces': {
+ 'elements': 'dict',
+ 'options': {
+ 'cost': {'type': 'int'},
+ 'intf_name': {'required': True, 'type': 'str'},
+ 'port_priority': {'type': 'int'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'list'
+ },
+ 'mst_name': {'type': 'str'},
+ 'revision': {'type': 'int'}
+ },
+ 'type': 'dict'
+ },
+ 'pvst': {
+ 'elements': 'dict',
+ 'options': {
+ 'bridge_priority': {'type': 'int'},
+ 'fwd_delay': {'type': 'int'},
+ 'hello_time': {'type': 'int'},
+ 'vlan_id': {'required': True, 'type': 'int'},
+ 'max_age': {'type': 'int'},
+ 'interfaces': {
+ 'elements': 'dict',
+ 'options': {
+ 'cost': {'type': 'int'},
+ 'intf_name': {'required': True, 'type': 'str'},
+ 'port_priority': {'type': 'int'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'list'
+ },
+ 'rapid_pvst': {
+ 'elements': 'dict',
+ 'options': {
+ 'bridge_priority': {'type': 'int'},
+ 'fwd_delay': {'type': 'int'},
+ 'hello_time': {'type': 'int'},
+ 'vlan_id': {'required': True, 'type': 'int'},
+ 'max_age': {'type': 'int'},
+ 'interfaces': {
+ 'elements': 'dict',
+ 'options': {
+ 'cost': {'type': 'int'},
+ 'intf_name': {'required': True, 'type': 'str'},
+ 'port_priority': {'type': 'int'}
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'list'
+ }
+ },
+ 'type': 'dict'
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
+ 'default': 'merged', 'type': 'str'
+ }
+ } # pylint: disable=C0301
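
At the top level this spec declares mstp, pvst, and rapid_pvst as mutually exclusive; the hypothetical sketch below carries only an 'mstp' block and restates that rule with a simple count (values are illustrative).

    # Hypothetical stp 'config' value; only one of mstp/pvst/rapid_pvst may be supplied.
    stp_config = {
        'global': {'enabled_protocol': 'mst', 'bridge_priority': 4096},
        'mstp': {
            'mst_name': 'region1',
            'revision': 1,
            'mst_instances': [{'mst_id': 1, 'vlans': ['10', '20-30']}],
        },
    }
    assert sum(k in stp_config for k in ('mstp', 'pvst', 'rapid_pvst')) <= 1
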
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/system.py
index b08c5f4bc..df835c156 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/system.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/system/system.py
@@ -57,7 +57,7 @@ class SystemArgs(object): # pylint: disable=R0903
'type': 'dict'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py
index aad1746d4..98df26913 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/tacacs_server/tacacs_server.py
@@ -69,12 +69,12 @@ class Tacacs_serverArgs(object): # pylint: disable=R0903
'type': 'dict'
},
'source_interface': {'type': 'str'},
- 'timeout': {'type': 'int'}
+ 'timeout': {'type': 'int', 'default': 5}
},
'type': 'dict'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/users/users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/users/users.py
index db23d78e0..7adca72a1 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/users/users.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/users/users.py
@@ -44,7 +44,7 @@ class UsersArgs(object): # pylint: disable=R0903
'name': {'required': True, 'type': 'str'},
'password': {'type': 'str', 'no_log': True},
'role': {
- 'choices': ['admin', 'operator'],
+ 'choices': ['admin', 'operator', 'netadmin', 'secadmin'],
'type': 'str'
},
'update_password': {
@@ -56,7 +56,7 @@ class UsersArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'overridden', 'replaced'],
'default': 'merged'
}
} # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/os9/plugins/doc_fragments/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/doc_fragments/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/vlan_mapping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/vlan_mapping.py
new file mode 100644
index 000000000..ced5833fa
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlan_mapping/vlan_mapping.py
@@ -0,0 +1,64 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be overwritten
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The arg spec for the sonic_vlan_mapping module
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+class Vlan_mappingArgs(object): # pylint: disable=R0903
+ """The arg spec for the sonic_vlan_mapping module
+ """
+
+ def __init__(self, **kwargs):
+ pass
+
+ argument_spec = {
+ 'config': {
+ 'elements': 'dict',
+ 'options': {
+ 'mapping': {
+ 'elements': 'dict',
+ 'options': {
+ 'dot1q_tunnel': {'type': 'bool', 'default': False},
+ 'inner_vlan': {'type': 'int'},
+ 'priority': {'type': 'int'},
+ 'service_vlan': {'required': True, 'type': 'int'},
+ 'vlan_ids': {'elements': 'str', 'type': 'list'}
+ },
+ 'type': 'list'
+ },
+ 'name': {'required': True, 'type': 'str'}
+ },
+ 'type': 'list'
+ },
+ 'state': {
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
+ 'default': 'merged',
+ 'type': 'str'
+ }
+ } # pylint: disable=C0301
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/vlans.py
index 971fc8571..7ae8b5a47 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/vlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vlans/vlans.py
@@ -47,7 +47,7 @@ class VlansArgs(object): # pylint: disable=R0903
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'replaced', 'overridden', 'deleted'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py
index e074936a7..992906044 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vrfs/vrfs.py
@@ -59,7 +59,7 @@ class VrfsArgs(object): # pylint: disable=R0903
"type": "list"
},
"state": {
- "choices": ["merged", "deleted"],
+ "choices": ["merged", "replaced", "overridden", "deleted"],
"default": "merged",
"type": "str"
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py
index dd475b78a..e610eaca8 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/argspec/vxlans/vxlans.py
@@ -62,11 +62,10 @@ class VxlansArgs(object): # pylint: disable=R0903
'type': 'list'
}
},
- 'required_together': [['source_ip', 'evpn_nvo']],
'type': 'list'
},
'state': {
- 'choices': ['merged', 'deleted'],
+ 'choices': ['merged', 'deleted', 'replaced', 'overridden'],
'default': 'merged',
'type': 'str'
}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/aaa.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/aaa.py
index 85f93bc73..036567f0a 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/aaa.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/aaa/aaa.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -31,6 +31,10 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ get_new_config,
+ get_formatted_config_diff
+)
PATCH = 'patch'
DELETE = 'delete'
@@ -89,6 +93,16 @@ class Aaa(ConfigBase):
if result['changed']:
result['after'] = changed_aaa_facts
+ new_config = changed_aaa_facts
+ old_config = existing_aaa_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_aaa_facts)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -123,15 +137,22 @@ class Aaa(ConfigBase):
state = self._module.params['state']
if not want:
want = {}
+ if not have:
+ have = {}
+
+ diff = self.get_diff_aaa(want, have)
if state == 'deleted':
commands = self._state_deleted(want, have)
elif state == 'merged':
- diff = get_diff(want, have)
- commands = self._state_merged(want, have, diff)
+ commands = self._state_merged(diff)
+ elif state == 'replaced':
+ commands = self._state_replaced(diff)
+ elif state == 'overridden':
+ commands = self._state_overridden(want, have)
return commands
- def _state_merged(self, want, have, diff):
+ def _state_merged(self, diff):
""" The command generator when state is merged
:rtype: A list
@@ -171,6 +192,49 @@ class Aaa(ConfigBase):
commands = update_states(diff_want, "deleted")
return commands, requests
+ def _state_replaced(self, diff):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to replace the current
+ configuration with the provided configuration
+ """
+ commands = []
+ requests = []
+ if diff:
+ requests = self.get_create_aaa_request(diff)
+ if len(requests) > 0:
+ commands = update_states(diff, "replaced")
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ if have and have != want:
+ del_requests = self.get_delete_all_aaa_request(have)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ mod_commands = want
+ mod_requests = self.get_create_aaa_request(mod_commands)
+
+ if len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "overridden"))
+
+ return commands, requests
+
def get_create_aaa_request(self, commands):
requests = []
aaa_path = 'data/openconfig-system:system/aaa'
@@ -183,13 +247,17 @@ class Aaa(ConfigBase):
def build_create_aaa_payload(self, commands):
payload = {}
+ auth_method_list = []
if "authentication" in commands and commands["authentication"]:
- payload = {"openconfig-system:aaa": {"authentication": {"config": {"authentication-method": []}}}}
+ payload = {"openconfig-system:aaa": {"authentication": {"config": {}}}}
if "local" in commands["authentication"]["data"] and commands["authentication"]["data"]["local"]:
- payload['openconfig-system:aaa']['authentication']['config']['authentication-method'].append("local")
+ auth_method_list.append('local')
if "group" in commands["authentication"]["data"] and commands["authentication"]["data"]["group"]:
auth_method = commands["authentication"]["data"]["group"]
- payload['openconfig-system:aaa']['authentication']['config']['authentication-method'].append(auth_method)
+ auth_method_list.append(auth_method)
+ if auth_method_list:
+ cfg = {'authentication-method': auth_method_list}
+ payload['openconfig-system:aaa']['authentication']['config'].update(cfg)
if "fail_through" in commands["authentication"]["data"]:
cfg = {'failthrough': str(commands["authentication"]["data"]["fail_through"])}
payload['openconfig-system:aaa']['authentication']['config'].update(cfg)
@@ -234,3 +302,53 @@ class Aaa(ConfigBase):
method = DELETE
request = {'path': path, 'method': method}
return request
+
+    # The current SONiC behavior for PATCH requests overwrites the OC authentication-method
+    # leaf-list. This function serves as a workaround for that issue, allowing the user to
+    # append to the OC authentication-method leaf-list.
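+    # For example (hypothetical values): if 'local' authentication is already configured
+    # and the playbook requests only 'group: tacacs+', the computed diff retains both
+    # methods so that the resulting PATCH does not drop the existing 'local' entry.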
+ def get_diff_aaa(self, want, have):
+ diff_cfg = {}
+ diff_authentication = {}
+ diff_data = {}
+
+ authentication = want.get('authentication', None)
+ if authentication:
+ data = authentication.get('data', None)
+ if data:
+ fail_through = data.get('fail_through', None)
+ local = data.get('local', None)
+ group = data.get('group', None)
+
+ cfg_authentication = have.get('authentication', None)
+ if cfg_authentication:
+ cfg_data = cfg_authentication.get('data', None)
+ if cfg_data:
+ cfg_fail_through = cfg_data.get('fail_through', None)
+ cfg_local = cfg_data.get('local', None)
+ cfg_group = cfg_data.get('group', None)
+
+ if fail_through is not None and fail_through != cfg_fail_through:
+ diff_data['fail_through'] = fail_through
+ if local and local != cfg_local:
+ diff_data['local'] = local
+ if group and group != cfg_group:
+ diff_data['group'] = group
+
+ diff_local = diff_data.get('local', None)
+ diff_group = diff_data.get('group', None)
+ if diff_local and not diff_group and cfg_group:
+ diff_data['group'] = cfg_group
+ if diff_group and not diff_local and cfg_local:
+ diff_data['local'] = cfg_local
+ else:
+ if fail_through is not None:
+ diff_data['fail_through'] = fail_through
+ if local:
+ diff_data['local'] = local
+ if group:
+ diff_data['group'] = group
+ if diff_data:
+ diff_authentication['data'] = diff_data
+ diff_cfg['authentication'] = diff_authentication
+
+ return diff_cfg
diff --git a/ansible_collections/dellemc/os9/plugins/module_utils/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/module_utils/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/acl_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/acl_interfaces.py
new file mode 100644
index 000000000..0413a585d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/acl_interfaces/acl_interfaces.py
@@ -0,0 +1,499 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_acl_interfaces class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ remove_empties,
+ validate_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states,
+ normalize_interface_name
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
+from ansible.module_utils.connection import ConnectionError
+
+DELETE = 'delete'
+POST = 'post'
+
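+# Key attributes used by the diff utilities to match entries of each list when
+# comparing the requested ('want') and current ('have') configuration. The
+# *_formatted_diff variant attaches a '__delete_op' handler which, as its name
+# suggests, deletes a list entry once it has no remaining subconfiguration.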
+TEST_KEYS = [
+ {'config': {'name': ''}},
+ {'access_groups': {'type': ''}},
+ {'acls': {'name': ''}}
+]
+
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'access_groups': {'type': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'acls': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
+
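+# Map from the module's ACL type names to the corresponding openconfig-acl
+# ACL set type identifiers used in REST paths and payloads.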
+acl_type_to_payload_map = {
+ 'mac': 'ACL_L2',
+ 'ipv4': 'ACL_IPV4',
+ 'ipv6': 'ACL_IPV6'
+}
+
+
+class Acl_interfaces(ConfigBase):
+ """
+ The sonic_acl_interfaces class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'acl_interfaces',
+ ]
+
+ acl_interfaces_path = 'data/openconfig-acl:acl/interfaces/interface={intf_name}'
+ ingress_acl_set_path = acl_interfaces_path + '/ingress-acl-sets/ingress-acl-set={acl_name},{acl_type}'
+ egress_acl_set_path = acl_interfaces_path + '/egress-acl-sets/egress-acl-set={acl_name},{acl_type}'
+
+ def __init__(self, module):
+ super(Acl_interfaces, self).__init__(module)
+
+ def get_acl_interfaces_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ acl_interfaces_facts = facts['ansible_network_resources'].get('acl_interfaces')
+ if not acl_interfaces_facts:
+ return []
+ return acl_interfaces_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+
+ existing_acl_interfaces_facts = self.get_acl_interfaces_facts()
+ commands, requests = self.set_config(existing_acl_interfaces_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+
+ changed_acl_interfaces_facts = self.get_acl_interfaces_facts()
+
+ result['before'] = existing_acl_interfaces_facts
+ if result['changed']:
+ result['after'] = changed_acl_interfaces_facts
+
+ result['commands'] = commands
+
+ new_config = changed_acl_interfaces_facts
+ old_config = existing_acl_interfaces_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_acl_interfaces_facts,
+ TEST_KEYS_formatted_diff)
+ self.post_process_generated_config(new_config)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ self.sort_config(new_config)
+ self.sort_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_acl_interfaces_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ if want:
+ want = self.validate_and_normalize_config(want)
+ else:
+ want = []
+
+ have = existing_acl_interfaces_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ if state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
+
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ del_commands = []
+ add_commands = []
+
+ have_interfaces = self.get_interface_names(have)
+ want_interfaces = self.get_interface_names(want)
+ interfaces_to_replace = have_interfaces.intersection(want_interfaces)
+
+ del_diff = get_diff(have, want, TEST_KEYS)
+ for cmd in del_diff:
+ if cmd['name'] in interfaces_to_replace:
+ del_commands.append(cmd)
+
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests.extend(self.get_interfaces_acl_unbind_requests(del_commands))
+
+ add_diff = get_diff(want, have, TEST_KEYS)
+        # In the replaced state, an entry containing only the interface name
+        # requests deletion of all ACL bindings on that interface, so do not
+        # generate bind requests for it.
+ for cmd in add_diff:
+ if cmd.get('access_groups'):
+ add_commands.append(cmd)
+
+ if add_commands:
+ commands.extend(update_states(add_commands, 'replaced'))
+ requests.extend(self.get_interfaces_acl_bind_requests(add_commands))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ del_commands = []
+
+ have_interfaces = self.get_interface_names(have)
+ want_interfaces = self.get_interface_names(want)
+ interfaces_to_delete = have_interfaces.difference(want_interfaces)
+ interfaces_to_override = have_interfaces.intersection(want_interfaces)
+
+ del_diff = get_diff(have, want, TEST_KEYS)
+ for cmd in del_diff:
+ if cmd['name'] in interfaces_to_delete:
+ del_commands.append({'name': cmd['name']})
+ elif cmd['name'] in interfaces_to_override:
+ del_commands.append(cmd)
+
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests.extend(self.get_interfaces_acl_unbind_requests(del_commands))
+
+ diff = get_diff(want, have, TEST_KEYS)
+ if diff:
+ commands.extend(update_states(diff, 'overridden'))
+ requests.extend(self.get_interfaces_acl_bind_requests(diff))
+
+ return commands, requests
+
+ def _state_merged(self, want, have):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = []
+ requests = []
+
+ diff = get_diff(want, have, TEST_KEYS)
+ if diff:
+ requests = self.get_interfaces_acl_bind_requests(diff)
+ commands = update_states(diff, 'merged')
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+
+ if not want:
+ # Delete all interface ACL bindings in the chassis
+ for cfg in have:
+ commands.append({'name': cfg['name']})
+ else:
+ want_dict = self._convert_config_list_to_dict(want)
+ have_dict = self._convert_config_list_to_dict(have)
+
+ for intf_name, access_groups in want_dict.items():
+ have_obj = have_dict.get(intf_name)
+ if not have_obj:
+ continue
+
+ if not access_groups:
+ commands.append({'name': intf_name})
+ else:
+ access_groups_to_del = []
+ for acl_type, acls in access_groups.items():
+ acls_to_delete = []
+ if not have_obj.get(acl_type):
+ continue
+
+ # Delete all bindings of ACLs belonging to a type in an
+ # interface, if only the ACL type is provided
+ if not acls:
+ for acl_name, direction in have_obj[acl_type].items():
+ acls_to_delete.append({'name': acl_name, 'direction': direction})
+ else:
+ for acl_name, direction in acls.items():
+ if have_obj[acl_type].get(acl_name) and direction == have_obj[acl_type][acl_name]:
+ acls_to_delete.append({'name': acl_name, 'direction': direction})
+
+ if acls_to_delete:
+ access_groups_to_del.append({'type': acl_type, 'acls': acls_to_delete})
+
+ if access_groups_to_del:
+ commands.append({'name': intf_name, 'access_groups': access_groups_to_del})
+
+ if commands:
+ requests = self.get_interfaces_acl_unbind_requests(commands)
+ commands = update_states(commands, 'deleted')
+
+ return commands, requests
+
+ def get_interfaces_acl_bind_requests(self, commands):
+ """Get requests to bind specified ACLs for all interfaces
+        specified in the commands
+ """
+ requests = []
+
+ for command in commands:
+ intf_name = command['name']
+ url = self.acl_interfaces_path.format(intf_name=intf_name)
+ for access_group in command['access_groups']:
+ for acl in access_group['acls']:
+ if acl['direction'] == 'in':
+ payload = {
+ 'openconfig-acl:config': {
+ 'id': intf_name
+ },
+ 'openconfig-acl:interface-ref': {
+ 'config': {
+ 'interface': intf_name.split('.')[0]
+ }
+ },
+ 'openconfig-acl:ingress-acl-sets': {
+ 'ingress-acl-set': [
+ {
+ 'set-name': acl['name'],
+ 'type': acl_type_to_payload_map[access_group['type']],
+ 'config': {
+ 'set-name': acl['name'],
+ 'type': acl_type_to_payload_map[access_group['type']]
+ }
+ }
+ ]
+ }
+ }
+ else:
+ payload = {
+ 'openconfig-acl:config': {
+ 'id': intf_name
+ },
+ 'openconfig-acl:interface-ref': {
+ 'config': {
+ 'interface': intf_name.split('.')[0]
+ }
+ },
+ 'openconfig-acl:egress-acl-sets': {
+ 'egress-acl-set': [
+ {
+ 'set-name': acl['name'],
+ 'type': acl_type_to_payload_map[access_group['type']],
+ 'config': {
+ 'set-name': acl['name'],
+ 'type': acl_type_to_payload_map[access_group['type']]
+ }
+ }
+ ]
+ }
+ }
+
+ # Update the payload for subinterfaces
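+                    # e.g. a hypothetical name 'Ethernet0.10' is split into
+                    # interface 'Ethernet0' and subinterface index 10.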
+ if '.' in intf_name:
+ payload['openconfig-acl:interface-ref']['config']['subinterface'] = int(intf_name.split('.')[1])
+
+ requests.append({'path': url, 'method': POST, 'data': payload})
+
+ return requests
+
+ def get_interfaces_acl_unbind_requests(self, commands):
+ """Get requests to unbind specified ACLs for all interfaces
+ specified in the commands
+ """
+ requests = []
+
+ for command in commands:
+ intf_name = command['name']
+ # Delete all acl bindings in an interface, if only the
+ # interface name is provided
+ if not command.get('access_groups'):
+ url = self.acl_interfaces_path.format(intf_name=intf_name)
+ requests.append({'path': url, 'method': DELETE})
+ else:
+ for access_group in command['access_groups']:
+ for acl in access_group['acls']:
+ if acl['direction'] == 'in':
+ url = self.ingress_acl_set_path.format(intf_name=intf_name, acl_name=acl['name'],
+ acl_type=acl_type_to_payload_map[access_group['type']])
+ requests.append({'path': url, 'method': DELETE})
+ else:
+ url = self.egress_acl_set_path.format(intf_name=intf_name, acl_name=acl['name'],
+ acl_type=acl_type_to_payload_map[access_group['type']])
+ requests.append({'path': url, 'method': DELETE})
+
+ return requests
+
+ def validate_and_normalize_config(self, config_list):
+ """Validate and normalize the given config"""
+ # Remove empties and validate the config with argument spec
+ config_list = [remove_empties(config) for config in config_list]
+ validate_config(self._module.argument_spec, {'config': config_list})
+ normalize_interface_name(config_list, self._module)
+
+ state = self._module.params['state']
+ # When state is deleted, empty access_groups and acls are
+        # supported and therefore no further changes are required.
+ if state == 'deleted':
+ return config_list
+
+ updated_config_list = []
+ for config in config_list:
+ if not config.get('access_groups'):
+                # When state is replaced, an entry with only the interface name
+                # requests deletion of all ACL bindings on that interface, so
+                # keep that entry in the config.
+ if state == 'replaced':
+ updated_config_list.append(config)
+ else:
+ access_group_list = []
+ for access_group in config['access_groups']:
+ if access_group.get('acls'):
+ access_group_list.append(access_group)
+
+ if access_group_list:
+ updated_config_list.append({'name': config['name'], 'access_groups': access_group_list})
+
+ return updated_config_list
+
+ @staticmethod
+ def get_interface_names(config_list):
+ """Get a set of interface names available in the given
+ config_list dict
+ """
+ interface_names = set()
+ for config in config_list:
+ interface_names.add(config['name'])
+
+ return interface_names
+
+ @staticmethod
+ def _convert_config_list_to_dict(config_list):
+ config_dict = {}
+
+ for config in config_list:
+ config_dict[config['name']] = {}
+ if config.get('access_groups'):
+ for access_group in config['access_groups']:
+ config_dict[config['name']][access_group['type']] = {}
+ if access_group.get('acls'):
+ for acl in access_group['acls']:
+ config_dict[config['name']][access_group['type']][acl['name']] = acl['direction']
+
+ return config_dict
+
+ def sort_config(self, configs):
+        # natsort would provide better ordering, but it is not available in the
+        # Python version currently in use and causes a sanity error.
+        # new_config = natsorted(new_config, key=lambda x: x['name'])
+        # For the time being, use the built-in "sort".
+ configs.sort(key=lambda x: x['name'])
+
+ for conf in configs:
+ ags = conf.get('access_groups', [])
+ if ags:
+ ags.sort(key=lambda x: x['type'])
+ for ag in ags:
+ if ag.get('acls', []):
+ ag['acls'].sort(key=lambda x: x['name'])
+
+ def post_process_generated_config(self, configs):
+ for conf in configs[:]:
+ ags = conf.get('access_groups', [])
+ if ags:
+ for ag in ags[:]:
+ if not ag.get('acls', []):
+ ags.remove(ag)
+
+ if not conf.get('access_groups', []):
+ configs.remove(conf)
diff --git a/ansible_collections/dellemc/os9/plugins/module_utils/network/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/module_utils/network/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/bfd.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/bfd.py
new file mode 100644
index 000000000..484c4203d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bfd/bfd.py
@@ -0,0 +1,734 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_bfd class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ get_replaced_config,
+ send_requests,
+ remove_empties,
+ update_states
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+from copy import deepcopy
+
+
+BFD_PATH = '/data/openconfig-bfd:bfd'
+PATCH = 'patch'
+DELETE = 'delete'
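+# Key attributes used by get_diff and get_replaced_config to match entries of
+# the profile and session lists when comparing requested and current configuration.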
+TEST_KEYS = [
+ {'profiles': {'profile_name': ''}},
+ {'single_hops': {'remote_address': '', 'vrf': '', 'interface': '', 'local_address': ''}},
+ {'multi_hops': {'remote_address': '', 'vrf': '', 'local_address': ''}}
+]
+
+
+class Bfd(ConfigBase):
+ """
+ The sonic_bfd class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'bfd',
+ ]
+
+ def __init__(self, module):
+ super(Bfd, self).__init__(module)
+
+ def get_bfd_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ bfd_facts = facts['ansible_network_resources'].get('bfd')
+ if not bfd_facts:
+ return {}
+ return bfd_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+ commands = []
+
+ existing_bfd_facts = self.get_bfd_facts()
+ commands, requests = self.set_config(existing_bfd_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_bfd_facts = self.get_bfd_facts()
+
+ result['before'] = existing_bfd_facts
+ if result['changed']:
+ result['after'] = changed_bfd_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_bfd_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_bfd_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ state = self._module.params['state']
+ diff = get_diff(want, have, TEST_KEYS)
+
+ if state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(diff)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, diff)
+ return commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ if replaced_config:
+ self.sort_lists_in_config(replaced_config)
+ self.sort_lists_in_config(have)
+ is_delete_all = (replaced_config == have)
+ requests = self.get_delete_bfd_requests(replaced_config, have, is_delete_all)
+ send_requests(self._module, requests)
+
+ commands = want
+ else:
+ commands = diff
+
+ requests = []
+
+ if commands:
+ requests = self.get_modify_bfd_request(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ if have and have != want:
+ is_delete_all = True
+ requests = self.get_delete_bfd_requests(have, None, is_delete_all)
+ send_requests(self._module, requests)
+ have = []
+
+ commands = []
+ requests = []
+
+ if not have and want:
+ commands = want
+ requests = self.get_modify_bfd_request(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "overridden")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_merged(self, diff):
+ """ The command generator when state is merged
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = self.get_modify_bfd_request(commands)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+ :param want: the objects from which the configuration should be removed
+        :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ is_delete_all = False
+ want = remove_empties(want)
+ if not want:
+ commands = deepcopy(have)
+ is_delete_all = True
+ else:
+ commands = deepcopy(want)
+
+ self.remove_default_entries(commands)
+ requests = self.get_delete_bfd_requests(commands, have, is_delete_all)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+ return commands, requests
+
+ def get_modify_bfd_request(self, commands):
+ request = None
+
+ profiles = commands.get('profiles', None)
+ single_hops = commands.get('single_hops', None)
+ multi_hops = commands.get('multi_hops', None)
+ bfd_dict = {}
+ bfd_profile_dict = {}
+ bfd_shop_dict = {}
+ bfd_mhop_dict = {}
+
+ if profiles:
+ profile_list = []
+ for profile in profiles:
+ profile_dict = {}
+ config_dict = {}
+ profile_name = profile.get('profile_name', None)
+ enabled = profile.get('enabled', None)
+ transmit_interval = profile.get('transmit_interval', None)
+ receive_interval = profile.get('receive_interval', None)
+ detect_multiplier = profile.get('detect_multiplier', None)
+ passive_mode = profile.get('passive_mode', None)
+ min_ttl = profile.get('min_ttl', None)
+ echo_interval = profile.get('echo_interval', None)
+ echo_mode = profile.get('echo_mode', None)
+
+ if profile_name:
+ profile_dict['profile-name'] = profile_name
+ config_dict['profile-name'] = profile_name
+ if enabled is not None:
+ config_dict['enabled'] = enabled
+ if transmit_interval:
+ config_dict['desired-minimum-tx-interval'] = transmit_interval
+ if receive_interval:
+ config_dict['required-minimum-receive'] = receive_interval
+ if detect_multiplier:
+ config_dict['detection-multiplier'] = detect_multiplier
+ if passive_mode is not None:
+ config_dict['passive-mode'] = passive_mode
+ if min_ttl:
+ config_dict['minimum-ttl'] = min_ttl
+ if echo_interval:
+ config_dict['desired-minimum-echo-receive'] = echo_interval
+ if echo_mode is not None:
+ config_dict['echo-active'] = echo_mode
+ if config_dict:
+ profile_dict['config'] = config_dict
+ profile_list.append(profile_dict)
+ if profile_list:
+ bfd_profile_dict['profile'] = profile_list
+
+ if single_hops:
+ single_hop_list = []
+ for hop in single_hops:
+ hop_dict = {}
+ config_dict = {}
+ remote_address = hop.get('remote_address', None)
+ vrf = hop.get('vrf', None)
+ interface = hop.get('interface', None)
+ local_address = hop.get('local_address', None)
+ enabled = hop.get('enabled', None)
+ transmit_interval = hop.get('transmit_interval', None)
+ receive_interval = hop.get('receive_interval', None)
+ detect_multiplier = hop.get('detect_multiplier', None)
+ passive_mode = hop.get('passive_mode', None)
+ echo_interval = hop.get('echo_interval', None)
+ echo_mode = hop.get('echo_mode', None)
+ profile_name = hop.get('profile_name', None)
+
+ if remote_address:
+ hop_dict['remote-address'] = remote_address
+ config_dict['remote-address'] = remote_address
+ if vrf:
+ hop_dict['vrf'] = vrf
+ config_dict['vrf'] = vrf
+ if interface:
+ hop_dict['interface'] = interface
+ config_dict['interface'] = interface
+ if local_address:
+ hop_dict['local-address'] = local_address
+ config_dict['local-address'] = local_address
+ if enabled is not None:
+ config_dict['enabled'] = enabled
+ if transmit_interval:
+ config_dict['desired-minimum-tx-interval'] = transmit_interval
+ if receive_interval:
+ config_dict['required-minimum-receive'] = receive_interval
+ if detect_multiplier:
+ config_dict['detection-multiplier'] = detect_multiplier
+ if passive_mode is not None:
+ config_dict['passive-mode'] = passive_mode
+ if echo_interval:
+ config_dict['desired-minimum-echo-receive'] = echo_interval
+ if echo_mode is not None:
+ config_dict['echo-active'] = echo_mode
+ if profile_name:
+ config_dict['profile-name'] = profile_name
+ if config_dict:
+ hop_dict['config'] = config_dict
+ single_hop_list.append(hop_dict)
+ if single_hop_list:
+ bfd_shop_dict['single-hop'] = single_hop_list
+
+ if multi_hops:
+ multi_hop_list = []
+ for hop in multi_hops:
+ hop_dict = {}
+ config_dict = {}
+ remote_address = hop.get('remote_address', None)
+ vrf = hop.get('vrf', None)
+ local_address = hop.get('local_address', None)
+ enabled = hop.get('enabled', None)
+ transmit_interval = hop.get('transmit_interval', None)
+ receive_interval = hop.get('receive_interval', None)
+ detect_multiplier = hop.get('detect_multiplier', None)
+ passive_mode = hop.get('passive_mode', None)
+ min_ttl = hop.get('min_ttl', None)
+ profile_name = hop.get('profile_name', None)
+
+ if remote_address:
+ hop_dict['remote-address'] = remote_address
+ config_dict['remote-address'] = remote_address
+ if vrf:
+ hop_dict['vrf'] = vrf
+ config_dict['vrf'] = vrf
+ if local_address:
+ hop_dict['local-address'] = local_address
+ config_dict['local-address'] = local_address
+ if enabled is not None:
+ config_dict['enabled'] = enabled
+ if transmit_interval:
+ config_dict['desired-minimum-tx-interval'] = transmit_interval
+ if receive_interval:
+ config_dict['required-minimum-receive'] = receive_interval
+ if detect_multiplier:
+ config_dict['detection-multiplier'] = detect_multiplier
+ if passive_mode is not None:
+ config_dict['passive-mode'] = passive_mode
+ if min_ttl:
+ config_dict['minimum-ttl'] = min_ttl
+ if profile_name:
+ config_dict['profile-name'] = profile_name
+ if config_dict:
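+                    # Multi-hop sessions are not keyed by an interface; the OpenConfig
+                    # list entry uses the literal string 'null' for the interface key
+                    # (matching the 'multi-hop=...,null,...' delete paths below).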
+ config_dict['interface'] = 'null'
+ hop_dict['interface'] = 'null'
+ hop_dict['config'] = config_dict
+ multi_hop_list.append(hop_dict)
+ if multi_hop_list:
+ bfd_mhop_dict['multi-hop'] = multi_hop_list
+
+ if bfd_profile_dict:
+ bfd_dict['openconfig-bfd-ext:bfd-profile'] = bfd_profile_dict
+ if bfd_shop_dict:
+ bfd_dict['openconfig-bfd-ext:bfd-shop-sessions'] = bfd_shop_dict
+ if bfd_mhop_dict:
+ bfd_dict['openconfig-bfd-ext:bfd-mhop-sessions'] = bfd_mhop_dict
+ if bfd_dict:
+ payload = {'openconfig-bfd:bfd': bfd_dict}
+ request = {'path': BFD_PATH, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_delete_bfd_requests(self, commands, have, is_delete_all):
+ requests = []
+
+ if not commands:
+ return requests
+
+ if is_delete_all:
+ requests.extend(self.get_delete_all_bfd_cfg_requests(commands))
+ else:
+ requests.extend(self.get_delete_bfd_profile_requests(commands, have))
+ requests.extend(self.get_delete_bfd_shop_requests(commands, have))
+ requests.extend(self.get_delete_bfd_mhop_requests(commands, have))
+
+ return requests
+
+ def get_delete_bfd_profile_requests(self, commands, have):
+ requests = []
+
+ profiles = commands.get('profiles', None)
+ if profiles:
+ for profile in profiles:
+ profile_name = profile.get('profile_name', None)
+ enabled = profile.get('enabled', None)
+ transmit_interval = profile.get('transmit_interval', None)
+ receive_interval = profile.get('receive_interval', None)
+ detect_multiplier = profile.get('detect_multiplier', None)
+ passive_mode = profile.get('passive_mode', None)
+ min_ttl = profile.get('min_ttl', None)
+ echo_interval = profile.get('echo_interval', None)
+ echo_mode = profile.get('echo_mode', None)
+
+ cfg_profiles = have.get('profiles', None)
+ if cfg_profiles:
+ for cfg_profile in cfg_profiles:
+ cfg_profile_name = cfg_profile.get('profile_name', None)
+ cfg_enabled = cfg_profile.get('enabled', None)
+ cfg_transmit_interval = cfg_profile.get('transmit_interval', None)
+ cfg_receive_interval = cfg_profile.get('receive_interval', None)
+ cfg_detect_multiplier = cfg_profile.get('detect_multiplier', None)
+ cfg_passive_mode = cfg_profile.get('passive_mode', None)
+ cfg_min_ttl = cfg_profile.get('min_ttl', None)
+ cfg_echo_interval = cfg_profile.get('echo_interval', None)
+ cfg_echo_mode = cfg_profile.get('echo_mode', None)
+
+ if profile_name == cfg_profile_name:
+ if enabled is not None and enabled == cfg_enabled:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'enabled'))
+ if transmit_interval and transmit_interval == cfg_transmit_interval:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'desired-minimum-tx-interval'))
+ if receive_interval and receive_interval == cfg_receive_interval:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'required-minimum-receive'))
+ if detect_multiplier and detect_multiplier == cfg_detect_multiplier:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'detection-multiplier'))
+ if passive_mode is not None and passive_mode == cfg_passive_mode:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'passive-mode'))
+ if min_ttl and min_ttl == cfg_min_ttl:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'minimum-ttl'))
+ if echo_interval and echo_interval == cfg_echo_interval:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'desired-minimum-echo-receive'))
+ if echo_mode is not None and echo_mode == cfg_echo_mode:
+ requests.append(self.get_delete_profile_attr_request(profile_name, 'echo-active'))
+ if (enabled is None and not transmit_interval and not receive_interval and not detect_multiplier and passive_mode is None
+ and not min_ttl and not echo_interval and echo_mode is None):
+ requests.append(self.get_delete_profile_request(profile_name))
+
+ return requests
+
+ def get_delete_bfd_shop_requests(self, commands, have):
+ requests = []
+
+ single_hops = commands.get('single_hops', None)
+ if single_hops:
+ for hop in single_hops:
+ remote_address = hop.get('remote_address', None)
+ vrf = hop.get('vrf', None)
+ interface = hop.get('interface', None)
+ local_address = hop.get('local_address', None)
+ enabled = hop.get('enabled', None)
+ transmit_interval = hop.get('transmit_interval', None)
+ receive_interval = hop.get('receive_interval', None)
+ detect_multiplier = hop.get('detect_multiplier', None)
+ passive_mode = hop.get('passive_mode', None)
+ echo_interval = hop.get('echo_interval', None)
+ echo_mode = hop.get('echo_mode', None)
+ profile_name = hop.get('profile_name', None)
+
+ cfg_single_hops = have.get('single_hops', None)
+ if cfg_single_hops:
+ for cfg_hop in cfg_single_hops:
+ cfg_remote_address = cfg_hop.get('remote_address', None)
+ cfg_vrf = cfg_hop.get('vrf', None)
+ cfg_interface = cfg_hop.get('interface', None)
+ cfg_local_address = cfg_hop.get('local_address', None)
+ cfg_enabled = cfg_hop.get('enabled', None)
+ cfg_transmit_interval = cfg_hop.get('transmit_interval', None)
+ cfg_receive_interval = cfg_hop.get('receive_interval', None)
+ cfg_detect_multiplier = cfg_hop.get('detect_multiplier', None)
+ cfg_passive_mode = cfg_hop.get('passive_mode', None)
+ cfg_echo_interval = cfg_hop.get('echo_interval', None)
+ cfg_echo_mode = cfg_hop.get('echo_mode', None)
+ cfg_profile_name = cfg_hop.get('profile_name', None)
+
+ if remote_address == cfg_remote_address and vrf == cfg_vrf and interface == cfg_interface and local_address == cfg_local_address:
+ if enabled is not None and enabled == cfg_enabled:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address, 'enabled'))
+ if transmit_interval and transmit_interval == cfg_transmit_interval:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address,
+ 'desired-minimum-tx-interval'))
+ if receive_interval and receive_interval == cfg_receive_interval:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address, 'required-minimum-receive'))
+ if detect_multiplier and detect_multiplier == cfg_detect_multiplier:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address, 'detection-multiplier'))
+ if passive_mode is not None and passive_mode == cfg_passive_mode:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address, 'passive-mode'))
+ if echo_interval and echo_interval == cfg_echo_interval:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address,
+ 'desired-minimum-echo-receive'))
+ if echo_mode is not None and echo_mode == cfg_echo_mode:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address, 'echo-active'))
+ if profile_name and profile_name == cfg_profile_name:
+ requests.append(self.get_delete_shop_attr_request(remote_address, interface, vrf, local_address, 'profile-name'))
+ if (enabled is None and not transmit_interval and not receive_interval and not detect_multiplier and passive_mode is None
+ and not echo_interval and echo_mode is None and not profile_name):
+ requests.append(self.get_delete_shop_request(remote_address, interface, vrf, local_address))
+
+ return requests
+
+ def get_delete_bfd_mhop_requests(self, commands, have):
+ requests = []
+
+ multi_hops = commands.get('multi_hops', None)
+ if multi_hops:
+ for hop in multi_hops:
+ remote_address = hop.get('remote_address', None)
+ vrf = hop.get('vrf', None)
+ local_address = hop.get('local_address', None)
+ enabled = hop.get('enabled', None)
+ transmit_interval = hop.get('transmit_interval', None)
+ receive_interval = hop.get('receive_interval', None)
+ detect_multiplier = hop.get('detect_multiplier', None)
+ passive_mode = hop.get('passive_mode', None)
+ min_ttl = hop.get('min_ttl', None)
+ profile_name = hop.get('profile_name', None)
+
+ cfg_multi_hops = have.get('multi_hops', None)
+ if cfg_multi_hops:
+ for cfg_hop in cfg_multi_hops:
+ cfg_remote_address = cfg_hop.get('remote_address', None)
+ cfg_vrf = cfg_hop.get('vrf', None)
+ cfg_local_address = cfg_hop.get('local_address', None)
+ cfg_enabled = cfg_hop.get('enabled', None)
+ cfg_transmit_interval = cfg_hop.get('transmit_interval', None)
+ cfg_receive_interval = cfg_hop.get('receive_interval', None)
+ cfg_detect_multiplier = cfg_hop.get('detect_multiplier', None)
+ cfg_passive_mode = cfg_hop.get('passive_mode', None)
+ cfg_min_ttl = cfg_hop.get('min_ttl', None)
+ cfg_profile_name = cfg_hop.get('profile_name', None)
+
+ if remote_address == cfg_remote_address and vrf == cfg_vrf and local_address == cfg_local_address:
+ if enabled is not None and enabled == cfg_enabled:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'enabled'))
+ if transmit_interval and transmit_interval == cfg_transmit_interval:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'desired-minimum-tx-interval'))
+ if receive_interval and receive_interval == cfg_receive_interval:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'required-minimum-receive'))
+ if detect_multiplier and detect_multiplier == cfg_detect_multiplier:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'detection-multiplier'))
+ if passive_mode is not None and passive_mode == cfg_passive_mode:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'passive-mode'))
+ if min_ttl and min_ttl == cfg_min_ttl:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'minimum-ttl'))
+ if profile_name and profile_name == cfg_profile_name:
+ requests.append(self.get_delete_mhop_attr_request(remote_address, vrf, local_address, 'profile-name'))
+ if (enabled is None and not transmit_interval and not receive_interval and not detect_multiplier and passive_mode is None
+ and not min_ttl and not profile_name):
+ requests.append(self.get_delete_mhop_request(remote_address, vrf, local_address))
+
+ return requests
+
+ def get_delete_all_bfd_cfg_requests(self, commands):
+ requests = []
+ profiles = commands.get('profiles', None)
+ single_hops = commands.get('single_hops', None)
+ multi_hops = commands.get('multi_hops', None)
+
+ if profiles:
+ url = '%s/openconfig-bfd-ext:bfd-profile/profile' % (BFD_PATH)
+ requests.append({'path': url, 'method': DELETE})
+ if single_hops:
+ url = '%s/openconfig-bfd-ext:bfd-shop-sessions/single-hop' % (BFD_PATH)
+ requests.append({'path': url, 'method': DELETE})
+ if multi_hops:
+ url = '%s/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop' % (BFD_PATH)
+ requests.append({'path': url, 'method': DELETE})
+
+ return requests
+
+ def get_delete_profile_request(self, profile_name):
+ url = '%s/openconfig-bfd-ext:bfd-profile/profile=%s' % (BFD_PATH, profile_name)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_profile_attr_request(self, profile_name, attr):
+ url = '%s/openconfig-bfd-ext:bfd-profile/profile=%s/config/%s' % (BFD_PATH, profile_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_shop_request(self, remote_address, interface, vrf, local_address):
+ url = '%s/openconfig-bfd-ext:bfd-shop-sessions/single-hop=%s,%s,%s,%s' % (BFD_PATH, remote_address, interface, vrf, local_address)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_shop_attr_request(self, remote_address, interface, vrf, local_address, attr):
+ url = '%s/openconfig-bfd-ext:bfd-shop-sessions/single-hop=%s,%s,%s,%s/config/%s' % (BFD_PATH, remote_address, interface, vrf, local_address, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mhop_request(self, remote_address, vrf, local_address):
+ url = '%s/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=%s,null,%s,%s' % (BFD_PATH, remote_address, vrf, local_address)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mhop_attr_request(self, remote_address, vrf, local_address, attr):
+ url = '%s/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=%s,null,%s,%s/config/%s' % (BFD_PATH, remote_address, vrf, local_address, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_profile_name(self, profile_name):
+ return profile_name.get('profile_name')
+
+ def sort_lists_in_config(self, config):
+ if 'profiles' in config and config['profiles'] is not None:
+ config['profiles'].sort(key=self.get_profile_name)
+ if 'single_hops' in config and config['single_hops'] is not None:
+ config['single_hops'].sort(key=lambda x: (x['remote_address'], x['interface'], x['vrf'], x['local_address']))
+ if 'multi_hops' in config and config['multi_hops'] is not None:
+ config['multi_hops'].sort(key=lambda x: (x['remote_address'], x['vrf'], x['local_address']))
+
+ def remove_default_entries(self, data):
+
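+        # Remove attributes whose values match the defaults assumed by this module
+        # (intervals of 300, detect multiplier 3, minimum TTL 254, passive/echo mode
+        # disabled) so that no delete requests are generated for them.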
+ profiles = data.get('profiles', None)
+ single_hops = data.get('single_hops', None)
+ multi_hops = data.get('multi_hops', None)
+
+ if profiles:
+ for profile in profiles:
+ enabled = profile.get('enabled', None)
+ transmit_interval = profile.get('transmit_interval', None)
+ receive_interval = profile.get('receive_interval', None)
+ detect_multiplier = profile.get('detect_multiplier', None)
+ passive_mode = profile.get('passive_mode', None)
+ min_ttl = profile.get('min_ttl', None)
+ echo_interval = profile.get('echo_interval', None)
+ echo_mode = profile.get('echo_mode', None)
+
+ if enabled:
+ profile.pop('enabled')
+ if transmit_interval == 300:
+ profile.pop('transmit_interval')
+ if receive_interval == 300:
+ profile.pop('receive_interval')
+ if detect_multiplier == 3:
+ profile.pop('detect_multiplier')
+ if passive_mode is False:
+ profile.pop('passive_mode')
+ if min_ttl == 254:
+ profile.pop('min_ttl')
+ if echo_interval == 300:
+ profile.pop('echo_interval')
+ if echo_mode is False:
+ profile.pop('echo_mode')
+
+ if single_hops:
+ for hop in single_hops:
+ enabled = hop.get('enabled', None)
+ transmit_interval = hop.get('transmit_interval', None)
+ receive_interval = hop.get('receive_interval', None)
+ detect_multiplier = hop.get('detect_multiplier', None)
+ passive_mode = hop.get('passive_mode', None)
+ echo_interval = hop.get('echo_interval', None)
+ echo_mode = hop.get('echo_mode', None)
+
+ if enabled:
+ hop.pop('enabled')
+ if transmit_interval == 300:
+ hop.pop('transmit_interval')
+ if receive_interval == 300:
+ hop.pop('receive_interval')
+ if detect_multiplier == 3:
+ hop.pop('detect_multiplier')
+ if passive_mode is False:
+ hop.pop('passive_mode')
+ if echo_interval == 300:
+ hop.pop('echo_interval')
+ if echo_mode is False:
+ hop.pop('echo_mode')
+
+ if multi_hops:
+ for hop in multi_hops:
+ enabled = hop.get('enabled', None)
+ transmit_interval = hop.get('transmit_interval', None)
+ receive_interval = hop.get('receive_interval', None)
+ detect_multiplier = hop.get('detect_multiplier', None)
+ passive_mode = hop.get('passive_mode', None)
+ min_ttl = hop.get('min_ttl', None)
+
+ if enabled:
+ hop.pop('enabled')
+ if transmit_interval == 300:
+ hop.pop('transmit_interval')
+ if receive_interval == 300:
+ hop.pop('receive_interval')
+ if detect_multiplier == 3:
+ hop.pop('detect_multiplier')
+ if passive_mode is False:
+ hop.pop('passive_mode')
+ if min_ttl == 254:
+ hop.pop('min_ttl')
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp/bgp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp/bgp.py
index fd4d5c57e..69c7ca455 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp/bgp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp/bgp.py
@@ -13,18 +13,12 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-try:
- from urllib import quote
-except ImportError:
- from urllib.parse import quote
-
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
to_list,
- search_obj_in_list,
- remove_empties
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
@@ -32,10 +26,8 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
- dict_to_set,
update_states,
get_diff,
- remove_empties_from_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request
from ansible.module_utils.connection import ConnectionError
@@ -120,6 +112,11 @@ class Bgp(ConfigBase):
to the desired configuration
"""
want = self._module.params['config']
+ if want:
+ want = [remove_empties(conf) for conf in want]
+ else:
+ want = []
+
have = existing_bgp_facts
resp = self.set_state(want, have)
return to_list(resp)
@@ -137,19 +134,85 @@ class Bgp(ConfigBase):
requests = []
state = self._module.params['state']
- diff = get_diff(want, have, TEST_KEYS)
-
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
- commands, requests = self._state_merged(want, have, diff)
+ commands, requests = self._state_merged(want, have)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
return commands, requests
- def _state_merged(self, want, have, diff):
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'replaced')
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ add_commands = get_diff(want, have, TEST_KEYS)
+ if add_commands:
+ for command in add_commands:
+ as_val = command['bgp_as']
+ vrf_name = command['vrf_name']
+
+ # max_med -> on_startup options are modified or deleted at once.
+ # Diff might not reflect the correct commands if only one of
+ # them is modified. So, update the command with want value.
+ if command.get('max_med'):
+ for cfg in want:
+ if cfg['vrf_name'] == vrf_name and cfg['bgp_as'] == as_val:
+ command['max_med'] = cfg['max_med']
+ break
+
+ commands.extend(update_states(add_commands, 'replaced'))
+ requests.extend(self.get_modify_bgp_requests(add_commands, have))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'overridden')
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ add_commands = get_diff(want, have, TEST_KEYS)
+ if add_commands:
+ for command in add_commands:
+ as_val = command['bgp_as']
+ vrf_name = command['vrf_name']
+
+ # max_med -> on_startup options are modified or deleted at once.
+ # Diff will not reflect the correct commands if only one of
+ # them is modified. So, update the command with want value.
+ if command.get('max_med'):
+ for cfg in want:
+ if cfg['vrf_name'] == vrf_name and cfg['bgp_as'] == as_val:
+ command['max_med'] = cfg['max_med']
+ break
+
+ commands.extend(update_states(add_commands, 'overridden'))
+ requests.extend(self.get_modify_bgp_requests(add_commands, have))
+
+ return commands, requests
+
+ def _state_merged(self, want, have):
""" The command generator when state is merged
:param want: the additive configuration as a dictionary
@@ -158,7 +221,7 @@ class Bgp(ConfigBase):
:returns: the commands necessary to merge the provided into
the current configuration
"""
- commands = diff
+ commands = get_diff(want, have, TEST_KEYS)
requests = self.get_modify_bgp_requests(commands, have)
if commands and len(requests) > 0:
commands = update_states(commands, "merged")
@@ -167,7 +230,7 @@ class Bgp(ConfigBase):
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:param want: the objects from which the configuration should be removed
@@ -269,6 +332,7 @@ class Bgp(ConfigBase):
requests = []
router_id = command.get('router_id', None)
+ rt_delay = command.get('rt_delay', None)
timers = command.get('timers', None)
holdtime = None
keepalive = None
@@ -282,6 +346,10 @@ class Bgp(ConfigBase):
url = '%s=%s/%s/global/config/router-id' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
requests.append({"path": url, "method": DELETE})
+ if rt_delay and match.get('rt_delay', None):
+ url = '%s=%s/%s/global/config/route-map-process-delay' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
+ requests.append({"path": url, "method": DELETE})
+
if holdtime and match['timers'].get('holdtime', None) != 180:
url = '%s=%s/%s/global/config/hold-time' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
requests.append({"path": url, "method": DELETE})
@@ -320,7 +388,7 @@ class Bgp(ConfigBase):
if not match:
continue
         # if there are specific parameters to delete then delete those alone
- if cmd.get('router_id', None) or cmd.get('log_neighbor_changes', None) or cmd.get('bestpath', None):
+ if cmd.get('router_id', None) or cmd.get('log_neighbor_changes', None) or cmd.get('bestpath', None) or cmd.get('rt_delay', None):
requests.extend(self.get_delete_specific_bgp_param_request(cmd, match))
else:
# delete entire bgp
@@ -471,7 +539,7 @@ class Bgp(ConfigBase):
payload = {}
if holdtime is not None:
- payload['hold-time'] = str(holdtime)
+ payload['hold-time'] = holdtime
if payload:
url = '%s=%s/%s/global/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.holdtime_path)
@@ -485,7 +553,7 @@ class Bgp(ConfigBase):
payload = {}
if keepalive_interval is not None:
- payload['keepalive-interval'] = str(keepalive_interval)
+ payload['keepalive-interval'] = keepalive_interval
if payload:
url = '%s=%s/%s/global/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path, self.keepalive_path)
@@ -514,7 +582,7 @@ class Bgp(ConfigBase):
return request
- def get_modify_global_config_request(self, vrf_name, router_id, as_val):
+ def get_modify_global_config_request(self, vrf_name, router_id, as_val, rt_delay):
request = None
method = PATCH
payload = {}
@@ -524,6 +592,8 @@ class Bgp(ConfigBase):
cfg['router-id'] = router_id
if as_val:
cfg['as'] = float(as_val)
+ if rt_delay:
+ cfg['route-map-process-delay'] = rt_delay
if cfg:
payload['openconfig-network-instance:config'] = cfg
@@ -547,6 +617,7 @@ class Bgp(ConfigBase):
max_med = None
holdtime = None
keepalive_interval = None
+ rt_delay = None
if 'bgp_as' in conf:
as_val = conf['bgp_as']
@@ -558,6 +629,8 @@ class Bgp(ConfigBase):
bestpath = conf['bestpath']
if 'max_med' in conf:
max_med = conf['max_med']
+ if 'rt_delay' in conf:
+ rt_delay = conf['rt_delay']
if 'timers' in conf and conf['timers']:
if 'holdtime' in conf['timers']:
holdtime = conf['timers']['holdtime']
@@ -569,7 +642,7 @@ class Bgp(ConfigBase):
if new_bgp_req:
requests.append(new_bgp_req)
- global_req = self.get_modify_global_config_request(vrf_name, router_id, as_val)
+ global_req = self.get_modify_global_config_request(vrf_name, router_id, as_val, rt_delay)
if global_req:
requests.append(global_req)
@@ -596,3 +669,112 @@ class Bgp(ConfigBase):
requests.extend(max_med_reqs)
return requests
+
+ def get_delete_commands_requests_for_replaced_overridden(self, want, have, state):
+ """Returns the commands and requests necessary to remove applicable
+ current configurations when state is replaced or overridden
+ """
+ commands = []
+ requests = []
+ if not have:
+ return commands, requests
+
+ for conf in have:
+ as_val = conf['bgp_as']
+ vrf_name = conf['vrf_name']
+
+ match_cfg = next((cfg for cfg in want if cfg['vrf_name'] == vrf_name and cfg['bgp_as'] == as_val), None)
+ # Delete entire BGP if not specified in overridden
+ if not match_cfg:
+ if state == 'overridden':
+ commands.append(conf)
+ requests.append(self.get_delete_single_bgp_request(vrf_name))
+ continue
+
+ # Delete config in BGP AS that are replaced/overridden
+ # - Modified attributes are not deleted, since they will be
+ # updated by merge.
+ # - log_neighbor_changes is enabled by default, therefore
+ # it will be enabled if not specified and currently
+ # disabled for an existing BGP AS.
+ command = {}
+
+ if conf.get('router_id') and not match_cfg.get('router_id'):
+ command['router_id'] = conf['router_id']
+
+ if conf.get('rt_delay') and match_cfg.get('rt_delay') is None:
+ command['rt_delay'] = conf['rt_delay']
+
+ if not conf.get('log_neighbor_changes') and match_cfg.get('log_neighbor_changes') is None:
+ command['log_neighbor_changes'] = False
+ requests.append(self.get_modify_log_change_request(vrf_name, True))
+
+ # max_med -> on_startup options are deleted at once.
+ # Update the commands appropriately.
+ if conf.get('max_med') and (not match_cfg.get('max_med') or conf['max_med']['on_startup'] != match_cfg['max_med']['on_startup']):
+ command['max_med'] = conf['max_med']
+
+ if conf.get('timers'):
+ timer_command = {}
+ timers = conf['timers']
+ match_timers = match_cfg.get('timers', {})
+ if timers.get('holdtime') is not None and match_timers.get('holdtime') is None and timers['holdtime'] != 180:
+ timer_command['holdtime'] = timers['holdtime']
+ if timers.get('keepalive_interval') is not None and match_timers.get('keepalive_interval') is None and timers['keepalive_interval'] != 60:
+ timer_command['keepalive_interval'] = timers['keepalive_interval']
+
+ if timer_command:
+ command['timers'] = timer_command
+
+ if conf.get('bestpath'):
+ bestpath_command = {}
+ bestpath = conf['bestpath']
+ match_bestpath = match_cfg.get('bestpath', {})
+ if bestpath.get('as_path'):
+ as_path_command = {}
+ as_path = bestpath['as_path']
+ match_as_path = match_bestpath.get('as_path', {})
+ for option in ('confed', 'ignore', 'multipath_relax', 'multipath_relax_as_set'):
+ if as_path.get(option) and match_as_path.get(option) is None:
+ as_path_command[option] = True
+
+ if as_path_command:
+ bestpath_command['as_path'] = as_path_command
+
+ if bestpath.get('compare_routerid') and match_bestpath.get('compare_routerid') is None:
+ bestpath_command['compare_routerid'] = True
+
+ if bestpath.get('med'):
+ med_command = {}
+ med = bestpath['med']
+ match_med = match_bestpath.get('med', {})
+ for option in ('confed', 'missing_as_worst', 'always_compare_med'):
+ if med.get(option) and match_med.get(option) is None:
+ med_command[option] = True
+
+ if med_command:
+ bestpath_command['med'] = med_command
+
+ if bestpath_command:
+ command['bestpath'] = bestpath_command
+
+ if command:
+ command['bgp_as'] = as_val
+ command['vrf_name'] = vrf_name
+ commands.append(command)
+ requests.extend(self.get_delete_specific_bgp_param_request(command, command))
+
+ if requests:
+            # Reorder the requests to move default VRF requests to the end so that the deletions succeed
+ default_vrf_reqs = []
+ other_vrf_reqs = []
+ for req in requests:
+ if '=default/' in req['path']:
+ default_vrf_reqs.append(req)
+ else:
+ other_vrf_reqs.append(req)
+ requests.clear()
+ requests.extend(other_vrf_reqs)
+ requests.extend(default_vrf_reqs)
+
+ return commands, requests
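
The reordering applied to the delete requests above places requests that target the default VRF after those for all other VRFs; per the comment in the code, the deletions would otherwise fail. A minimal standalone sketch of that ordering step, assuming the same {'path': ..., 'method': ...} request dictionaries and the '=default/' path convention used above (illustration only, not part of the patch):

def reorder_delete_requests(requests):
    """Move delete requests that target the default VRF to the end of the list."""
    default_vrf_reqs = [req for req in requests if '=default/' in req['path']]
    other_vrf_reqs = [req for req in requests if '=default/' not in req['path']]
    return other_vrf_reqs + default_vrf_reqs


# Example with shortened, hypothetical paths: the default-VRF request ends up last.
sample = [
    {'path': 'data/openconfig-network-instance:network-instances/network-instance=default/...', 'method': 'DELETE'},
    {'path': 'data/openconfig-network-instance:network-instances/network-instance=Vrf_red/...', 'method': 'DELETE'},
]
print(reorder_delete_requests(sample))
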
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py
index 2a5c4cfca..05d74fff2 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_af/bgp_af.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -14,17 +14,16 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
try:
- from urllib import quote
+ from urllib import quote_plus
except ImportError:
- from urllib.parse import quote
+ from urllib.parse import quote_plus
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
- ConfigBase,
+ ConfigBase
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
to_list,
- search_obj_in_list,
- remove_empties
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
@@ -32,14 +31,11 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
- dict_to_set,
update_states,
- get_diff,
- remove_empties_from_list,
+ get_diff
)
-from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import (
- validate_bgps,
+ validate_bgps
)
from ansible.module_utils.connection import ConnectionError
@@ -49,7 +45,8 @@ TEST_KEYS = [
{'config': {'vrf_name': '', 'bgp_as': ''}},
{'afis': {'afi': '', 'safi': ''}},
{'redistribute': {'protocol': ''}},
- {'route_advertise_list': {'advertise_afi': ''}}
+ {'route_advertise_list': {'advertise_afi': ''}},
+ {'vnis': {'vni_number': ''}}
]
@@ -71,9 +68,31 @@ class Bgp_af(ConfigBase):
protocol_bgp_path = 'protocols/protocol=BGP,bgp/bgp'
l2vpn_evpn_config_path = 'l2vpn-evpn/openconfig-bgp-evpn-ext:config'
l2vpn_evpn_route_advertise_path = 'l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise'
+ l2vpn_evpn_vnis_path = 'l2vpn-evpn/openconfig-bgp-evpn-ext:vnis'
afi_safi_path = 'global/afi-safis/afi-safi'
table_connection_path = 'table-connections/table-connection'
+ advertise_attrs_map = {
+ 'advertise_pip': 'advertise-pip',
+ 'advertise_pip_ip': 'advertise-pip-ip',
+ 'advertise_pip_peer_ip': 'advertise-pip-peer-ip',
+ 'advertise_svi_ip': 'advertise-svi-ip',
+ 'advertise_default_gw': 'advertise-default-gw',
+ 'advertise_all_vni': 'advertise-all-vni',
+ 'rd': 'route-distinguisher',
+ 'rt_in': 'import-rts',
+ 'rt_out': 'export-rts'
+ }
+ non_list_advertise_attrs = (
+ 'advertise_pip',
+ 'advertise_pip_ip',
+ 'advertise_pip_peer_ip',
+ 'advertise_svi_ip',
+ 'advertise_default_gw',
+ 'advertise_all_vni',
+ 'rd'
+ )
+
def __init__(self, module):
super(Bgp_af, self).__init__(module)
@@ -125,7 +144,15 @@ class Bgp_af(ConfigBase):
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
+ state = self._module.params['state']
want = self._module.params['config']
+ if want:
+ # In state deleted, specific empty parameters are supported
+ if state != 'deleted':
+ want = [remove_empties(conf) for conf in want]
+ else:
+ want = []
+
have = existing_bgp_af_facts
resp = self.set_state(want, have)
return to_list(resp)
@@ -143,19 +170,64 @@ class Bgp_af(ConfigBase):
requests = []
state = self._module.params['state']
- diff = get_diff(want, have, TEST_KEYS)
-
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
- commands, requests = self._state_merged(want, have, diff)
+ commands, requests = self._state_merged(want, have)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
+
return commands, requests
- def _state_merged(self, want, have, diff):
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ validate_bgps(self._module, want, have)
+
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'replaced')
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ add_commands = get_diff(want, have, TEST_KEYS)
+ if add_commands:
+ commands.extend(update_states(add_commands, 'replaced'))
+ requests.extend(self.get_modify_bgp_af_requests(add_commands, have))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ validate_bgps(self._module, want, have)
+
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'overridden')
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ add_commands = get_diff(want, have, TEST_KEYS)
+ if add_commands:
+ commands.extend(update_states(add_commands, 'overridden'))
+ requests.extend(self.get_modify_bgp_af_requests(add_commands, have))
+
+ return commands, requests
+
+ def _state_merged(self, want, have):
""" The command generator when state is merged
:param want: the additive configuration as a dictionary
@@ -164,7 +236,7 @@ class Bgp_af(ConfigBase):
:returns: the commands necessary to merge the provided into
the current configuration
"""
- commands = diff
+ commands = get_diff(want, have, TEST_KEYS)
validate_bgps(self._module, commands, have)
requests = self.get_modify_bgp_af_requests(commands, have)
if commands and len(requests) > 0:
@@ -173,7 +245,7 @@ class Bgp_af(ConfigBase):
commands = []
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:param want: the objects from which the configuration should be removed
@@ -191,7 +263,6 @@ class Bgp_af(ConfigBase):
commands = want
requests = self.get_delete_bgp_af_requests(commands, have, is_delete_all)
- requests.extend(self.get_delete_route_advertise_requests(commands, have, is_delete_all))
if commands and len(requests) > 0:
commands = update_states(commands, "deleted")
@@ -208,7 +279,7 @@ class Bgp_af(ConfigBase):
return ({"path": url, "method": PATCH, "data": pay_load})
- def get_modify_advertise_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam):
+ def get_modify_evpn_adv_cfg_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam):
request = None
conf_adv_pip = conf_addr_fam.get('advertise_pip', None)
conf_adv_pip_ip = conf_addr_fam.get('advertise_pip_ip', None)
@@ -216,26 +287,30 @@ class Bgp_af(ConfigBase):
conf_adv_svi_ip = conf_addr_fam.get('advertise_svi_ip', None)
conf_adv_all_vni = conf_addr_fam.get('advertise_all_vni', None)
conf_adv_default_gw = conf_addr_fam.get('advertise_default_gw', None)
+ conf_rd = conf_addr_fam.get('rd', None)
+ conf_rt_in = conf_addr_fam.get('rt_in', [])
+ conf_rt_out = conf_addr_fam.get('rt_out', [])
afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper()
evpn_cfg = {}
- if conf_adv_pip:
+ if conf_adv_pip is not None:
evpn_cfg['advertise-pip'] = conf_adv_pip
-
if conf_adv_pip_ip:
evpn_cfg['advertise-pip-ip'] = conf_adv_pip_ip
-
if conf_adv_pip_peer_ip:
evpn_cfg['advertise-pip-peer-ip'] = conf_adv_pip_peer_ip
-
- if conf_adv_svi_ip:
+ if conf_adv_svi_ip is not None:
evpn_cfg['advertise-svi-ip'] = conf_adv_svi_ip
-
- if conf_adv_all_vni:
+ if conf_adv_all_vni is not None:
evpn_cfg['advertise-all-vni'] = conf_adv_all_vni
-
- if conf_adv_default_gw:
+ if conf_adv_default_gw is not None:
evpn_cfg['advertise-default-gw'] = conf_adv_default_gw
+ if conf_rd:
+ evpn_cfg['route-distinguisher'] = conf_rd
+ if conf_rt_in:
+ evpn_cfg['import-rts'] = conf_rt_in
+ if conf_rt_out:
+ evpn_cfg['export-rts'] = conf_rt_out
if evpn_cfg:
url = '%s=%s/%s/global' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
@@ -247,6 +322,52 @@ class Bgp_af(ConfigBase):
return request
+ def get_modify_evpn_vnis_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam):
+ request = None
+ conf_vnis = conf_addr_fam.get('vnis', [])
+ afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper()
+ vnis_dict = {}
+ vni_list = []
+
+ if conf_vnis:
+ for vni in conf_vnis:
+ vni_dict = {}
+ cfg = {}
+ vni_number = vni.get('vni_number', None)
+ adv_default_gw = vni.get('advertise_default_gw', None)
+ adv_svi_ip = vni.get('advertise_svi_ip', None)
+ rd = vni.get('rd', None)
+ rt_in = vni.get('rt_in', [])
+ rt_out = vni.get('rt_out', [])
+
+ if vni_number:
+ cfg['vni-number'] = vni_number
+ if adv_default_gw is not None:
+ cfg['advertise-default-gw'] = adv_default_gw
+ if adv_svi_ip is not None:
+ cfg['advertise-svi-ip'] = adv_svi_ip
+ if rd:
+ cfg['route-distinguisher'] = rd
+ if rt_in:
+ cfg['import-rts'] = rt_in
+ if rt_out:
+ cfg['export-rts'] = rt_out
+ if cfg:
+ vni_dict['config'] = cfg
+ vni_dict['vni-number'] = vni_number
+ vni_list.append(vni_dict)
+
+ if vni_list:
+ vnis_dict['vni'] = vni_list
+ url = '%s=%s/%s/global' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
+ afi_safi_load = {'afi-safi-name': ("openconfig-bgp-types:%s" % (afi_safi))}
+ afi_safi_load['l2vpn-evpn'] = {'openconfig-bgp-evpn-ext:vnis': vnis_dict}
+ afi_safis_load = {'afi-safis': {'afi-safi': [afi_safi_load]}}
+ pay_load = {'openconfig-network-instance:global': afi_safis_load}
+ request = {"path": url, "method": PATCH, "data": pay_load}
+
+ return request
+
def get_modify_route_advertise_list_request(self, vrf_name, conf_afi, conf_safi, conf_addr_fam):
request = []
route_advertise = []
@@ -259,7 +380,7 @@ class Bgp_af(ConfigBase):
if advertise_afi:
advertise_afi_safi = '%s_UNICAST' % advertise_afi.upper()
url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
- url += '/%s=%s/%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_route_advertise_path)
+ url += '/%s=%s/%s/route-advertise-list' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_route_advertise_path)
cfg = None
if route_map:
route_map_list = [route_map]
@@ -267,7 +388,7 @@ class Bgp_af(ConfigBase):
else:
cfg = {'advertise-afi-safi': advertise_afi_safi}
route_advertise.append({'advertise-afi-safi': advertise_afi_safi, 'config': cfg})
- pay_load = {'openconfig-bgp-evpn-ext:route-advertise': {'route-advertise-list': route_advertise}}
+ pay_load = {'openconfig-bgp-evpn-ext:route-advertise-list': route_advertise}
request = {"path": url, "method": PATCH, "data": pay_load}
return request
@@ -373,9 +494,15 @@ class Bgp_af(ConfigBase):
if request:
requests.append(request)
elif conf_afi == "l2vpn" and conf_safi == 'evpn':
- adv_req = self.get_modify_advertise_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
- if adv_req:
- requests.append(adv_req)
+ cfg_req = self.get_modify_evpn_adv_cfg_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
+ vni_req = self.get_modify_evpn_vnis_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
+ rt_adv_req = self.get_modify_route_advertise_list_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
+ if cfg_req:
+ requests.append(cfg_req)
+ if vni_req:
+ requests.append(vni_req)
+ if rt_adv_req:
+ requests.append(rt_adv_req)
return requests
def get_modify_all_af_requests(self, conf_addr_fams, vrf_name):
@@ -418,16 +545,19 @@ class Bgp_af(ConfigBase):
if conf_afi == 'ipv4' and conf_safi == 'unicast':
conf_dampening = conf_addr_fam.get('dampening', None)
- if conf_dampening:
+ if conf_dampening is not None:
request = self.get_modify_dampening_request(vrf_name, conf_afi, conf_safi, conf_dampening)
if request:
requests.append(request)
if conf_afi == "l2vpn" and conf_safi == "evpn":
- adv_req = self.get_modify_advertise_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
+ cfg_req = self.get_modify_evpn_adv_cfg_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
+ vni_req = self.get_modify_evpn_vnis_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
rt_adv_req = self.get_modify_route_advertise_list_request(vrf_name, conf_afi, conf_safi, conf_addr_fam)
- if adv_req:
- requests.append(adv_req)
+ if cfg_req:
+ requests.append(cfg_req)
+ if vni_req:
+ requests.append(vni_req)
if rt_adv_req:
requests.append(rt_adv_req)
@@ -451,13 +581,14 @@ class Bgp_af(ConfigBase):
have_redis_arr = mat_addr_fam.get('redistribute', [])
have_redis = None
have_route_map = None
- # Check the route_map, if existing route_map is different from required route_map, delete the existing route map
- if conf_route_map and have_redis_arr:
+ if have_redis_arr:
have_redis = next((redis_cfg for redis_cfg in have_redis_arr if conf_redis['protocol'] == redis_cfg['protocol']), None)
- if have_redis:
- have_route_map = have_redis.get('route_map', None)
- if have_route_map and have_route_map != conf_route_map:
- requests.append(self.get_delete_route_map_request(vrf_name, conf_afi, have_redis, have_route_map))
+
+            # Check the route_map; if the existing route_map differs from the required route_map, delete the existing route_map
+ if conf_route_map and have_redis:
+ have_route_map = have_redis.get('route_map', None)
+ if have_route_map and have_route_map != conf_route_map:
+ requests.append(self.get_delete_redistribute_route_map_request(vrf_name, conf_afi, have_redis, have_route_map))
modify_redis = {}
if conf_metric is not None:
@@ -465,7 +596,7 @@ class Bgp_af(ConfigBase):
if conf_route_map:
modify_redis['route_map'] = conf_route_map
- if modify_redis:
+ if modify_redis or have_redis is None:
modify_redis['protocol'] = conf_redis['protocol']
modify_redis_arr.append(modify_redis)
@@ -531,50 +662,100 @@ class Bgp_af(ConfigBase):
return ({'path': url, 'method': DELETE})
- def get_delete_route_advertise_requests(self, commands, have, is_delete_all):
+ def get_delete_all_vnis_request(self, vrf_name, conf_afi, conf_safi, conf_vnis):
+ requests = []
+ for vni in conf_vnis:
+ requests.append(self.get_delete_vni_request(vrf_name, conf_afi, conf_safi, vni['vni_number']))
+
+ return requests
+
+ def get_delete_vni_request(self, vrf_name, conf_afi, conf_safi, vni_number):
+ afi_safi = ('%s_%s' % (conf_afi, conf_safi)).upper()
+ url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
+ url += '/%s=%s/%s/vni=%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_vnis_path, vni_number)
+
+ return ({'path': url, 'method': DELETE})
+
+ def get_delete_vni_cfg_attr_request(self, vrf_name, conf_afi, conf_safi, vni_number, attr):
+ afi_safi = ('%s_%s' % (conf_afi, conf_safi)).upper()
+ url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
+ url += '/%s=%s/%s/vni=%s' % (self.afi_safi_path, afi_safi, self.l2vpn_evpn_vnis_path, vni_number)
+ url += '/config/%s' % attr
+
+ return ({'path': url, 'method': DELETE})
+
+ def get_delete_rt(self, conf_rt, mat_rt):
+ del_rt_list = []
+ for rt in conf_rt:
+ if mat_rt and rt in mat_rt:
+ del_rt_list.append(rt)
+ encoded_del_rt_list = quote_plus(','.join(del_rt_list))
+
+ return encoded_del_rt_list
+
+ def get_delete_route_advertise_requests(self, vrf_name, conf_afi, conf_safi, conf_route_adv_list, is_delete_all, mat_route_adv_list):
requests = []
- if not is_delete_all:
- for cmd in commands:
- vrf_name = cmd['vrf_name']
- addr_fams = cmd.get('address_family', None)
- if addr_fams:
- addr_fams = addr_fams.get('afis', [])
- if not addr_fams:
- return requests
- for addr_fam in addr_fams:
- afi = addr_fam.get('afi', None)
- safi = addr_fam.get('safi', None)
- route_advertise_list = addr_fam.get('route_advertise_list', [])
- if route_advertise_list:
- for rt_adv in route_advertise_list:
- advertise_afi = rt_adv.get('advertise_afi', None)
- route_map = rt_adv.get('route_map', None)
- # Check if the commands to be deleted are configured
- for conf in have:
- conf_vrf_name = conf['vrf_name']
- conf_addr_fams = conf.get('address_family', None)
- if conf_addr_fams:
- conf_addr_fams = conf_addr_fams.get('afis', [])
- for conf_addr_fam in conf_addr_fams:
- conf_afi = conf_addr_fam.get('afi', None)
- conf_safi = conf_addr_fam.get('safi', None)
- conf_route_advertise_list = conf_addr_fam.get('route_advertise_list', [])
- if conf_route_advertise_list:
- for conf_rt_adv in conf_route_advertise_list:
- conf_advertise_afi = conf_rt_adv.get('advertise_afi', None)
- conf_route_map = conf_rt_adv.get('route_map', None)
- # Deletion at route-advertise level
- if (not advertise_afi and vrf_name == conf_vrf_name and afi == conf_afi and safi == conf_safi):
- requests.append(self.get_delete_route_advertise_request(vrf_name, afi, safi))
- # Deletion at advertise-afi-safi level
- if (advertise_afi and not route_map and vrf_name == conf_vrf_name and afi == conf_afi and safi ==
- conf_safi and advertise_afi == conf_advertise_afi):
- requests.append(self.get_delete_route_advertise_list_request(vrf_name, afi, safi, advertise_afi))
- # Deletion at route-map level
- if (route_map and vrf_name == conf_vrf_name and afi == conf_afi and safi == conf_safi
- and advertise_afi == conf_advertise_afi and route_map == conf_route_map):
- requests.append(self.get_delete_route_advertise_route_map_request(vrf_name, afi, safi,
- advertise_afi, route_map))
+ if is_delete_all:
+ requests.append(self.get_delete_route_advertise_request(vrf_name, conf_afi, conf_safi))
+ else:
+ for conf_rt_adv in conf_route_adv_list:
+ conf_advertise_afi = conf_rt_adv.get('advertise_afi', None)
+ conf_route_map = conf_rt_adv.get('route_map', None)
+ # Check if the commands to be deleted are configured
+ for mat_rt_adv in mat_route_adv_list:
+ mat_advertise_afi = mat_rt_adv.get('advertise_afi', None)
+ mat_route_map = mat_rt_adv.get('route_map', None)
+ # Deletion at advertise-afi-safi level
+ if (not conf_route_map and conf_advertise_afi == mat_advertise_afi):
+ requests.append(self.get_delete_route_advertise_list_request(vrf_name, conf_afi, conf_safi, conf_advertise_afi))
+ # Deletion at route-map level
+ if (conf_route_map and conf_advertise_afi == mat_advertise_afi and conf_route_map == mat_route_map):
+ requests.append(self.get_delete_route_advertise_route_map_request(vrf_name, conf_afi, conf_safi, conf_advertise_afi, conf_route_map))
+
+ return requests
+
+ def get_delete_vnis_requests(self, vrf_name, conf_afi, conf_safi, conf_vnis, is_delete_all, mat_vnis):
+ requests = []
+ if is_delete_all:
+ requests.extend(self.get_delete_all_vnis_request(vrf_name, conf_afi, conf_safi, conf_vnis))
+ else:
+ for conf_vni in conf_vnis:
+ conf_vni_number = conf_vni.get('vni_number', None)
+ conf_adv_default_gw = conf_vni.get('advertise_default_gw', None)
+ conf_adv_svi_ip = conf_vni.get('advertise_svi_ip', None)
+ conf_rd = conf_vni.get('rd', None)
+ conf_rt_in = conf_vni.get('rt_in', None)
+ conf_rt_out = conf_vni.get('rt_out', None)
+ # Check if the commands to be deleted are configured
+ for mat_vni in mat_vnis:
+ mat_vni_number = mat_vni.get('vni_number', None)
+ mat_adv_default_gw = mat_vni.get('advertise_default_gw', None)
+ mat_adv_svi_ip = mat_vni.get('advertise_svi_ip', None)
+ mat_rd = mat_vni.get('rd', None)
+ mat_rt_in = mat_vni.get('rt_in', None)
+ mat_rt_out = mat_vni.get('rt_out', None)
+ # Deletion at vni-number level
+ if (conf_vni_number and conf_vni_number == mat_vni_number and not conf_adv_default_gw and not conf_adv_svi_ip and not conf_rd and not
+ conf_rt_in and not conf_rt_out):
+ requests.append(self.get_delete_vni_request(vrf_name, conf_afi, conf_safi, conf_vni_number))
+ # Deletion at config/attribute level
+ if conf_vni_number == mat_vni_number:
+ if conf_adv_default_gw is not None and conf_adv_default_gw == mat_adv_default_gw:
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, conf_afi, conf_safi, conf_vni_number, 'advertise-default-gw'))
+ if conf_adv_svi_ip is not None and conf_adv_svi_ip == mat_adv_svi_ip:
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, conf_afi, conf_safi, conf_vni_number, 'advertise-svi-ip'))
+ if conf_rd and conf_rd == mat_rd:
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, conf_afi, conf_safi, conf_vni_number, 'route-distinguisher'))
+ if conf_rt_in:
+ del_rt_list = self.get_delete_rt(conf_rt_in, mat_rt_in)
+ if del_rt_list:
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, conf_afi, conf_safi, conf_vni_number, 'import-rts=%s' %
+ del_rt_list))
+ if conf_rt_out:
+ del_rt_list = self.get_delete_rt(conf_rt_out, mat_rt_out)
+ if del_rt_list:
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, conf_afi, conf_safi, conf_vni_number, 'export-rts=%s' %
+ del_rt_list))
return requests
@@ -588,11 +769,10 @@ class Bgp_af(ConfigBase):
def get_delete_address_family_request(self, vrf_name, conf_afi, conf_safi):
request = None
- if conf_afi != "l2vpn":
- afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper()
- url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
- url += '/%s=openconfig-bgp-types:%s' % (self.afi_safi_path, afi_safi)
- request = {"path": url, "method": DELETE}
+ afi_safi = ("%s_%s" % (conf_afi, conf_safi)).upper()
+ url = '%s=%s/%s' % (self.network_instance_path, vrf_name, self.protocol_bgp_path)
+ url += '/%s=openconfig-bgp-types:%s' % (self.afi_safi_path, afi_safi)
+ request = {"path": url, "method": DELETE}
return request
@@ -630,27 +810,42 @@ class Bgp_af(ConfigBase):
conf_max_path = conf_addr_fam.get('max_path', None)
conf_dampening = conf_addr_fam.get('dampening', None)
conf_network = conf_addr_fam.get('network', [])
+ conf_route_adv_list = conf_addr_fam.get('route_advertise_list', [])
+ conf_rd = conf_addr_fam.get('rd', None)
+ conf_rt_in = conf_addr_fam.get('rt_in', [])
+ conf_rt_out = conf_addr_fam.get('rt_out', [])
+ conf_vnis = conf_addr_fam.get('vnis', [])
if is_delete_all:
- if conf_adv_pip:
- requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip'))
if conf_adv_pip_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-ip'))
if conf_adv_pip_peer_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-peer-ip'))
- if conf_adv_svi_ip:
+ if conf_adv_pip is not None:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip'))
+ if conf_adv_svi_ip is not None:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-svi-ip'))
- if conf_adv_all_vni:
+ if conf_adv_all_vni is not None:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-all-vni'))
if conf_dampening:
requests.append(self.get_delete_dampening_request(vrf_name, conf_afi, conf_safi))
if conf_network:
requests.extend(self.get_delete_network_request(vrf_name, conf_afi, conf_safi, conf_network, is_delete_all, None))
- if conf_adv_default_gw:
+ if conf_adv_default_gw is not None:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-default-gw'))
+ if conf_route_adv_list:
+ requests.extend(self.get_delete_route_advertise_requests(vrf_name, conf_afi, conf_safi, conf_route_adv_list, is_delete_all, None))
+ if conf_rd:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'route-distinguisher'))
+ if conf_rt_in:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'import-rts'))
+ if conf_rt_out:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'export-rts'))
if conf_redis_arr:
requests.extend(self.get_delete_redistribute_requests(vrf_name, conf_afi, conf_safi, conf_redis_arr, is_delete_all, None))
if conf_max_path:
requests.extend(self.get_delete_max_path_requests(vrf_name, conf_afi, conf_safi, conf_max_path, is_delete_all, None))
+ if conf_vnis:
+ requests.extend(self.get_delete_vnis_requests(vrf_name, conf_afi, conf_safi, conf_vnis, is_delete_all, None))
addr_family_del_req = self.get_delete_address_family_request(vrf_name, conf_afi, conf_safi)
if addr_family_del_req:
requests.append(addr_family_del_req)
@@ -674,54 +869,87 @@ class Bgp_af(ConfigBase):
mat_max_path = match_addr_fam.get('max_path', None)
mat_dampening = match_addr_fam.get('dampening', None)
mat_network = match_addr_fam.get('network', [])
-
- if (conf_adv_pip is None and conf_adv_pip_ip is None and conf_adv_pip_peer_ip is None and conf_adv_svi_ip is None
- and conf_adv_all_vni is None and not conf_redis_arr and conf_adv_default_gw is None
- and not conf_max_path and conf_dampening is None and not conf_network):
- if mat_advt_pip:
- requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip'))
+ mat_route_adv_list = match_addr_fam.get('route_advertise_list', None)
+ mat_rd = match_addr_fam.get('rd', None)
+ mat_rt_in = match_addr_fam.get('rt_in', [])
+ mat_rt_out = match_addr_fam.get('rt_out', [])
+ mat_vnis = match_addr_fam.get('vnis', [])
+
+ if (conf_adv_pip is None and not conf_adv_pip_ip and not conf_adv_pip_peer_ip and conf_adv_svi_ip is None
+ and conf_adv_all_vni is None and not conf_redis_arr and conf_adv_default_gw is None and not conf_max_path and conf_dampening is
+ None and not conf_network and not conf_route_adv_list and not conf_rd and not conf_rt_in and not conf_rt_out and not conf_vnis):
if mat_advt_pip_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-ip'))
if mat_advt_pip_peer_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-peer-ip'))
- if mat_advt_svi_ip:
+ if mat_advt_pip is not None:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip'))
+ if mat_advt_svi_ip is not None:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-svi-ip'))
if mat_advt_all_vni is not None:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-all-vni'))
- if mat_dampening is not None:
+ if mat_dampening:
requests.append(self.get_delete_dampening_request(vrf_name, conf_afi, conf_safi))
- if mat_advt_defaut_gw:
+ if mat_advt_defaut_gw is not None:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-default-gw'))
+ if mat_route_adv_list:
+ requests.extend(self.get_delete_route_advertise_requests(vrf_name, conf_afi, conf_safi, mat_route_adv_list, is_delete_all,
+ mat_route_adv_list))
+ if mat_rd:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'route-distinguisher'))
+ if mat_rt_in:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'import-rts'))
+ if mat_rt_out:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'export-rts'))
if mat_redis_arr:
requests.extend(self.get_delete_redistribute_requests(vrf_name, conf_afi, conf_safi, mat_redis_arr, False, mat_redis_arr))
if mat_max_path:
requests.extend(self.get_delete_max_path_requests(vrf_name, conf_afi, conf_safi, mat_max_path, is_delete_all, mat_max_path))
if mat_network:
requests.extend(self.get_delete_network_request(vrf_name, conf_afi, conf_safi, mat_network, False, mat_network))
+ if mat_vnis:
+ requests.extend(self.get_delete_vnis_requests(vrf_name, conf_afi, conf_safi, mat_vnis, is_delete_all, mat_vnis))
addr_family_del_req = self.get_delete_address_family_request(vrf_name, conf_afi, conf_safi)
if addr_family_del_req:
requests.append(addr_family_del_req)
else:
- if conf_adv_pip and mat_advt_pip:
- requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip'))
- if conf_adv_pip_ip and mat_advt_pip_ip:
+ if conf_adv_pip_ip and conf_adv_pip_ip == mat_advt_pip_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-ip'))
- if conf_adv_pip_peer_ip and mat_advt_pip_peer_ip:
+ if conf_adv_pip_peer_ip and conf_adv_pip_peer_ip == mat_advt_pip_peer_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip-peer-ip'))
- if conf_adv_svi_ip and mat_advt_svi_ip:
+ if conf_adv_pip is not None and conf_adv_pip == mat_advt_pip:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-pip'))
+ if conf_adv_svi_ip is not None and conf_adv_svi_ip == mat_advt_svi_ip:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-svi-ip'))
- if conf_adv_all_vni and mat_advt_all_vni:
+ if conf_adv_all_vni is not None and conf_adv_all_vni == mat_advt_all_vni:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-all-vni'))
- if conf_dampening and mat_dampening:
+ if conf_dampening and conf_dampening == mat_dampening:
requests.append(self.get_delete_dampening_request(vrf_name, conf_afi, conf_safi))
- if conf_adv_default_gw and mat_advt_defaut_gw:
+ if conf_adv_default_gw is not None and conf_adv_default_gw == mat_advt_defaut_gw:
requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'advertise-default-gw'))
+ if conf_route_adv_list and mat_route_adv_list:
+ requests.extend(self.get_delete_route_advertise_requests(vrf_name, conf_afi, conf_safi, conf_route_adv_list, is_delete_all,
+ mat_route_adv_list))
+ if conf_rd and conf_rd == mat_rd:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'route-distinguisher'))
+ if conf_rt_in:
+ del_rt_list = self.get_delete_rt(conf_rt_in, mat_rt_in)
+ if del_rt_list:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'import-rts=%s' %
+ del_rt_list))
+ if conf_rt_out:
+ del_rt_list = self.get_delete_rt(conf_rt_out, mat_rt_out)
+ if del_rt_list:
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, conf_afi, conf_safi, 'export-rts=%s' %
+ del_rt_list))
if conf_redis_arr and mat_redis_arr:
requests.extend(self.get_delete_redistribute_requests(vrf_name, conf_afi, conf_safi, conf_redis_arr, False, mat_redis_arr))
if conf_max_path and mat_max_path:
requests.extend(self.get_delete_max_path_requests(vrf_name, conf_afi, conf_safi, conf_max_path, is_delete_all, mat_max_path))
if conf_network and mat_network:
requests.extend(self.get_delete_network_request(vrf_name, conf_afi, conf_safi, conf_network, False, mat_network))
+ if conf_vnis and mat_vnis:
+ requests.extend(self.get_delete_vnis_requests(vrf_name, conf_afi, conf_safi, conf_vnis, is_delete_all, mat_vnis))
break
return requests
@@ -761,14 +989,14 @@ class Bgp_af(ConfigBase):
mat_ebgp = mat_max_path.get('ebgp', None)
mat_ibgp = mat_max_path.get('ibgp', None)
- if (conf_ebgp and mat_ebgp) or is_delete_all:
- requests.append({'path': url + 'ebgp', 'method': DELETE})
- if (conf_ibgp and mat_ibgp) or is_delete_all:
- requests.append({'path': url + 'ibgp', 'method': DELETE})
+ if (conf_ebgp and mat_ebgp and mat_ebgp != 1) or (is_delete_all and conf_ebgp != 1):
+ requests.append({'path': url + 'ebgp/config/maximum-paths', 'method': DELETE})
+ if (conf_ibgp and mat_ibgp and mat_ibgp != 1) or (is_delete_all and conf_ibgp != 1):
+ requests.append({'path': url + 'ibgp/config/maximum-paths', 'method': DELETE})
return requests
- def get_delete_route_map_request(self, vrf_name, conf_afi, conf_redis, conf_route_map):
+ def get_delete_redistribute_route_map_request(self, vrf_name, conf_afi, conf_redis, conf_route_map):
addr_family = "openconfig-types:%s" % (conf_afi.upper())
conf_protocol = conf_redis['protocol'].upper()
if conf_protocol == 'CONNECTED':
@@ -779,6 +1007,17 @@ class Bgp_af(ConfigBase):
url += '%s,%s,%s/config/import-policy=%s' % (src_protocol, dst_protocol, addr_family, conf_route_map)
return ({'path': url, 'method': DELETE})
+ def get_delete_redistribute_metric_request(self, vrf_name, conf_afi, conf_redis):
+ addr_family = "openconfig-types:%s" % (conf_afi.upper())
+ conf_protocol = conf_redis['protocol'].upper()
+ if conf_protocol == 'CONNECTED':
+ conf_protocol = "DIRECTLY_CONNECTED"
+ src_protocol = "openconfig-policy-types:%s" % (conf_protocol)
+ dst_protocol = "openconfig-policy-types:BGP"
+ url = '%s=%s/%s=' % (self.network_instance_path, vrf_name, self.table_connection_path)
+ url += '%s,%s,%s/config/metric' % (src_protocol, dst_protocol, addr_family)
+ return {'path': url, 'method': DELETE}
+
def get_delete_redistribute_requests(self, vrf_name, conf_afi, conf_safi, conf_redis_arr, is_delete_all, mat_redis_arr):
requests = []
for conf_redis in conf_redis_arr:
@@ -846,3 +1085,190 @@ class Bgp_af(ConfigBase):
match_cfg = next((have_cfg for have_cfg in have if have_cfg['vrf_name'] == vrf_name and have_cfg['bgp_as'] == as_val), None)
requests.extend(self.get_delete_single_bgp_af_request(cmd, is_delete_all, match_cfg))
return requests
+
+ def get_delete_commands_requests_for_replaced_overridden(self, want, have, state):
+ """Returns the commands and requests necessary to remove applicable
+ current configurations when state is replaced or overridden
+ """
+ commands = []
+ requests = []
+ if not have:
+ return commands, requests
+
+ for conf in have:
+ as_val = conf['bgp_as']
+ vrf_name = conf['vrf_name']
+ if conf.get('address_family') and conf['address_family'].get('afis'):
+ afi_list = conf['address_family']['afis']
+ else:
+ continue
+
+ match_cfg = next((cfg for cfg in want if cfg['vrf_name'] == vrf_name and cfg['bgp_as'] == as_val), None)
+ if not match_cfg:
+ # Delete all address-families in BGPs that are not
+ # specified in overridden
+ if state == 'overridden':
+ commands.append(conf)
+ requests.extend(self.get_delete_single_bgp_af_request(conf, True))
+ continue
+
+ match_afi_list = []
+ if match_cfg.get('address_family') and match_cfg['address_family'].get('afis'):
+ match_afi_list = match_cfg['address_family']['afis']
+
+ # Delete AF configs in BGPs that are replaced/overridden
+ afi_command_list = []
+ for afi_conf in afi_list:
+ afi_command = {}
+ afi = afi_conf['afi']
+ safi = afi_conf['safi']
+
+ match_afi_cfg = next((afi_cfg for afi_cfg in match_afi_list if afi_cfg['afi'] == afi and afi_cfg['safi'] == safi), None)
+ # Delete address-families that are not specified
+ if not match_afi_cfg:
+ afi_command_list.append(afi_conf)
+ requests.extend(self.get_delete_single_bgp_af_request({'bgp_as': as_val, 'vrf_name': vrf_name, 'address_family': {'afis': [afi_conf]}},
+ True))
+ continue
+
+ if afi == 'ipv4' and safi == 'unicast':
+ if afi_conf.get('dampening') and match_afi_cfg.get('dampening') is None:
+ afi_command['dampening'] = afi_conf['dampening']
+ requests.append(self.get_delete_dampening_request(vrf_name, afi, safi))
+
+ if afi == 'l2vpn' and safi == 'evpn':
+ for option in self.non_list_advertise_attrs:
+ if afi_conf.get(option) is not None and match_afi_cfg.get(option) is None:
+ afi_command[option] = afi_conf[option]
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, afi, safi, self.advertise_attrs_map[option]))
+
+ for option in ('rt_in', 'rt_out'):
+ if afi_conf.get(option):
+ del_rt = self._get_diff_list(afi_conf[option], match_afi_cfg.get(option, []))
+ if del_rt:
+ afi_command[option] = del_rt
+ requests.append(self.get_delete_advertise_attribute_request(vrf_name, afi, safi,
+ '{0}={1}'.format(self.advertise_attrs_map[option],
+ quote_plus(','.join(del_rt)))))
+
+ if afi_conf.get('route_advertise_list'):
+ route_adv_list = []
+ match_route_adv_list = match_afi_cfg.get('route_advertise_list', [])
+ for route_adv in afi_conf['route_advertise_list']:
+ advertise_afi = route_adv['advertise_afi']
+ route_map = route_adv.get('route_map')
+ match_route_adv = next((adv_cfg for adv_cfg in match_route_adv_list if adv_cfg['advertise_afi'] == advertise_afi), None)
+ if not match_route_adv:
+ route_adv_list.append(route_adv)
+ requests.append(self.get_delete_route_advertise_list_request(vrf_name, afi, safi, advertise_afi))
+ # Delete existing route-map before configuring
+ # new route-map.
+ elif route_map and route_map != match_route_adv.get('route_map'):
+ route_adv_list.append(route_adv)
+ requests.append(self.get_delete_route_advertise_route_map_request(vrf_name, afi, safi, advertise_afi, route_map))
+
+ if route_adv_list:
+ afi_command['route_advertise_list'] = route_adv_list
+
+ if afi_conf.get('vnis'):
+ vni_command_list = []
+ match_vni_list = match_afi_cfg.get('vnis', [])
+ for vni_conf in afi_conf['vnis']:
+ vni_number = vni_conf['vni_number']
+ match_vni = next((vni_cfg for vni_cfg in match_vni_list if vni_cfg['vni_number'] == vni_number), None)
+ # Delete entire VNIs that are not specified
+ if not match_vni:
+ vni_command_list.append(vni_conf)
+ requests.append(self.get_delete_vni_request(vrf_name, afi, safi, vni_number))
+ else:
+ vni_command = {}
+ for option in ('advertise_default_gw', 'advertise_svi_ip', 'rd'):
+ if vni_conf.get(option) is not None and match_vni.get(option) is None:
+ vni_command[option] = vni_conf[option]
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, afi, safi, vni_number,
+ self.advertise_attrs_map[option]))
+
+ for option in ('rt_in', 'rt_out'):
+ if vni_conf.get(option):
+ del_rt = self._get_diff_list(vni_conf[option], match_vni.get(option, []))
+ if del_rt:
+ vni_command[option] = del_rt
+ requests.append(self.get_delete_vni_cfg_attr_request(vrf_name, afi, safi, vni_number,
+ '{0}={1}'.format(self.advertise_attrs_map[option],
+ quote_plus(','.join(del_rt)))))
+
+ if vni_command:
+ vni_command['vni_number'] = vni_number
+ vni_command_list.append(vni_command)
+
+ if vni_command_list:
+ afi_command['vnis'] = vni_command_list
+
+ elif afi in ['ipv4', 'ipv6'] and safi == 'unicast':
+ if afi_conf.get('network'):
+ del_network = self._get_diff_list(afi_conf['network'], match_afi_cfg.get('network', []))
+ if del_network:
+ afi_command['network'] = del_network
+ requests.extend(self.get_delete_network_request(vrf_name, afi, safi, del_network, True, None))
+
+ if afi_conf.get('redistribute'):
+ match_redis_list = match_afi_cfg.get('redistribute')
+ if not match_redis_list:
+ afi_command['redistribute'] = afi_conf['redistribute']
+ requests.extend(self.get_delete_redistribute_requests(vrf_name, afi, safi, afi_conf['redistribute'], True, None))
+ else:
+ redis_command_list = []
+ for redis_conf in afi_conf['redistribute']:
+ protocol = redis_conf['protocol']
+ match_redis = next((redis_cfg for redis_cfg in match_redis_list if redis_cfg['protocol'] == protocol), None)
+ # Delete complete protocol redistribute
+ # configuration if not specified
+ if not match_redis:
+ redis_command_list.append(redis_conf)
+ requests.extend(self.get_delete_redistribute_requests(vrf_name, afi, safi, [redis_conf], True, None))
+ # Delete metric, route_map for specified
+ # protocol if they are not specified.
+ else:
+ redis_command = {}
+ if redis_conf.get('metric') is not None and match_redis.get('metric') is None:
+ redis_command['metric'] = redis_conf['metric']
+ requests.append(self.get_delete_redistribute_metric_request(vrf_name, afi, redis_conf))
+ if redis_conf.get('route_map') is not None and match_redis.get('route_map') is None:
+ redis_command['route_map'] = redis_conf['route_map']
+ requests.append(self.get_delete_redistribute_route_map_request(vrf_name, afi, redis_conf, redis_command['route_map']))
+
+ if redis_command:
+ redis_command['protocol'] = protocol
+ redis_command_list.append(redis_command)
+
+ if redis_command_list:
+ afi_command['redistribute'] = redis_command_list
+
+ if afi_conf.get('max_path'):
+ max_path_command = {}
+ match_max_path = match_afi_cfg.get('max_path', {})
+ if afi_conf['max_path'].get('ibgp') and afi_conf['max_path']['ibgp'] != 1 and match_max_path.get('ibgp') is None:
+ max_path_command['ibgp'] = afi_conf['max_path']['ibgp']
+ if afi_conf['max_path'].get('ebgp') and afi_conf['max_path']['ebgp'] != 1 and match_max_path.get('ebgp') is None:
+ max_path_command['ebgp'] = afi_conf['max_path']['ebgp']
+
+ if max_path_command:
+ afi_command['max_path'] = max_path_command
+ requests.extend(self.get_delete_max_path_requests(vrf_name, afi, safi, afi_command['max_path'], False, afi_command['max_path']))
+
+ if afi_command:
+ afi_command['afi'] = afi
+ afi_command['safi'] = safi
+ afi_command_list.append(afi_command)
+
+ if afi_command_list:
+ commands.append({'bgp_as': as_val, 'vrf_name': vrf_name, 'address_family': {'afis': afi_command_list}})
+
+ return commands, requests
+
+ @staticmethod
+ def _get_diff_list(base_list, compare_with_list):
+ if not compare_with_list:
+ return base_list
+
+ return [item for item in base_list if item not in compare_with_list]
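
For the route-target deletions added above, get_delete_rt() filters the requested route targets down to those actually present in the current configuration and URL-encodes the comma-joined result with quote_plus, matching the import change at the top of the file. A small standalone sketch of that encoding step, using hypothetical route-target values (illustration only, not part of the patch):

from urllib.parse import quote_plus


def build_rt_delete_suffix(conf_rt, mat_rt):
    """Return the URL-encoded, comma-separated route targets present in both lists."""
    del_rt_list = [rt for rt in conf_rt if mat_rt and rt in mat_rt]
    return quote_plus(','.join(del_rt_list))


# Hypothetical values; the result ('1%3A1%2C2%3A2') would be appended to an
# 'import-rts=' or 'export-rts=' DELETE path.
print(build_rt_delete_suffix(['1:1', '2:2', '3:3'], ['1:1', '2:2']))
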
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py
index dc2b023b1..d57cf36e2 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_as_paths/bgp_as_paths.py
@@ -17,13 +17,13 @@ from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.c
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
+ search_obj_in_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
get_diff,
)
-from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
to_request,
edit_config
@@ -120,50 +120,142 @@ class Bgp_as_paths(ConfigBase):
commands = []
requests = []
state = self._module.params['state']
- for i in want:
- if i.get('members'):
- temp = []
- for j in i['members']:
- temp.append(j.replace('\\\\', '\\'))
- i['members'] = temp
- diff = get_diff(want, have)
- for i in want:
- if i.get('members'):
- temp = []
- for j in i['members']:
- temp.append(j.replace('\\', '\\\\'))
- i['members'] = temp
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
+ diff = get_diff(want, have)
commands, requests = self._state_merged(want, have, diff)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
return commands, requests
- @staticmethod
- def _state_replaced(**kwargs):
+ def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
+ add_commands = []
+ del_commands = []
commands = []
- return commands
+ requests = []
+
+ for cmd in want:
+ # Set action to deny if not specfied for as-path-list
+            # Set action to deny if not specified for as-path-list
+ cmd['permit'] = False
+
+ match = search_obj_in_list(cmd['name'], have, 'name')
+ # Replace existing as-path-list
+ if match:
+ # Delete entire as-path-list if no members are specified
+ if not cmd.get('members'):
+ del_commands.append(match)
+ requests.append(self.get_delete_single_as_path_request(cmd['name']))
+ else:
+ if cmd['permit'] != match['permit']:
+ # If action is changed, delete the entire as-path list
+ # and add the given configuration
+ del_commands.append(match)
+ requests.append(self.get_delete_single_as_path_request(cmd['name']))
+ add_commands.append(cmd)
+ requests.append(self.get_new_add_request(cmd))
+ else:
+ want_members_set = set(cmd['members'])
+ have_members_set = set(match['members'])
+ members_to_delete = list(have_members_set.difference(want_members_set))
+ members_to_add = list(want_members_set.difference(have_members_set))
+ if members_to_delete:
+ del_commands.append({'name': cmd['name'], 'permit': cmd['permit'], 'members': members_to_delete})
+ if len(members_to_delete) == len(match['members']):
+ requests.append(self.get_delete_single_as_path_request(cmd['name']))
+ else:
+ requests.append(self.get_delete_single_as_path_member_request(cmd['name'], members_to_delete))
+
+ if members_to_add:
+ add_commands.append({'name': cmd['name'], 'permit': cmd['permit'], 'members': members_to_add})
+ requests.append(self.get_new_add_request({'name': cmd['name'], 'permit': cmd['permit'], 'members': members_to_add}))
+ else:
+ if cmd.get('members'):
+ add_commands.append(cmd)
+ requests.append(self.get_new_add_request(cmd))
+
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+
+ if add_commands:
+ commands.extend(update_states(add_commands, 'replaced'))
+
+ return commands, requests
- @staticmethod
- def _state_overridden(**kwargs):
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
+ add_commands = []
+ del_commands = []
commands = []
- return commands
+ requests = []
+
+ # Delete as-path-lists that are not specified
+ for cfg in have:
+ if not search_obj_in_list(cfg['name'], want, 'name'):
+ del_commands.append(cfg)
+ requests.append(self.get_delete_single_as_path_request(cfg['name']))
+
+ for cmd in want:
+            # Set action to deny if not specified for as-path-list
+ if cmd.get('permit') is None:
+ cmd['permit'] = False
+
+ match = search_obj_in_list(cmd['name'], have, 'name')
+ # Override existing as-path-list
+ if match:
+ # Delete entire as-path-list if no members are specified
+ if not cmd.get('members'):
+ del_commands.append(match)
+ requests.append(self.get_delete_single_as_path_request(cmd['name']))
+ else:
+ if cmd['permit'] != match['permit']:
+ # If action is changed, delete the entire as-path list
+ # and add the given configuration
+ del_commands.append(match)
+ requests.append(self.get_delete_single_as_path_request(cmd['name']))
+ add_commands.append(cmd)
+ requests.append(self.get_new_add_request(cmd))
+ else:
+ want_members_set = set(cmd['members'])
+ have_members_set = set(match['members'])
+ members_to_delete = list(have_members_set.difference(want_members_set))
+ members_to_add = list(want_members_set.difference(have_members_set))
+ if members_to_delete:
+ del_commands.append({'name': cmd['name'], 'permit': cmd['permit'], 'members': members_to_delete})
+ if len(members_to_delete) == len(match['members']):
+ requests.append(self.get_delete_single_as_path_request(cmd['name']))
+ else:
+ requests.append(self.get_delete_single_as_path_member_request(cmd['name'], members_to_delete))
+
+ if members_to_add:
+ add_commands.append({'name': cmd['name'], 'permit': cmd['permit'], 'members': members_to_add})
+ requests.append(self.get_new_add_request({'name': cmd['name'], 'permit': cmd['permit'], 'members': members_to_add}))
+ else:
+ if cmd.get('members'):
+ add_commands.append(cmd)
+ requests.append(self.get_new_add_request(cmd))
+
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+
+ if add_commands:
+ commands.extend(update_states(add_commands, 'overridden'))
+
+ return commands, requests
def _state_merged(self, want, have, diff):
""" The command generator when state is merged
@@ -173,6 +265,19 @@ class Bgp_as_paths(ConfigBase):
the current configuration
"""
commands = diff
+ for cmd in commands:
+ match = next((item for item in have if item['name'] == cmd['name']), None)
+ if match:
+ # Use existing action if not specified
+ if cmd.get('permit') is None:
+ cmd['permit'] = match['permit']
+ elif cmd['permit'] != match['permit']:
+ action = 'permit' if match['permit'] else 'deny'
+ self._module.fail_json(msg='Cannot override existing action {0} of {1}'.format(action, cmd['name']))
+            # Set action to deny if not specified for a new as-path-list
+ elif cmd.get('permit') is None:
+ cmd['permit'] = False
+
requests = self.get_modify_as_path_list_requests(commands, have)
if commands and len(requests) > 0:
commands = update_states(commands, "merged")
@@ -181,7 +286,7 @@ class Bgp_as_paths(ConfigBase):
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
@@ -239,7 +344,7 @@ class Bgp_as_paths(ConfigBase):
requests.append(request)
return requests
- def get_delete_single_as_path_member_requests(self, name, members):
+ def get_delete_single_as_path_member_request(self, name, members):
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:"
url = url + "bgp-defined-sets/as-path-sets/as-path-set={name}/config/{members_param}"
method = "DELETE"
@@ -248,15 +353,8 @@ class Bgp_as_paths(ConfigBase):
request = {"path": url.format(name=name, members_param=members_str), "method": method}
return request
- def get_delete_single_as_path_requests(self, name):
- url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set={}"
- method = "DELETE"
- request = {"path": url.format(name), "method": method}
- return request
-
- def get_delete_single_as_path_action_requests(self, name):
+ def get_delete_single_as_path_request(self, name):
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set={}"
- url = url + "/openconfig-bgp-policy-ext:action"
method = "DELETE"
request = {"path": url.format(name), "method": method}
return request
@@ -270,25 +368,18 @@ class Bgp_as_paths(ConfigBase):
name = cmd['name']
members = cmd['members']
permit = cmd['permit']
- if members:
- diff_members = []
- for item in have:
- if item['name'] == name:
- for member_want in cmd['members']:
- if item['members']:
- if str(member_want) in item['members']:
- diff_members.append(member_want)
- if diff_members:
- requests.append(self.get_delete_single_as_path_member_requests(name, diff_members))
-
- elif permit:
- for item in have:
- if item['name'] == name:
- requests.append(self.get_delete_single_as_path_action_requests(name))
- else:
- for item in have:
- if item['name'] == name:
- requests.append(self.get_delete_single_as_path_requests(name))
+ match = next((item for item in have if item['name'] == cmd['name']), None)
+ if match:
+ if members:
+ if match.get('members'):
+ del_members = set(match['members']).intersection(set(members))
+ if del_members:
+ if len(del_members) == len(match['members']):
+ requests.append(self.get_delete_single_as_path_request(name))
+ else:
+ requests.append(self.get_delete_single_as_path_member_request(name, del_members))
+ else:
+ requests.append(self.get_delete_single_as_path_request(name))
return requests
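
In the new replaced and overridden handlers above, member changes for an existing as-path-list are computed as two set differences: members present only in the current configuration are deleted, and members present only in the requested configuration are added. A compact standalone sketch of that split, with hypothetical member lists (illustration only, not part of the patch):

def split_member_changes(want_members, have_members):
    """Return (members_to_add, members_to_delete) for an as-path-list update."""
    want_set = set(want_members)
    have_set = set(have_members)
    return sorted(want_set - have_set), sorted(have_set - want_set)


# Hypothetical members: '300' is added, '200' is deleted, '100' is left untouched.
to_add, to_delete = split_member_changes(['100', '300'], ['100', '200'])
print(to_add, to_delete)
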
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py
index 670fb26d3..82ed70a3f 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_communities/bgp_communities.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -62,6 +62,13 @@ class Bgp_communities(ConfigBase):
'bgp_communities',
]
+ standard_communities_map = {
+ 'no_peer': 'NOPEER',
+ 'no_export': 'NO_EXPORT',
+ 'no_advertise': 'NO_ADVERTISE',
+ 'local_as': 'NO_EXPORT_SUBCONFED'
+ }
+
def __init__(self, module):
super(Bgp_communities, self).__init__(module)
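As a quick illustration of how standard_communities_map is consumed later in this file (attribute values below are hypothetical), each truthy well-known attribute in a playbook entry is translated into its OpenConfig community-member string:

    standard_communities_map = {
        'no_peer': 'NOPEER',
        'no_export': 'NO_EXPORT',
        'no_advertise': 'NO_ADVERTISE',
        'local_as': 'NO_EXPORT_SUBCONFED'
    }
    conf = {'name': 'STD1', 'type': 'standard', 'no_export': True, 'no_advertise': False}
    community_members = [v for k, v in standard_communities_map.items() if conf.get(k)]
    print(community_members)   # ['NO_EXPORT']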
@@ -89,6 +96,7 @@ class Bgp_communities(ConfigBase):
existing_bgp_communities_facts = self.get_bgp_communities_facts()
commands, requests = self.set_config(existing_bgp_communities_facts)
+
if commands and len(requests) > 0:
if not self._module.check_mode:
try:
@@ -116,6 +124,13 @@ class Bgp_communities(ConfigBase):
to the desired configuration
"""
want = self._module.params['config']
+ if want:
+ for conf in want:
+ if conf.get("match", None):
+ conf["match"] = conf["match"].upper()
+ if conf.get("members", {}) and conf['members'].get("regex", []):
+ conf['members']['regex'].sort()
+
have = existing_bgp_communities_facts
resp = self.set_state(want, have)
return to_list(resp)
@@ -138,17 +153,16 @@ class Bgp_communities(ConfigBase):
# fp.write('comm: have: ' + str(have) + '\n')
# fp.write('comm: diff: ' + str(diff) + '\n')
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(want, have, diff)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
return commands, requests
- @staticmethod
- def _state_replaced(**kwargs):
+ def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
@@ -156,10 +170,13 @@ class Bgp_communities(ConfigBase):
to the desired configuration
"""
commands = []
- return commands
+ requests = []
+
+ commands, requests = self.get_replaced_overridden_config(want, have, "replaced")
- @staticmethod
- def _state_overridden(**kwargs):
+ return commands, requests
+
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
@@ -167,7 +184,11 @@ class Bgp_communities(ConfigBase):
to the desired configuration
"""
commands = []
- return commands
+ requests = []
+
+ commands, requests = self.get_replaced_overridden_config(want, have, "overridden")
+
+ return commands, requests
def _state_merged(self, want, have, diff):
""" The command generator when state is merged
@@ -177,7 +198,7 @@ class Bgp_communities(ConfigBase):
the current configuration
"""
commands = diff
- requests = self.get_modify_bgp_community_requests(commands, have)
+ requests = self.get_modify_bgp_community_requests(commands, have, "merged")
if commands and len(requests) > 0:
commands = update_states(commands, "merged")
else:
@@ -185,7 +206,7 @@ class Bgp_communities(ConfigBase):
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
@@ -217,28 +238,18 @@ class Bgp_communities(ConfigBase):
return commands, requests
- def get_delete_single_bgp_community_member_requests(self, name, type, members):
+ def get_delete_single_bgp_community_member_requests(self, name, members):
requests = []
for member in members:
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:"
url = url + "bgp-defined-sets/community-sets/community-set={name}/config/{members_param}"
method = "DELETE"
- memberstr = member
- if type == 'expanded':
- memberstr = 'REGEX:' + member
- members_params = {'community-member': memberstr}
+ members_params = {'community-member': member}
members_str = urlencode(members_params)
request = {"path": url.format(name=name, members_param=members_str), "method": method}
requests.append(request)
return requests
- def get_delete_all_members_bgp_community_requests(self, name):
- url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:"
- url = url + "bgp-defined-sets/community-sets/community-set={}/config/community-member"
- method = "DELETE"
- request = {"path": url.format(name), "method": method}
- return request
-
def get_delete_single_bgp_community_requests(self, name):
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set={}"
method = "DELETE"
@@ -255,70 +266,90 @@ class Bgp_communities(ConfigBase):
return requests
def get_delete_bgp_communities(self, commands, have, is_delete_all):
- # with open('/root/ansible_log.log', 'a+') as fp:
- # fp.write('bgp_commmunities: delete requests ************** \n')
requests = []
if is_delete_all:
requests = self.get_delete_all_bgp_communities(commands)
else:
for cmd in commands:
name = cmd['name']
- type = cmd['type']
- members = cmd['members']
- if members:
- if members['regex']:
- diff_members = []
- for item in have:
- if item['name'] == name and item['members']:
- for member_want in members['regex']:
- if str(member_want) in item['members']['regex']:
- diff_members.append(member_want)
- if diff_members:
- requests.extend(self.get_delete_single_bgp_community_member_requests(name, type, diff_members))
- else:
- for item in have:
- if item['name'] == name:
- if item['members']:
- requests.append(self.get_delete_all_members_bgp_community_requests(name))
- else:
- for item in have:
- if item['name'] == name:
+ members = cmd.get('members', None)
+ cmd_type = cmd['type']
+ diff_members = []
+
+ for item in have:
+ if item['name'] == name:
+ if 'permit' not in cmd or cmd['permit'] is None:
+ cmd['permit'] = item['permit']
+
+ if cmd == item:
requests.append(self.get_delete_single_bgp_community_requests(name))
+ break
+
+ if cmd_type == "standard":
+ for attr in self.standard_communities_map:
+ if cmd.get(attr, None) and item[attr] and cmd[attr] == item[attr]:
+ diff_members.append(self.standard_communities_map[attr])
+
+ if members:
+ if members.get('regex', []):
+ for member_want in members['regex']:
+ if item.get('members', None) and item['members'].get('regex', []):
+ if str(member_want) in item['members']['regex']:
+ diff_members.append("REGEX:" + str(member_want))
+ else:
+ requests.append(self.get_delete_single_bgp_community_requests(name))
+
+ else:
+ if cmd_type == "standard":
+ no_attr = True
+ for attr in self.standard_communities_map:
+ if cmd.get(attr, None):
+ no_attr = False
+ break
+ if no_attr:
+ requests.append(self.get_delete_single_bgp_community_requests(name))
+ else:
+ requests.append(self.get_delete_single_bgp_community_requests(name))
+ break
+
+ if diff_members:
+ requests.extend(self.get_delete_single_bgp_community_member_requests(name, diff_members))
- # with open('/root/ansible_log.log', 'a+') as fp:
- # fp.write('bgp_commmunities: delete requests' + str(requests) + '\n')
return requests
def get_new_add_request(self, conf):
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
method = "PATCH"
- # members = conf['members']
- # members_str = ', '.join(members)
- # members_list = list()
- # for member in members.split(','):
- # members_list.append(str(member))
+ community_members = []
+ community_action = ""
if 'match' not in conf:
conf['match'] = "ANY"
- # with open('/root/ansible_log.log', 'a+') as fp:
- # fp.write('bgp_communities: conf' + str(conf) + '\n')
- if 'local_as' in conf and conf['local_as']:
- conf['members']['regex'].append("NO_EXPORT_SUBCONFED")
- if 'no_peer' in conf and conf['no_peer']:
- conf['members']['regex'].append("NOPEER")
- if 'no_export' in conf and conf['no_export']:
- conf['members']['regex'].append("NO_EXPORT")
- if 'no_advertise' in conf and conf['no_advertise']:
- conf['members']['regex'].append("NO_ADVERTISE")
- input_data = {'name': conf['name'], 'members_list': conf['members']['regex'], 'match': conf['match']}
- if conf['type'] == 'expanded':
- input_data['regex'] = "REGEX:"
- else:
- input_data['regex'] = ""
+
+ if conf['type'] == 'standard':
+ for attr in self.standard_communities_map:
+ if attr in conf and conf[attr]:
+ community_members.append(self.standard_communities_map[attr])
+ if 'members' in conf and conf['members'] and conf['members'].get('regex', []):
+ for i in conf['members']['regex']:
+ community_members.extend([str(i)])
+ if not community_members:
+ self._module.fail_json(msg='Cannot create standard community-list {0} without community attributes'.format(conf['name']))
+
+ elif conf['type'] == 'expanded':
+ if 'members' in conf and conf['members'] and conf['members'].get('regex', []):
+ for i in conf['members']['regex']:
+ community_members.extend(["REGEX:" + str(i)])
+ if not community_members:
+ self._module.fail_json(msg='Cannot create expanded community-list {0} without community attributes'.format(conf['name']))
+
if conf['permit']:
- input_data['permit'] = "PERMIT"
+ community_action = "PERMIT"
else:
- input_data['permit'] = "DENY"
+ community_action = "DENY"
+
+ input_data = {'name': conf['name'], 'members_list': community_members, 'match': conf['match'].upper(), 'permit': community_action}
+
payload_template = """
{
"openconfig-bgp-policy:community-sets": {
@@ -328,7 +359,7 @@ class Bgp_communities(ConfigBase):
"config": {
"community-set-name": "{{name}}",
"community-member": [
- {% for member in members_list %}"{{regex}}{{member}}"{%- if not loop.last -%},{% endif %}{%endfor%}
+ {% for member in members_list %}"{{member}}"{%- if not loop.last -%},{% endif %}{%endfor%}
],
"openconfig-bgp-policy-ext:action": "{{permit}}",
"match-set-options": "{{match}}"
@@ -342,27 +373,118 @@ class Bgp_communities(ConfigBase):
intended_payload = t.render(input_data)
ret_payload = json.loads(intended_payload)
request = {"path": url, "method": method, "data": ret_payload}
- # with open('/root/ansible_log.log', 'a+') as fp:
- # fp.write('bgp_communities: request' + str(request) + '\n')
+
return request
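The request body above is produced by rendering a Jinja2 template and parsing the result as JSON. A trimmed, standalone sketch of that pattern (the template here is a simplified stand-in, not the module's full payload):

    import json
    from jinja2 import Template

    payload_template = '''
    {
      "openconfig-bgp-policy:community-sets": {
        "community-set": [{
          "community-set-name": "{{name}}",
          "config": {
            "community-member": [
              {% for member in members_list %}"{{member}}"{%- if not loop.last -%},{% endif %}{% endfor %}
            ],
            "openconfig-bgp-policy-ext:action": "{{permit}}",
            "match-set-options": "{{match}}"
          }
        }]
      }
    }
    '''
    input_data = {'name': 'EXP1', 'members_list': ['REGEX:100:.*'], 'match': 'ANY', 'permit': 'PERMIT'}
    print(json.loads(Template(payload_template).render(input_data)))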
- def get_modify_bgp_community_requests(self, commands, have):
+ def get_modify_bgp_community_requests(self, commands, have, cur_state):
requests = []
if not commands:
return requests
for conf in commands:
- for item in have:
- if item['name'] == conf['name']:
- if 'type' not in conf:
- conf['type'] = item['type']
- if 'permit' not in conf:
- conf['permit'] = item['permit']
- if 'match' not in conf:
- conf['match'] = item['match']
- if 'members' not in conf:
- conf['members'] = item['members']
+ if cur_state == "merged":
+ for item in have:
+ if item['name'] == conf['name']:
+ if 'type' not in conf:
+ conf['type'] = item['type']
+ if 'permit' not in conf or conf['permit'] is None:
+ conf['permit'] = item['permit']
+ if 'match' not in conf:
+ conf['match'] = item['match']
+ if conf['type'] == "standard":
+ for attr in self.standard_communities_map:
+ if attr not in conf and attr in item:
+ conf[attr] = item[attr]
+ else:
+ if 'members' not in conf:
+ if item.get('members', {}) and item['members'].get('regex', []):
+ conf['members'] = {'regex': item['members']['regex']}
+ else:
+ conf['members'] = item['members']
+ break
+
new_req = self.get_new_add_request(conf)
if new_req:
requests.append(new_req)
return requests
+
+ def get_replaced_overridden_config(self, want, have, cur_state):
+ commands, requests = [], []
+
+ commands_del, requests_del = [], []
+ commands_add, requests_add = [], []
+
+ for conf in want:
+ name = conf['name']
+ in_have = False
+ for have_conf in have:
+ if have_conf['name'] == name:
+ in_have = True
+ if have_conf['type'] != conf['type']:
+ # If both community lists have the same name but different types
+ commands_del.append(have_conf)
+ commands_add.append(conf)
+ else:
+ is_change = False
+
+ if have_conf['permit'] != conf['permit']:
+ is_change = True
+
+ if have_conf['match'] != conf['match']:
+ is_change = True
+
+ if conf["type"] == "standard":
+ no_attr = True
+ for attr in self.standard_communities_map:
+ if not conf.get(attr, None):
+ if have_conf.get(attr, None):
+ is_change = True
+ else:
+ no_attr = False
+ if not have_conf.get(attr, None):
+ is_change = True
+
+ if no_attr:
+ # Since standard type needs at least one attribute to exist
+ self._module.fail_json(msg='Cannot create standard community-list {0} without community attributes'.format(conf['name']))
+ else:
+ members = conf.get('members', {})
+ if members and members.get('regex', []):
+ if have_conf.get('members', {}) and have_conf['members'].get('regex', []):
+ if set(have_conf['members']['regex']).symmetric_difference(set(members['regex'])):
+ is_change = True
+ else:
+ # If there are no members in any community list of want, then
+ # abort the playbook with an error message, since the expanded
+ # type needs a community-member to exist
+ self._module.fail_json(msg='Cannot create expanded community-list {0} without community attributes'.format(conf['name']))
+
+ if is_change:
+ commands_add.append(conf)
+ commands_del.append(have_conf)
+ break
+
+ if not in_have:
+ commands_add.append(conf)
+
+ if cur_state == "overridden":
+ for have_conf in have:
+ in_want = next((conf for conf in want if conf['name'] == have_conf['name']), None)
+ if not in_want:
+ commands_del.append(have_conf)
+
+ if commands_del:
+ requests_del = self.get_delete_bgp_communities(commands_del, have, False)
+
+ if len(requests_del) > 0:
+ commands.extend(update_states(commands_del, "deleted"))
+ requests.extend(requests_del)
+
+ if commands_add:
+ requests_add = self.get_modify_bgp_community_requests(commands_add, have, cur_state)
+
+ if len(requests_add) > 0:
+ commands.extend(update_states(commands_add, cur_state))
+ requests.extend(requests_add)
+
+ return commands, requests
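In the 'overridden' flow above, anything present on the device but absent from the playbook is queued for deletion. A tiny self-contained sketch of that pruning step (community-list names are invented):

    want = [{'name': 'C1'}, {'name': 'C2'}]
    have = [{'name': 'C1'}, {'name': 'C3'}]
    commands_del = [h for h in have
                    if next((w for w in want if w['name'] == h['name']), None) is None]
    print([c['name'] for c in commands_del])   # ['C3']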
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py
index 751f88e48..8cd9953e6 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_ext_communities/bgp_ext_communities.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -60,6 +60,11 @@ class Bgp_ext_communities(ConfigBase):
'bgp_ext_communities',
]
+ standard_communities_map = {
+ "route_origin": "route-origin",
+ "route_target": "route-target"
+ }
+
def __init__(self, module):
super(Bgp_ext_communities, self).__init__(module)
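For orientation, a small hypothetical example of how the map above is used to prefix standard extended-community members before they are sent to the device (values are made up):

    standard_communities_map = {'route_origin': 'route-origin', 'route_target': 'route-target'}
    members = {'route_target': ['65000:100'], 'route_origin': []}
    community_members = []
    for attr in standard_communities_map:
        for m in members.get(attr, []):
            community_members.append(standard_communities_map[attr] + ':' + str(m))
    print(community_members)   # ['route-target:65000:100']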
@@ -87,6 +92,7 @@ class Bgp_ext_communities(ConfigBase):
existing_bgp_ext_communities_facts = self.get_bgp_ext_communities_facts()
commands, requests = self.set_config(existing_bgp_ext_communities_facts)
+
if commands and len(requests) > 0:
if not self._module.check_mode:
try:
@@ -114,6 +120,21 @@ class Bgp_ext_communities(ConfigBase):
to the desired configuration
"""
want = self._module.params['config']
+ if want:
+ for conf in want:
+ cmd_type = conf.get("type", None)
+ if cmd_type and conf.get("match", None):
+ conf['match'] = conf['match'].lower()
+ if cmd_type and conf.get("members", {}):
+ if cmd_type == "expanded":
+ if conf['members'].get("regex", []):
+ conf['members']['regex'].sort()
+ else:
+ if conf['members'].get("route_origin", []):
+ conf['members']['route_origin'].sort()
+ if conf['members'].get("route_target", []):
+ conf['members']['route_target'].sort()
+
have = existing_bgp_ext_communities_facts
resp = self.set_state(want, have)
return to_list(resp)
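A standalone sketch of the 'want' normalization introduced above, so comparisons against the gathered facts are case- and order-insensitive (sample values are hypothetical):

    conf = {'type': 'expanded', 'match': 'ALL', 'members': {'regex': ['65100:.*', '65000:.*']}}
    if conf.get('match'):
        conf['match'] = conf['match'].lower()
    if conf['type'] == 'expanded' and conf.get('members', {}).get('regex'):
        conf['members']['regex'].sort()
    print(conf)   # match becomes 'all', regex list is sorted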
@@ -133,17 +154,16 @@ class Bgp_ext_communities(ConfigBase):
new_want = self.validate_type(want)
diff = get_diff(new_want, have)
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(want, have, diff)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
return commands, requests
- @staticmethod
- def _state_replaced(**kwargs):
+ def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
@@ -151,10 +171,13 @@ class Bgp_ext_communities(ConfigBase):
to the desired configuration
"""
commands = []
- return commands
+ requests = []
+
+ commands, requests = self.get_replaced_overridden_config(want, have, "replaced")
- @staticmethod
- def _state_overridden(**kwargs):
+ return commands, requests
+
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
@@ -162,7 +185,11 @@ class Bgp_ext_communities(ConfigBase):
to the desired configuration
"""
commands = []
- return commands
+ requests = []
+
+ commands, requests = self.get_replaced_overridden_config(want, have, "overridden")
+
+ return commands, requests
def _state_merged(self, want, have, diff):
""" The command generator when state is merged
@@ -172,7 +199,7 @@ class Bgp_ext_communities(ConfigBase):
the current configuration
"""
commands = diff
- requests = self.get_modify_bgp_ext_community_requests(commands, have)
+ requests = self.get_modify_bgp_ext_community_requests(commands, have, "merged")
if commands and len(requests) > 0:
commands = update_states(commands, "merged")
else:
@@ -180,7 +207,7 @@ class Bgp_ext_communities(ConfigBase):
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
@@ -204,7 +231,7 @@ class Bgp_ext_communities(ConfigBase):
return commands, requests
- def get_delete_single_bgp_ext_community_member_requests(self, name, type, members):
+ def get_delete_single_bgp_ext_community_member_requests(self, name, members):
requests = []
for member in members:
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:"
@@ -216,13 +243,6 @@ class Bgp_ext_communities(ConfigBase):
requests.append(request)
return requests
- def get_delete_all_members_bgp_ext_community_requests(self, name):
- url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:"
- url = url + "bgp-defined-sets/ext-community-sets/ext-community-set={}/config/ext-community-member"
- method = "DELETE"
- request = {"path": url.format(name), "method": method}
- return request
-
def get_delete_single_bgp_ext_community_requests(self, name):
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set={}"
method = "DELETE"
@@ -245,71 +265,79 @@ class Bgp_ext_communities(ConfigBase):
else:
for cmd in commands:
name = cmd['name']
- type = cmd['type']
- members = cmd['members']
- if members:
- if members['regex'] or members['route_origin'] or members['route_target']:
- diff_members = []
- for item in have:
- if item['name'] == name and item['members']:
- if members['regex']:
+ cmd_type = cmd['type']
+ members = cmd.get('members', None)
+ diff_members = []
+
+ for item in have:
+ if item["name"] == name:
+ if 'permit' not in cmd or cmd['permit'] is None:
+ cmd['permit'] = item['permit']
+ if cmd == item:
+ requests.append(self.get_delete_single_bgp_ext_community_requests(name))
+ break
+
+ if members:
+ if cmd_type == "expanded":
+ if members.get('regex', []):
for member_want in members['regex']:
- if str(member_want) in item['members']['regex']:
- diff_members.append('REGEX:' + str(member_want))
- if members['route_origin']:
- for member_want in members['route_origin']:
- if str(member_want) in item['members']['route_origin']:
- diff_members.append("route-origin:" + str(member_want))
- if members['route_target']:
- for member_want in members['route_target']:
- if str(member_want) in item['members']['route_target']:
- diff_members.append("route-target:" + str(member_want))
- if diff_members:
- requests.extend(self.get_delete_single_bgp_ext_community_member_requests(name, type, diff_members))
- else:
- for item in have:
- if item['name'] == name:
- if item['members']:
- requests.append(self.get_delete_all_members_bgp_ext_community_requests(name))
- else:
- for item in have:
- if item['name'] == name:
+ if item.get("members", None) and item['members'].get('regex', []):
+ if str(member_want) in item['members']['regex']:
+ diff_members.append("REGEX:" + str(member_want))
+ else:
+ requests.append(self.get_delete_single_bgp_ext_community_requests(name))
+ else:
+ no_members = True
+ for attr in self.standard_communities_map:
+ if members.get(attr, []):
+ no_members = False
+ for member_want in members[attr]:
+ if item.get("members", None) and item['members'].get(attr, []):
+ if str(member_want) in item['members'][attr]:
+ diff_members.append(self.standard_communities_map[attr] + ":" + str(member_want))
+ if no_members:
+ requests.append(self.get_delete_single_bgp_ext_community_requests(name))
+ else:
requests.append(self.get_delete_single_bgp_ext_community_requests(name))
+ break
+
+ if diff_members:
+ requests.extend(self.get_delete_single_bgp_ext_community_member_requests(name, diff_members))
+
return requests
def get_new_add_request(self, conf):
url = "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
method = "PATCH"
- members = conf.get('members', None)
+ community_members = []
+ community_action = ""
+
if 'match' not in conf:
conf['match'] = "ANY"
- else:
- conf['match'] = conf['match'].upper()
- input_data = {'name': conf['name'], 'match': conf['match']}
-
- input_data['members_list'] = list()
- if members:
- regex = members.get('regex', None)
- if regex:
- input_data['members_list'].extend(["REGEX:" + cfg for cfg in regex])
- else:
- route_target = members.get('route_target', None)
- if route_target:
- input_data['members_list'].extend(["route-target:" + cfg for cfg in route_target])
- route_origin = members.get('route_origin', None)
- if route_origin:
- input_data['members_list'].extend(["route-origin:" + cfg for cfg in route_origin])
if conf['type'] == 'expanded':
- input_data['regex'] = "REGEX:"
- else:
- input_data['regex'] = ""
+ if 'members' in conf and conf['members'] and conf['members'].get('regex', []):
+ for i in conf['members']['regex']:
+ community_members.extend(["REGEX:" + str(i)])
+ elif conf['type'] == 'standard':
+ for attr in self.standard_communities_map:
+ if 'members' in conf and conf['members'] and conf['members'].get(attr, []):
+ for i in conf['members'][attr]:
+ community_members.extend([self.standard_communities_map[attr] + ":" + str(i)])
+
+ if not community_members:
+ self._module.fail_json(msg='Cannot create {0} community-list {1} without community attributes'.format(conf['type'], conf['name']))
+ return {}
+
if conf['permit']:
- input_data['permit'] = "PERMIT"
+ community_action = "PERMIT"
else:
- input_data['permit'] = "DENY"
+ community_action = "DENY"
+
+ input_data = {'name': conf['name'], 'members_list': community_members, 'match': conf['match'].upper(), 'permit': community_action}
+
payload_template = """
{
"openconfig-bgp-policy:ext-community-sets": {
@@ -335,23 +363,37 @@ class Bgp_ext_communities(ConfigBase):
request = {"path": url, "method": method, "data": ret_payload}
return request
- def get_modify_bgp_ext_community_requests(self, commands, have):
+ def get_modify_bgp_ext_community_requests(self, commands, have, cur_state):
requests = []
if not commands:
return requests
for conf in commands:
- for item in have:
- if item['name'] == conf['name']:
- if 'type' not in conf:
- conf['type'] = item['type']
- if 'permit' not in conf:
- conf['permit'] = item['permit']
- if 'match' not in conf:
- conf['match'] = item['match']
- if 'members' not in conf:
- conf['members'] = item['members']
- break
+ if cur_state == "merged":
+ for item in have:
+ if item['name'] == conf['name']:
+ if 'type' not in conf:
+ conf['type'] = item['type']
+ if 'permit' not in conf or conf['permit'] is None:
+ conf['permit'] = item['permit']
+ if 'match' not in conf:
+ conf['match'] = item['match']
+ if 'members' not in conf:
+ if conf['type'] == "expanded":
+ if item.get('members', {}) and item['members'].get('regex', []):
+ conf['members'] = {'regex': item['members']['regex']}
+ else:
+ conf['members'] = item['members']
+ else:
+ no_members = True
+ for attr in self.standard_communities_map:
+ if item.get('members', {}) and item['members'].get(attr, []):
+ no_members = False
+ conf['members'] = {attr: item['members'][attr]}
+ if no_members:
+ conf['members'] = item['members']
+ break
+
new_req = self.get_new_add_request(conf)
if new_req:
requests.append(new_req)
@@ -369,3 +411,84 @@ class Bgp_ext_communities(ConfigBase):
new_want.append(cfg)
return new_want
+
+ def get_replaced_overridden_config(self, want, have, cur_state):
+ commands, requests = [], []
+
+ commands_del, requests_del = [], []
+ commands_add, requests_add = [], []
+
+ for conf in want:
+ name = conf['name']
+ in_have = False
+ for have_conf in have:
+ if have_conf['name'] == name:
+ in_have = True
+ if have_conf['type'] != conf['type']:
+ # If both extended community lists have the same name but different types
+ commands_del.append(have_conf)
+ commands_add.append(conf)
+ else:
+ is_change = False
+
+ if have_conf['permit'] != conf['permit']:
+ is_change = True
+
+ if have_conf['match'] != conf['match']:
+ is_change = True
+
+ if conf["type"] == "expanded":
+ members = conf.get('members', {})
+ if members and conf['members'].get('regex', []):
+ if have_conf.get('members', {}) and have_conf['members'].get('regex', []):
+ if set(have_conf['members']['regex']).symmetric_difference(set(members['regex'])):
+ is_change = True
+ else:
+ # If there are no members in any expanded ext community list of want, then
+ # abort the playbook with an error message explaining why the specified command is not valid
+ self._module.fail_json(msg='Cannot create expanded extended community-list '
+ '{0} without community attributes'.format(conf['name']))
+ else:
+ members = conf.get('members', {})
+ no_members = True
+ for attr in self.standard_communities_map:
+ if members and conf['members'].get(attr, []):
+ no_members = False
+ if have_conf.get('members', {}) and have_conf['members'].get(attr, []):
+ if set(have_conf['members'][attr]).symmetric_difference(set(members[attr])):
+ is_change = True
+
+ if no_members:
+ # If there are no members in any standard ext community list of want, then
+ # abort the playbook with an error message explaining why the specified command is not valid
+ self._module.fail_json(msg='Cannot create standard extended community-list '
+ '{0} without community attributes'.format(conf['name']))
+
+ if is_change:
+ commands_add.append(conf)
+ commands_del.append(have_conf)
+ break
+ if not in_have:
+ commands_add.append(conf)
+
+ if cur_state == "overridden":
+ for have_conf in have:
+ in_want = next((conf for conf in want if conf['name'] == have_conf['name']), None)
+ if not in_want:
+ commands_del.append(have_conf)
+
+ if commands_del:
+ requests_del = self.get_delete_bgp_ext_communities(commands_del, have, False)
+
+ if len(requests_del) > 0:
+ commands.extend(update_states(commands_del, "deleted"))
+ requests.extend(requests_del)
+
+ if commands_add:
+ requests_add = self.get_modify_bgp_ext_community_requests(commands_add, have, cur_state)
+
+ if len(requests_add) > 0:
+ commands.extend(update_states(commands_add, cur_state))
+ requests.extend(requests_add)
+
+ return commands, requests
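The member comparison above relies on a symmetric difference between the configured and requested sets; a minimal sketch with invented values:

    have_regex = ['65000:.*', '65100:1.*']
    want_regex = ['65000:.*', '65200:2.*']
    is_change = bool(set(have_regex).symmetric_difference(set(want_regex)))
    print(is_change)   # True, so the set is deleted and re-created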
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py
index 31bbec78d..9c0920832 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors/bgp_neighbors.py
@@ -27,6 +27,7 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
get_diff,
+ remove_matching_defaults
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import (
validate_bgps,
@@ -37,6 +38,8 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import to_request
from ansible.module_utils.connection import ConnectionError
+from copy import deepcopy
+
PATCH = 'patch'
DELETE = 'delete'
@@ -47,6 +50,95 @@ TEST_KEYS = [
{'afis': {'afi': '', 'safi': ''}},
]
+default_entries = [
+ [
+ {'name': 'peer_group'},
+ {'name': 'timers'},
+ {'name': 'keepalive', 'default': 60}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'timers'},
+ {'name': 'holdtime', 'default': 180}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'timers'},
+ {'name': 'connect_retry', 'default': 30}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'advertisement_interval', 'default': 30}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'auth_pwd'},
+ {'name': 'encrypted', 'default': False}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'ebgp_multihop'},
+ {'name': 'enabled', 'default': False}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'passive', 'default': False}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'address_family'},
+ {'name': 'afis'},
+ {'name': 'ip_afi'},
+ {'name': 'send_default_route', 'default': False}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'address_family'},
+ {'name': 'afis'},
+ {'name': 'activate', 'default': False}
+ ],
+ [
+ {'name': 'peer_group'},
+ {'name': 'address_family'},
+ {'name': 'afis'},
+ {'name': 'prefix_limit'},
+ {'name': 'prevent_teardown', 'default': False}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'timers'},
+ {'name': 'keepalive', 'default': 60}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'timers'},
+ {'name': 'holdtime', 'default': 180}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'timers'},
+ {'name': 'connect_retry', 'default': 30}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'advertisement_interval', 'default': 30}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'auth_pwd'},
+ {'name': 'encrypted', 'default': False}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'ebgp_multihop'},
+ {'name': 'enabled', 'default': False}
+ ],
+ [
+ {'name': 'neighbors'},
+ {'name': 'passive', 'default': False}
+ ],
+]
+
class Bgp_neighbors(ConfigBase):
"""
@@ -180,7 +272,9 @@ class Bgp_neighbors(ConfigBase):
commands = have
new_have = have
else:
- new_have = self.remove_default_entries(have)
+ new_have = deepcopy(have)
+ for default_entry in default_entries:
+ remove_matching_defaults(new_have, default_entry)
d_diff = get_diff(want, new_have, TEST_KEYS, is_skeleton=True)
delete_diff = get_diff(want, d_diff, TEST_KEYS, is_skeleton=True)
commands = delete_diff
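Each entry in default_entries above names a path into the facts dict and the default value to strip before computing the delete diff. A simplified, hypothetical illustration of that idea (this is not the collection's remove_matching_defaults utility, just a sketch of the concept on a flattened dict):

    def prune_default(conf, path):
        # Walk the path to the parent dict, then drop the leaf if it holds the default value.
        node = conf
        for step in path[:-1]:
            node = node.get(step['name'])
            if not isinstance(node, dict):
                return
        leaf = path[-1]
        if node.get(leaf['name']) == leaf['default']:
            del node[leaf['name']]

    neighbor = {'timers': {'keepalive': 60, 'holdtime': 200}, 'advertisement_interval': 30}
    prune_default(neighbor, [{'name': 'timers'}, {'name': 'keepalive', 'default': 60}])
    prune_default(neighbor, [{'name': 'advertisement_interval', 'default': 30}])
    print(neighbor)   # {'timers': {'holdtime': 200}}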
@@ -192,141 +286,6 @@ class Bgp_neighbors(ConfigBase):
commands = []
return commands, requests
- def remove_default_entries(self, data):
- new_data = []
- if not data:
- return new_data
- for conf in data:
- new_conf = {}
- as_val = conf['bgp_as']
- vrf_name = conf['vrf_name']
- new_conf['bgp_as'] = as_val
- new_conf['vrf_name'] = vrf_name
- peergroup = conf.get('peer_group', None)
- new_peergroups = []
- if peergroup is not None:
- for pg in peergroup:
- new_pg = {}
- pg_val = pg.get('name', None)
- new_pg['name'] = pg_val
- remote_as = pg.get('remote_as', None)
- new_remote = {}
- if remote_as:
- peer_as = remote_as.get('peer_as', None)
- peer_type = remote_as.get('peer_type', None)
- if peer_as is not None:
- new_remote['peer_as'] = peer_as
- if peer_type is not None:
- new_remote['peer_type'] = peer_type
- if new_remote:
- new_pg['remote_as'] = new_remote
- timers = pg.get('timers', None)
- new_timers = {}
- if timers:
- keepalive = timers.get('keepalive', None)
- holdtime = timers.get('holdtime', None)
- connect_retry = timers.get('connect_retry', None)
- if keepalive is not None and keepalive != 60:
- new_timers['keepalive'] = keepalive
- if holdtime is not None and holdtime != 180:
- new_timers['holdtime'] = holdtime
- if connect_retry is not None and connect_retry != 30:
- new_timers['connect_retry'] = connect_retry
- if new_timers:
- new_pg['timers'] = new_timers
- advertisement_interval = pg.get('advertisement_interval', None)
- if advertisement_interval is not None and advertisement_interval != 30:
- new_pg['advertisement_interval'] = advertisement_interval
- bfd = pg.get('bfd', None)
- if bfd is not None:
- new_pg['bfd'] = bfd
- capability = pg.get('capability', None)
- if capability is not None:
- new_pg['capability'] = capability
- afi = []
- address_family = pg.get('address_family', None)
- if address_family:
- if address_family.get('afis', None):
- for each in address_family['afis']:
- if each:
- tmp = {}
- if each.get('afi', None) is not None:
- tmp['afi'] = each['afi']
- if each.get('safi', None) is not None:
- tmp['safi'] = each['safi']
- if each.get('activate', None) is not None and each['activate'] is not False:
- tmp['activate'] = each['activate']
- if each.get('allowas_in', None) is not None:
- tmp['allowas_in'] = each['allowas_in']
- if each.get('ip_afi', None) is not None:
- tmp['ip_afi'] = each['ip_afi']
- if each.get('prefix_limit', None) is not None:
- tmp['prefix_limit'] = each['prefix_limit']
- if each.get('prefix_list_in', None) is not None:
- tmp['prefix_list_in'] = each['prefix_list_in']
- if each.get('prefix_list_out', None) is not None:
- tmp['prefix_list_out'] = each['prefix_list_out']
- afi.append(tmp)
- if afi and len(afi) > 0:
- afis = {}
- afis.update({'afis': afi})
- new_pg['address_family'] = afis
- if new_pg:
- new_peergroups.append(new_pg)
- if new_peergroups:
- new_conf['peer_group'] = new_peergroups
- neighbors = conf.get('neighbors', None)
- new_neighbors = []
- if neighbors is not None:
- for neighbor in neighbors:
- new_neighbor = {}
- neighbor_val = neighbor.get('neighbor', None)
- new_neighbor['neighbor'] = neighbor_val
- remote_as = neighbor.get('remote_as', None)
- new_remote = {}
- if remote_as:
- peer_as = remote_as.get('peer_as', None)
- peer_type = remote_as.get('peer_type', None)
- if peer_as is not None:
- new_remote['peer_as'] = peer_as
- if peer_type is not None:
- new_remote['peer_type'] = peer_type
- if new_remote:
- new_neighbor['remote_as'] = new_remote
- peer_group = neighbor.get('peer_group', None)
- if peer_group:
- new_neighbor['peer_group'] = peer_group
- timers = neighbor.get('timers', None)
- new_timers = {}
- if timers:
- keepalive = timers.get('keepalive', None)
- holdtime = timers.get('holdtime', None)
- connect_retry = timers.get('connect_retry', None)
- if keepalive is not None and keepalive != 60:
- new_timers['keepalive'] = keepalive
- if holdtime is not None and holdtime != 180:
- new_timers['holdtime'] = holdtime
- if connect_retry is not None and connect_retry != 30:
- new_timers['connect_retry'] = connect_retry
- if new_timers:
- new_neighbor['timers'] = new_timers
- advertisement_interval = neighbor.get('advertisement_interval', None)
- if advertisement_interval is not None and advertisement_interval != 30:
- new_neighbor['advertisement_interval'] = advertisement_interval
- bfd = neighbor.get('bfd', None)
- if bfd is not None:
- new_neighbor['bfd'] = bfd
- capability = neighbor.get('capability', None)
- if capability is not None:
- new_neighbor['capability'] = capability
- if new_neighbor:
- new_neighbors.append(new_neighbor)
- if new_neighbors:
- new_conf['neighbors'] = new_neighbors
- if new_conf:
- new_data.append(new_conf)
- return new_data
-
def build_bgp_peer_groups_payload(self, cmd, have, bgp_as, vrf_name):
requests = []
bgp_peer_group_list = []
@@ -444,7 +403,7 @@ class Bgp_neighbors(ConfigBase):
if each.get('prefix_limit', None) is not None:
pfx_lmt_cfg = get_prefix_limit_payload(each['prefix_limit'])
if pfx_lmt_cfg and afi_safi == 'L2VPN_EVPN':
- samp.update({'l2vpn-evpn': {'prefix-limit': {'config': pfx_lmt_cfg}}})
+ self._module.fail_json('Prefix limit configuration not supported for l2vpn evpn')
else:
if each.get('ip_afi', None) is not None:
afi_safi_cfg = get_ip_afi_cfg_payload(each['ip_afi'])
@@ -696,13 +655,35 @@ class Bgp_neighbors(ConfigBase):
advertisement_interval = each.get('advertisement_interval', None)
bfd = each.get('bfd', None)
capability = each.get('capability', None)
+ auth_pwd = each.get('auth_pwd', None)
+ pg_description = each.get('pg_description', None)
+ disable_connected_check = each.get('disable_connected_check', None)
+ dont_negotiate_capability = each.get('dont_negotiate_capability', None)
+ ebgp_multihop = each.get('ebgp_multihop', None)
+ enforce_first_as = each.get('enforce_first_as', None)
+ enforce_multihop = each.get('enforce_multihop', None)
+ local_address = each.get('local_address', None)
+ local_as = each.get('local_as', None)
+ override_capability = each.get('override_capability', None)
+ passive = each.get('passive', None)
+ shutdown_msg = each.get('shutdown_msg', None)
+ solo = each.get('solo', None)
+ strict_capability_match = each.get('strict_capability_match', None)
+ ttl_security = each.get('ttl_security', None)
address_family = each.get('address_family', None)
- if name and not remote_as and not timers and not advertisement_interval and not bfd and not capability and not address_family:
+ if (name and not remote_as and not timers and not advertisement_interval and not bfd and not capability and not auth_pwd and not
+ pg_description and disable_connected_check is None and dont_negotiate_capability is None and not ebgp_multihop and
+ enforce_first_as is None and enforce_multihop is None and not local_address and not local_as and override_capability
+ is None and passive is None and not shutdown_msg and solo is None and strict_capability_match is None and not ttl_security and
+ not address_family):
want_pg_match = None
if want_peer_group:
want_pg_match = next((cfg for cfg in want_peer_group if cfg['name'] == name), None)
if want_pg_match:
- keys = ['remote_as', 'timers', 'advertisement_interval', 'bfd', 'capability', 'address_family']
+ keys = ['remote_as', 'timers', 'advertisement_interval', 'bfd', 'capability', 'auth_pwd', 'pg_description',
+ 'disable_connected_check', 'dont_negotiate_capability', 'ebgp_multihop', 'enforce_first_as', 'enforce_multihop',
+ 'local_address', 'local_as', 'override_capability', 'passive', 'shutdown_msg', 'solo', 'strict_capability_match',
+ 'ttl_security', 'address_family']
if not any(want_pg_match.get(key, None) for key in keys):
requests.append(self.get_delete_vrf_specific_peergroup_request(vrf_name, name))
else:
@@ -808,7 +789,7 @@ class Bgp_neighbors(ConfigBase):
delete_path = delete_static_path + '/ebgp-multihop/config/enabled'
requests.append({'path': delete_path, 'method': DELETE})
if cmd['ebgp_multihop'].get('multihop_ttl', None) is not None:
- delete_path = delete_static_path + '/ebgp-multihop/config/multihop_ttl'
+ delete_path = delete_static_path + '/ebgp-multihop/config/multihop-ttl'
requests.append({'path': delete_path, 'method': DELETE})
if cmd.get('address_family', None) is not None:
if cmd['address_family'].get('afis', None) is None:
@@ -857,9 +838,6 @@ class Bgp_neighbors(ConfigBase):
requests.extend(self.delete_ip_afi_requests(ip_afi, afi_safi_name, 'ipv6-unicast', delete_static_path))
if prefix_limit:
requests.extend(self.delete_prefix_limit_requests(prefix_limit, afi_safi_name, 'ipv6-unicast', delete_static_path))
- elif afi_safi == 'L2VPN_EVPN':
- if prefix_limit:
- requests.extend(self.delete_prefix_limit_requests(prefix_limit, afi_safi_name, 'l2vpn-evpn', delete_static_path))
return requests
@@ -909,12 +887,36 @@ class Bgp_neighbors(ConfigBase):
advertisement_interval = each.get('advertisement_interval', None)
bfd = each.get('bfd', None)
capability = each.get('capability', None)
- if neighbor and not remote_as and not peer_group and not timers and not advertisement_interval and not bfd and not capability:
+ auth_pwd = each.get('auth_pwd', None)
+ nbr_description = each.get('nbr_description', None)
+ disable_connected_check = each.get('disable_connected_check', None)
+ dont_negotiate_capability = each.get('dont_negotiate_capability', None)
+ ebgp_multihop = each.get('ebgp_multihop', None)
+ enforce_first_as = each.get('enforce_first_as', None)
+ enforce_multihop = each.get('enforce_multihop', None)
+ local_address = each.get('local_address', None)
+ local_as = each.get('local_as', None)
+ override_capability = each.get('override_capability', None)
+ passive = each.get('passive', None)
+ port = each.get('port', None)
+ shutdown_msg = each.get('shutdown_msg', None)
+ solo = each.get('solo', None)
+ strict_capability_match = each.get('strict_capability_match', None)
+ ttl_security = each.get('ttl_security', None)
+ v6only = each.get('v6only', None)
+ if (neighbor and not remote_as and not peer_group and not timers and not advertisement_interval and not bfd and not capability and not
+ auth_pwd and not nbr_description and disable_connected_check is None and dont_negotiate_capability is None and not
+ ebgp_multihop and enforce_first_as is None and enforce_multihop is None and not local_address and not local_as and
+ override_capability is None and passive is None and not port and not shutdown_msg and solo is None and strict_capability_match
+ is None and not ttl_security and v6only is None):
want_nei_match = None
if want_neighbors:
want_nei_match = next(cfg for cfg in want_neighbors if cfg['neighbor'] == neighbor)
if want_nei_match:
- keys = ['remote_as', 'peer_group', 'timers', 'advertisement_interval', 'bfd', 'capability']
+ keys = ['remote_as', 'peer_group', 'timers', 'advertisement_interval', 'bfd', 'capability', 'auth_pwd', 'nbr_description',
+ 'disable_connected_check', 'dont_negotiate_capability', 'ebgp_multihop', 'enforce_first_as', 'enforce_multihop',
+ 'local_address', 'local_as', 'override_capability', 'passive', 'port', 'shutdown_msg', 'solo',
+ 'strict_capability_match', 'ttl_security', 'v6only']
if not any(want_nei_match.get(key, None) for key in keys):
requests.append(self.delete_neighbor_whole_request(vrf_name, neighbor))
else:
@@ -1034,7 +1036,7 @@ class Bgp_neighbors(ConfigBase):
delete_path = delete_static_path + '/ebgp-multihop/config/enabled'
requests.append({'path': delete_path, 'method': DELETE})
if cmd['ebgp_multihop'].get('multihop_ttl', None) is not None:
- delete_path = delete_static_path + '/ebgp-multihop/config/multihop_ttl'
+ delete_path = delete_static_path + '/ebgp-multihop/config/multihop-ttl'
requests.append({'path': delete_path, 'method': DELETE})
return requests
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py
index 15f46f966..196a6eea9 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/bgp_neighbors_af/bgp_neighbors_af.py
@@ -13,11 +13,6 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-try:
- from urllib import quote
-except ImportError:
- from urllib.parse import quote
-
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
@@ -288,7 +283,7 @@ class Bgp_neighbors_af(ConfigBase):
if conf_prefix_limit:
pfx_lmt_cfg = get_prefix_limit_payload(conf_prefix_limit)
if pfx_lmt_cfg and afi_safi_val == 'L2VPN_EVPN':
- afi_safi['l2vpn-evpn'] = {'prefix-limit': {'config': pfx_lmt_cfg}}
+ self._module.fail_json('Prefix limit configuration not supported for l2vpn evpn')
else:
if conf_ip_afi:
ip_afi_cfg = get_ip_afi_cfg_payload(conf_ip_afi)
diff --git a/ansible_collections/dellemc/os9/plugins/modules/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/modules/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/copp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/copp.py
new file mode 100644
index 000000000..cec802e67
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/copp/copp.py
@@ -0,0 +1,393 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_copp class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ get_replaced_config,
+ send_requests,
+ remove_empties,
+ update_states,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+
+COPP_GROUPS_PATH = '/data/openconfig-copp-ext:copp/copp-groups'
+PATCH = 'patch'
+DELETE = 'delete'
+TEST_KEYS = [
+ {'copp_groups': {'copp_name': ''}}
+]
+reserved_copp_names = [
+ 'copp-system-lacp',
+ 'copp-system-udld',
+ 'copp-system-stp',
+ 'copp-system-bfd',
+ 'copp-system-ptp',
+ 'copp-system-lldp',
+ 'copp-system-vrrp',
+ 'copp-system-iccp',
+ 'copp-system-ospf',
+ 'copp-system-bgp',
+ 'copp-system-pim',
+ 'copp-system-igmp',
+ 'copp-system-suppress',
+ 'copp-system-arp',
+ 'copp-system-dhcp',
+ 'copp-system-icmp',
+ 'copp-system-ip2me',
+ 'copp-system-subnet',
+ 'copp-system-nat',
+ 'copp-system-mtu',
+ 'copp-system-sflow',
+ 'copp-system-default',
+ 'copp-system-ttl',
+ 'default'
+]
+
+
+class Copp(ConfigBase):
+ """
+ The sonic_copp class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'copp',
+ ]
+
+ def __init__(self, module):
+ super(Copp, self).__init__(module)
+
+ def get_copp_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ copp_facts = facts['ansible_network_resources'].get('copp')
+ if not copp_facts:
+ return []
+ return copp_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+ commands = []
+
+ existing_copp_facts = self.get_copp_facts()
+ commands, requests = self.set_config(existing_copp_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_copp_facts = self.get_copp_facts()
+
+ result['before'] = existing_copp_facts
+ if result['changed']:
+ result['after'] = changed_copp_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_copp_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_copp_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ state = self._module.params['state']
+
+ diff = get_diff(want, have, TEST_KEYS)
+
+ if state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(diff)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, diff)
+ return commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ if replaced_config:
+ is_delete_all = True
+ requests = self.get_delete_copp_requests(replaced_config, None, is_delete_all)
+ send_requests(self._module, requests)
+
+ commands = want
+ else:
+ commands = diff
+
+ requests = []
+
+ if commands:
+ requests = self.get_modify_copp_groups_request(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ have = self.filter_copp_groups(have)
+ if have and have != want:
+ is_delete_all = True
+ requests = self.get_delete_copp_requests(have, None, is_delete_all)
+ send_requests(self._module, requests)
+ have = []
+
+ commands = []
+ requests = []
+
+ if not have and want:
+ commands = want
+ requests = self.get_modify_copp_groups_request(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "overridden")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_merged(self, diff):
+ """ The command generator when state is merged
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = self.get_modify_copp_groups_request(commands)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ is_delete_all = False
+ # if want is None, then delete all COPP groups
+ want = remove_empties(want)
+ if not want:
+ commands = have
+ is_delete_all = True
+ else:
+ commands = want
+ commands = self.filter_copp_groups(commands)
+ requests = self.get_delete_copp_requests(commands, have, is_delete_all)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def get_modify_copp_groups_request(self, commands):
+ request = None
+
+ copp_groups = commands.get('copp_groups', None)
+ if copp_groups:
+ group_list = []
+ for group in copp_groups:
+ config_dict = {}
+ group_dict = {}
+ copp_name = group.get('copp_name', None)
+ trap_priority = group.get('trap_priority', None)
+ trap_action = group.get('trap_action', None)
+ queue = group.get('queue', None)
+ cir = group.get('cir', None)
+ cbs = group.get('cbs', None)
+
+ if copp_name:
+ config_dict['name'] = copp_name
+ group_dict['name'] = copp_name
+ if trap_priority:
+ config_dict['trap-priority'] = trap_priority
+ if trap_action:
+ config_dict['trap-action'] = trap_action
+ if queue:
+ config_dict['queue'] = queue
+ if cir:
+ config_dict['cir'] = cir
+ if cbs:
+ config_dict['cbs'] = cbs
+ if config_dict:
+ group_dict['config'] = config_dict
+ group_list.append(group_dict)
+
+ if group_list:
+ copp_groups_dict = {'copp-group': group_list}
+ payload = {'openconfig-copp-ext:copp-groups': copp_groups_dict}
+ request = {'path': COPP_GROUPS_PATH, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_delete_copp_requests(self, commands, have, is_delete_all):
+ requests = []
+
+ if is_delete_all:
+ copp_groups = commands.get('copp_groups', None)
+ if copp_groups:
+ for group in copp_groups:
+ copp_name = group.get('copp_name', None)
+ requests.append(self.get_delete_single_copp_group_request(copp_name))
+ else:
+ copp_groups = commands.get('copp_groups', None)
+ if copp_groups:
+ for group in copp_groups:
+ copp_name = group.get('copp_name', None)
+ trap_priority = group.get('trap_priority', None)
+ trap_action = group.get('trap_action', None)
+ queue = group.get('queue', None)
+ cir = group.get('cir', None)
+ cbs = group.get('cbs', None)
+
+ if have:
+ cfg_copp_groups = have.get('copp_groups', None)
+ if cfg_copp_groups:
+ for cfg_group in cfg_copp_groups:
+ cfg_copp_name = cfg_group.get('copp_name', None)
+ cfg_trap_priority = cfg_group.get('trap_priority', None)
+ cfg_trap_action = cfg_group.get('trap_action', None)
+ cfg_queue = cfg_group.get('queue', None)
+ cfg_cir = cfg_group.get('cir', None)
+ cfg_cbs = cfg_group.get('cbs', None)
+
+ if copp_name == cfg_copp_name:
+ if trap_priority and trap_priority == cfg_trap_priority:
+ requests.append(self.get_delete_copp_groups_attr_request(copp_name, 'trap-priority'))
+ if trap_action and trap_action == cfg_trap_action:
+ err_msg = "Deletion of trap-action attribute is not supported."
+ self._module.fail_json(msg=err_msg, code=405)
+ requests.append(self.get_delete_copp_groups_attr_request(copp_name, 'trap-action'))
+ if queue and queue == cfg_queue:
+ requests.append(self.get_delete_copp_groups_attr_request(copp_name, 'queue'))
+ if cir and cir == cfg_cir:
+ requests.append(self.get_delete_copp_groups_attr_request(copp_name, 'cir'))
+ if cbs and cbs == cfg_cbs:
+ requests.append(self.get_delete_copp_groups_attr_request(copp_name, 'cbs'))
+ if not trap_priority and not trap_action and not queue and not cir and not cbs:
+ requests.append(self.get_delete_single_copp_group_request(copp_name))
+
+ return requests
+
+ def get_delete_copp_groups_attr_request(self, copp_name, attr):
+ url = '%s/copp-group=%s/config/%s' % (COPP_GROUPS_PATH, copp_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_single_copp_group_request(self, copp_name):
+ url = '%s/copp-group=%s' % (COPP_GROUPS_PATH, copp_name)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def filter_copp_groups(self, commands):
+ cfg_dict = {}
+
+ if commands:
+ copp_groups = commands.get('copp_groups', None)
+ if copp_groups:
+ copp_groups_list = []
+ for group in copp_groups:
+ copp_name = group.get('copp_name', None)
+ if copp_name not in reserved_copp_names:
+ copp_groups_list.append(group)
+ if copp_groups_list:
+ cfg_dict['copp_groups'] = copp_groups_list
+
+ return cfg_dict
+
+ def get_copp_groups_key(self, copp_groups_key):
+ return copp_groups_key.get('copp_name')
+
+ def sort_lists_in_config(self, config):
+ if 'copp_groups' in config and config['copp_groups'] is not None:
+ config['copp_groups'].sort(key=self.get_copp_groups_key)
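As a quick illustration of the reserved-group filtering performed by filter_copp_groups above (group names below are examples only), user-defined groups pass through while reserved system groups are dropped:

    reserved_copp_names = ['copp-system-arp', 'copp-system-bgp', 'default']
    copp_groups = [{'copp_name': 'copp-user-1'}, {'copp_name': 'copp-system-bgp'}]
    filtered = [g for g in copp_groups if g['copp_name'] not in reserved_copp_names]
    print([g['copp_name'] for g in filtered])   # ['copp-user-1']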
diff --git a/ansible_collections/dellemc/os9/plugins/terminal/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/plugins/terminal/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/dhcp_relay.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/dhcp_relay.py
new file mode 100644
index 000000000..64d50fb5b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_relay/dhcp_relay.py
@@ -0,0 +1,695 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_dhcp_relay class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states,
+ normalize_interface_name,
+ get_normalize_interface_name,
+ remove_empties_from_list
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+PATCH = 'patch'
+DELETE = 'delete'
+
+DEFAULT_CIRCUIT_ID = '%p'
+DEFAULT_MAX_HOP_COUNT = 10
+DEFAULT_POLICY_ACTION = 'discard'
+
+BOOL_TO_SELECT_VALUE = {
+ True: 'ENABLE',
+ False: 'DISABLE'
+}
+
+
+class Dhcp_relay(ConfigBase):
+ """
+ The sonic_dhcp_relay class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'dhcp_relay',
+ ]
+
+ dhcp_relay_intf_path = 'data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface={intf_name}'
+ dhcp_relay_intf_config_path = {
+ 'circuit_id': dhcp_relay_intf_path + '/agent-information-option/config/circuit-id',
+ 'link_select': dhcp_relay_intf_path + '/agent-information-option/config/openconfig-relay-agent-ext:link-select',
+ 'max_hop_count': dhcp_relay_intf_path + '/config/openconfig-relay-agent-ext:max-hop-count',
+ 'policy_action': dhcp_relay_intf_path + '/config/openconfig-relay-agent-ext:policy-action',
+ 'server_address': dhcp_relay_intf_path + '/config/helper-address={server_address}',
+ 'server_addresses_all': dhcp_relay_intf_path + '/config/helper-address',
+ 'source_interface': dhcp_relay_intf_path + '/config/openconfig-relay-agent-ext:src-intf',
+ 'vrf_name': dhcp_relay_intf_path + '/config/openconfig-relay-agent-ext:vrf',
+ 'vrf_select': dhcp_relay_intf_path + '/agent-information-option/config/openconfig-relay-agent-ext:vrf-select'
+ }
+
+ dhcpv6_relay_intf_path = 'data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface={intf_name}'
+ dhcpv6_relay_intf_config_path = {
+ 'max_hop_count': dhcpv6_relay_intf_path + '/config/openconfig-relay-agent-ext:max-hop-count',
+ 'server_address': dhcpv6_relay_intf_path + '/config/helper-address={server_address}',
+ 'server_addresses_all': dhcpv6_relay_intf_path + '/config/helper-address',
+ 'source_interface': dhcpv6_relay_intf_path + '/config/openconfig-relay-agent-ext:src-intf',
+ 'vrf_name': dhcpv6_relay_intf_path + '/config/openconfig-relay-agent-ext:vrf',
+ 'vrf_select': dhcpv6_relay_intf_path + '/options/config/openconfig-relay-agent-ext:vrf-select'
+ }
+
+ def __init__(self, module):
+ super(Dhcp_relay, self).__init__(module)
+
+ def get_dhcp_relay_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ dhcp_relay_facts = facts['ansible_network_resources'].get('dhcp_relay')
+ if not dhcp_relay_facts:
+ return []
+ return dhcp_relay_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+
+ existing_dhcp_relay_facts = self.get_dhcp_relay_facts()
+ commands, requests = self.set_config(existing_dhcp_relay_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+
+ changed_dhcp_relay_facts = self.get_dhcp_relay_facts()
+
+ result['before'] = existing_dhcp_relay_facts
+ if result['changed']:
+ result['after'] = changed_dhcp_relay_facts
+
+ result['commands'] = commands
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_dhcp_relay_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ want = self._module.params['config']
+ if want:
+ # In state deleted, specific empty parameters are supported
+ if state != 'deleted':
+ want = remove_empties_from_list(want)
+
+ normalize_interface_name(want, self._module)
+ for config in want:
+ if config.get('ipv4') and config['ipv4'].get('source_interface'):
+ config['ipv4']['source_interface'] = get_normalize_interface_name(config['ipv4']['source_interface'], self._module)
+ if config.get('ipv6') and config['ipv6'].get('source_interface'):
+ config['ipv6']['source_interface'] = get_normalize_interface_name(config['ipv6']['source_interface'], self._module)
+ else:
+ want = []
+
+ have = existing_dhcp_relay_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ if state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ return commands, requests
+
+ def _state_merged(self, want, have):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = get_diff(want, have)
+ requests = self.get_modify_dhcp_dhcpv6_relay_requests(commands)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, 'merged')
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+ if not want:
+ commands = have
+ requests.extend(self.get_delete_dhcp_dhcpv6_relay_completely_requests(commands))
+ else:
+ commands = want
+ requests.extend(self.get_delete_dhcp_dhcpv6_relay_requests(commands, have))
+
+ if len(requests) == 0:
+ commands = []
+
+ if commands:
+ commands = update_states(commands, "deleted")
+
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'replaced')
+ if del_commands:
+ new_have = get_diff(have, del_commands)
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+ else:
+ new_have = have
+
+ add_commands = get_diff(want, new_have)
+ if add_commands:
+ commands.extend(update_states(add_commands, 'replaced'))
+ requests.extend(self.get_modify_dhcp_dhcpv6_relay_requests(add_commands))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'overridden')
+ if del_commands:
+ new_have = get_diff(have, del_commands)
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+ else:
+ new_have = have
+
+ add_commands = get_diff(want, new_have)
+ if add_commands:
+ commands.extend(update_states(add_commands, 'overridden'))
+ requests.extend(self.get_modify_dhcp_dhcpv6_relay_requests(add_commands))
+
+ return commands, requests
+
+ def get_modify_dhcp_dhcpv6_relay_requests(self, commands):
+ """Get requests to modify DHCP and DHCPv6 relay configurations
+ for all interfaces specified by the commands
+ """
+ requests = []
+
+ for command in commands:
+ if command.get('ipv4'):
+ requests.extend(self.get_modify_specific_dhcp_relay_param_requests(command))
+ if command.get('ipv6'):
+ requests.extend(self.get_modify_specific_dhcpv6_relay_param_requests(command))
+
+ return requests
+
+ def get_modify_specific_dhcp_relay_param_requests(self, command):
+ """Get requests to modify specific DHCP relay configurations
+ based on the command specified for the interface
+ """
+ requests = []
+
+ name = command['name']
+ ipv4 = command.get('ipv4')
+ if not ipv4:
+ return requests
+
+ # Specifying appropriate order for merge to succeed
+ server_addresses = self.get_server_addresses(ipv4.get('server_addresses'))
+ if server_addresses:
+ payload = {'openconfig-relay-agent:helper-address': list(server_addresses)}
+ url = self.dhcp_relay_intf_config_path['server_addresses_all'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('vrf_name'):
+ payload = {'openconfig-relay-agent-ext:vrf': ipv4['vrf_name']}
+ url = self.dhcp_relay_intf_config_path['vrf_name'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('source_interface'):
+ payload = {'openconfig-relay-agent-ext:src-intf': ipv4['source_interface']}
+ url = self.dhcp_relay_intf_config_path['source_interface'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('link_select') is not None:
+ link_select = BOOL_TO_SELECT_VALUE[ipv4['link_select']]
+ payload = {'openconfig-relay-agent-ext:link-select': link_select}
+ url = self.dhcp_relay_intf_config_path['link_select'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('max_hop_count'):
+ payload = {'openconfig-relay-agent-ext:max-hop-count': ipv4['max_hop_count']}
+ url = self.dhcp_relay_intf_config_path['max_hop_count'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('vrf_select') is not None:
+ vrf_select = BOOL_TO_SELECT_VALUE[ipv4['vrf_select']]
+ payload = {'openconfig-relay-agent-ext:vrf-select': vrf_select}
+ url = self.dhcp_relay_intf_config_path['vrf_select'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('policy_action'):
+ payload = {'openconfig-relay-agent-ext:policy-action': ipv4['policy_action'].upper()}
+ url = self.dhcp_relay_intf_config_path['policy_action'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv4.get('circuit_id'):
+ payload = {'openconfig-relay-agent:circuit-id': ipv4['circuit_id']}
+ url = self.dhcp_relay_intf_config_path['circuit_id'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ return requests
+
+ def get_modify_specific_dhcpv6_relay_param_requests(self, command):
+ """Get requests to modify specific DHCPv6 relay configurations
+ based on the command specified for the interface
+ """
+ requests = []
+
+ name = command['name']
+ ipv6 = command.get('ipv6')
+ if not ipv6:
+ return requests
+
+ # Specifying appropriate order for merge to succeed
+ server_addresses = self.get_server_addresses(ipv6.get('server_addresses'))
+ if server_addresses:
+ payload = {'openconfig-relay-agent:helper-address': list(server_addresses)}
+ url = self.dhcpv6_relay_intf_config_path['server_addresses_all'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv6.get('vrf_name'):
+ payload = {'openconfig-relay-agent-ext:vrf': ipv6['vrf_name']}
+ url = self.dhcpv6_relay_intf_config_path['vrf_name'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv6.get('source_interface'):
+ payload = {'openconfig-relay-agent-ext:src-intf': ipv6['source_interface']}
+ url = self.dhcpv6_relay_intf_config_path['source_interface'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv6.get('max_hop_count'):
+ payload = {'openconfig-relay-agent-ext:max-hop-count': ipv6['max_hop_count']}
+ url = self.dhcpv6_relay_intf_config_path['max_hop_count'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if ipv6.get('vrf_select') is not None:
+ vrf_select = BOOL_TO_SELECT_VALUE[ipv6['vrf_select']]
+ payload = {'openconfig-relay-agent-ext:vrf-select': vrf_select}
+ url = self.dhcpv6_relay_intf_config_path['vrf_select'].format(intf_name=name)
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ return requests
+
+ def get_delete_dhcp_dhcpv6_relay_completely_requests(self, have):
+ """Get requests to delete all existing DHCP and DHCPv6 relay
+ configurations in the chassis
+ """
+ requests = []
+ for cfg in have:
+ if cfg.get('ipv4'):
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(cfg['name']))
+ if cfg.get('ipv6'):
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(cfg['name']))
+
+ return requests
+
+ def get_delete_dhcp_dhcpv6_relay_requests(self, commands, have):
+ """Get requests to delete DHCP and DHCPv6 relay configurations
+ based on the commands specified
+ """
+ requests = []
+
+ for command in commands:
+ intf_name = command['name']
+ have_obj = next((cfg for cfg in have if cfg['name'] == intf_name), None)
+ if not have_obj:
+ continue
+
+ have_ipv4 = have_obj.get('ipv4')
+ have_ipv6 = have_obj.get('ipv6')
+
+ ipv4 = command.get('ipv4')
+ ipv6 = command.get('ipv6')
+ if not ipv4 and not ipv6:
+ if have_ipv4:
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(intf_name))
+ if have_ipv6:
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(intf_name))
+ else:
+ if ipv4 and have_ipv4:
+ requests.extend(self.get_delete_specific_dhcp_relay_param_requests(command, have_obj))
+ if ipv6 and have_ipv6:
+ requests.extend(self.get_delete_specific_dhcpv6_relay_param_requests(command, have_obj))
+
+ return requests
+
+ def get_delete_specific_dhcp_relay_param_requests(self, command, config, is_state_deleted=True):
+ """Get requests to delete specific DHCP relay configurations
+ based on the command specified for the interface
+ """
+ requests = []
+
+ name = command['name']
+ ipv4 = command.get('ipv4')
+ have_ipv4 = config.get('ipv4')
+ if not ipv4 or not have_ipv4:
+ return requests
+
+ server_addresses = self.get_server_addresses(ipv4.get('server_addresses'))
+ have_server_addresses = self.get_server_addresses(have_ipv4.get('server_addresses'))
+
+ # Delete all DHCP relay config for an interface, if only
+ # a single server address with no value is specified.
+ #
+ # This "special" YAML sequence is supported to provide
+ # "delete all AF parameters" functionality despite the Ansible
+ # infrastructure limitations that prevent use of a simpler
+ # syntax for deleting an entire AF parameter dictionary.
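+ # For example (illustrative; the interface name is arbitrary), a 'deleted'
+ # state task entry of:
+ #   - name: Ethernet0
+ #     ipv4:
+ #       server_addresses:
+ #         - address:
+ # (a single server address with no value) requests removal of all IPv4
+ # DHCP relay configuration on that interface.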
+ if (ipv4.get('server_addresses') and len(ipv4.get('server_addresses'))
+ and not server_addresses):
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(name))
+ return requests
+
+ del_server_addresses = have_server_addresses.intersection(server_addresses)
+ if del_server_addresses:
+ # Deleting all DHCP server addresses configured on an
+ # interface automatically removes all DHCP relay config in
+ # that interface. Therefore, separate requests to delete
+ # other DHCP relay configs are not required.
+ if is_state_deleted and len(del_server_addresses) == len(have_server_addresses):
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(name))
+ return requests
+
+ for addr in del_server_addresses:
+ url = self.dhcp_relay_intf_config_path['server_address'].format(intf_name=name, server_address=addr)
+ requests.append({'path': url, 'method': DELETE})
+
+ # Specifying appropriate order for deletion to succeed
+ if ipv4.get('link_select') is not None and have_ipv4.get('link_select'):
+ url = self.dhcp_relay_intf_config_path['link_select'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if (ipv4.get('source_interface') and have_ipv4.get('source_interface')
+ and ipv4['source_interface'] == have_ipv4['source_interface']):
+ url = self.dhcp_relay_intf_config_path['source_interface'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if (ipv4.get('max_hop_count') and have_ipv4.get('max_hop_count')
+ and ipv4['max_hop_count'] == have_ipv4['max_hop_count']
+ and have_ipv4['max_hop_count'] != DEFAULT_MAX_HOP_COUNT):
+ url = self.dhcp_relay_intf_config_path['max_hop_count'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if ipv4.get('vrf_select') is not None and have_ipv4.get('vrf_select'):
+ url = self.dhcp_relay_intf_config_path['vrf_select'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if (ipv4.get('policy_action') and have_ipv4.get('policy_action')
+ and ipv4['policy_action'] == have_ipv4['policy_action']
+ and have_ipv4['policy_action'] != DEFAULT_POLICY_ACTION):
+ url = self.dhcp_relay_intf_config_path['policy_action'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if (ipv4.get('circuit_id') and have_ipv4.get('circuit_id')
+ and ipv4['circuit_id'] == have_ipv4['circuit_id']
+ and have_ipv4['circuit_id'] != DEFAULT_CIRCUIT_ID):
+ url = self.dhcp_relay_intf_config_path['circuit_id'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ return requests
+
+ def get_delete_specific_dhcpv6_relay_param_requests(self, command, have, is_state_deleted=True):
+ """Get requests to delete specific DHCPv6 relay configurations
+ based on the command specified for the interface
+ """
+ requests = []
+
+ name = command['name']
+ ipv6 = command.get('ipv6')
+ have_ipv6 = have.get('ipv6')
+ if not ipv6 or not have_ipv6:
+ return requests
+
+ server_addresses = self.get_server_addresses(ipv6.get('server_addresses'))
+ have_server_addresses = self.get_server_addresses(have_ipv6.get('server_addresses'))
+
+ # Delete all DHCPv6 relay config for an interface, if only
+ # a single server address with no value is specified.
+ #
+ # This "special" YAML sequence is supported to provide
+ # "delete all AF parameters" functionality despite the Ansible
+ # infrastructure limitations that prevent use of a simpler
+ # syntax for deleting an entire AF parameter dictionary.
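+ # (The same single empty 'address' entry form shown above for IPv4 applies
+ # here for deleting all IPv6 relay configuration on an interface.)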
+ if (ipv6.get('server_addresses') and len(ipv6.get('server_addresses'))
+ and not server_addresses):
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(name))
+ return requests
+
+ del_server_addresses = have_server_addresses.intersection(server_addresses)
+ if del_server_addresses:
+ # Deleting all DHCPv6 server addresses configured on an
+ # interface automatically removes all DHCPv6 relay config
+ # in that interface. Therefore, separate requests to delete
+ # other DHCPv6 relay configs are not required.
+ if is_state_deleted and len(del_server_addresses) == len(have_server_addresses):
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(name))
+ return requests
+
+ for addr in del_server_addresses:
+ url = self.dhcpv6_relay_intf_config_path['server_address'].format(intf_name=name, server_address=addr)
+ requests.append({'path': url, 'method': DELETE})
+
+ # Specifying appropriate order for deletion to succeed
+ if (ipv6.get('source_interface') and have_ipv6.get('source_interface')
+ and ipv6['source_interface'] == have_ipv6['source_interface']):
+ url = self.dhcpv6_relay_intf_config_path['source_interface'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if (ipv6.get('max_hop_count') and have_ipv6.get('max_hop_count')
+ and ipv6['max_hop_count'] == have_ipv6['max_hop_count']
+ and have_ipv6['max_hop_count'] != DEFAULT_MAX_HOP_COUNT):
+ url = self.dhcpv6_relay_intf_config_path['max_hop_count'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ if ipv6.get('vrf_select') is not None and have_ipv6.get('vrf_select'):
+ url = self.dhcpv6_relay_intf_config_path['vrf_select'].format(intf_name=name)
+ requests.append({'path': url, 'method': DELETE})
+
+ return requests
+
+ def get_delete_all_dhcp_relay_intf_request(self, intf_name):
+ """Get request to delete all DHCP relay configurations in the
+ specified interface
+ """
+ return {'path': self.dhcp_relay_intf_config_path['server_addresses_all'].format(intf_name=intf_name), 'method': DELETE}
+
+ def get_delete_all_dhcpv6_relay_intf_request(self, intf_name):
+ """Get request to delete all DHCPv6 relay configurations in the
+ specified interface
+ """
+ return {'path': self.dhcpv6_relay_intf_config_path['server_addresses_all'].format(intf_name=intf_name), 'method': DELETE}
+
+ def get_delete_commands_requests_for_replaced_overridden(self, want, have, state):
+ """Returns the commands and requests necessary to remove applicable
+ current configurations when state is replaced or overridden
+ """
+ default_value = {
+ 'circuit_id': DEFAULT_CIRCUIT_ID,
+ 'max_hop_count': DEFAULT_MAX_HOP_COUNT,
+ 'policy_action': DEFAULT_POLICY_ACTION
+ }
+ commands = []
+ requests = []
+ if not have:
+ return commands, requests
+
+ for conf in have:
+ intf_name = conf['name']
+ ipv4_conf = conf.get('ipv4')
+ ipv6_conf = conf.get('ipv6')
+
+ match_obj = next((cmd for cmd in want if cmd['name'] == intf_name), None)
+ if not match_obj:
+ # Delete all DHCP and DHCPv6 relay config for interfaces,
+ # that are not specified in overridden.
+ if state == 'overridden':
+ commands.append(conf)
+ if ipv4_conf:
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(intf_name))
+ if ipv6_conf:
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(intf_name))
+ continue
+
+ command = {'name': intf_name}
+ if ipv4_conf:
+ match_ipv4 = match_obj.get('ipv4')
+ # Delete all DHCP relay config for an interface if not specified
+ if not match_ipv4:
+ command['ipv4'] = ipv4_conf
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(intf_name))
+ else:
+ have_server_addresses = self.get_server_addresses(ipv4_conf.get('server_addresses'))
+ server_addresses = self.get_server_addresses(match_ipv4.get('server_addresses'))
+
+ # Delete all DHCP relay config for an interface, if
+ # all existing server addresses are to be replaced
+ # or if the VRF is to be removed.
+ if (not have_server_addresses.intersection(server_addresses)
+ or (ipv4_conf.get('vrf_name') and match_ipv4.get('vrf_name') is None)):
+ command['ipv4'] = ipv4_conf
+ requests.append(self.get_delete_all_dhcp_relay_intf_request(intf_name))
+ else:
+ ipv4_command = {}
+ del_server_addresses = have_server_addresses.difference(server_addresses)
+ if del_server_addresses:
+ ipv4_command['server_addresses'] = []
+ for address in del_server_addresses:
+ ipv4_command['server_addresses'].append({'address': address})
+
+ for option in ('source_interface', 'link_select', 'vrf_select'):
+ if ipv4_conf.get(option) and match_ipv4.get(option) is None:
+ ipv4_command[option] = ipv4_conf[option]
+
+ for option in ('circuit_id', 'max_hop_count', 'policy_action'):
+ if (ipv4_conf.get(option) and match_ipv4.get(option) is None
+ and ipv4_conf[option] != default_value[option]):
+ ipv4_command[option] = ipv4_conf[option]
+
+ if ipv4_command:
+ command['ipv4'] = ipv4_command
+ requests.extend(self.get_delete_specific_dhcp_relay_param_requests(command, command, False))
+
+ if ipv6_conf:
+ match_ipv6 = match_obj.get('ipv6')
+ # Delete all DHCPv6 relay config for an interface if not specified
+ if not match_ipv6:
+ command['ipv6'] = ipv6_conf
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(intf_name))
+ else:
+ have_server_addresses = self.get_server_addresses(ipv6_conf.get('server_addresses'))
+ server_addresses = self.get_server_addresses(match_ipv6.get('server_addresses'))
+
+ # Delete all DHCPv6 relay config for an interface, if
+ # all existing server addresses are to be replaced
+ # or if the VRF is to be removed.
+ if (not have_server_addresses.intersection(server_addresses)
+ or (ipv6_conf.get('vrf_name') and match_ipv6.get('vrf_name') is None)):
+ command['ipv6'] = ipv6_conf
+ requests.append(self.get_delete_all_dhcpv6_relay_intf_request(intf_name))
+ else:
+ ipv6_command = {}
+ del_server_addresses = have_server_addresses.difference(server_addresses)
+ if del_server_addresses:
+ ipv6_command['server_addresses'] = []
+ for address in del_server_addresses:
+ ipv6_command['server_addresses'].append({'address': address})
+
+ for option in ('source_interface', 'vrf_select'):
+ if ipv6_conf.get(option) and match_ipv6.get(option) is None:
+ ipv6_command[option] = ipv6_conf[option]
+
+ if (ipv6_conf.get('max_hop_count') and match_ipv6.get('max_hop_count') is None
+ and ipv6_conf['max_hop_count'] != default_value['max_hop_count']):
+ ipv6_command['max_hop_count'] = ipv6_conf['max_hop_count']
+
+ if ipv6_command:
+ command['ipv6'] = ipv6_command
+ requests.extend(self.get_delete_specific_dhcpv6_relay_param_requests(command, command, False))
+
+ if command.get('ipv4') or command.get('ipv6'):
+ commands.append(command)
+
+ return commands, requests
+
+ @staticmethod
+ def get_server_addresses(server_addresses_dict):
+ """Get a set of server addresses available in the given
+ server_addresses dict
+ """
+ server_addresses = set()
+ if not server_addresses_dict:
+ return server_addresses
+
+ for addr in server_addresses_dict:
+ if addr.get('address'):
+ server_addresses.add(addr['address'])
+
+ return server_addresses
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/tests/integration/targets/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/dhcp_snooping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/dhcp_snooping.py
new file mode 100644
index 000000000..d3c3233b1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/dhcp_snooping/dhcp_snooping.py
@@ -0,0 +1,649 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_dhcp_snooping class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ remove_empties,
+ validate_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states
+)
+
+
+class Dhcp_snooping(ConfigBase):
+ """
+ The sonic_dhcp_snooping class
+ """
+ test_keys = [
+ {'afis': {'afi': ''}},
+ {"source_bindings": {"mac_addr": ""}},
+ {"trusted": {"intf_name": ""}}
+ ]
+
+ ipv4_key = 'ipv4'
+ ipv6_key = 'ipv6'
+
+ delete_method_value = 'delete'
+ patch_method_value = 'patch'
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'dhcp_snooping',
+ ]
+
+ dhcp_snooping_uri = 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ config_uri = dhcp_snooping_uri + '/config'
+ enable_uri = config_uri + '/dhcpv{v}-admin-enable'
+ verify_mac_uri = config_uri + '/dhcpv{v}-verify-mac-address'
+ binding_uri = dhcp_snooping_uri + '-static-binding/entry'
+ trusted_uri = 'data/openconfig-interfaces:interfaces/interface={name}/dhcpv{v}-snooping-trust/config/dhcpv{v}-snooping-trust'
+ vlans_uri = 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST={vlan_name}/dhcpv{v}_snooping_enable'
+
+ def __init__(self, module):
+ super(Dhcp_snooping, self).__init__(module)
+
+ def get_dhcp_snooping_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset,
+ self.gather_network_resources)
+ dhcp_snooping_facts = facts['ansible_network_resources'].get('dhcp_snooping')
+ if not dhcp_snooping_facts:
+ return []
+ return dhcp_snooping_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = list()
+
+ existing_dhcp_snooping_facts = self.get_dhcp_snooping_facts()
+ commands, requests = self.set_config(existing_dhcp_snooping_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_dhcp_snooping_facts = self.get_dhcp_snooping_facts()
+
+ result['before'] = existing_dhcp_snooping_facts
+ if result['changed']:
+ result['after'] = changed_dhcp_snooping_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_dhcp_snooping_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_dhcp_snooping_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ afis = {}
+ want = self.remove_none(want)
+
+ # just in case weird arguments passed
+ if want is None:
+ want = {}
+ if have is None:
+ have = {}
+
+ if want.get('afis') is not None:
+ for want_afi in want.get('afis'):
+ if want_afi.get('afi') == self.ipv4_key:
+ afis['want_ipv4'] = want_afi
+ elif want_afi.get('afi') == self.ipv6_key:
+ afis['want_ipv6'] = want_afi
+
+ if have.get('afis') is not None:
+ for have_afi in have.get('afis'):
+ if have_afi.get('afi') == self.ipv4_key:
+ afis['have_ipv4'] = have_afi
+ elif have_afi.get('afi') == self.ipv6_key:
+ afis['have_ipv6'] = have_afi
+
+ state = self._module.params['state']
+ if state == 'merged':
+ commands, requests = self._state_merged(want, have, afis)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have, afis)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, afis)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have, afis)
+
+ return commands, requests
+
+ def _state_merged(self, want, have, afis):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ want = remove_empties(want)
+ self.validate_config({"config": want})
+
+ commands = get_diff(want, have, test_keys=self.test_keys)
+ self.prep_replaced_to_merge(commands, afis)
+ requests = self.get_modify_requests(commands)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have, afis):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ requests = []
+ if not have or not have.get('afis'):
+ # nothing that could be deleted
+ commands = []
+ elif not want or not want.get('afis'):
+ # want is empty, meaning want to delete all config
+ # afis parameter only stores the on device config at this point
+ commands, requests = self.get_delete_all_have_requests(afis)
+ else:
+ # some mix of settings specified in both
+ commands, requests = self.get_delete_specific_requests(afis)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+ return commands, requests
+
+ def _state_overridden(self, want, have, afis):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ if not want:
+ return commands, requests
+
+ # Determine if there is any configuration specified in the playbook
+ # that is not contained in the current configuration.
+ diff_requested = get_diff(want, have, self.test_keys)
+ diff_requested_keyed = {}
+ for afi in diff_requested.get("afis", []):
+ diff_requested_keyed[afi["afi"]] = afi
+
+ # Determine if there is anything already configured that is not
+ # specified in the playbook.
+ diff_unwanted = get_diff(have, want, self.test_keys)
+
+ # Idempotency check: If the configuration already matches the
+ # requested configuration with no extra attributes, no
+ # commands should be executed on the device.
+ if not diff_requested and not diff_unwanted:
+ return commands, requests
+
+ used_commands_per_afi = []
+ commands = []
+
+ for diff_unwanted_afi in diff_unwanted.get("afis", []):
+ # enabled and verify_mac can't be deleted from config, only reset to default, so if either appears
+ # in both the "need to delete" and "need to change" sets, keeping it in both would result in duplicate requests
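+ # For instance, overriding ipv4 with 'enabled: false' while it is currently
+ # true would otherwise produce both a "reset to default" request and a
+ # "set to false" request for the same leaf.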
+ if "enabled" in diff_unwanted_afi and "enabled" in diff_requested_keyed.get(diff_unwanted_afi["afi"], {}):
+ del diff_unwanted_afi["enabled"]
+ if "verify_mac" in diff_unwanted_afi and "verify_mac" in diff_requested_keyed.get(diff_unwanted_afi["afi"], {}):
+ del diff_unwanted_afi["verify_mac"]
+ afi_commands, afi_requests = self.get_delete_specific_afi_fields_requests(diff_unwanted_afi, afis["have_" + diff_unwanted_afi["afi"]])
+ if afi_commands:
+ afi_commands["afi"] = diff_unwanted_afi["afi"]
+ used_commands_per_afi.append(afi_commands)
+ requests.extend(afi_requests)
+ if len(used_commands_per_afi):
+ commands = {"afis": used_commands_per_afi}
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+
+ # apply the things to add or change
+ # need to add back in the source bindings since the diff could pick up only the different values in a source binding
+ self.prep_replaced_to_merge(diff_requested, afis)
+ overridden_requests = self.get_modify_requests(diff_requested)
+ requests.extend(overridden_requests)
+ if diff_requested and len(overridden_requests) > 0:
+ diff_requested = update_states(diff_requested, "overridden")
+ commands.extend(diff_requested)
+ return commands, requests
+
+ def _state_replaced(self, want, have, afis):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ # do needed deletes
+ commands, requests = self.get_delete_replaced_groupings(afis)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ # getting what needs to be added/changed after deletes
+ # need to add back in the source bindings since the diff could pick up only the different values in a source binding
+ diff = get_diff(want, have, self.test_keys)
+ self.prep_replaced_to_merge(diff, afis)
+ merged_commands = diff
+
+ replaced_requests = self.get_modify_requests(merged_commands)
+ requests.extend(replaced_requests)
+ if merged_commands and len(replaced_requests) > 0:
+ merged_commands = update_states(merged_commands, "replaced")
+ commands.extend(merged_commands)
+ return commands, requests
+
+ def validate_config(self, config):
+ '''validate that the passed-in config is argspec compliant. Also checks that values are within ranges that Ansible itself might not verify'''
+ validated_config = validate_config(self._module.argument_spec, config)
+ return validated_config
+
+ def remove_none(self, config):
+ '''goes through nested dictionary and list items and removes any keys that have None as their value.
+ This enables using an empty list/dict to mean "clear everything for that section" and distinguishes
+ that "clear everything" case from the case where no value was given.
+ (remove_empties in ansible utils would also strip empty lists and dicts, not just None)'''
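+ # For example (illustrative), {'afis': [{'afi': 'ipv4', 'vlans': []}]} is kept
+ # as an explicit "clear all vlans" request, while a 'vlans' key whose value is
+ # None is dropped because no value was supplied.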
+ if isinstance(config, dict):
+ for k, v in list(config.items()):
+ if v is None:
+ del config[k]
+ else:
+ self.remove_none(v)
+ elif isinstance(config, list):
+ for item in list(config):
+ if item is None:
+ config.remove(item)
+ self.remove_none(item)
+ return config
+
+ def get_modify_requests(self, to_modify_config):
+ '''builds and returns requests to add in given config
+
+ :param to_modify_config: dictionary specifying what to modify in argspec format. expected to be at root level of config'''
+ requests = []
+
+ if to_modify_config.get('afis') is not None:
+ for afi_config in to_modify_config.get('afis'):
+ requests.extend(self.get_single_afi_modify_requests(afi_config))
+
+ return requests
+
+ def get_single_afi_modify_requests(self, to_modify_afi):
+ """build requests to modify a single afi family. Uses passed in config to find which family and what to change
+
+ :param to_modify_afi: dictionary specifying the config to add/change in argspec format. expected to be for a single afi
+ (the afi version number used in the request paths is derived internally via afi_to_vnum)
+ """
+ requests = []
+ v = self.afi_to_vnum(to_modify_afi)
+
+ if to_modify_afi.get('enabled') is not None:
+ payload = {'openconfig-dhcp-snooping:dhcpv{v}-admin-enable'.format(v=v): to_modify_afi['enabled']}
+ uri = self.enable_uri.format(v=v)
+ requests.append({'path': uri, 'method': self.patch_method_value, 'data': payload})
+
+ if to_modify_afi.get('verify_mac') is not None:
+ payload = {'openconfig-dhcp-snooping:dhcpv{v}-verify-mac-address'.format(v=v): to_modify_afi['verify_mac']}
+ uri = self.verify_mac_uri.format(v=v)
+ requests.append({'path': uri, 'method': self.patch_method_value, 'data': payload})
+
+ if to_modify_afi.get('trusted'):
+ for intf in to_modify_afi.get('trusted'):
+ intf_name = intf.get("intf_name")
+ if intf_name:
+ payload = {'openconfig-interfaces:dhcpv{v}-snooping-trust'.format(v=v): 'ENABLE'}
+ uri = self.trusted_uri.format(name=intf_name, v=v)
+ requests.append({'path': uri, 'method': self.patch_method_value, 'data': payload})
+
+ if to_modify_afi.get('vlans'):
+ for vlan_id in to_modify_afi.get('vlans'):
+ payload = {'sonic-vlan:dhcpv{v}_snooping_enable'.format(v=v): 'enable'}
+ uri = self.vlans_uri.format(vlan_name='Vlan' + vlan_id, v=v)
+ requests.append({'path': uri, 'method': self.patch_method_value, 'data': payload})
+
+ if to_modify_afi.get('source_bindings'):
+ entries = []
+ for entry in to_modify_afi.get('source_bindings'):
+ if entry.get('mac_addr'):
+ entries.append({
+ 'mac': entry.get('mac_addr'),
+ 'iptype': 'ipv' + str(v),
+ 'config': {
+ 'mac': entry.get('mac_addr'),
+ 'iptype': 'ipv' + str(v),
+ 'vlan': "Vlan" + str(entry.get('vlan_id')),
+ 'interface': entry.get('intf_name'),
+ 'ip': entry.get('ip_addr'),
+ }
+ })
+
+ payload = {'openconfig-dhcp-snooping:entry': entries}
+ uri = self.binding_uri
+ requests.append({'path': uri, 'method': self.patch_method_value, 'data': payload})
+
+ return requests
+
+ def get_delete_all_have_requests(self, afis):
+ '''creates and builds list of requests to delete all current dhcp snooping config for ipv4 and ipv6'''
+ modified_afi_commands = []
+ requests = []
+ ipv4_commands, ipv4_requests = self.get_delete_specific_afi_fields_requests(afis.get('have_ipv4'), afis.get('have_ipv4'))
+ requests.extend(ipv4_requests)
+ if ipv4_commands:
+ ipv4_commands["afi"] = afis.get('have_ipv4')["afi"]
+ modified_afi_commands.append(ipv4_commands)
+ ipv6_commands, ipv6_requests = self.get_delete_specific_afi_fields_requests(afis.get('have_ipv6'), afis.get('have_ipv6'))
+ requests.extend(ipv6_requests)
+ if ipv6_commands:
+ ipv6_commands["afi"] = afis.get('have_ipv6')["afi"]
+ modified_afi_commands.append(ipv6_commands)
+
+ sent_commands = []
+ if modified_afi_commands:
+ sent_commands = {"afis": modified_afi_commands}
+
+ return sent_commands, requests
+
+ def get_delete_specific_requests(self, afis):
+ '''creates and returns a list of requests to delete afi settings.
+ Checks whether to clear all settings for an IP family or only the matching fields in config'''
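+ # For example, a playbook entry containing only '- afi: ipv4' (no other keys)
+ # is interpreted as a request to delete all IPv4 snooping configuration.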
+ modified_afi_commands = []
+ requests = []
+
+ want_ipv4 = afis.get('want_ipv4')
+ want_ipv6 = afis.get('want_ipv6')
+ have_ipv4 = afis.get('have_ipv4')
+ have_ipv6 = afis.get('have_ipv6')
+
+ if want_ipv4:
+ if want_ipv4.keys() == set(["afi"]):
+ # just afi key supplied, interpreting this as delete all config for that afi
+ ipv4_commands, ipv4_requests = self.get_delete_specific_afi_fields_requests(have_ipv4, have_ipv4)
+ else:
+ ipv4_commands, ipv4_requests = self.get_delete_specific_afi_fields_requests(want_ipv4, have_ipv4)
+ requests.extend(ipv4_requests)
+ if ipv4_commands:
+ ipv4_commands["afi"] = want_ipv4["afi"]
+ modified_afi_commands.append(ipv4_commands)
+ if want_ipv6:
+ if want_ipv6.keys() == set(["afi"]):
+ ipv6_commands, ipv6_requests = self.get_delete_specific_afi_fields_requests(have_ipv6, have_ipv6)
+ else:
+ ipv6_commands, ipv6_requests = self.get_delete_specific_afi_fields_requests(want_ipv6, have_ipv6)
+ requests.extend(ipv6_requests)
+ if ipv6_commands:
+ ipv6_commands["afi"] = want_ipv6["afi"]
+ modified_afi_commands.append(ipv6_commands)
+
+ sent_commands = []
+ if modified_afi_commands:
+ sent_commands = {"afis": modified_afi_commands}
+
+ return sent_commands, requests
+
+ def get_delete_specific_afi_fields_requests(self, want_afi, have_afi):
+ '''creates and builds a list of requests for deleting some fields of dhcp snooping config for
+ one IP family. Each field is checked and deleted independently of the others, depending on
+ whether it is specified in the playbook and matches the current config'''
+ sent_commands = {}
+ requests = []
+
+ if want_afi.get('enabled') is True and have_afi.get('enabled') is True:
+ # only need to send a request if want from playbook is set to non default value and the setting currently configured is non default
+ sent_commands.update({"enabled": want_afi.get("enabled")})
+ requests.extend(self.get_delete_enabled_request(want_afi))
+ if want_afi.get('verify_mac') is False and have_afi.get('verify_mac') is False:
+ sent_commands.update({"verify_mac": want_afi.get("verify_mac")})
+ requests.extend(self.get_delete_verify_mac_request(want_afi))
+ if want_afi.get('vlans') is not None and have_afi.get('vlans') is not None and have_afi.get("vlans") != []:
+ # gathering list of vlans to be deleted. this section also handles cases where empty list of vlans is passed in
+ # which means delete all vlans
+ to_delete_vlans = have_afi["vlans"]
+ if len(want_afi["vlans"]) > 0:
+ to_delete_vlans = list(set(have_afi.get("vlans", [])).intersection(set(want_afi.get("vlans", []))))
+ to_delete = {"afi": want_afi["afi"], "vlans": to_delete_vlans}
+ if len(to_delete["vlans"]):
+ sent_commands.update({"vlans": deepcopy(to_delete_vlans)})
+ requests.extend(self.get_delete_vlans_requests(to_delete))
+ if want_afi.get('trusted') is not None and have_afi.get('trusted') is not None and have_afi.get('trusted') != []:
+ # gathering list of interfaces to be deleted. this section also handles cases where empty list of interfaces is passed in which
+ # means delete all trusted interfaces
+ to_delete_trusted = have_afi["trusted"]
+ if len(want_afi["trusted"]) > 0:
+ to_delete_trusted = want_afi["trusted"]
+ # removing interfaces that don't exist on device
+ for intf in list(to_delete_trusted):
+ if intf not in have_afi["trusted"]:
+ to_delete_trusted.remove(intf)
+ to_delete = {"afi": want_afi["afi"], "trusted": to_delete_trusted}
+ if len(to_delete["trusted"]):
+ sent_commands.update({"trusted": deepcopy(to_delete_trusted)})
+ requests.extend(self.get_delete_trusted_requests(to_delete))
+ if want_afi.get('source_bindings') is not None and have_afi.get('source_bindings') is not None and have_afi.get('source_bindings') != []:
+ # gathering list of source bindings to be deleted. this section also handles cases where empty list of bindings is passed in which
+ # means delete all source bindings
+ to_delete_bindings = have_afi["source_bindings"]
+ if len(want_afi["source_bindings"]) > 0:
+ to_delete_bindings = want_afi["source_bindings"]
+ # removing bindings that don't exist on device
+ existing_keys = [binding["mac_addr"] for binding in have_afi["source_bindings"]]
+ for binding in list(to_delete_bindings):
+ if binding["mac_addr"] not in existing_keys:
+ # need to check by the key, since there can be two different versions of the same binding
+ to_delete_bindings.remove(binding)
+ to_delete = {"afi": want_afi["afi"], "source_bindings": to_delete_bindings}
+ if len(to_delete["source_bindings"]):
+ sent_commands.update({"source_bindings": deepcopy(to_delete_bindings)})
+ requests.extend(self.get_delete_specific_source_bindings_requests(to_delete))
+
+ return sent_commands, requests
+
+ def get_delete_enabled_request(self, afi):
+ '''makes and returns request to "delete" aka reset to default the enabled setting for one afi family. returns as a list'''
+ payload = {'openconfig-dhcp-snooping:dhcpv{v}-admin-enable'.format(v=self.afi_to_vnum(afi)): False}
+ return [{'path': self.enable_uri.format(v=self.afi_to_vnum(afi)), 'method': self.patch_method_value, 'data': payload}]
+
+ def get_delete_verify_mac_request(self, afi):
+ '''makes and returns request to "delete" aka reset to default the config for one afi family's verify mac setting'''
+ payload = {'openconfig-dhcp-snooping:dhcpv{v}-verify-mac-address'.format(v=self.afi_to_vnum(afi)): True}
+ return [{'path': self.verify_mac_uri.format(v=self.afi_to_vnum(afi)), 'method': self.patch_method_value, 'data': payload}]
+
+ def get_delete_vlans_requests(self, afi):
+ '''makes and returns requests to delete the given vlans for the given afi family.
+ input expected as a dictionary of form {"afi": <ip_version>, "vlans": <list_of_vlans>}'''
+ requests = []
+ if afi.get('vlans'):
+ for vlan_id in afi.get('vlans'):
+ requests.append({
+ 'path': self.vlans_uri.format(vlan_name='Vlan' + vlan_id, v=self.afi_to_vnum(afi)),
+ 'method': self.delete_method_value
+ })
+ return requests
+
+ def get_delete_trusted_requests(self, afi):
+ '''makes and returns requests to delete the given trusted interfaces for the given afi family.
+ input expected as a dictionary of form {"afi": <ip_version>, "trusted": [{"intf_name": <name>}...]}'''
+ requests = []
+ if afi.get('trusted'):
+ for intf in afi.get('trusted'):
+ intf_name = intf.get('intf_name')
+ if intf_name:
+ requests.append({
+ 'path': self.trusted_uri.format(name=intf_name, v=self.afi_to_vnum(afi)),
+ 'method': self.delete_method_value
+ })
+ return requests
+
+ def get_delete_all_source_bindings_request(self):
+ '''creates request to delete the source bindings list, which clears all bindings from both families'''
+ return [{'path': self.binding_uri, 'method': self.delete_method_value}]
+
+ def get_delete_specific_source_bindings_requests(self, afi):
+ '''creates and builds a list of requests to delete the source bindings listed in the given afi family
+ input expected as a dictionary of form to_delete = {"afi": <ip_version>, "source_bindings": <list of source_bindings>}'''
+ requests = []
+ for entry in afi.get('source_bindings'):
+ if entry.get('mac_addr'):
+ requests.append({
+ 'path': self.binding_uri + '={mac},{ipv}'.format(mac=entry.get('mac_addr'), ipv=afi.get('afi')),
+ 'method': self.delete_method_value
+ })
+ return requests
+
+ def get_delete_individual_source_bindings_requests(self, afi, entry):
+ '''create a request to delete the given source binding entry and address family specified
+ by afi'''
+ return [{'path': self.binding_uri + '={mac},{ipv}'.format(mac=entry.get('mac_addr'), ipv=afi.get('afi')), 'method': self.delete_method_value}]
+
+ def get_delete_replaced_groupings(self, afis):
+ '''builds list of requests to handle replaced state for both address families'''
+ modified_afi_commands = []
+ requests = []
+
+ want_ipv4 = afis.get('want_ipv4')
+ have_ipv4 = afis.get('have_ipv4')
+ want_ipv6 = afis.get('want_ipv6')
+ have_ipv6 = afis.get('have_ipv6')
+
+ if want_ipv4 and have_ipv4:
+ ipv4_commands, ipv4_requests = self.get_delete_replaced_groupings_afi(want_ipv4, have_ipv4)
+ requests.extend(ipv4_requests)
+ if ipv4_commands:
+ ipv4_commands["afi"] = want_ipv4["afi"]
+ modified_afi_commands.append(ipv4_commands)
+ if want_ipv6 and have_ipv6:
+ ipv6_commands, ipv6_requests = self.get_delete_replaced_groupings_afi(want_ipv6, have_ipv6)
+ requests.extend(ipv6_requests)
+ if ipv6_commands:
+ ipv6_commands["afi"] = want_ipv6["afi"]
+ modified_afi_commands.append(ipv6_commands)
+
+ sent_commands = []
+ if modified_afi_commands:
+ sent_commands = {"afis": modified_afi_commands}
+
+ return sent_commands, requests
+
+ def get_delete_replaced_groupings_afi(self, want_afi, have_afi):
+ '''creates and builds a list of requests to handle all parts that need to be deleted
+ while handling the replaced state for an address family'''
+ sent_commands = {}
+ requests = []
+ diff_requested = get_diff(have_afi, want_afi, self.test_keys)
+
+ if diff_requested.get("vlans") and "vlans" in want_afi:
+ # delete any vlans that are different
+ to_delete = {"afi": have_afi["afi"], "vlans": diff_requested["vlans"]}
+ sent_commands["vlans"] = deepcopy(diff_requested["vlans"])
+ requests.extend(self.get_delete_vlans_requests(to_delete))
+ if diff_requested.get('trusted') and 'trusted' in want_afi:
+ # delete anything that has a difference, covers things that are
+ # in have but not want and things in both but modified
+ to_delete = {"afi": have_afi["afi"], "trusted": diff_requested["trusted"]}
+ sent_commands["trusted"] = deepcopy(diff_requested["trusted"])
+ requests.extend(self.get_delete_trusted_requests(to_delete))
+ if diff_requested.get('source_bindings') and 'source_bindings' in want_afi:
+ # assuming source bindings are treated as a replaceable subsection, i.e. the resulting list
+ # should look exactly like what was passed in want
+ if want_afi["source_bindings"] == []:
+ # replaced told want to replace existing with blank list, only thing to do is delete existing bindings for family
+ sent_commands["source_bindings"] = deepcopy(have_afi["source_bindings"])
+ requests.extend(self.get_delete_specific_source_bindings_requests(have_afi))
+ else:
+ sent_commands["source_bindings"] = deepcopy(diff_requested["source_bindings"])
+ for entry in diff_requested["source_bindings"]:
+ requests.extend(self.get_delete_individual_source_bindings_requests(have_afi, entry))
+ return sent_commands, requests
+
+ def prep_replaced_to_merge(self, diff, afis):
+ '''preps results from get_diff for use in merging; needed so that each source binding carries all of its data. get_diff returns only
+ the fields that differ in a source binding, but the full entry is needed, so this fills in each source binding in diff with what is found for it in afis'''
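+ # For example, if a diff entry for a binding contains only its 'mac_addr' and a
+ # changed 'intf_name', the matching entry from the corresponding 'want' afi
+ # supplies the remaining keys ('vlan_id', 'ip_addr') so that the request payload
+ # built later from this diff is complete.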
+ if not diff or not diff.get("afis"):
+ return {}
+ for diff_afi in diff["afis"]:
+ if "source_bindings" in diff_afi:
+ for binding in diff_afi["source_bindings"]:
+ binding.update(self.match_binding(binding["mac_addr"], afis["want_" + diff_afi["afi"]]["source_bindings"]))
+
+ @staticmethod
+ def match_binding(mac_addr, bindings):
+ for binding in bindings:
+ if binding["mac_addr"] == mac_addr:
+ return binding
+ return {}
+
+ @staticmethod
+ def afi_to_vnum(afi):
+ if afi.get('afi') == 'ipv6':
+ return '6'
+ else:
+ return '4'
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py
index acf985ebf..33607817b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/interfaces/interfaces.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# © Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -18,6 +18,15 @@ try:
except ImportError:
from urllib.parse import quote
+"""
+The use of natsort causes a sanity error because it is not available in the currently used Python version.
+When natsort becomes available, the code here and below that uses it can be enabled.
+from natsort import (
+ natsorted,
+ ns
+)
+"""
+from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
@@ -33,14 +42,22 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.interfaces_util import (
build_interfaces_create_request,
+ retrieve_default_intf_speed,
+ retrieve_port_group_interfaces
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
get_diff,
update_states,
- normalize_interface_name
+ normalize_interface_name,
+ remove_empties_from_list
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ get_new_config,
+ get_formatted_config_diff
)
from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
+import re
import traceback
LIB_IMP_ERR = None
@@ -53,8 +70,54 @@ except Exception as e:
ERR_MSG = to_native(e)
LIB_IMP_ERR = traceback.format_exc()
+GET = 'get'
PATCH = 'patch'
DELETE = 'delete'
+url = 'data/openconfig-interfaces:interfaces/interface=%s'
+eth_conf_url = "/openconfig-if-ethernet:ethernet/config"
+
+port_num_regex = re.compile(r'[\d]{1,4}$')
+non_eth_attribute = ('description', 'mtu', 'enabled')
+eth_attribute = ('description', 'mtu', 'enabled', 'auto_negotiate', 'speed', 'fec', 'advertised_speed')
+
+attributes_default_value = {
+ "description": '',
+ "mtu": 9100,
+ "enabled": False,
+ "auto_negotiate": False,
+ "fec": 'FEC_DISABLED',
+ "advertised_speed": []
+}
+default_intf_speeds = {}
+port_group_interfaces = None
+
+
+def __derive_interface_config_delete_op(key_set, command, exist_conf):
+ new_conf = exist_conf
+ intf_name = command['name']
+
+ for attr in eth_attribute:
+ if attr in command:
+ if attr == "speed":
+ new_conf[attr] = default_intf_speeds[intf_name]
+ elif attr == "advertised_speed":
+ if new_conf[attr] is not None:
+ new_conf[attr] = list(set(new_conf[attr]).difference(command[attr]))
+ if new_conf[attr] == []:
+ new_conf[attr] = None
+ elif attr == "auto_negotiate":
+ new_conf[attr] = False
+ if new_conf.get('advertised_speed') is not None:
+ new_conf['advertised_speed'] = None
+ else:
+ new_conf[attr] = attributes_default_value[attr]
+
+ return True, new_conf
+
+
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': '', '__delete_op': __derive_interface_config_delete_op}},
+]
class Interfaces(ConfigBase):
@@ -71,9 +134,6 @@ class Interfaces(ConfigBase):
'interfaces',
]
- params = ('description', 'mtu', 'enabled')
- delete_flag = False
-
def __init__(self, module):
super(Interfaces, self).__init__(module)
@@ -100,7 +160,7 @@ class Interfaces(ConfigBase):
warnings = list()
existing_interfaces_facts = self.get_interfaces_facts()
- commands, requests = self.set_config(existing_interfaces_facts)
+ commands, requests = self.set_config(existing_interfaces_facts, warnings)
if commands and len(requests) > 0:
if not self._module.check_mode:
try:
@@ -116,10 +176,27 @@ class Interfaces(ConfigBase):
if result['changed']:
result['after'] = changed_interfaces_facts
+ new_config = changed_interfaces_facts
+ old_config = existing_interfaces_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_interfaces_facts,
+ TEST_KEYS_formatted_diff)
+ # See the comment above about the natsort module
+ # new_config = natsorted(new_config, key=lambda x: x['name'])
+ # For the time being, use the built-in "sort"
+ new_config.sort(key=lambda x: x['name'])
+ result['after(generated)'] = new_config
+ old_config.sort(key=lambda x: x['name'])
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
- def set_config(self, existing_interfaces_facts):
+ def set_config(self, existing_interfaces_facts, warnings):
""" Collect the configuration from the args passed to the module,
collect the current configuration (as a dict from facts)
@@ -128,12 +205,51 @@ class Interfaces(ConfigBase):
to the desired configuration
"""
want = self._module.params['config']
- normalize_interface_name(want, self._module)
have = existing_interfaces_facts
+ self.filter_out_mgmt_interface(want, have)
- resp = self.set_state(want, have)
+ new_want, new_have = self.validate_config(want, have, warnings)
+ resp = self.set_state(new_want, new_have)
return to_list(resp)
+ def validate_config(self, want, have, warnings):
+ new_want = deepcopy(want)
+ new_have = deepcopy(have)
+ normalize_interface_name(new_want, self._module)
+ for cmd in new_have:
+ # If auto_neg is true, ignore speed
+ if cmd.get('auto_negotiate') is True:
+ if cmd.get('speed'):
+ cmd.pop('speed')
+ elif cmd.get('advertised_speed'):
+ cmd.pop('advertised_speed')
+
+ if new_want:
+ for cmd in new_want:
+ intf = next((cfg for cfg in new_have if cfg['name'] == cmd['name']), None)
+ state = self._module.params['state']
+ if cmd.get('advertised_speed'):
+ cmd['advertised_speed'].sort()
+
+ if state != "deleted":
+ if intf:
+ want_autoneg = cmd.get('auto_negotiate')
+ have_autoneg = intf.get('auto_negotiate')
+ want_speed = cmd.get('speed')
+ want_ads = cmd.get('advertised_speed')
+
+ if want_speed is not None:
+ if want_autoneg or (want_ads and have_autoneg):
+ warnings.append("Speed cannot be configured when autoneg is enabled")
+ cmd.pop('speed')
+
+ if want_ads is not None:
+ if want_autoneg is False or (not want_autoneg and not have_autoneg):
+ warnings.append("Advertised speed cannot be configured when autoneg is disabled")
+ cmd.pop('advertised_speed')
+
+ return new_want, new_have
+
def set_state(self, want, have):
""" Select the appropriate function based on the state provided
@@ -149,18 +265,17 @@ class Interfaces(ConfigBase):
# removing the dict in case diff found
if state == 'overridden':
- have = [each_intf for each_intf in have if each_intf['name'].startswith('Ethernet')]
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(want, have, diff)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
return commands, requests
- def _state_replaced(self, want, have, diff):
+ def _state_replaced(self, want, have):
""" The command generator when state is replaced
:param want: the desired configuration as a dictionary
@@ -170,17 +285,13 @@ class Interfaces(ConfigBase):
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
- commands = self.filter_comands_to_change(diff, have)
- requests = self.get_delete_interface_requests(commands, have)
- requests.extend(self.get_modify_interface_requests(commands, have))
- if commands and len(requests) > 0:
- commands = update_states(commands, "replaced")
- else:
- commands = []
+ commands = []
+ requests = []
+ commands, requests = self.get_replaced_overridden_config(want, have, "replaced")
return commands, requests
- def _state_overridden(self, want, have, diff):
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:param want: the desired configuration as a dictionary
@@ -190,18 +301,9 @@ class Interfaces(ConfigBase):
to the desired configuration
"""
commands = []
- commands_del = self.filter_comands_to_change(want, have)
- requests = self.get_delete_interface_requests(commands_del, have)
- del_req_count = len(requests)
- if commands_del and del_req_count > 0:
- commands_del = update_states(commands_del, "deleted")
- commands.extend(commands_del)
-
- commands_over = diff
- requests.extend(self.get_modify_interface_requests(commands_over, have))
- if commands_over and len(requests) > del_req_count:
- commands_over = update_states(commands_over, "overridden")
- commands.extend(commands_over)
+ requests = []
+
+ commands, requests = self.get_replaced_overridden_config(want, have, "overridden")
return commands, requests
@@ -214,8 +316,8 @@ class Interfaces(ConfigBase):
:returns: the commands necessary to merge the provided into
the current configuration
"""
- commands = diff
- requests = self.get_modify_interface_requests(commands, have)
+ commands = self.filter_commands_to_change(diff, have)
+ requests = self.get_interface_requests(commands, have)
if commands and len(requests) > 0:
commands = update_states(commands, "merged")
else:
@@ -223,7 +325,7 @@ class Interfaces(ConfigBase):
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:param want: the objects from which the configuration should be removed
@@ -234,34 +336,23 @@ class Interfaces(ConfigBase):
of the provided objects
"""
# if want is none, then delete all the interfaces
+
+ want = remove_empties_from_list(want)
+ delete_all = False
if not want:
commands = have
+ delete_all = True
else:
commands = want
- requests = self.get_delete_interface_requests(commands, have)
-
- if commands and len(requests) > 0:
- commands = update_states(commands, "deleted")
- else:
- commands = []
-
- return commands, requests
-
- def filter_comands_to_delete(self, configs, have):
+ commands_del, requests = self.handle_delete_interface_config(commands, have, delete_all)
commands = []
+ if commands_del:
+ commands.extend(update_states(commands_del, "deleted"))
- for conf in configs:
- if self.is_this_delete_required(conf, have):
- temp_conf = dict()
- temp_conf['name'] = conf['name']
- temp_conf['description'] = ''
- temp_conf['mtu'] = 9100
- temp_conf['enabled'] = True
- commands.append(temp_conf)
- return commands
+ return commands, requests
- def filter_comands_to_change(self, configs, have):
+ def filter_commands_to_change(self, configs, have):
commands = []
if configs:
for conf in configs:
@@ -269,17 +360,18 @@ class Interfaces(ConfigBase):
commands.append(conf)
return commands
- def get_modify_interface_requests(self, configs, have):
- self.delete_flag = False
- commands = self.filter_comands_to_change(configs, have)
-
- return self.get_interface_requests(commands, have)
-
- def get_delete_interface_requests(self, configs, have):
- self.delete_flag = True
- commands = self.filter_comands_to_delete(configs, have)
+ def is_this_change_required(self, conf, have):
+ intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None)
+ if intf:
+ # Check all parameters to see if any one differs from the existing configuration
+ for param in eth_attribute:
+ if conf.get(param) is not None and conf.get(param) != intf.get(param):
+ return True
+ else:
+ # if given interface is not present
+ return True
- return self.get_interface_requests(commands, have)
+ return False
def get_interface_requests(self, configs, have):
requests = []
@@ -288,67 +380,285 @@ class Interfaces(ConfigBase):
# Create URL and payload
for conf in configs:
- name = conf["name"]
- if self.delete_flag and name.startswith('Loopback'):
- method = DELETE
- url = 'data/openconfig-interfaces:interfaces/interface=%s' % quote(name, safe='')
- request = {"path": url, "method": method}
+ name = conf['name']
+ have_conf = next((cfg for cfg in have if cfg['name'] == name), None)
+
+ # Create the Loopback interface if it is not present in have
+ if name.startswith('Loopback'):
+ if not have_conf:
+ loopback_create_request = build_interfaces_create_request(name)
+ requests.append(loopback_create_request)
else:
- # Create Loopback in case not availble in have
- if name.startswith('Loopback'):
- have_conf = next((cfg for cfg in have if cfg['name'] == name), None)
- if not have_conf:
- loopback_create_request = build_interfaces_create_request(name)
- requests.append(loopback_create_request)
- method = PATCH
- url = 'data/openconfig-interfaces:interfaces/interface=%s/config' % quote(name, safe='')
- payload = self.build_create_payload(conf)
- request = {"path": url, "method": method, "data": payload}
- requests.append(request)
-
+ attribute = eth_attribute if name.startswith('Eth') else non_eth_attribute
+
+ for attr in attribute:
+ if attr in conf:
+ c_attr = conf.get(attr)
+ h_attr = have_conf.get(attr)
+ attr_request = self.build_create_request(c_attr, h_attr, name, attr)
+ if attr_request:
+ requests.append(attr_request)
return requests
- def is_this_delete_required(self, conf, have):
- if conf['name'] == "eth0":
- return False
- intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None)
- if intf:
- if (intf['name'].startswith('Loopback') or not ((intf.get('description') is None or intf.get('description') == '') and
- (intf.get('enabled') is None or intf.get('enabled') is True) and (intf.get('mtu') is None or intf.get('mtu') == 9100))):
- return True
- return False
+ def build_create_request(self, c_attr, h_attr, intf_name, attr):
+ attributes_payload = {
+ "speed": 'port-speed',
+ "auto_negotiate": 'auto-negotiate',
+ "fec": 'openconfig-if-ethernet-ext2:port-fec',
+ "advertised_speed": 'openconfig-if-ethernet-ext2:advertised-speed'
+ }
+
+ config_url = (url + eth_conf_url) % quote(intf_name, safe='')
+ payload = {'openconfig-if-ethernet:config': {}}
+ payload_attr = attributes_payload.get(attr, attr)
+ method = PATCH
+
+ if attr in ('description', 'mtu', 'enabled'):
+ config_url = (url + '/config') % quote(intf_name, safe='')
+ payload = {'openconfig-interfaces:config': {}}
+ payload['openconfig-interfaces:config'][payload_attr] = c_attr
+ return {"path": config_url, "method": method, "data": payload}
+
+ elif attr == 'fec':
+ payload['openconfig-if-ethernet:config'][payload_attr] = 'openconfig-platform-types:' + c_attr
+ return {"path": config_url, "method": method, "data": payload}
+ else:
+ payload['openconfig-if-ethernet:config'][payload_attr] = c_attr
+ if attr == 'speed':
+ if self.is_port_in_port_group(intf_name):
+ self._module.fail_json(msg='Unable to configure speed on a port group member. Please use the port group module to change the speed')
+ payload['openconfig-if-ethernet:config'][payload_attr] = 'openconfig-if-ethernet:' + c_attr
+ if attr == 'advertised_speed':
+ c_ads = c_attr if c_attr else []
+ h_ads = h_attr if h_attr else []
+ new_ads = list(set(h_ads).union(c_ads))
+ if new_ads:
+ payload['openconfig-if-ethernet:config'][payload_attr] = ','.join(new_ads)
+
+ return {"path": config_url, "method": method, "data": payload}
+
+ return []
+
+ def handle_delete_interface_config(self, commands, have, delete_all=False):
+ if not commands:
+ return [], []
+
+ commands_del, requests = [], []
+ # Create URL and payload
+ for conf in commands:
+ name = conf['name']
+ have_conf = next((cfg for cfg in have if cfg['name'] == name), None)
+ if have_conf:
+ lp_key_set = set(conf.keys())
+ if name.startswith('Loopback'):
+ if delete_all or len(lp_key_set) == 1:
+ method = DELETE
+ lpbk_url = url % quote(name, safe='')
+ request = {"path": lpbk_url, "method": DELETE}
+ requests.append(request)
+
+ commands_del.append({'name': name})
+ continue
+
+ cmd = deepcopy(have_conf) if len(lp_key_set) == 1 else deepcopy(conf)
+
+ del_cmd = {'name': name}
+ attribute = eth_attribute if name.startswith('Eth') else non_eth_attribute
+
+ for attr in attribute:
+ if attr in conf:
+ c_attr = conf.get(attr)
+ h_attr = have_conf.get(attr)
+ default_val = self.get_default_value(attr, h_attr, name)
+ if c_attr is not None and h_attr is not None and h_attr != default_val:
+ if attr == 'advertised_speed':
+ c_ads = c_attr if c_attr else []
+ h_ads = h_attr if h_attr else []
+ new_ads = list(set(h_attr).intersection(c_attr))
+ if new_ads:
+ del_cmd.update({attr: new_ads})
+ requests.append(self.build_delete_request(c_ads, h_ads, name, attr))
+ else:
+ del_cmd.update({attr: h_attr})
+ requests.append(self.build_delete_request(c_attr, h_attr, name, attr))
+ if requests:
+ commands_del.append(del_cmd)
+
+ return commands_del, requests
+
+ def get_replaced_overridden_config(self, want, have, cur_state):
+ commands, requests = [], []
+
+ commands_add, commands_del = [], []
+ requests_add, requests_del = [], []
+
+ delete_all = False
+ for conf in want:
+ name = conf['name']
+ intf = next((e_intf for e_intf in have if name == e_intf['name']), None)
+ if name.startswith('Loopback'):
+ if not intf:
+ commands_add.append({'name': name})
+ continue
+
+ temp_conf = {}
+ add_conf, del_conf = {}, {}
+
+ temp_conf['name'] = name
+ attribute = eth_attribute if name.startswith('Eth') else non_eth_attribute
+
+ if not intf:
+ commands_add.append(conf)
+ else:
+ is_change = False
+ non_ads_attr_specified = False
+ if cur_state == "replaced":
+ for attr in conf:
+ if attr != 'name' and attr != 'advertised_speed' and conf.get(attr) is not None:
+ non_ads_attr_specified = True
+ break
+ else:
+ non_ads_attr_specified = True
+
+ for attr in attribute:
+ c_attr = conf.get(attr)
+ h_attr = intf.get(attr)
+ default_val = self.get_default_value(attr, h_attr, name)
+ if attr != 'advertised_speed':
+ if c_attr is None and h_attr is not None and h_attr != default_val and non_ads_attr_specified:
+ del_conf[attr] = h_attr
+ requests_del.append(self.build_delete_request(c_attr, h_attr, name, attr))
+ if c_attr is not None and c_attr != h_attr:
+ add_conf[attr] = c_attr
+ requests_add.append(self.build_create_request(c_attr, h_attr, name, attr))
+ else:
+ c_ads = c_attr if c_attr else []
+ h_ads = h_attr if h_attr else []
+ new_ads = list(set(c_ads).difference(h_ads))
+ delete_ads = list(set(h_ads).difference(c_ads))
+ if new_ads:
+ add_conf[attr] = new_ads
+ requests_add.append(self.build_create_request(new_ads, h_attr, name, attr))
+ if delete_ads:
+ del_conf[attr] = delete_ads
+ requests_del.append(self.build_delete_request(delete_ads, h_attr, name, attr))
+
+ if add_conf:
+ add_conf['name'] = name
+ commands_add.append(add_conf)
+
+ if del_conf:
+ del_conf['name'] = name
+ commands_del.append(del_conf)
+
+ if cur_state == "overridden":
+ for have_conf in have:
+ in_want = next((conf for conf in want if conf['name'] == have_conf['name']), None)
+ if not in_want:
+ del_conf = {}
+ for attr in attribute:
+ h_attr = have_conf.get(attr)
+ if h_attr is not None and h_attr != self.get_default_value(attr, h_attr, have_conf['name']):
+ del_conf[attr] = h_attr
+ requests_del.append(self.build_delete_request([], h_attr, have_conf['name'], attr))
+ if del_conf:
+ del_conf['name'] = have_conf['name']
+ commands_del.append(del_conf)
+
+ if len(requests_del) > 0:
+ commands.extend(update_states(commands_del, "deleted"))
+ requests.extend(requests_del)
+
+ if len(requests_add) > 0:
+ commands.extend(update_states(commands_add, cur_state))
+ requests.extend(requests_add)
- def is_this_change_required(self, conf, have):
- if conf['name'] == "eth0":
- return False
- ret_flag = False
- intf = next((e_intf for e_intf in have if conf['name'] == e_intf['name']), None)
- if intf:
- # Check all parameter if any one is differen from existing
- for param in self.params:
- if conf.get(param) is not None and conf.get(param) != intf.get(param):
- ret_flag = True
- break
- # if given interface is not present
+ return commands, requests
+
+ def build_delete_request(self, c_attr, h_attr, intf_name, attr):
+ method = DELETE
+ attributes_payload = {
+ "speed": 'port-speed',
+ "auto_negotiate": 'auto-negotiate',
+ "fec": 'openconfig-if-ethernet-ext2:port-fec',
+ "advertised_speed": 'openconfig-if-ethernet-ext2:advertised-speed'
+ }
+
+ config_url = (url + eth_conf_url) % quote(intf_name, safe='')
+ payload = {'openconfig-if-ethernet:config': {}}
+ payload_attr = attributes_payload.get(attr, attr)
+
+ if attr in ('description', 'mtu', 'enabled'):
+ attr_url = "/config/" + payload_attr
+ config_url = (url + attr_url) % quote(intf_name, safe='')
+ return {"path": config_url, "method": method}
+
+ elif attr == 'fec':
+ payload_attr = attributes_payload[attr]
+ payload['openconfig-if-ethernet:config'][payload_attr] = 'FEC_DISABLED'
+ return {"path": config_url, "method": PATCH, "data": payload}
else:
- ret_flag = True
+ payload_attr = attributes_payload[attr]
+ if attr == 'auto_negotiate':
+ # For auto-negotiate, we assign the value False, since deleting the attribute would leave it as None.
+ # When auto-negotiate is disabled, both speed and advertised_speed fall back to their default values.
+ payload['openconfig-if-ethernet:config'][payload_attr] = False
+ return {"path": config_url, "method": PATCH, "data": payload}
+
+ if attr == 'speed':
+ attr_url = eth_conf_url + "/" + attributes_payload[attr]
+ del_config_url = (url + attr_url) % quote(intf_name, safe='')
+ return {"path": del_config_url, "method": method}
+
+ if attr == 'advertised_speed':
+ new_ads = list(set(h_attr).difference(c_attr))
+ if new_ads:
+ payload['openconfig-if-ethernet:config'][payload_attr] = ','.join(new_ads)
+ return {"path": config_url, "method": PATCH, "data": payload}
+ else:
+ attr_url = eth_conf_url + "/" + attributes_payload[attr]
+ del_config_url = (url + attr_url) % quote(intf_name, safe='')
+ return {"path": del_config_url, "method": method}
+ return {}
+
+ # Utils
+ def get_default_value(self, attr, h_attr, intf_name):
+ if attr == 'speed':
+ default_val = self._retrieve_default_intf_speed(intf_name)
+ if default_val == 'SPEED_DEFAULT':
+ # If the port belongs to a port group, the speed cannot be deleted
+ default_val = h_attr
+ return default_val
+ else:
+ return attributes_default_value[attr]
+
+ def filter_out_mgmt_interface(self, want, have):
+ if want:
+ mgmt_intf = next((intf for intf in want if intf['name'] == 'Management0'), None)
+ if mgmt_intf:
+ self._module.fail_json(msg='Management interface should not be configured.')
+
+ for intf in have:
+ if intf['name'] == 'Management0':
+ have.remove(intf)
+ break
+
+ def is_port_in_port_group(self, intf_name):
+ global port_group_interfaces
+ if port_group_interfaces is None:
+ port_group_interfaces = retrieve_port_group_interfaces(self._module)
+ port_num = re.search(port_num_regex, intf_name)
+ port_num = int(port_num.group(0))
+ if port_num in port_group_interfaces:
+ return True
- return ret_flag
+ return False
- def build_create_payload(self, conf):
- temp_conf = dict()
- temp_conf['name'] = conf['name']
+ def _retrieve_default_intf_speed(self, intf_name):
+ # Cache the default speed per interface to avoid repeated GET requests
+ if self.is_port_in_port_group(intf_name):
+ return "SPEED_DEFAULT"
- if not temp_conf['name'].startswith('Loopback'):
- if conf.get('enabled') is not None:
- if conf.get('enabled'):
- temp_conf['enabled'] = True
- else:
- temp_conf['enabled'] = False
- if conf.get('description') is not None:
- temp_conf['description'] = conf['description']
- if conf.get('mtu') is not None:
- temp_conf['mtu'] = conf['mtu']
-
- payload = {'openconfig-interfaces:config': temp_conf}
- return payload
+ if default_intf_speeds.get(intf_name) is None:
+ default_intf_speeds[intf_name] = retrieve_default_intf_speed(self._module, intf_name)
+ return default_intf_speeds[intf_name]
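The reworked interfaces module above issues one REST request per changed attribute: description, mtu and enabled are written to the generic openconfig-interfaces config container, while Ethernet-only attributes such as speed and FEC are written to the openconfig-if-ethernet config container, using the URL templates defined near the top of the file. The following standalone sketch illustrates those request shapes; the URL templates and payload keys are copied from the code above, and the interface name and attribute values are illustrative.

# Standalone sketch of the request shapes built above; URL templates and payload
# keys are taken from the diff, the interface name and values are illustrative.
from urllib.parse import quote

URL = 'data/openconfig-interfaces:interfaces/interface=%s'
ETH_CONF_URL = '/openconfig-if-ethernet:ethernet/config'

def sketch_mtu_patch(intf_name, mtu):
    # description, mtu and enabled use the generic interface config container
    path = (URL + '/config') % quote(intf_name, safe='')
    return {'path': path, 'method': 'patch',
            'data': {'openconfig-interfaces:config': {'mtu': mtu}}}

def sketch_fec_patch(intf_name, fec):
    # Ethernet-only attributes such as FEC use the Ethernet config container
    path = (URL + ETH_CONF_URL) % quote(intf_name, safe='')
    return {'path': path, 'method': 'patch',
            'data': {'openconfig-if-ethernet:config': {
                'openconfig-if-ethernet-ext2:port-fec': 'openconfig-platform-types:' + fec}}}

print(sketch_mtu_patch('Ethernet8', 9100))
print(sketch_fec_patch('Ethernet8', 'FEC_RS'))  # FEC_RS is an illustrative FEC value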
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ip_neighbor/ip_neighbor.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ip_neighbor/ip_neighbor.py
new file mode 100644
index 000000000..ab3a4dde6
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ip_neighbor/ip_neighbor.py
@@ -0,0 +1,420 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_ip_neighbor class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ remove_empties
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import (
+ Facts
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ get_new_config,
+ get_formatted_config_diff
+)
+from ansible.module_utils.connection import ConnectionError
+
+GET = 'get'
+PATCH = 'patch'
+PUT = 'put'
+DELETE = 'delete'
+GLB_URL = 'data/openconfig-neighbor:neighbor-globals/neighbor-global'
+URL = 'data/openconfig-neighbor:neighbor-globals/neighbor-global=Values'
+CONFIG_URL = 'data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config'
+
+IP_NEIGH_CONFIG_DEFAULT = {
+ 'ipv4_arp_timeout': 180,
+ 'ipv4_drop_neighbor_aging_time': 300,
+ 'ipv6_drop_neighbor_aging_time': 300,
+ 'ipv6_nd_cache_expiry': 180,
+ 'num_local_neigh': 0
+}
+
+IP_NEIGH_CONFIG_REQ_DEFAULT = {
+ 'name': 'Values',
+ 'ipv4-arp-timeout': 180,
+ 'ipv4-drop-neighbor-aging-time': 300,
+ 'ipv6-drop-neighbor-aging-time': 300,
+ 'ipv6-nd-cache-expiry': 180,
+ 'num-local-neigh': 0
+}
+
+
+def __derive_ip_neighbor_config_delete_op(key_set, command, exist_conf):
+ new_conf = exist_conf
+
+ if 'ipv4_arp_timeout' in command:
+ new_conf['ipv4_arp_timeout'] = IP_NEIGH_CONFIG_DEFAULT['ipv4_arp_timeout']
+
+ if 'ipv4_drop_neighbor_aging_time' in command:
+ new_conf['ipv4_drop_neighbor_aging_time'] = \
+ IP_NEIGH_CONFIG_DEFAULT['ipv4_drop_neighbor_aging_time']
+
+ if 'ipv6_drop_neighbor_aging_time' in command:
+ new_conf['ipv6_drop_neighbor_aging_time'] = \
+ IP_NEIGH_CONFIG_DEFAULT['ipv6_drop_neighbor_aging_time']
+
+ if 'ipv6_nd_cache_expiry' in command:
+ new_conf['ipv6_nd_cache_expiry'] = IP_NEIGH_CONFIG_DEFAULT['ipv6_nd_cache_expiry']
+
+ if 'num_local_neigh' in command:
+ new_conf['num_local_neigh'] = IP_NEIGH_CONFIG_DEFAULT['num_local_neigh']
+
+ return True, new_conf
+
+
+TEST_KEYS_formatted_diff = [
+ {'__default_ops': {'__delete_op': __derive_ip_neighbor_config_delete_op}},
+]
+
+
+class Ip_neighbor(ConfigBase):
+ """
+ The sonic_ip_neighbor class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'ip_neighbor',
+ ]
+
+ def __init__(self, module):
+ super(Ip_neighbor, self).__init__(module)
+
+ def get_ip_neighbor_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ ip_neighbor_facts = facts['ansible_network_resources'].get('ip_neighbor')
+ if not ip_neighbor_facts:
+ requests = self.build_create_all_requests()
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ ip_neighbor_facts = facts['ansible_network_resources'].get('ip_neighbor')
+
+ if not ip_neighbor_facts:
+ err_msg = "IP neighbor module: get facts failed."
+ self._module.fail_json(msg=err_msg, code=500)
+
+ return ip_neighbor_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = list()
+ commands = list()
+ requests = list()
+
+ existing_ip_neighbor_facts = self.get_ip_neighbor_facts()
+
+ commands, requests = self.set_config(existing_ip_neighbor_facts)
+
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_ip_neighbor_facts = self.get_ip_neighbor_facts()
+
+ result['before'] = existing_ip_neighbor_facts
+ if result['changed']:
+ result['after'] = changed_ip_neighbor_facts
+
+ new_config = changed_ip_neighbor_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_ip_neighbor_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_ip_neighbor_facts,
+ new_config,
+ self._module._verbosity)
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_ip_neighbor_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_ip_neighbor_facts
+
+ resp = self.set_state(want, have)
+
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ want = remove_empties(want)
+
+ if state == 'merged':
+ commands, requests = self._state_merged(want, have)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+
+ return commands, requests
+
+ def _state_merged(self, want, have):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = get_diff(want, have)
+ requests = []
+
+ if commands:
+ requests = self.build_merge_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ delete_all = False
+ if not want:
+ tmp_commands = have
+ delete_all = True
+ else:
+ tmp_commands = want
+ tmp_commands = self.preprocess_delete_commands(tmp_commands, have)
+
+ commands = get_diff(tmp_commands, IP_NEIGH_CONFIG_DEFAULT)
+
+ requests = []
+ if commands:
+ requests = self.build_delete_requests(commands, delete_all)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ new_want = self.augment_want_with_default(want)
+ commands = get_diff(new_want, have)
+
+ requests = []
+ if commands:
+ requests = self.build_merge_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ new_want = self.augment_want_with_default(want)
+ commands = get_diff(new_want, have)
+
+ requests = []
+ if commands:
+ requests = self.build_merge_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "overridden")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def preprocess_delete_commands(self, commands, have):
+ new_commands = dict()
+
+ if 'ipv4_arp_timeout' in commands:
+ new_commands['ipv4_arp_timeout'] = have['ipv4_arp_timeout']
+
+ if 'ipv4_drop_neighbor_aging_time' in commands:
+ new_commands['ipv4_drop_neighbor_aging_time'] = have['ipv4_drop_neighbor_aging_time']
+
+ if 'ipv6_drop_neighbor_aging_time' in commands:
+ new_commands['ipv6_drop_neighbor_aging_time'] = have['ipv6_drop_neighbor_aging_time']
+
+ if 'ipv6_nd_cache_expiry' in commands:
+ new_commands['ipv6_nd_cache_expiry'] = have['ipv6_nd_cache_expiry']
+
+ if 'num_local_neigh' in commands:
+ new_commands['num_local_neigh'] = have['num_local_neigh']
+
+ return new_commands
+
+ def augment_want_with_default(self, want):
+ new_want = IP_NEIGH_CONFIG_DEFAULT.copy()
+
+ if 'ipv4_arp_timeout' in want:
+ new_want['ipv4_arp_timeout'] = want['ipv4_arp_timeout']
+
+ if 'ipv4_drop_neighbor_aging_time' in want:
+ new_want['ipv4_drop_neighbor_aging_time'] = want['ipv4_drop_neighbor_aging_time']
+
+ if 'ipv6_drop_neighbor_aging_time' in want:
+ new_want['ipv6_drop_neighbor_aging_time'] = want['ipv6_drop_neighbor_aging_time']
+
+ if 'ipv6_nd_cache_expiry' in want:
+ new_want['ipv6_nd_cache_expiry'] = want['ipv6_nd_cache_expiry']
+
+ if 'num_local_neigh' in want:
+ new_want['num_local_neigh'] = want['num_local_neigh']
+
+ return new_want
+
+ def build_create_all_requests(self):
+ requests = []
+ payload = {
+ "openconfig-neighbor:neighbor-global":
+ [{"name": "Values",
+ "config": IP_NEIGH_CONFIG_REQ_DEFAULT}]
+ }
+ method = PUT
+
+ request = {"path": GLB_URL, "method": method, "data": payload}
+ requests.append(request)
+ return requests
+
+ def build_merge_requests(self, conf):
+ requests = []
+ ip_neigh_config = dict()
+
+ if 'ipv4_arp_timeout' in conf:
+ ip_neigh_config['ipv4-arp-timeout'] = conf['ipv4_arp_timeout']
+
+ if 'ipv4_drop_neighbor_aging_time' in conf:
+ ip_neigh_config['ipv4-drop-neighbor-aging-time'] = conf['ipv4_drop_neighbor_aging_time']
+
+ if 'ipv6_drop_neighbor_aging_time' in conf:
+ ip_neigh_config['ipv6-drop-neighbor-aging-time'] = conf['ipv6_drop_neighbor_aging_time']
+
+ if 'ipv6_nd_cache_expiry' in conf:
+ ip_neigh_config['ipv6-nd-cache-expiry'] = conf['ipv6_nd_cache_expiry']
+
+ if 'num_local_neigh' in conf:
+ ip_neigh_config['num-local-neigh'] = conf['num_local_neigh']
+
+ if ip_neigh_config:
+ payload = {'config': ip_neigh_config}
+ method = PATCH
+ requests = {"path": CONFIG_URL, "method": method, "data": payload}
+
+ return requests
+
+ def build_delete_requests(self, conf, delete_all):
+ requests = []
+ method = DELETE
+
+ if delete_all:
+ request = {"path": URL, "method": method}
+ requests.append(request)
+ return requests
+
+ if 'ipv4_arp_timeout' in conf:
+ req_url = CONFIG_URL + '/ipv4-arp-timeout'
+ request = {"path": req_url, "method": method}
+ requests.append(request)
+
+ if 'ipv4_drop_neighbor_aging_time' in conf:
+ req_url = CONFIG_URL + '/ipv4-drop-neighbor-aging-time'
+ request = {"path": req_url, "method": method}
+ requests.append(request)
+
+ if 'ipv6_drop_neighbor_aging_time' in conf:
+ req_url = CONFIG_URL + '/ipv6-drop-neighbor-aging-time'
+ request = {"path": req_url, "method": method}
+ requests.append(request)
+
+ if 'ipv6_nd_cache_expiry' in conf:
+ req_url = CONFIG_URL + '/ipv6-nd-cache-expiry'
+ request = {"path": req_url, "method": method}
+ requests.append(request)
+
+ if 'num_local_neigh' in conf:
+ req_url = CONFIG_URL + '/num-local-neigh'
+ request = {"path": req_url, "method": method}
+ requests.append(request)
+
+ return requests
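For the replaced and overridden states, the ip_neighbor module first augments the requested configuration with the defaults in IP_NEIGH_CONFIG_DEFAULT, so any option omitted from the playbook is driven back to its default value by the subsequent diff and merge. A condensed standalone sketch of that augmentation follows; it is equivalent in effect to augment_want_with_default above, assuming want has already had empty values removed, and the sample want is illustrative.

# Condensed standalone sketch of the defaults augmentation; the default values are
# copied from the diff, the sample want is illustrative.
IP_NEIGH_CONFIG_DEFAULT = {
    'ipv4_arp_timeout': 180,
    'ipv4_drop_neighbor_aging_time': 300,
    'ipv6_drop_neighbor_aging_time': 300,
    'ipv6_nd_cache_expiry': 180,
    'num_local_neigh': 0
}

def augment_want_with_default(want):
    # Start from the defaults and overlay whatever the playbook specified,
    # so anything omitted is reset to its default by the subsequent diff/merge.
    new_want = dict(IP_NEIGH_CONFIG_DEFAULT)
    new_want.update(want)
    return new_want

print(augment_want_with_default({'ipv4_arp_timeout': 1200}))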
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/__init__.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/__init__.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/l2_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/l2_acls.py
new file mode 100644
index 000000000..392e69039
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_acls/l2_acls.py
@@ -0,0 +1,602 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_l2_acls class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ast import literal_eval
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.validation import check_required_arguments
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ remove_empties,
+ validate_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ update_states
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
+
+DELETE = 'delete'
+PATCH = 'patch'
+POST = 'post'
+
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'rules': {'sequence_num': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
+
+L2_ACL_TYPE = 'ACL_L2'
+ETHERTYPE_FORMAT = '0x{:04x}'
+
+ethertype_value_to_protocol_map = {
+ '0x0800': 'ipv4',
+ '0x0806': 'arp',
+ '0x86dd': 'ipv6'
+}
+pcp_value_to_traffic_map = {
+ 0: 'be',
+ 1: 'bk',
+ 2: 'ee',
+ 3: 'ca',
+ 4: 'vi',
+ 5: 'vo',
+ 6: 'ic',
+ 7: 'nc'
+}
+
+# Spec value to payload value mappings
+action_value_to_payload_map = {
+ 'permit': 'ACCEPT',
+ 'discard': 'DISCARD',
+ 'do-not-nat': 'DO_NOT_NAT',
+ 'deny': 'DROP',
+ 'transit': 'TRANSIT'
+}
+ethertype_protocol_to_payload_map = {
+ 'arp': 'ETHERTYPE_ARP',
+ 'ipv4': 'ETHERTYPE_IPV4',
+ 'ipv6': 'ETHERTYPE_IPV6'
+}
+ethertype_value_to_payload_map = {
+ '0x8847': 'ETHERTYPE_MPLS',
+ '0x88cc': 'ETHERTYPE_LLDP',
+ '0x8915': 'ETHERTYPE_ROCE'
+}
+pcp_traffic_to_value_map = {v: k for k, v in pcp_value_to_traffic_map.items()}
+
+
+class L2_acls(ConfigBase):
+ """
+ The sonic_l2_acls class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'l2_acls',
+ ]
+
+ acl_path = 'data/openconfig-acl:acl/acl-sets/acl-set'
+ l2_acl_path = 'data/openconfig-acl:acl/acl-sets/acl-set={acl_name},ACL_L2'
+ l2_acl_rule_path = 'data/openconfig-acl:acl/acl-sets/acl-set={acl_name},ACL_L2/acl-entries'
+ l2_acl_remark_path = 'data/openconfig-acl:acl/acl-sets/acl-set={acl_name},ACL_L2/config/description'
+
+ def __init__(self, module):
+ super(L2_acls, self).__init__(module)
+
+ def get_l2_acls_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ l2_acls_facts = facts['ansible_network_resources'].get('l2_acls')
+ if not l2_acls_facts:
+ return []
+ return l2_acls_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+
+ existing_l2_acls_facts = self.get_l2_acls_facts()
+ commands, requests = self.set_config(existing_l2_acls_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._handle_failure_response(exc)
+
+ result['changed'] = True
+
+ changed_l2_acls_facts = self.get_l2_acls_facts()
+
+ result['before'] = existing_l2_acls_facts
+ if result['changed']:
+ result['after'] = changed_l2_acls_facts
+
+ result['commands'] = commands
+
+ new_config = changed_l2_acls_facts
+ old_config = existing_l2_acls_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_l2_acls_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ self.sort_config(new_config)
+ self.sort_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_l2_acls_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ if want:
+ want = self.validate_and_normalize_config(want)
+ else:
+ want = []
+
+ have = existing_l2_acls_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ if state in ('merged', 'overridden', 'replaced'):
+ commands, requests = self._state_merged_overridden_replaced(want, have, state)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+
+ return commands, requests
+
+ def _handle_failure_response(self, connection_error):
+ log = None
+ try:
+ response = literal_eval(connection_error.args[0])
+ error_app_tag = response['ietf-restconf:errors']['error'][0].get('error-app-tag')
+ except Exception:
+ pass
+ else:
+ if error_app_tag == 'too-many-elements':
+ log = 'Exceeds maximum number of ACL / ACL Rules'
+ elif error_app_tag == 'update-not-allowed':
+ log = 'Creating ACLs with same name and different type not allowed'
+
+ if log:
+ response.update({u'log': log})
+ self._module.fail_json(msg=to_text(response), code=connection_error.code)
+ else:
+ self._module.fail_json(msg=str(connection_error), code=connection_error.code)
+
+ def _state_merged_overridden_replaced(self, want, have, state):
+ """ The command generator when state is merged/overridden/replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ add_commands = []
+ del_commands = []
+ commands = []
+
+ add_requests = []
+ del_requests = []
+ requests = []
+
+ have_dict = self._convert_config_list_to_dict(have)
+ want_dict = self._convert_config_list_to_dict(want)
+ have_acl_names = set(have_dict.keys())
+ want_acl_names = set(want_dict.keys())
+
+ if state == 'overridden':
+ # Delete non-modified ACLs
+ for acl_name in have_acl_names.difference(want_acl_names):
+ del_commands.append({'name': acl_name})
+ del_requests.append(self.get_delete_l2_acl_request(acl_name))
+
+ # Modify existing ACLs
+ for acl_name in want_acl_names.intersection(have_acl_names):
+ acl_add_command = {'name': acl_name}
+ acl_del_command = {'name': acl_name}
+ rule_add_commands = []
+ rule_del_commands = []
+
+ have_acl = have_dict[acl_name]
+ want_acl = want_dict[acl_name]
+ if not want_acl['remark']:
+ if have_acl['remark'] and state in ('replaced', 'overridden'):
+ acl_del_command['remark'] = have_acl['remark']
+ del_requests.append(self.get_delete_l2_acl_remark_request(acl_name))
+ else:
+ if want_acl['remark'] != have_acl['remark']:
+ acl_add_command['remark'] = want_acl['remark']
+ add_requests.append(self.get_create_l2_acl_remark_request(acl_name, want_acl['remark']))
+
+ have_seq_nums = set(have_acl['rules'].keys())
+ want_seq_nums = set(want_acl['rules'].keys())
+
+ if state in ('replaced', 'overridden'):
+ # Delete non-modified rules
+ for seq_num in have_seq_nums.difference(want_seq_nums):
+ rule_del_commands.append({'sequence_num': seq_num})
+ del_requests.append(self.get_delete_l2_acl_rule_request(acl_name, seq_num))
+
+ for seq_num in want_seq_nums.intersection(have_seq_nums):
+ # Replace existing rules
+ if have_acl['rules'][seq_num] != want_acl['rules'][seq_num]:
+ if state == 'merged':
+ self._module.fail_json(
+ msg="Cannot update existing sequence {0} of L2 ACL {1} with state merged."
+ " Please use state replaced or overridden.".format(seq_num, acl_name)
+ )
+
+ rule_del_commands.append({'sequence_num': seq_num})
+ del_requests.append(self.get_delete_l2_acl_rule_request(acl_name, seq_num))
+
+ rule_add_commands.append(want_acl['rules'][seq_num])
+ add_requests.append(self.get_create_l2_acl_rule_request(acl_name, seq_num, want_acl['rules'][seq_num]))
+
+ # Add new rules
+ for seq_num in want_seq_nums.difference(have_seq_nums):
+ rule_add_commands.append(want_acl['rules'][seq_num])
+ add_requests.append(self.get_create_l2_acl_rule_request(acl_name, seq_num, want_acl['rules'][seq_num]))
+
+ if rule_del_commands:
+ acl_del_command['rules'] = rule_del_commands
+ if rule_add_commands:
+ acl_add_command['rules'] = rule_add_commands
+
+ if acl_del_command.get('rules') or acl_del_command.get('remark'):
+ del_commands.append(acl_del_command)
+ if acl_add_command.get('rules') or acl_add_command.get('remark'):
+ add_commands.append(acl_add_command)
+
+ # Add new ACLs
+ for acl_name in want_acl_names.difference(have_acl_names):
+ acl_add_command = {'name': acl_name}
+ add_requests.append(self.get_create_l2_acl_request(acl_name))
+
+ want_acl = want_dict[acl_name]
+ if want_acl['remark']:
+ acl_add_command['remark'] = want_acl['remark']
+ add_requests.append(self.get_create_l2_acl_remark_request(acl_name, want_acl['remark']))
+
+ # Add new rules
+ want_seq_nums = set(want_acl['rules'].keys())
+ if want_seq_nums:
+ acl_add_command['rules'] = []
+ for seq_num in want_seq_nums:
+ acl_add_command['rules'].append(want_acl['rules'][seq_num])
+ add_requests.append(self.get_create_l2_acl_rule_request(acl_name, seq_num, want_acl['rules'][seq_num]))
+
+ add_commands.append(acl_add_command)
+
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ if add_commands:
+ commands.extend(update_states(add_commands, state))
+ requests.extend(add_requests)
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+
+ if not want:
+ for acl in have:
+ commands.append({'name': acl['name']})
+ requests.append(self.get_delete_l2_acl_request(acl['name']))
+ else:
+ have_dict = self._convert_config_list_to_dict(have)
+ want_dict = self._convert_config_list_to_dict(want)
+ have_acl_names = set(have_dict.keys())
+ want_acl_names = set(want_dict.keys())
+
+ # Delete existing ACLs
+ for acl_name in want_acl_names.intersection(have_acl_names):
+ have_acl = have_dict[acl_name]
+ want_acl = want_dict[acl_name]
+
+ # Delete entire ACL if only the name is specified
+ if not want_acl['remark'] and not want_acl['rules']:
+ commands.append({'name': acl_name})
+ requests.append(self.get_delete_l2_acl_request(acl_name))
+ continue
+
+ acl_del_command = {'name': acl_name}
+ rule_del_commands = []
+ have_seq_nums = set(have_acl['rules'].keys())
+ want_seq_nums = set(want_acl['rules'].keys())
+
+ if want_acl['remark'] and want_acl['remark'] == have_acl['remark']:
+ acl_del_command['remark'] = want_acl['remark']
+ requests.append(self.get_delete_l2_acl_remark_request(acl_name))
+
+ # Delete existing rules
+ # When state is deleted, options other than sequence_num are not considered
+ for seq_num in want_seq_nums.intersection(have_seq_nums):
+ rule_del_commands.append({'sequence_num': seq_num})
+ requests.append(self.get_delete_l2_acl_rule_request(acl_name, seq_num))
+
+ if rule_del_commands:
+ acl_del_command['rules'] = rule_del_commands
+
+ if acl_del_command.get('rules') or acl_del_command.get('remark'):
+ commands.append(acl_del_command)
+
+ commands = update_states(commands, "deleted")
+ return commands, requests
+
+ def get_create_l2_acl_request(self, acl_name):
+ """Get request to create L2 ACL with specified name"""
+ url = self.acl_path
+ payload = {
+ 'acl-set': [{
+ 'name': acl_name,
+ 'type': L2_ACL_TYPE,
+ 'config': {
+ 'name': acl_name,
+ 'type': L2_ACL_TYPE
+ }
+ }]
+ }
+
+ return {'path': url, 'method': PATCH, 'data': payload}
+
+ def get_create_l2_acl_remark_request(self, acl_name, remark):
+ """Get request to add given remark to the specified L2 ACL"""
+ url = self.l2_acl_remark_path.format(acl_name=acl_name)
+ payload = {'description': remark}
+ return {'path': url, 'method': PATCH, 'data': payload}
+
+ def get_create_l2_acl_rule_request(self, acl_name, seq_num, rule):
+ """Get request to create a rule with given sequence number
+ and configuration in the specified L2 ACL
+ """
+ url = self.l2_acl_rule_path.format(acl_name=acl_name)
+ payload = {
+ 'openconfig-acl:acl-entry': [{
+ 'sequence-id': seq_num,
+ 'config': {
+ 'sequence-id': seq_num
+ },
+ 'l2': {
+ 'config': {}
+ },
+ 'actions': {
+ 'config': {
+ 'forwarding-action': action_value_to_payload_map[rule['action']]
+ }
+ }
+ }]
+ }
+ rule_l2_config = payload['openconfig-acl:acl-entry'][0]['l2']['config']
+
+ if rule['source'].get('host'):
+ rule_l2_config['source-mac'] = rule['source']['host']
+ elif rule['source'].get('address'):
+ rule_l2_config['source-mac'] = rule['source']['address']
+ rule_l2_config['source-mac-mask'] = rule['source']['address_mask']
+
+ if rule['destination'].get('host'):
+ rule_l2_config['destination-mac'] = rule['destination']['host']
+ elif rule['destination'].get('address'):
+ rule_l2_config['destination-mac'] = rule['destination']['address']
+ rule_l2_config['destination-mac-mask'] = rule['destination']['address_mask']
+
+ if rule.get('ethertype'):
+ if rule['ethertype'].get('value'):
+ rule_l2_config['ethertype'] = ethertype_value_to_payload_map.get(rule['ethertype']['value'], int(rule['ethertype']['value'], 16))
+ else:
+ rule_l2_config['ethertype'] = ethertype_protocol_to_payload_map[next(iter(rule['ethertype']))]
+
+ if rule.get('vlan_id') is not None:
+ rule_l2_config['vlanid'] = rule['vlan_id']
+
+ if rule.get('vlan_tag_format') and rule['vlan_tag_format'].get('multi_tagged'):
+ rule_l2_config['vlan-tag-format'] = 'openconfig-acl-ext:MULTI_TAGGED'
+
+ if rule.get('dei') is not None:
+ rule_l2_config['dei'] = rule['dei']
+
+ if rule.get('pcp'):
+ if rule['pcp'].get('traffic_type'):
+ rule_l2_config['pcp'] = pcp_traffic_to_value_map[rule['pcp']['traffic_type']]
+ else:
+ rule_l2_config['pcp'] = rule['pcp']['value']
+ rule_l2_config['pcp-mask'] = rule['pcp']['mask']
+
+ if rule.get('remark'):
+ payload['openconfig-acl:acl-entry'][0]['config']['description'] = rule['remark']
+
+ return {'path': url, 'method': POST, 'data': payload}
+
+ def get_delete_l2_acl_request(self, acl_name):
+ """Get request to delete L2 ACL with specified name"""
+ url = self.l2_acl_path.format(acl_name=acl_name)
+ return {'path': url, 'method': DELETE}
+
+ def get_delete_l2_acl_remark_request(self, acl_name):
+ """Get request to delete remark of the specified L2 ACL"""
+ url = self.l2_acl_remark_path.format(acl_name=acl_name)
+ return {'path': url, 'method': DELETE}
+
+ def get_delete_l2_acl_rule_request(self, acl_name, seq_num):
+ """Get request to delete the rule with given sequence number
+ in the specified L2 ACL
+ """
+ url = self.l2_acl_rule_path.format(acl_name=acl_name)
+ url += '/acl-entry={0}'.format(seq_num)
+ return {'path': url, 'method': DELETE}
+
+ def validate_and_normalize_config(self, config_list):
+ """Validate and normalize the given config"""
+ # Remove empties and validate the config with argument spec
+ updated_config_list = [remove_empties(config) for config in config_list]
+ validate_config(self._module.argument_spec, {'config': updated_config_list})
+
+ state = self._module.params['state']
+ # When state is deleted, options other than sequence_num are not considered
+ if state == 'deleted':
+ return updated_config_list
+
+ for acl in updated_config_list:
+ if not acl.get('rules'):
+ continue
+
+ for rule in acl['rules']:
+ self._check_required(['action', 'source', 'destination'], rule, ['config', 'rules'])
+ for endpoint in ('source', 'destination'):
+ if rule[endpoint].get('any') is False:
+ self._invalid_rule('True is the only valid value for {0} -> any'.format(endpoint), acl['name'], rule['sequence_num'])
+ elif rule[endpoint].get('host'):
+ rule[endpoint]['host'] = rule[endpoint]['host'].lower()
+ elif rule[endpoint].get('address'):
+ rule[endpoint]['address'] = rule[endpoint]['address'].lower()
+ rule[endpoint]['address_mask'] = rule[endpoint]['address_mask'].lower()
+
+ self._normalize_ethertype(rule)
+ self._normalize_pcp(rule)
+ self._normalize_vlan_tag_format(rule)
+
+ return updated_config_list
+
+ def _invalid_rule(self, err_msg, acl_name, seq_num):
+ self._module.fail_json(msg='L2 ACL {0}, sequence number {1}: {2}'.format(acl_name, seq_num, err_msg))
+
+ def _check_required(self, required_parameters, parameters, options_context=None):
+ if required_parameters:
+ spec = {}
+ for parameter in required_parameters:
+ spec[parameter] = {'required': True}
+
+ try:
+ check_required_arguments(spec, parameters, options_context)
+ except TypeError as exc:
+ self._module.fail_json(msg=str(exc))
+
+ @staticmethod
+ def _normalize_ethertype(rule):
+ ethertype = rule.get('ethertype')
+ if ethertype:
+ if ethertype.get('value'):
+ value = ethertype.pop('value')
+ if value.startswith('0x'):
+ value = ETHERTYPE_FORMAT.format(int(value, 16))
+ else:
+ # If the hexadecimal number is not enclosed in quotes
+ # in the playbook, it is converted to decimal and
+ # passed to the module as a decimal string.
+ value = ETHERTYPE_FORMAT.format(int(value, 10))
+
+ if value in ethertype_value_to_protocol_map:
+ ethertype[ethertype_value_to_protocol_map[value]] = True
+ else:
+ ethertype['value'] = value
+ else:
+ # Remove ethertype option if its value is False
+ if not next(iter(ethertype.values())):
+ del rule['ethertype']
+
+ @staticmethod
+ def _normalize_pcp(rule):
+ pcp = rule.get('pcp')
+ if pcp and pcp.get('value') is not None and pcp.get('mask') is None:
+ pcp['traffic_type'] = pcp_value_to_traffic_map[pcp['value']]
+ del pcp['value']
+
+ @staticmethod
+ def _normalize_vlan_tag_format(rule):
+ vlan_tag_format = rule.get('vlan_tag_format')
+ # Remove vlan_tag_format option if the value is False
+ if vlan_tag_format and not vlan_tag_format.get('multi_tagged'):
+ del rule['vlan_tag_format']
+
+ @staticmethod
+ def _convert_config_list_to_dict(config_list):
+ config_dict = {}
+ for config in config_list:
+ acl_name = config['name']
+ config_dict[acl_name] = {}
+ config_dict[acl_name]['remark'] = config.get('remark')
+ config_dict[acl_name]['rules'] = {}
+ if config.get('rules'):
+ for rule in config['rules']:
+ config_dict[acl_name]['rules'][rule['sequence_num']] = rule
+
+ return config_dict
+
+ def sort_config(self, configs):
+ # natsort provides a better result.
+ # The use of natsort causes a sanity error because it is not available in
+ # the Python version currently used.
+ # new_config = natsorted(new_config, key=lambda x: x['name'])
+ # For the time being, use the built-in "sort"
+ configs.sort(key=lambda x: x['name'])
+
+ for conf in configs:
+ if conf.get('rules', []):
+ conf['rules'].sort(key=lambda x: x['sequence_num'])
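Before requests are built, l2_acls normalizes ethertype values: a numeric value is canonicalized to the 0xNNNN form and, when it matches a well-known protocol, replaced by the corresponding protocol flag. The following standalone sketch reuses the map and format string shown above; the sample rule is illustrative.

# Standalone sketch of the ethertype normalization; the map and format string are
# copied from the diff, the sample rule is illustrative.
ETHERTYPE_FORMAT = '0x{:04x}'
ethertype_value_to_protocol_map = {
    '0x0800': 'ipv4',
    '0x0806': 'arp',
    '0x86dd': 'ipv6'
}

def normalize_ethertype(rule):
    ethertype = rule.get('ethertype')
    if ethertype and ethertype.get('value'):
        value = ethertype.pop('value')
        # A quoted hex value stays hex; an unquoted number reaches the module as a decimal string.
        base = 16 if value.startswith('0x') else 10
        value = ETHERTYPE_FORMAT.format(int(value, base))
        if value in ethertype_value_to_protocol_map:
            ethertype[ethertype_value_to_protocol_map[value]] = True
        else:
            ethertype['value'] = value

rule = {'ethertype': {'value': '2048'}}   # 2048 == 0x0800
normalize_ethertype(rule)
print(rule)  # {'ethertype': {'ipv4': True}}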
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py
index fccba7707..c47f06940 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l2_interfaces/l2_interfaces.py
@@ -13,17 +13,19 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import json
+import traceback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
get_diff,
+ get_ranges_in_list,
update_states,
normalize_interface_name
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
to_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import (
@@ -33,9 +35,14 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG,
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
-import traceback
LIB_IMP_ERR = None
ERR_MSG = None
@@ -47,6 +54,7 @@ except Exception as e:
ERR_MSG = to_native(e)
LIB_IMP_ERR = traceback.format_exc()
+DELETE = 'delete'
PATCH = 'patch'
intf_key = 'openconfig-if-ethernet:ethernet'
port_chnl_key = 'openconfig-if-aggregate:aggregation'
@@ -54,6 +62,10 @@ port_chnl_key = 'openconfig-if-aggregate:aggregation'
TEST_KEYS = [
{'allowed_vlans': {'vlan': ''}},
]
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': '', '__delete_op': __DELETE_CONFIG}},
+ {'allowed_vlans': {'vlan': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
class L2_interfaces(ConfigBase):
@@ -112,6 +124,19 @@ class L2_interfaces(ConfigBase):
if result['changed']:
result['after'] = changed_l2_interfaces_facts
+ new_config = changed_l2_interfaces_facts
+ old_config = existing_l2_interfaces_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_l2_interfaces_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ self.sort_config(new_config)
+ self.sort_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -123,7 +148,15 @@ class L2_interfaces(ConfigBase):
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
+ state = self._module.params['state']
want = self._module.params['config']
+ if want:
+ # In state deleted, specific empty parameters are supported
+ if state != 'deleted':
+ want = [remove_empties(conf) for conf in want]
+ else:
+ want = []
+
normalize_interface_name(want, self._module)
have = existing_l2_interfaces_facts
@@ -147,45 +180,40 @@ class L2_interfaces(ConfigBase):
"""
state = self._module.params['state']
- diff = get_diff(want, have, TEST_KEYS)
-
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
- commands, requests = self._state_merged(want, have, diff)
+ commands, requests = self._state_merged(want, have)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
return commands, requests
- def _state_replaced(self, want, have, diff):
+ def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
-
+ commands = []
requests = []
- commands = diff
- if commands:
- requests_del = self.get_delete_all_switchport_requests(commands)
- if requests_del:
- requests.extend(requests_del)
-
- requests_rep = self.get_create_l2_interface_request(commands)
- if len(requests_del) or len(requests_rep):
- requests.extend(requests_rep)
- commands = update_states(commands, "replaced")
- else:
- commands = []
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'replaced')
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ add_commands, add_requests = self.get_merge_commands_requests(want, have)
+ if add_commands:
+ commands.extend(update_states(add_commands, 'replaced'))
+ requests.extend(add_requests)
return commands, requests
- def _state_overridden(self, want, have, diff):
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
@@ -195,23 +223,19 @@ class L2_interfaces(ConfigBase):
commands = []
requests = []
- commands_del = get_diff(have, want, TEST_KEYS)
- requests_del = self.get_delete_all_switchport_requests(commands_del)
- if len(requests_del):
- requests.extend(requests_del)
- commands_del = update_states(commands_del, "deleted")
- commands.extend(commands_del)
+ del_commands, del_requests = self.get_delete_commands_requests_for_replaced_overridden(want, have, 'overridden')
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
- commands_over = diff
- requests_over = self.get_create_l2_interface_request(commands_over)
- if requests_over:
- requests.extend(requests_over)
- commands_over = update_states(commands_over, "overridden")
- commands.extend(commands_over)
+ add_commands, add_requests = self.get_merge_commands_requests(want, have)
+ if add_commands:
+ commands.extend(update_states(add_commands, 'overridden'))
+ requests.extend(add_requests)
return commands, requests
- def _state_merged(self, want, have, diff):
+ def _state_merged(self, want, have):
""" The command generator when state is merged
:rtype: A list
@@ -220,77 +244,235 @@ class L2_interfaces(ConfigBase):
Requests necessary to merge to the current configuration
at position-1
"""
- commands = diff
- requests = self.get_create_l2_interface_request(commands)
- if commands and len(requests):
- commands = update_states(commands, "merged")
+ commands, requests = self.get_merge_commands_requests(want, have)
+ if commands:
+ commands = update_states(commands, 'merged')
+
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
+ commands, requests = self.get_delete_commands_requests_for_deleted(want, have)
+ if commands:
+ commands = update_states(commands, 'deleted')
- # if want is none, then delete all the vlan links
- if not want or len(have) == 0:
- commands = have
- requests = self.get_delete_all_switchport_requests(commands)
+ return commands, requests
+
+ def get_merge_commands_requests(self, want, have):
+ """Returns the commands and requests necessary to merge the provided
+ configurations into the current configuration
+ """
+ commands = []
+ requests = []
+ if not want:
+ return commands, requests
+
+ if have:
+ diff = get_diff(want, have, TEST_KEYS)
else:
- commands = want
- requests = self.get_delete_specifig_switchport_requests(want, have)
- if len(requests) == 0:
- commands = []
+ diff = want
- if commands:
- commands = update_states(commands, "deleted")
+ for cmd in diff:
+ name = cmd['name']
+ if name == 'eth0':
+ continue
+
+ if cmd.get('trunk') and cmd['trunk'].get('allowed_vlans'):
+ match = next((cnf for cnf in have if cnf['name'] == name), None)
+ if match:
+ cmd['trunk']['allowed_vlans'] = self.get_trunk_allowed_vlans_diff(cmd, match)
+ if not cmd['trunk']['allowed_vlans']:
+ cmd.pop('trunk')
+
+ if cmd.get('access') or cmd.get('trunk'):
+ commands.append(cmd)
+ requests = self.get_create_l2_interface_requests(commands)
return commands, requests
- def get_trunk_delete_switchport_request(self, config, match_config):
- method = "DELETE"
- name = config['name']
+ def get_delete_commands_requests_for_deleted(self, want, have):
+ """Returns the commands and requests necessary to remove the current
+ configuration of the provided objects when state is deleted
+ """
+ commands = []
requests = []
- match_trunk = match_config.get('trunk')
- if match_trunk:
- conf_allowed_vlans = config['trunk'].get('allowed_vlans', [])
- if conf_allowed_vlans:
- for each_allowed_vlan in conf_allowed_vlans:
- if each_allowed_vlan in match_trunk.get('allowed_vlans'):
- vlan_id = each_allowed_vlan['vlan']
- key = intf_key
- if name.startswith('PortChannel'):
- key = port_chnl_key
- url = "data/openconfig-interfaces:interfaces/interface={0}/{1}/".format(name, key)
- url += "openconfig-vlan:switched-vlan/config/trunk-vlans={0}".format(vlan_id)
- request = {"path": url, "method": method}
- requests.append(request)
- return requests
+ if not have:
+ return commands, requests
+
+ if not want:
+ # Delete all L2 interface config
+ commands = [remove_empties(conf) for conf in have]
+ requests = self.get_delete_all_switchport_requests(commands)
+ return commands, requests
+
+ for conf in want:
+ name = conf['name']
+ matched = next((cnf for cnf in have if cnf['name'] == name), None)
+ if matched:
+                # If neither 'access' nor 'trunk' is specified, delete all
+                # switchport config for that interface
+ if not conf.get('access') and not conf.get('trunk'):
+ command = {'name': name}
+ if matched.get('access'):
+ command['access'] = matched['access']
+ if matched.get('trunk'):
+ command['trunk'] = matched['trunk']
+
+ commands.append(command)
+ requests.extend(self.get_delete_all_switchport_requests([command]))
+ else:
+ command = {}
+ if conf.get('access'):
+ access_match = matched.get('access')
+ if conf['access'].get('vlan'):
+ if access_match and access_match.get('vlan') == conf['access']['vlan']:
+ command['access'] = {'vlan': conf['access']['vlan']}
+ requests.append(self.get_access_delete_switchport_request(name))
+ else:
+ # If access -> vlan is mentioned without value,
+ # delete existing access vlan config
+ if access_match and access_match.get('vlan'):
+ command['access'] = {'vlan': access_match['vlan']}
+ requests.append(self.get_access_delete_switchport_request(name))
+
+ if conf.get('trunk'):
+ if conf['trunk'].get('allowed_vlans'):
+ trunk_vlans_to_delete = self.get_trunk_allowed_vlans_common(conf, matched)
+ if trunk_vlans_to_delete:
+ command['trunk'] = {'allowed_vlans': trunk_vlans_to_delete}
+ requests.append(self.get_trunk_allowed_vlans_delete_switchport_request(name, command['trunk']['allowed_vlans']))
+ else:
+ # If trunk -> allowed_vlans is mentioned without
+ # value, delete existing trunk allowed vlans config
+ trunk_match = matched.get('trunk')
+ if trunk_match and trunk_match.get('allowed_vlans'):
+ command['trunk'] = {'allowed_vlans': trunk_match['allowed_vlans'].copy()}
+ requests.append(self.get_trunk_allowed_vlans_delete_switchport_request(name, command['trunk']['allowed_vlans']))
+
+ if command:
+ command['name'] = name
+ commands.append(command)
+
+ return commands, requests
+
+ def get_delete_commands_requests_for_replaced_overridden(self, want, have, state):
+ """Returns the commands and requests necessary to remove applicable
+ current configurations when state is replaced or overridden
+ """
+ commands = []
+ requests = []
+ if not have:
+ return commands, requests
+
+ have_interfaces = self.get_interface_names(have)
+ want_interfaces = self.get_interface_names(want)
+ interfaces_to_replace = have_interfaces.intersection(want_interfaces)
+ if state == 'overridden':
+ interfaces_to_delete = have_interfaces.difference(want_interfaces)
+ else:
+ interfaces_to_delete = []
+
+ if want:
+ del_diff = get_diff(have, want, TEST_KEYS)
+ else:
+ del_diff = have
+
+ for conf in del_diff:
+ name = conf['name']
+
+ # Delete all config in interfaces not specified in overridden
+ if name in interfaces_to_delete:
+ command = {'name': name}
+ if conf.get('access'):
+ command['access'] = conf['access']
+ if conf.get('trunk'):
+ command['trunk'] = conf['trunk']
+
+ commands.append(command)
+ requests.extend(self.get_delete_all_switchport_requests([command]))
+
+ # Delete config in interfaces that are replaced/overridden
+ elif name in interfaces_to_replace:
+ command = {}
+
+ if conf.get('access') and conf['access'].get('vlan'):
+ command['access'] = {'vlan': conf['access']['vlan']}
+ requests.append(self.get_access_delete_switchport_request(name))
+
+ if conf.get('trunk') and conf['trunk'].get('allowed_vlans'):
+ matched = next((cnf for cnf in want if cnf['name'] == name), None)
+ if matched:
+ trunk_vlans_to_delete = self.get_trunk_allowed_vlans_diff(conf, matched)
+ if trunk_vlans_to_delete:
+ command['trunk'] = {'allowed_vlans': trunk_vlans_to_delete}
+ requests.append(self.get_trunk_allowed_vlans_delete_switchport_request(name, command['trunk']['allowed_vlans']))
+
+ if command:
+ command['name'] = name
+ commands.append(command)
+
+ return commands, requests
+
+ def get_trunk_allowed_vlans_delete_switchport_request(self, intf_name, allowed_vlans):
+ """Returns the request as a dict to delete the trunk vlan ranges
+ specified in allowed_vlans for the given interface
+ """
+ method = DELETE
+ vlan_id_list = ""
+ for each_allowed_vlan in allowed_vlans:
+ vlan_id = each_allowed_vlan['vlan']
+
+ if '-' in vlan_id:
+ vlan_id_fmt = vlan_id.replace('-', '..')
+ else:
+ vlan_id_fmt = vlan_id
+
+ if vlan_id_list:
+ vlan_id_list += ",{0}".format(vlan_id_fmt)
+ else:
+ vlan_id_list = vlan_id_fmt
+
+ key = intf_key
+ if intf_name.startswith('PortChannel'):
+ key = port_chnl_key
+
+ url = "data/openconfig-interfaces:interfaces/interface={0}/{1}/".format(intf_name, key)
+ url += "openconfig-vlan:switched-vlan/config/"
+ url += "trunk-vlans=" + vlan_id_list.replace(',', '%2C')
+
+ request = {"path": url, "method": method}
+ return request
+
+ def get_access_delete_switchport_request(self, intf_name):
+ """Returns the request as a dict to delete the access vlan
+ configuration for the given interface
+ """
+ method = DELETE
+ key = intf_key
+ if intf_name.startswith('PortChannel'):
+ key = port_chnl_key
+ url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config/access-vlan"
+ request = {"path": url.format(intf_name, key), "method": method}
- def get_access_delete_switchport_request(self, config, match_config):
- method = "DELETE"
- request = None
- name = config['name']
- match_access = match_config.get('access')
- if match_access and match_access.get('vlan') == config['access'].get('vlan'):
- key = intf_key
- if name.startswith('PortChannel'):
- key = port_chnl_key
- url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config/access-vlan"
- request = {"path": url.format(name, key), "method": method}
return request
def get_delete_all_switchport_requests(self, configs):
+ """Returns a list of requests to delete all switchport
+ configuration for all interfaces specified in the config list
+ """
requests = []
if not configs:
return requests
# Create URL and payload
url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config"
- method = "DELETE"
+ method = DELETE
for intf in configs:
- name = intf.get("name")
+ name = intf['name']
key = intf_key
if name.startswith('PortChannel'):
key = port_chnl_key
@@ -301,78 +483,19 @@ class L2_interfaces(ConfigBase):
return requests
- def get_delete_specifig_switchport_requests(self, configs, have):
+ def get_create_l2_interface_requests(self, configs):
+ """Returns a list of requests to add the switchport
+ configurations specified in the config list
+ """
requests = []
if not configs:
return requests
- for conf in configs:
- name = conf['name']
-
- matched = next((cnf for cnf in have if cnf['name'] == name), None)
- if matched:
- keys = conf.keys()
-
- # if both access and trunk not mention in delete
- if not ('access' in keys) and not ('trunk' in keys):
- requests.extend(self.get_delete_all_switchport_requests([conf]))
- else:
- # if access or trnuk is mentioned with value
- if conf.get('access') or conf.get('trunk'):
- # if access is mentioned with value
- if conf.get('access'):
- vlan = conf.get('access').get('vlan')
- if vlan:
- request = self.get_access_delete_switchport_request(conf, matched)
- if request:
- requests.append(request)
- else:
- if matched.get('access') and matched.get('access').get('vlan'):
- conf['access']['vlan'] = matched.get('access').get('vlan')
- request = self.get_access_delete_switchport_request(conf, matched)
- if request:
- requests.append(request)
-
- # if trunk is mentioned with value
- if conf.get('trunk'):
- allowed_vlans = conf['trunk'].get('allowed_vlans')
- if allowed_vlans:
- requests.extend(self.get_trunk_delete_switchport_request(conf, matched))
- # allowed vlans mentinoed without value
- else:
- if matched.get('trunk') and matched.get('trunk').get('allowed_vlans'):
- conf['trunk']['allowed_vlans'] = matched.get('trunk') and matched.get('trunk').get('allowed_vlans').copy()
- requests.extend(self.get_trunk_delete_switchport_request(conf, matched))
- # check for access or trunk is mentioned without value
- else:
- # access mentioned wothout value
- if ('access' in keys) and conf.get('access', None) is None:
- # get the existing values and delete it
- if matched.get('access'):
- conf['access'] = matched.get('access').copy()
- request = self.get_access_delete_switchport_request(conf, matched)
- if request:
- requests.append(request)
- # trunk mentioned wothout value
- if ('trunk' in keys) and conf.get('trunk', None) is None:
- # get the existing values and delete it
- if matched.get('trunk'):
- conf['trunk'] = matched.get('trunk').copy()
- requests.extend(self.get_trunk_delete_switchport_request(conf, matched))
-
- return requests
-
- def get_create_l2_interface_request(self, configs):
- requests = []
- if not configs:
- return requests
# Create URL and payload
url = "data/openconfig-interfaces:interfaces/interface={}/{}/openconfig-vlan:switched-vlan/config"
- method = "PATCH"
+ method = PATCH
for conf in configs:
- name = conf.get('name')
- if name == "eth0":
- continue
+ name = conf['name']
key = intf_key
if name.startswith('PortChannel'):
key = port_chnl_key
@@ -382,33 +505,124 @@ class L2_interfaces(ConfigBase):
"data": payload
}
requests.append(request)
+
return requests
def build_create_payload(self, conf):
- payload_url = '{"openconfig-vlan:config":{ '
- access_payload = ''
- trunk_payload = ''
- if conf.get('access'):
- access_vlan_id = conf['access']['vlan']
- access_payload = '"access-vlan": {0}'.format(access_vlan_id)
- if conf.get('trunk'):
- trunk_payload = '"trunk-vlans": ['
- cnt = 0
+ """Returns the payload to add the switchport configurations
+ specified in the interface config
+ """
+ payload = {'openconfig-vlan:config': {}}
+ trunk_payload = []
+
+ if conf.get('access') and conf['access'].get('vlan'):
+ payload['openconfig-vlan:config']['access-vlan'] = int(conf['access']['vlan'])
+
+ if conf.get('trunk') and conf['trunk'].get('allowed_vlans'):
for each_allowed_vlan in conf['trunk']['allowed_vlans']:
- if cnt > 0:
- trunk_payload += ','
- trunk_payload += str(each_allowed_vlan['vlan'])
- cnt = cnt + 1
- trunk_payload += ']'
-
- if access_payload != '':
- payload_url += access_payload
- if trunk_payload != '':
- if access_payload != '':
- payload_url += ','
- payload_url += trunk_payload
-
- payload_url += '}}'
-
- ret_payload = json.loads(payload_url)
- return ret_payload
+ vlan_val = each_allowed_vlan['vlan']
+ if '-' in vlan_val:
+ trunk_payload.append('{0}'.format(vlan_val.replace('-', '..')))
+ else:
+ trunk_payload.append(int(vlan_val))
+
+ if trunk_payload:
+ payload['openconfig-vlan:config']['trunk-vlans'] = trunk_payload
+
+ return payload
+
+ def get_trunk_allowed_vlans_common(self, config, match):
+        """Returns the allowed vlan ranges common to the interface
+        configurations specified by 'config' and 'match', in
+        allowed_vlans spec format
+ """
+ trunk_vlans = []
+ match_trunk_vlans = []
+ if config.get('trunk') and config['trunk'].get('allowed_vlans'):
+ trunk_vlans = config['trunk']['allowed_vlans']
+
+ if not trunk_vlans:
+ return []
+
+ if match.get('trunk') and match['trunk'].get('allowed_vlans'):
+ match_trunk_vlans = match['trunk']['allowed_vlans']
+
+ if not match_trunk_vlans:
+ return []
+
+ trunk_vlans = self.get_vlan_id_list(trunk_vlans)
+ match_trunk_vlans = self.get_vlan_id_list(match_trunk_vlans)
+ return self.get_allowed_vlan_range_list(list(set(trunk_vlans).intersection(set(match_trunk_vlans))))
+
+ def get_trunk_allowed_vlans_diff(self, config, match):
+        """Returns the allowed vlan ranges present in 'config'
+        but not in 'match', in allowed_vlans spec format
+ """
+ trunk_vlans = []
+ match_trunk_vlans = []
+ if config.get('trunk') and config['trunk'].get('allowed_vlans'):
+ trunk_vlans = config['trunk']['allowed_vlans']
+
+ if not trunk_vlans:
+ return []
+
+ if match.get('trunk') and match['trunk'].get('allowed_vlans'):
+ match_trunk_vlans = match['trunk']['allowed_vlans']
+
+ if not match_trunk_vlans:
+ return trunk_vlans
+
+ trunk_vlans = self.get_vlan_id_list(trunk_vlans)
+ match_trunk_vlans = self.get_vlan_id_list(match_trunk_vlans)
+ return self.get_allowed_vlan_range_list(list(set(trunk_vlans) - set(match_trunk_vlans)))
+
+ @staticmethod
+ def get_vlan_id_list(allowed_vlan_range_list):
+ """Returns a list of all VLAN IDs specified in allowed_vlans list"""
+ vlan_id_list = []
+ if allowed_vlan_range_list:
+ for vlan_range in allowed_vlan_range_list:
+ vlan_val = vlan_range['vlan']
+ if '-' in vlan_val:
+ start, end = vlan_val.split('-')
+ vlan_id_list.extend(range(int(start), int(end) + 1))
+ else:
+ # Single VLAN ID
+ vlan_id_list.append(int(vlan_val))
+
+ return vlan_id_list
+
+ @staticmethod
+ def get_allowed_vlan_range_list(vlan_id_list):
+ """Returns the allowed_vlans list for given list of VLAN IDs"""
+ allowed_vlan_range_list = []
+
+ if vlan_id_list:
+ vlan_id_list.sort()
+ for vlan_range in get_ranges_in_list(vlan_id_list):
+ allowed_vlan_range_list.append({'vlan': '-'.join(map(str, (vlan_range[0], vlan_range[-1])[:len(vlan_range)]))})
+
+ return allowed_vlan_range_list
+
+ @staticmethod
+ def get_interface_names(configs):
+ """Returns a set of interface names available in the given
+ configs list
+ """
+ interface_names = set()
+ for conf in configs:
+ interface_names.add(conf['name'])
+
+ return interface_names
+
+ def sort_config(self, configs):
+        # natsort would provide a better ordering, but it is not available in
+        # the Python version currently used and its use causes a sanity error.
+        # new_config = natsorted(new_config, key=lambda x: x['name'])
+        # For the time being, use the built-in sort.
+ configs.sort(key=lambda x: x['name'])
+
+ for conf in configs:
+ if conf.get('trunk', {}) and conf['trunk'].get('allowed_vlans', []):
+ conf['trunk']['allowed_vlans'].sort(key=lambda x: x['vlan'])
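A brief note on the allowed_vlans handling introduced above: the replaced/overridden states work by expanding range specs such as '20-22' into flat VLAN ID lists, taking set differences, and compressing the result back into ranges. The sketch below illustrates that round trip under the assumption that get_ranges_in_list groups a sorted ID list into consecutive runs; the local group_consecutive helper stands in for it, since its implementation is not part of this patch.

def group_consecutive(sorted_ids):
    # Stand-in for get_ranges_in_list: split a sorted list of ints into runs
    # of consecutive values, e.g. [20, 21, 22, 25] -> [[20, 21, 22], [25]].
    if not sorted_ids:
        return []
    runs, run = [], [sorted_ids[0]]
    for vid in sorted_ids[1:]:
        if vid == run[-1] + 1:
            run.append(vid)
        else:
            runs.append(run)
            run = [vid]
    runs.append(run)
    return runs

def expand(allowed_vlans):
    # allowed_vlans spec format -> flat list of VLAN IDs
    ids = []
    for entry in allowed_vlans:
        vlan = entry['vlan']
        if '-' in vlan:
            start, end = vlan.split('-')
            ids.extend(range(int(start), int(end) + 1))
        else:
            ids.append(int(vlan))
    return ids

def compress(vlan_ids):
    # flat list of VLAN IDs -> allowed_vlans spec format
    result = []
    for run in group_consecutive(sorted(vlan_ids)):
        result.append({'vlan': str(run[0]) if len(run) == 1 else '{0}-{1}'.format(run[0], run[-1])})
    return result

want = expand([{'vlan': '10'}, {'vlan': '20-22'}])
have = expand([{'vlan': '20-25'}])
# VLANs to remove for 'replaced': configured on the device but not requested.
print(compress(list(set(have) - set(want))))   # [{'vlan': '23-25'}]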
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/l3_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/l3_acls.py
new file mode 100644
index 000000000..26fbb7fdb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_acls/l3_acls.py
@@ -0,0 +1,763 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_l3_acls class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ast import literal_eval
+
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.validation import check_required_arguments
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ remove_empties,
+ validate_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ update_states
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
+
+DELETE = 'delete'
+PATCH = 'patch'
+POST = 'post'
+
+TEST_KEYS_formatted_diff = [
+ {'config': {'address_family': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'acls': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'rules': {'sequence_num': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
+
+L4_PORT_START = 0
+L4_PORT_END = 65535
+
+protocol_number_to_name_map = {
+ 1: 'icmp',
+ 6: 'tcp',
+ 17: 'udp',
+ 58: 'icmpv6'
+}
+dscp_value_to_name_map = {
+ 0: 'default',
+ 8: 'cs1',
+ 16: 'cs2',
+ 24: 'cs3',
+ 32: 'cs4',
+ 40: 'cs5',
+ 48: 'cs6',
+ 56: 'cs7',
+ 10: 'af11',
+ 12: 'af12',
+ 14: 'af13',
+ 18: 'af21',
+ 20: 'af22',
+ 22: 'af23',
+ 26: 'af31',
+ 28: 'af32',
+ 30: 'af33',
+ 34: 'af41',
+ 36: 'af42',
+ 38: 'af43',
+ 46: 'ef',
+ 44: 'voice_admit'
+}
+
+# Spec value to payload value mappings
+acl_type_to_payload_map = {
+ 'ipv4': 'ACL_IPV4',
+ 'ipv6': 'ACL_IPV6'
+}
+acl_type_to_host_mask_map = {
+ 'ipv4': '/32',
+ 'ipv6': '/128'
+}
+action_value_to_payload_map = {
+ 'permit': 'ACCEPT',
+ 'discard': 'DISCARD',
+ 'do-not-nat': 'DO_NOT_NAT',
+ 'deny': 'DROP',
+ 'transit': 'TRANSIT'
+}
+protocol_name_to_payload_map = {
+ 'icmp': 'IP_ICMP',
+ 'icmpv6': 58,
+ 'tcp': 'IP_TCP',
+ 'udp': 'IP_UDP'
+}
+protocol_number_to_payload_map = {
+ 2: 'IP_IGMP',
+ 46: 'IP_RSVP',
+ 47: 'IP_GRE',
+ 51: 'IP_AUTH',
+ 103: 'IP_PIM',
+ 115: 'IP_L2TP'
+}
+dscp_name_to_value_map = {v: k for k, v in dscp_value_to_name_map.items()}
+
+
+class L3_acls(ConfigBase):
+ """
+ The sonic_l3_acls class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'l3_acls',
+ ]
+
+ acl_path = 'data/openconfig-acl:acl/acl-sets/acl-set'
+ l3_acl_path = 'data/openconfig-acl:acl/acl-sets/acl-set={acl_name},{acl_type}'
+ l3_acl_rule_path = 'data/openconfig-acl:acl/acl-sets/acl-set={acl_name},{acl_type}/acl-entries'
+ l3_acl_remark_path = 'data/openconfig-acl:acl/acl-sets/acl-set={acl_name},{acl_type}/config/description'
+
+ def __init__(self, module):
+ super(L3_acls, self).__init__(module)
+
+ def get_l3_acls_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ l3_acls_facts = facts['ansible_network_resources'].get('l3_acls')
+ if not l3_acls_facts:
+ return []
+ return l3_acls_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+
+ existing_l3_acls_facts = self.get_l3_acls_facts()
+ commands, requests = self.set_config(existing_l3_acls_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._handle_failure_response(exc)
+
+ result['changed'] = True
+
+ changed_l3_acls_facts = self.get_l3_acls_facts()
+
+ result['before'] = existing_l3_acls_facts
+ if result['changed']:
+ result['after'] = changed_l3_acls_facts
+
+ result['commands'] = commands
+
+ new_config = changed_l3_acls_facts
+ old_config = existing_l3_acls_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_l3_acls_facts,
+ TEST_KEYS_formatted_diff)
+ self.post_process_generated_config(new_config)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ self.sort_config(new_config)
+ self.sort_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_l3_acls_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ if want:
+ want = self.validate_and_normalize_config(want)
+ else:
+ want = []
+
+ have = existing_l3_acls_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ if state in ('merged', 'overridden', 'replaced'):
+ commands, requests = self._state_merged_overridden_replaced(want, have, state)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+
+ return commands, requests
+
+ def _handle_failure_response(self, connection_error):
+ log = None
+ try:
+ response = literal_eval(connection_error.args[0])
+ error_app_tag = response['ietf-restconf:errors']['error'][0].get('error-app-tag')
+ except Exception:
+ pass
+ else:
+ if error_app_tag == 'too-many-elements':
+ log = 'Exceeds maximum number of ACL / ACL Rules'
+ elif error_app_tag == 'update-not-allowed':
+                log = 'Creating ACLs with the same name but a different type is not allowed'
+
+ if log:
+ response.update({u'log': log})
+ self._module.fail_json(msg=to_text(response), code=connection_error.code)
+ else:
+ self._module.fail_json(msg=str(connection_error), code=connection_error.code)
+
+ def _state_merged_overridden_replaced(self, want, have, state):
+ """ The command generator when state is merged/overridden/replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ add_commands = []
+ del_commands = []
+ commands = []
+
+ add_requests = []
+ del_requests = []
+ requests = []
+
+ have_dict = self._convert_config_list_to_dict(have)
+ want_dict = self._convert_config_list_to_dict(want)
+
+ for acl_type in ('ipv4', 'ipv6'):
+ acl_type_add_commands = []
+ acl_type_del_commands = []
+
+ have_acl_names = set(have_dict.get(acl_type, {}).keys())
+ want_acl_names = set(want_dict.get(acl_type, {}).keys())
+
+ if state == 'overridden':
+                # Delete ACLs not present in the requested config
+ for acl_name in have_acl_names.difference(want_acl_names):
+ acl_type_del_commands.append({'name': acl_name})
+ del_requests.append(self.get_delete_l3_acl_request(acl_type, acl_name))
+
+ # Modify existing ACLs
+ for acl_name in want_acl_names.intersection(have_acl_names):
+ acl_add_command = {'name': acl_name}
+ acl_del_command = {'name': acl_name}
+ rule_add_commands = []
+ rule_del_commands = []
+
+ have_acl = have_dict[acl_type][acl_name]
+ want_acl = want_dict[acl_type][acl_name]
+ if not want_acl['remark']:
+ if have_acl['remark'] and state in ('replaced', 'overridden'):
+ acl_del_command['remark'] = have_acl['remark']
+ del_requests.append(self.get_delete_l3_acl_remark_request(acl_type, acl_name))
+ else:
+ if want_acl['remark'] != have_acl['remark']:
+ acl_add_command['remark'] = want_acl['remark']
+ add_requests.append(self.get_create_l3_acl_remark_request(acl_type, acl_name, want_acl['remark']))
+
+ have_seq_nums = set(have_acl['rules'].keys())
+ want_seq_nums = set(want_acl['rules'].keys())
+
+ if state in ('replaced', 'overridden'):
+                    # Delete rules not present in the requested config
+ for seq_num in have_seq_nums.difference(want_seq_nums):
+ rule_del_commands.append({'sequence_num': seq_num})
+ del_requests.append(self.get_delete_l3_acl_rule_request(acl_type, acl_name, seq_num))
+
+ for seq_num in want_seq_nums.intersection(have_seq_nums):
+ # Replace existing rules
+ if have_acl['rules'][seq_num] != want_acl['rules'][seq_num]:
+ if state == 'merged':
+ self._module.fail_json(
+ msg="Cannot update existing sequence {0} of {1} ACL {2} with state merged."
+ " Please use state replaced or overridden.".format(seq_num, acl_type, acl_name)
+ )
+
+ rule_del_commands.append({'sequence_num': seq_num})
+ del_requests.append(self.get_delete_l3_acl_rule_request(acl_type, acl_name, seq_num))
+
+ rule_add_commands.append(want_acl['rules'][seq_num])
+ add_requests.append(self.get_create_l3_acl_rule_request(acl_type, acl_name, seq_num, want_acl['rules'][seq_num]))
+
+ # Add new rules
+ for seq_num in want_seq_nums.difference(have_seq_nums):
+ rule_add_commands.append(want_acl['rules'][seq_num])
+ add_requests.append(self.get_create_l3_acl_rule_request(acl_type, acl_name, seq_num, want_acl['rules'][seq_num]))
+
+ if rule_del_commands:
+ acl_del_command['rules'] = rule_del_commands
+ if rule_add_commands:
+ acl_add_command['rules'] = rule_add_commands
+
+ if acl_del_command.get('rules') or acl_del_command.get('remark'):
+ acl_type_del_commands.append(acl_del_command)
+ if acl_add_command.get('rules') or acl_add_command.get('remark'):
+ acl_type_add_commands.append(acl_add_command)
+
+ # Add new ACLs
+ for acl_name in want_acl_names.difference(have_acl_names):
+ acl_add_command = {'name': acl_name}
+ add_requests.append(self.get_create_l3_acl_request(acl_type, acl_name))
+
+ want_acl = want_dict[acl_type][acl_name]
+ if want_acl['remark']:
+ acl_add_command['remark'] = want_acl['remark']
+ add_requests.append(self.get_create_l3_acl_remark_request(acl_type, acl_name, want_acl['remark']))
+
+ # Add new rules
+ want_seq_nums = set(want_acl['rules'].keys())
+ if want_seq_nums:
+ acl_add_command['rules'] = []
+ for seq_num in want_seq_nums:
+ acl_add_command['rules'].append(want_acl['rules'][seq_num])
+ add_requests.append(self.get_create_l3_acl_rule_request(acl_type, acl_name, seq_num, want_acl['rules'][seq_num]))
+
+ acl_type_add_commands.append(acl_add_command)
+
+ if acl_type_del_commands:
+ del_commands.append({'address_family': acl_type, 'acls': acl_type_del_commands})
+
+ if acl_type_add_commands:
+ add_commands.append({'address_family': acl_type, 'acls': acl_type_add_commands})
+
+ if del_commands:
+ commands = update_states(del_commands, 'deleted')
+ requests = del_requests
+
+ if add_commands:
+ commands.extend(update_states(add_commands, state))
+ requests.extend(add_requests)
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+
+ if not want:
+ for config in have:
+ if not config.get('acls'):
+ continue
+
+ acl_type_commands = []
+ acl_type = config['address_family']
+ for acl in config['acls']:
+ acl_type_commands.append({'name': acl['name']})
+ requests.append(self.get_delete_l3_acl_request(acl_type, acl['name']))
+
+ if acl_type_commands:
+ commands.append({'address_family': acl_type, 'acls': acl_type_commands})
+ else:
+ have_dict = self._convert_config_list_to_dict(have)
+ want_dict = self._convert_config_list_to_dict(want)
+
+ for acl_type in ('ipv4', 'ipv6'):
+ acl_type_commands = []
+ have_acl_names = set(have_dict.get(acl_type, {}).keys())
+ want_acl_names = set(want_dict.get(acl_type, {}).keys())
+
+ # If only the type is specified, delete all ACLs of that type
+ if acl_type in want_dict and not want_acl_names:
+ for acl_name in have_acl_names:
+ acl_type_commands.append({'name': acl_name})
+ requests.append(self.get_delete_l3_acl_request(acl_type, acl_name))
+
+ # Delete existing ACLs
+ for acl_name in want_acl_names.intersection(have_acl_names):
+ have_acl = have_dict[acl_type][acl_name]
+ want_acl = want_dict[acl_type][acl_name]
+
+ # Delete entire ACL if only the name is specified
+ if not want_acl['remark'] and not want_acl['rules']:
+ acl_type_commands.append({'name': acl_name})
+ requests.append(self.get_delete_l3_acl_request(acl_type, acl_name))
+ continue
+
+ acl_del_command = {'name': acl_name}
+ rule_del_commands = []
+ have_seq_nums = set(have_acl['rules'].keys())
+ want_seq_nums = set(want_acl['rules'].keys())
+
+ if want_acl['remark'] and want_acl['remark'] == have_acl['remark']:
+ acl_del_command['remark'] = want_acl['remark']
+ requests.append(self.get_delete_l3_acl_remark_request(acl_type, acl_name))
+
+ # Delete existing rules
+ # When state is deleted, options other than sequence_num are not considered
+ for seq_num in want_seq_nums.intersection(have_seq_nums):
+ rule_del_commands.append({'sequence_num': seq_num})
+ requests.append(self.get_delete_l3_acl_rule_request(acl_type, acl_name, seq_num))
+
+ if rule_del_commands:
+ acl_del_command['rules'] = rule_del_commands
+
+ if acl_del_command.get('rules') or acl_del_command.get('remark'):
+ acl_type_commands.append(acl_del_command)
+
+ if acl_type_commands:
+ commands.append({'address_family': acl_type, 'acls': acl_type_commands})
+
+ commands = update_states(commands, "deleted")
+ return commands, requests
+
+ def get_create_l3_acl_request(self, acl_type, acl_name):
+ """Get request to create L3 ACL with specified type and name"""
+ url = self.acl_path
+ payload = {
+ 'acl-set': [{
+ 'name': acl_name,
+ 'type': acl_type_to_payload_map[acl_type],
+ 'config': {
+ 'name': acl_name,
+ 'type': acl_type_to_payload_map[acl_type]
+ }
+ }]
+ }
+
+ return {'path': url, 'method': PATCH, 'data': payload}
+
+ def get_create_l3_acl_remark_request(self, acl_type, acl_name, remark):
+ """Get request to add given remark to the specified L3 ACL"""
+ url = self.l3_acl_remark_path.format(acl_name=acl_name, acl_type=acl_type_to_payload_map[acl_type])
+ payload = {'description': remark}
+ return {'path': url, 'method': PATCH, 'data': payload}
+
+ def get_create_l3_acl_rule_request(self, acl_type, acl_name, seq_num, rule):
+ """Get request to create a rule with given sequence number
+ and configuration in the specified L3 ACL
+ """
+ url = self.l3_acl_rule_path.format(acl_name=acl_name, acl_type=acl_type_to_payload_map[acl_type])
+ payload = {
+ 'openconfig-acl:acl-entry': [{
+ 'sequence-id': seq_num,
+ 'config': {
+ 'sequence-id': seq_num
+ },
+ acl_type: {
+ 'config': {}
+ },
+ 'transport': {
+ 'config': {}
+ },
+ 'actions': {
+ 'config': {
+ 'forwarding-action': action_value_to_payload_map[rule['action']]
+ }
+ }
+ }]
+ }
+ rule_l3_config = payload['openconfig-acl:acl-entry'][0][acl_type]['config']
+ rule_l4_config = payload['openconfig-acl:acl-entry'][0]['transport']['config']
+
+ if rule['protocol'].get('number') is not None:
+ protocol = rule['protocol']['number']
+ rule_l3_config['protocol'] = protocol_number_to_payload_map.get(protocol, protocol)
+ else:
+ protocol = rule['protocol']['name']
+ if protocol not in ('ip', 'ipv6'):
+ rule_l3_config['protocol'] = protocol_name_to_payload_map[protocol]
+
+ if rule['source'].get('host'):
+ rule_l3_config['source-address'] = rule['source']['host'] + acl_type_to_host_mask_map[acl_type]
+ elif rule['source'].get('prefix'):
+ rule_l3_config['source-address'] = rule['source']['prefix']
+
+ src_port_number = self._convert_port_dict_to_payload_format(rule['source'].get('port_number'))
+ if src_port_number:
+ rule_l4_config['source-port'] = src_port_number
+
+ if rule['destination'].get('host'):
+ rule_l3_config['destination-address'] = rule['destination']['host'] + acl_type_to_host_mask_map[acl_type]
+ elif rule['destination'].get('prefix'):
+ rule_l3_config['destination-address'] = rule['destination']['prefix']
+
+ dest_port_number = self._convert_port_dict_to_payload_format(rule['destination'].get('port_number'))
+ if dest_port_number:
+ rule_l4_config['destination-port'] = dest_port_number
+
+ if rule.get('protocol_options'):
+ if protocol in ('icmp', 'icmpv6') and rule['protocol_options'].get(protocol):
+ if rule['protocol_options'][protocol].get('type') is not None:
+ rule_l4_config['icmp-type'] = rule['protocol_options'][protocol]['type']
+ if rule['protocol_options'][protocol].get('code') is not None:
+ rule_l4_config['icmp-code'] = rule['protocol_options'][protocol]['code']
+ elif rule['protocol_options'].get('tcp'):
+ if rule['protocol_options']['tcp'].get('established'):
+ rule_l4_config['tcp-session-established'] = True
+ else:
+ tcp_flag_list = []
+ for tcp_flag in rule['protocol_options']['tcp'].keys():
+ if rule['protocol_options']['tcp'][tcp_flag]:
+ tcp_flag_list.append('tcp_{0}'.format(tcp_flag).upper())
+
+ if tcp_flag_list:
+ rule_l4_config['tcp-flags'] = tcp_flag_list
+
+ if rule.get('vlan_id') is not None:
+ payload['openconfig-acl:acl-entry'][0]['l2'] = {
+ 'config': {
+ 'vlanid': rule['vlan_id']
+ }
+ }
+
+ if rule.get('dscp'):
+ if rule['dscp'].get('value') is not None:
+ rule_l3_config['dscp'] = rule['dscp']['value']
+ else:
+ dscp_opt = next(iter(rule['dscp']))
+ if rule['dscp'][dscp_opt]:
+ rule_l3_config['dscp'] = dscp_name_to_value_map[dscp_opt]
+
+ if rule.get('remark'):
+ payload['openconfig-acl:acl-entry'][0]['config']['description'] = rule['remark']
+
+ return {'path': url, 'method': POST, 'data': payload}
+
+ def get_delete_l3_acl_request(self, acl_type, acl_name):
+ """Get request to delete L3 ACL with specified type and name"""
+ url = self.l3_acl_path.format(acl_name=acl_name, acl_type=acl_type_to_payload_map[acl_type])
+ return {'path': url, 'method': DELETE}
+
+ def get_delete_l3_acl_remark_request(self, acl_type, acl_name):
+ """Get request to delete remark of the specified L3 ACL"""
+ url = self.l3_acl_remark_path.format(acl_name=acl_name, acl_type=acl_type_to_payload_map[acl_type])
+ return {'path': url, 'method': DELETE}
+
+ def get_delete_l3_acl_rule_request(self, acl_type, acl_name, seq_num):
+ """Get request to delete the rule with given sequence number
+ in the specified L3 ACL
+ """
+ url = self.l3_acl_rule_path.format(acl_name=acl_name, acl_type=acl_type_to_payload_map[acl_type])
+ url += '/acl-entry={0}'.format(seq_num)
+ return {'path': url, 'method': DELETE}
+
+ def validate_and_normalize_config(self, config_list):
+ """Validate and normalize the given config"""
+ # Remove empties and validate the config with argument spec
+ updated_config_list = [remove_empties(config) for config in config_list]
+ validate_config(self._module.argument_spec, {'config': updated_config_list})
+
+ state = self._module.params['state']
+ # When state is deleted, options other than sequence_num are not considered
+ if state == 'deleted':
+ return updated_config_list
+
+ for config in updated_config_list:
+ if not config.get('acls'):
+ continue
+
+ acl_type = config['address_family']
+ for acl in config['acls']:
+ if not acl.get('rules'):
+ continue
+
+ acl_name = acl['name']
+ for rule in acl['rules']:
+ seq_num = rule['sequence_num']
+
+ self._check_required(['action', 'source', 'destination', 'protocol'], rule, ['config', 'acls', 'rules'])
+ self._validate_and_normalize_protocol(acl_type, acl_name, rule)
+ protocol = rule['protocol']['name'] if rule['protocol'].get('name') else str(rule['protocol']['number'])
+
+ for endpoint in ('source', 'destination'):
+ if rule[endpoint].get('any') is False:
+ self._invalid_rule('True is the only valid value for {0} -> any'.format(endpoint), acl_type, acl_name, seq_num)
+ elif rule[endpoint].get('host'):
+ rule[endpoint]['host'] = rule[endpoint]['host'].lower()
+ elif rule[endpoint].get('prefix'):
+ rule[endpoint]['prefix'] = rule[endpoint]['prefix'].lower()
+
+ if rule[endpoint].get('port_number'):
+ if protocol not in ('tcp', 'udp'):
+ self._invalid_rule('{0} -> port_number is valid only for TCP or UDP protocol'.format(endpoint), acl_type, acl_name, seq_num)
+
+ self._validate_and_normalize_port_number(acl_type, acl_name, rule, endpoint)
+
+ if rule.get('protocol_options'):
+ protocol_options = next(iter(rule['protocol_options']))
+ if protocol != protocol_options:
+ self._invalid_rule('protocol_options -> {0} is not valid for protocol {1}'.format(protocol_options, protocol),
+ acl_type, acl_name, seq_num)
+
+ self._normalize_protocol_options(rule)
+
+ self._normalize_dscp(rule)
+
+ return updated_config_list
+
+ def _validate_and_normalize_protocol(self, acl_type, acl_name, rule):
+ protocol = rule.get('protocol')
+ if protocol:
+ if protocol.get('number') is not None:
+ if protocol['number'] in protocol_number_to_name_map:
+ protocol['name'] = protocol_number_to_name_map[protocol.pop('number')]
+
+ protocol_name = protocol.get('name')
+ if (acl_type == 'ipv4' and protocol_name in ('ipv6', 'icmpv6')) or (acl_type == 'ipv6' and protocol_name in ('ip', 'icmp')):
+ self._invalid_rule('invalid protocol {0} for {1} ACL'.format(protocol_name, acl_type), acl_type, acl_name, rule['sequence_num'])
+
+ def _validate_and_normalize_port_number(self, acl_type, acl_name, rule, endpoint):
+ port_number = rule.get(endpoint, {}).get('port_number')
+ if port_number:
+ # Greater than 0 is the same as less than 65535
+ if port_number.get('gt') == L4_PORT_START:
+ port_number['lt'] = L4_PORT_END
+ del port_number['gt']
+ elif rule[endpoint]['port_number'].get('range'):
+ port_range = rule[endpoint]['port_number']['range']
+ if port_range['begin'] >= port_range['end']:
+ self._invalid_rule('begin must be less than end in {0} -> port_number -> range'.format(endpoint), acl_type, acl_name, rule['sequence_num'])
+
+ # Range of 0 to x is the same as less than x and
+ # range of x to 65535 is the same as greater than x
+ if port_range['begin'] == L4_PORT_START:
+ port_number['lt'] = port_range['end']
+ del port_number['range']
+ elif port_range['end'] == L4_PORT_END:
+ port_number['gt'] = port_range['begin']
+ del port_number['range']
+
+ def _invalid_rule(self, err_msg, acl_type, acl_name, seq_num):
+ self._module.fail_json(msg='{0} ACL {1}, sequence number {2}: {3}'.format(acl_type, acl_name, seq_num, err_msg))
+
+ def _check_required(self, required_parameters, parameters, options_context=None):
+ if required_parameters:
+ spec = {}
+ for parameter in required_parameters:
+ spec[parameter] = {'required': True}
+
+ try:
+ check_required_arguments(spec, parameters, options_context)
+ except TypeError as exc:
+ self._module.fail_json(msg=str(exc))
+
+ @staticmethod
+ def _normalize_protocol_options(rule):
+ tcp = rule.get('protocol_options', {}).get('tcp')
+ if tcp:
+ # Remove protocol_options option if all tcp options are False
+ if not any(list(tcp.values())):
+ del rule['protocol_options']
+ else:
+ tcp_flag_list = list(tcp.keys())
+ for tcp_flag in tcp_flag_list:
+ # Remove tcp option if its value is False
+ if not tcp[tcp_flag]:
+ del tcp[tcp_flag]
+
+ @staticmethod
+ def _normalize_dscp(rule):
+ dscp = rule.get('dscp')
+ if dscp:
+ if dscp.get('value') is not None:
+ if dscp['value'] in dscp_value_to_name_map:
+ dscp[dscp_value_to_name_map[dscp.pop('value')]] = True
+ else:
+ # Remove dscp option if its value is False
+ if not next(iter(dscp.values())):
+ del rule['dscp']
+
+ @staticmethod
+ def _convert_config_list_to_dict(config_list):
+ config_dict = {}
+ for config in config_list:
+ acl_type = config['address_family']
+ config_dict[acl_type] = {}
+ if config.get('acls'):
+ for acl in config['acls']:
+ acl_name = acl['name']
+ config_dict[acl_type][acl_name] = {}
+ config_dict[acl_type][acl_name]['remark'] = acl.get('remark')
+ config_dict[acl_type][acl_name]['rules'] = {}
+ if acl.get('rules'):
+ for rule in acl['rules']:
+ config_dict[acl_type][acl_name]['rules'][rule['sequence_num']] = rule
+
+ return config_dict
+
+ @staticmethod
+ def _convert_port_dict_to_payload_format(port_dict):
+ payload = None
+ if port_dict:
+ if port_dict.get('eq') is not None:
+ payload = port_dict['eq']
+ elif port_dict.get('lt') is not None:
+ payload = '{0}..{1}'.format(L4_PORT_START, port_dict['lt'])
+ elif port_dict.get('gt') is not None:
+ payload = '{0}..{1}'.format(port_dict['gt'], L4_PORT_END)
+ elif port_dict.get('range'):
+ payload = '{0}..{1}'.format(port_dict['range']['begin'], port_dict['range']['end'])
+
+ return payload
+
+ def sort_config(self, configs):
+        # natsort would provide a better ordering, but it is not available in
+        # the Python version currently used and its use causes a sanity error.
+        # new_config = natsorted(new_config, key=lambda x: x['name'])
+        # For the time being, use the built-in sort.
+ configs.sort(key=lambda x: x['address_family'])
+
+ for conf in configs:
+ acls = conf.get('acls', [])
+ if acls:
+ acls.sort(key=lambda x: x['name'])
+ for acl in acls:
+ if acl.get('rules', []):
+ acl['rules'].sort(key=lambda x: x['sequence_num'])
+
+ def post_process_generated_config(self, configs):
+ for conf in configs[:]:
+ if not conf.get('acls', []):
+ configs.remove(conf)
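As a reference for the transport-layer handling in the new l3_acls module above, the port_number spec (eq / lt / gt / range) is flattened into either an integer or a '<begin>..<end>' string for the OpenConfig payload, mirroring _convert_port_dict_to_payload_format. A minimal sketch follows.

L4_PORT_START = 0
L4_PORT_END = 65535

def port_spec_to_payload(port_dict):
    # eq -> plain port number; lt/gt/range -> "begin..end" string.
    if port_dict.get('eq') is not None:
        return port_dict['eq']
    if port_dict.get('lt') is not None:
        return '{0}..{1}'.format(L4_PORT_START, port_dict['lt'])
    if port_dict.get('gt') is not None:
        return '{0}..{1}'.format(port_dict['gt'], L4_PORT_END)
    if port_dict.get('range'):
        return '{0}..{1}'.format(port_dict['range']['begin'], port_dict['range']['end'])
    return None

print(port_spec_to_payload({'eq': 443}))                              # 443
print(port_spec_to_payload({'gt': 1024}))                             # 1024..65535
print(port_spec_to_payload({'range': {'begin': 3000, 'end': 3100}}))  # 3000..3100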
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py
index d1b735251..200d8552b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/l3_interfaces/l3_interfaces.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -30,7 +30,6 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
-from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
TEST_KEYS = [
@@ -125,17 +124,17 @@ class L3_interfaces(ConfigBase):
state = self._module.params['state']
diff = get_diff(want, have, TEST_KEYS)
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
- commands, requests = self._state_deleted(want, have, diff)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(want, have, diff)
elif state == 'replaced':
- commands, requests = self._state_replaced(want, have, diff)
+ commands, requests = self._state_replaced(want, have)
ret_commands = commands
return ret_commands, requests
- def _state_replaced(self, want, have, diff):
+ def _state_replaced(self, want, have):
""" The command generator when state is replaced
:rtype: A list
@@ -144,19 +143,22 @@ class L3_interfaces(ConfigBase):
"""
ret_requests = list()
commands = list()
- l3_interfaces_to_delete = get_diff(have, want, TEST_KEYS)
- obj = self.get_object(l3_interfaces_to_delete, want)
- diff = get_diff(obj, want, TEST_KEYS)
+ new_want = self.update_object(want)
+ new_have = self.remove_default_entries(have)
+ get_replace_interfaces_list = self.get_interface_object_for_replaced(new_have, want)
+
+ diff = get_diff(get_replace_interfaces_list, new_want, TEST_KEYS)
+
if diff:
- delete_l3_interfaces_requests = self.get_delete_all_requests(want)
+ delete_l3_interfaces_requests = self.get_delete_all_requests(diff)
ret_requests.extend(delete_l3_interfaces_requests)
- commands.extend(update_states(want, "deleted"))
+ commands.extend(update_states(diff, "deleted"))
l3_interfaces_to_create_requests = self.get_create_l3_interfaces_requests(want, have, want)
ret_requests.extend(l3_interfaces_to_create_requests)
- commands.extend(update_states(want, "merged"))
+ commands.extend(update_states(want, "replaced"))
return commands, ret_requests
- def _state_overridden(self, want, have, diff):
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
@@ -165,16 +167,19 @@ class L3_interfaces(ConfigBase):
"""
ret_requests = list()
commands = list()
- interfaces_to_delete = get_diff(have, want, TEST_KEYS)
- if interfaces_to_delete:
- delete_interfaces_requests = self.get_delete_l3_interfaces_requests(want, have)
+ new_want = self.update_object(want)
+ new_have = self.remove_default_entries(have)
+ get_override_interfaces = self.get_interface_object_for_overridden(new_have)
+ diff = get_diff(get_override_interfaces, new_want, TEST_KEYS)
+ diff2 = get_diff(new_want, get_override_interfaces, TEST_KEYS)
+
+ if diff or diff2:
+ delete_interfaces_requests = self.get_delete_all_requests(have)
ret_requests.extend(delete_interfaces_requests)
- commands.extend(update_states(interfaces_to_delete, "deleted"))
-
- if diff:
- interfaces_to_create_requests = self.get_create_l3_interfaces_requests(diff, have, want)
+ commands.extend(update_states(diff, "deleted"))
+ interfaces_to_create_requests = self.get_create_l3_interfaces_requests(want, have, want)
ret_requests.extend(interfaces_to_create_requests)
- commands.extend(update_states(diff, "merged"))
+ commands.extend(update_states(want, "overridden"))
return commands, ret_requests
@@ -195,7 +200,7 @@ class L3_interfaces(ConfigBase):
return commands, requests
- def _state_deleted(self, want, have, diff):
+ def _state_deleted(self, want, have):
""" The command generator when state is deleted
:rtype: A list
@@ -215,7 +220,16 @@ class L3_interfaces(ConfigBase):
commands = update_states(commands, "deleted")
return commands, requests
- def get_object(self, have, want):
+ def remove_default_entries(self, have):
+ new_have = list()
+ for obj in have:
+ if obj['ipv4']['addresses'] is not None or obj['ipv4']['anycast_addresses'] is not None:
+ new_have.append(obj)
+ elif obj['ipv6']['addresses'] is not None or obj['ipv6']['enabled']:
+ new_have.append(obj)
+ return new_have
+
+ def get_interface_object_for_replaced(self, have, want):
objects = list()
names = [i.get('name', None) for i in want]
for obj in have:
@@ -223,6 +237,43 @@ class L3_interfaces(ConfigBase):
objects.append(obj.copy())
return objects
+ def update_object(self, want):
+ objects = list()
+ for obj in want:
+ new_obj = {}
+ if 'name' in obj:
+ new_obj['name'] = obj['name']
+ if obj['ipv4'] is None:
+ new_obj['ipv4'] = {'addresses': None, 'anycast_addresses': None}
+ else:
+ new_obj['ipv4'] = obj['ipv4']
+
+ if obj['ipv6'] is None:
+ new_obj['ipv6'] = {'addresses': None, 'enabled': False}
+ else:
+ new_obj['ipv6'] = obj['ipv6']
+
+ objects.append(new_obj)
+ return objects
+
+ def get_interface_object_for_overridden(self, have):
+ objects = list()
+ for obj in have:
+ if 'name' in obj and obj['name'] != "Management0":
+ ipv4_addresses = obj['ipv4']['addresses']
+ ipv6_addresses = obj['ipv6']['addresses']
+ anycast_addresses = obj['ipv4']['anycast_addresses']
+ ipv6_enable = obj['ipv6']['enabled']
+
+ if ipv4_addresses is not None or ipv6_addresses is not None:
+ objects.append(obj.copy())
+ continue
+
+ if ipv6_enable or anycast_addresses is not None:
+ objects.append(obj.copy())
+ continue
+ return objects
+
def get_address(self, ip_str, have_obj):
to_return = list()
for i in have_obj:
@@ -241,6 +292,8 @@ class L3_interfaces(ConfigBase):
ipv6_addr_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/addresses/address={address}'
ipv6_enabled_url = 'data/openconfig-interfaces:interfaces/interface={intf_name}/{sub_intf_name}/openconfig-if-ip:ipv6/config/enabled'
+ if not want:
+ return requests
for each_l3 in want:
l3 = each_l3.copy()
name = l3.pop('name')
@@ -274,7 +327,7 @@ class L3_interfaces(ConfigBase):
if name and ipv4 is None and ipv6 is None:
is_del_ipv4 = True
is_del_ipv6 = True
- elif ipv4 and ipv4.get('addresses') and not ipv4.get('anycast_addresses'):
+ elif ipv4 and not ipv4.get('addresses') and not ipv4.get('anycast_addresses'):
is_del_ipv4 = True
elif ipv6 and not ipv6.get('addresses') and ipv6.get('enabled') is None:
is_del_ipv6 = True
@@ -299,24 +352,27 @@ class L3_interfaces(ConfigBase):
# Store the primary ip at end of the list. So primary ip will be deleted after the secondary ips
ipv4_del_reqs = []
- for ip in ipv4_addrs:
- match_ip = next((addr for addr in have_ipv4_addrs if addr['address'] == ip['address']), None)
- if match_ip:
- addr = ip['address'].split('/')[0]
- del_url = ipv4_addr_url.format(intf_name=name, sub_intf_name=sub_intf, address=addr)
- if match_ip['secondary']:
- del_url += '/config/secondary'
- ipv4_del_reqs.insert(0, {"path": del_url, "method": DELETE})
- else:
- ipv4_del_reqs.append({"path": del_url, "method": DELETE})
- if ipv4_del_reqs:
- requests.extend(ipv4_del_reqs)
-
- for ip in ipv4_anycast_addrs:
- if have_ipv4_addrs and ip in have_ipv4_addrs:
- ip = ip.replace('/', '%2f')
- anycast_delete_request = {"path": ipv4_anycast_url.format(intf_name=name, sub_intf_name=sub_intf, anycast_ip=ip), "method": DELETE}
- requests.append(anycast_delete_request)
+ if ipv4_addrs:
+ for ip in ipv4_addrs:
+ if have_ipv4_addrs:
+ match_ip = next((addr for addr in have_ipv4_addrs if addr['address'] == ip['address']), None)
+ if match_ip:
+ addr = ip['address'].split('/')[0]
+ del_url = ipv4_addr_url.format(intf_name=name, sub_intf_name=sub_intf, address=addr)
+ if match_ip['secondary']:
+ del_url += '/config/secondary'
+ ipv4_del_reqs.insert(0, {"path": del_url, "method": DELETE})
+ else:
+ ipv4_del_reqs.append({"path": del_url, "method": DELETE})
+ if ipv4_del_reqs:
+ requests.extend(ipv4_del_reqs)
+
+ if ipv4_anycast_addrs:
+ for ip in ipv4_anycast_addrs:
+ if have_ipv4_anycast_addrs and ip in have_ipv4_anycast_addrs:
+ ip = ip.replace('/', '%2f')
+ anycast_delete_request = {"path": ipv4_anycast_url.format(intf_name=name, sub_intf_name=sub_intf, anycast_ip=ip), "method": DELETE}
+ requests.append(anycast_delete_request)
if is_del_ipv6:
if have_ipv6_addrs and len(have_ipv6_addrs) != 0:
@@ -334,12 +390,12 @@ class L3_interfaces(ConfigBase):
ipv6_addrs = l3['ipv6']['addresses']
if 'enabled' in l3['ipv6']:
ipv6_enabled = l3['ipv6']['enabled']
-
- for ip in ipv6_addrs:
- if have_ipv6_addrs and ip['address'] in have_ipv6_addrs:
- addr = ip['address'].split('/')[0]
- request = {"path": ipv6_addr_url.format(intf_name=name, sub_intf_name=sub_intf, address=addr), "method": DELETE}
- requests.append(request)
+ if ipv6_addrs:
+ for ip in ipv6_addrs:
+ if have_ipv6_addrs and ip['address'] in have_ipv6_addrs:
+ addr = ip['address'].split('/')[0]
+ request = {"path": ipv6_addr_url.format(intf_name=name, sub_intf_name=sub_intf, address=addr), "method": DELETE}
+ requests.append(request)
if have_ipv6_enabled and ipv6_enabled is not None:
request = {"path": ipv6_enabled_url.format(intf_name=name, sub_intf_name=sub_intf), "method": DELETE}
@@ -349,8 +405,9 @@ class L3_interfaces(ConfigBase):
def get_delete_all_completely_requests(self, configs):
delete_requests = list()
for l3 in configs:
- if l3['ipv4'] or l3['ipv6']:
- delete_requests.append(l3)
+ if l3['name'] != "Management0":
+ if l3['ipv4'] or l3['ipv6']:
+ delete_requests.append(l3)
return self.get_delete_all_requests(delete_requests)
def get_delete_all_requests(self, configs):
@@ -364,6 +421,8 @@ class L3_interfaces(ConfigBase):
name = l3.get('name')
ipv4_addrs = []
ipv4_anycast = []
+ if name == "Management0":
+ continue
if l3.get('ipv4'):
if l3['ipv4'].get('addresses'):
ipv4_addrs = l3['ipv4']['addresses']
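To make the IPv4 address deletion logic in the hunks above easier to follow, here is a minimal, self-contained Python sketch of the ordering rule they implement: secondary-address deletes are placed at the front of the request list so they are issued before the primary-address delete. The function name, simplified URL template and sample data are illustrative assumptions, not code from the collection.

DELETE = 'delete'
ADDR_URL = ('data/openconfig-interfaces:interfaces/interface={name}/'
            'subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses/address={addr}')

def build_ipv4_delete_requests(name, want_addrs, have_addrs):
    """Return DELETE requests for wanted addresses that exist on the device,
    ordered so that secondary addresses are removed before the primary one."""
    requests = []
    for ip in want_addrs or []:
        match = next((a for a in have_addrs or [] if a['address'] == ip['address']), None)
        if not match:
            continue  # address not configured on the device; nothing to delete
        url = ADDR_URL.format(name=name, addr=ip['address'].split('/')[0])
        if match.get('secondary'):
            # delete the secondary flag first, ahead of the primary address
            requests.insert(0, {'path': url + '/config/secondary', 'method': DELETE})
        else:
            requests.append({'path': url, 'method': DELETE})
    return requests

have = [{'address': '10.0.0.1/24', 'secondary': False},
        {'address': '10.0.0.2/24', 'secondary': True}]
want = [{'address': '10.0.0.1/24'}, {'address': '10.0.0.2/24'}]
for req in build_ipv4_delete_requests('Ethernet0', want, have):
    print(req['method'], req['path'])

The front-insertion for secondary addresses matches the ordering comment in the hunk above.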
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py
index 541de2c4c..7ccd8ce02 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lag_interfaces/lag_interfaces.py
@@ -21,6 +21,9 @@ except ImportError:
import json
+from copy import (
+ deepcopy
+)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
@@ -39,6 +42,11 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
from ansible.module_utils._text import to_native
from ansible.module_utils.connection import ConnectionError
import traceback
@@ -60,6 +68,10 @@ DELETE = 'delete'
TEST_KEYS = [
{'interfaces': {'member': ''}},
]
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'interfaces': {'member': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
class Lag_interfaces(ConfigBase):
@@ -119,6 +131,19 @@ class Lag_interfaces(ConfigBase):
if result['changed']:
result['after'] = changed_lag_interfaces_facts
+ new_config = changed_lag_interfaces_facts
+ old_config = existing_lag_interfaces_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_lag_interfaces_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ self.sort_config(new_config)
+ self.sort_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -188,7 +213,7 @@ class Lag_interfaces(ConfigBase):
replaced_list.append(list_obj)
requests = self.get_delete_lag_interfaces_requests(replaced_list)
if requests:
- commands.extend(update_states(replaced_list, "replaced"))
+ commands.extend(update_states(replaced_list, "deleted"))
replaced_commands, replaced_requests = self.template_for_lag_creation(have, diff_members, diff_portchannels, "replaced")
if replaced_requests:
commands.extend(replaced_commands)
@@ -208,20 +233,31 @@ class Lag_interfaces(ConfigBase):
delete_list = list()
delete_list = get_diff(have, want, TEST_KEYS)
delete_members, delete_portchannels = self.diff_list_for_member_creation(delete_list)
+
replaced_list = list()
for i in want:
list_obj = search_obj_in_list(i['name'], delete_members, "name")
if list_obj:
replaced_list.append(list_obj)
+
requests = self.get_delete_lag_interfaces_requests(replaced_list)
- commands.extend(update_states(replaced_list, "overridden"))
- delete_members = get_diff(delete_members, replaced_list, TEST_KEYS)
- commands_overridden, requests_overridden = self.template_for_lag_deletion(have, delete_members, delete_portchannels, "overridden")
- requests.extend(requests_overridden)
- commands.extend(commands_overridden)
+ commands.extend(update_states(replaced_list, "deleted"))
+
+ deleted_po_list = list()
+ for i in delete_list:
+ list_obj = search_obj_in_list(i['name'], want, "name")
+ if not list_obj:
+ deleted_po_list.append(i)
+
+ requests_deleted_po = self.get_delete_portchannel_requests(deleted_po_list)
+ requests.extend(requests_deleted_po)
+ commands_del = self.prune_commands(deleted_po_list)
+ commands.extend(update_states(commands_del, "deleted"))
+
override_commands, override_requests = self.template_for_lag_creation(have, diff_members, diff_portchannels, "overridden")
commands.extend(override_commands)
requests.extend(override_requests)
+
return commands, requests
def _state_merged(self, want, have, diff_members, diff_portchannels):
@@ -248,7 +284,8 @@ class Lag_interfaces(ConfigBase):
requests = self.get_delete_all_lag_interfaces_requests()
portchannel_requests = self.get_delete_all_portchannel_requests()
requests.extend(portchannel_requests)
- commands.extend(update_states(have, "Deleted"))
+ commands_del = self.prune_commands(have)
+ commands.extend(update_states(commands_del, "deleted"))
else: # delete specific lag interfaces and specific portchannels
commands = get_diff(want, diff, TEST_KEYS)
commands = remove_empties_from_list(commands)
@@ -312,7 +349,8 @@ class Lag_interfaces(ConfigBase):
commands.extend(update_states(delete_members, state_name))
if delete_portchannels:
portchannel_requests = self.get_delete_portchannel_requests(delete_portchannels)
- commands.extend(update_states(delete_portchannels, state_name))
+ commands_del = self.prune_commands(delete_portchannels)
+ commands.extend(update_states(commands_del, state_name))
if requests:
requests.extend(portchannel_requests)
else:
@@ -336,8 +374,7 @@ class Lag_interfaces(ConfigBase):
def build_create_payload_member(self, name):
payload_template = """{\n"openconfig-if-aggregate:aggregate-id": "{{name}}"\n}"""
- temp = name.split("PortChannel", 1)[1]
- input_data = {"name": temp}
+ input_data = {"name": name}
env = jinja2.Environment(autoescape=False)
t = env.from_string(payload_template)
intended_payload = t.render(input_data)
@@ -419,3 +456,21 @@ class Lag_interfaces(ConfigBase):
requests.append(request)
return requests
+
+ def sort_config(self, configs):
+ # natsort would provide a better (natural) ordering, but it is not available
+ # in the Python version currently in use, and using it causes a sanity error.
+ # new_config = natsorted(new_config, key=lambda x: x['name'])
+ # For the time being, use the built-in "sort".
+ configs.sort(key=lambda x: x['name'])
+
+ for conf in configs:
+ if conf.get('members', {}) and conf['members'].get('interfaces', []):
+ conf['members']['interfaces'].sort(key=lambda x: x['member'])
+
+ def prune_commands(self, commands):
+ cmds = deepcopy(commands)
+ for cmd in cmds:
+ cmd.pop('members', None)
+ return cmds
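As a side note on prune_commands above, the sketch below (same function body, hypothetical sample data) shows why deepcopy is used: the 'members' details are stripped only from the copy that is reported as a deleted command, while the original gathered facts stay intact for the before/after and diff output.

from copy import deepcopy

def prune_commands(commands):
    cmds = deepcopy(commands)      # work on a copy; do not mutate the gathered facts
    for cmd in cmds:
        cmd.pop('members', None)   # member details are irrelevant for port-channel deletion
    return cmds

have = [{'name': 'PortChannel10',
         'members': {'interfaces': [{'member': 'Eth1/1'}, {'member': 'Eth1/2'}]}}]
pruned = prune_commands(have)
print(pruned)                      # [{'name': 'PortChannel10'}]
print('members' in have[0])        # True: original facts are untouched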
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tests/acl_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tests/acl_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/lldp_global.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/lldp_global.py
new file mode 100644
index 000000000..f27d63e81
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/lldp_global/lldp_global.py
@@ -0,0 +1,296 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_lldp_global class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+PATCH = 'patch'
+DELETE = 'delete'
+
+
+class Lldp_global(ConfigBase):
+ """
+ The sonic_lldp_global class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'lldp_global',
+ ]
+
+ lldp_global_path = 'data/openconfig-lldp:lldp/config'
+ lldp_global_config_path = {
+ 'enable': lldp_global_path + '/enabled',
+ 'hello_time': lldp_global_path + '/hello-timer',
+ 'mode': lldp_global_path + '/openconfig-lldp-ext:mode',
+ 'multiplier': lldp_global_path + '/openconfig-lldp-ext:multiplier',
+ 'system_description': lldp_global_path + '/system-description',
+ 'system_name': lldp_global_path + '/system-name',
+ 'tlv_select': lldp_global_path + '/suppress-tlv-advertisement',
+ }
+ lldp_suppress_tlv = '/data/openconfig-lldp:lldp/config/suppress-tlv-advertisement={lldp_suppress_tlv}'
+
+ def __init__(self, module):
+ super(Lldp_global, self).__init__(module)
+
+ def get_lldp_global_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ lldp_global_facts = facts['ansible_network_resources'].get('lldp_global')
+ if not lldp_global_facts:
+ return []
+ return lldp_global_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+
+ existing_lldp_global_facts = self.get_lldp_global_facts()
+ commands, requests = self.set_config(existing_lldp_global_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+
+ changed_lldp_global_facts = self.get_lldp_global_facts()
+
+ result['before'] = existing_lldp_global_facts
+ if result['changed']:
+ result['after'] = changed_lldp_global_facts
+
+ result['commands'] = commands
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_lldp_global_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_lldp_global_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ diff = get_diff(want, have)
+ if state == 'deleted':
+ commands, requests = self._state_deleted(want, have, diff)
+ elif state == 'merged':
+ commands, requests = self._state_merged(diff)
+ return commands, requests
+
+ def _state_merged(self, diff):
+ """ The command generator when state is merged
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = []
+ requests.extend(self.get_modify_specific_lldp_global_param_requests(commands))
+ if commands and len(requests) > 0:
+ commands = update_states(commands, 'merged')
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have, diff):
+ """ The command generator when state is deleted
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+
+ if not want:
+ commands = have
+ requests.extend(self.get_delete_lldp_global_completely_requests(commands))
+ else:
+ commands = get_diff(want, diff)
+ requests.extend(self.get_delete_specific_lldp_global_param_requests(commands, have))
+
+ if len(requests) == 0:
+ commands = []
+
+ if commands:
+ commands = update_states(commands, "deleted")
+
+ return commands, requests
+
+ def get_modify_specific_lldp_global_param_requests(self, command):
+ """Get requests to modify specific LLDP Global configurations
+ based on the command specified for the interface
+ """
+ requests = []
+
+ if not command:
+ return requests
+ if 'enable' in command and command['enable'] is not None:
+ payload = {'openconfig-lldp:enabled': command['enable']}
+ url = self.lldp_global_config_path['enable']
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if 'hello_time' in command and command['hello_time'] is not None:
+ payload = {'openconfig-lldp:hello-timer': str(command['hello_time'])}
+ url = self.lldp_global_config_path['hello_time']
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if 'mode' in command and command['mode'] is not None:
+ payload = {'openconfig-lldp-ext:mode': command['mode'].upper()}
+ url = self.lldp_global_config_path['mode']
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if 'multiplier' in command and command['multiplier'] is not None:
+ payload = {'openconfig-lldp-ext:multiplier': int(command['multiplier'])}
+ url = self.lldp_global_config_path['multiplier']
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if 'system_name' in command and command['system_name'] is not None:
+ payload = {'openconfig-lldp:system-name': command['system_name']}
+ url = self.lldp_global_config_path['system_name']
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if 'system_description' in command and command['system_description'] is not None:
+ payload = {'openconfig-lldp:system-description': command['system_description']}
+ url = self.lldp_global_config_path['system_description']
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ if 'tlv_select' in command:
+ if 'management_address' in command['tlv_select']:
+ payload = {'openconfig-lldp:suppress-tlv-advertisement': ["MANAGEMENT_ADDRESS"]}
+ url = self.lldp_global_config_path['tlv_select']
+ if command['tlv_select']['management_address'] is False:
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+ elif command['tlv_select']['management_address'] is True:
+ url = self.lldp_suppress_tlv.format(lldp_suppress_tlv="MANAGEMENT_ADDRESS")
+ requests.append({'path': url, 'method': DELETE})
+ if 'system_capabilities' in command['tlv_select']:
+ payload = {'openconfig-lldp:suppress-tlv-advertisement': ["SYSTEM_CAPABILITIES"]}
+ url = self.lldp_global_config_path['tlv_select']
+ if command['tlv_select']['system_capabilities'] is False:
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+ elif command['tlv_select']['system_capabilities'] is True:
+ url = self.lldp_suppress_tlv.format(lldp_suppress_tlv="SYSTEM_CAPABILITIES")
+ requests.append({'path': url, 'method': DELETE})
+ return requests
+
+ def get_delete_lldp_global_completely_requests(self, have):
+ """Get requests to delete all existing LLDP global
+ configurations in the chassis
+ """
+ default_config_dict = {"enable": True, "tlv_select": {"management_address": True, "system_capabilities": True}}
+ requests = []
+ if default_config_dict != have:
+ return [{'path': self.lldp_global_path, 'method': DELETE}]
+ return requests
+
+ def get_delete_specific_lldp_global_param_requests(self, command, config):
+ """Get requests to delete specific LLDP global configurations
+ based on the command specified for the interface
+ """
+ requests = []
+
+ if not command:
+ return requests
+ if 'hello_time' in command:
+ url = self.lldp_global_config_path['hello_time']
+ requests.append({'path': url, 'method': DELETE})
+
+ if 'enable' in command:
+ url = self.lldp_global_config_path['enable']
+ if command['enable'] is False:
+ payload = {'openconfig-lldp:enabled': True}
+ elif command['enable'] is True:
+ payload = {'openconfig-lldp:enabled': False}
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+ if 'mode' in command:
+ url = self.lldp_global_config_path['mode']
+ requests.append({'path': url, 'method': DELETE})
+
+ if 'multiplier' in command:
+ url = self.lldp_global_config_path['multiplier']
+ requests.append({'path': url, 'method': DELETE})
+
+ if 'system_name' in command:
+ url = self.lldp_global_config_path['system_name']
+ requests.append({'path': url, 'method': DELETE})
+
+ if 'system_description' in command:
+ url = self.lldp_global_config_path['system_description']
+ requests.append({'path': url, 'method': DELETE})
+ # The tlv_select configs are enabled by default. Hence, a value of 'false' leads to deletion of the corresponding config.
+ if 'tlv_select' in command:
+ if 'management_address' in command['tlv_select']:
+ payload = {'openconfig-lldp:suppress-tlv-advertisement': ["MANAGEMENT_ADDRESS"]}
+ url = self.lldp_global_config_path['tlv_select']
+ if command['tlv_select']['management_address'] is True:
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+ elif command['tlv_select']['management_address'] is False:
+ url = self.lldp_suppress_tlv.format(lldp_suppress_tlv="MANAGEMENT_ADDRESS")
+ requests.append({'path': url, 'method': DELETE})
+ if 'system_capabilities' in command['tlv_select']:
+ payload = {'openconfig-lldp:suppress-tlv-advertisement': ["SYSTEM_CAPABILITIES"]}
+ url = self.lldp_global_config_path['tlv_select']
+ if command['tlv_select']['system_capabilities'] is True:
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+ elif command['tlv_select']['system_capabilities'] is False:
+ url = self.lldp_suppress_tlv.format(lldp_suppress_tlv="SYSTEM_CAPABILITIES")
+ requests.append({'path': url, 'method': DELETE})
+ return requests
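A short, hypothetical sketch of the tlv_select handling introduced in this new module: because the OpenConfig LLDP model stores *suppressed* TLVs, enabling a TLV translates to a DELETE of its suppress-tlv-advertisement entry, and disabling it to a PATCH that adds the entry. The helper name and simplified request dicts are assumptions for illustration; the URL strings follow the module's paths.

PATCH = 'patch'
DELETE = 'delete'
SUPPRESS_LIST_URL = 'data/openconfig-lldp:lldp/config/suppress-tlv-advertisement'
SUPPRESS_ENTRY_URL = SUPPRESS_LIST_URL + '={tlv}'
TLV_NAMES = {'management_address': 'MANAGEMENT_ADDRESS',
             'system_capabilities': 'SYSTEM_CAPABILITIES'}

def tlv_select_requests(tlv_select):
    """Translate tlv_select booleans into requests against the suppression list."""
    requests = []
    for option, enabled in (tlv_select or {}).items():
        tlv = TLV_NAMES[option]
        if enabled:
            # advertise the TLV again: remove it from the suppression list
            requests.append({'path': SUPPRESS_ENTRY_URL.format(tlv=tlv), 'method': DELETE})
        else:
            # stop advertising the TLV: add it to the suppression list
            requests.append({'path': SUPPRESS_LIST_URL, 'method': PATCH,
                             'data': {'openconfig-lldp:suppress-tlv-advertisement': [tlv]}})
    return requests

print(tlv_select_requests({'management_address': False, 'system_capabilities': True}))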
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/logging/logging.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/logging/logging.py
new file mode 100644
index 000000000..82262b561
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/logging/logging.py
@@ -0,0 +1,458 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_logging class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states,
+ get_normalize_interface_name,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
+from ansible.module_utils.connection import ConnectionError
+
+PATCH = 'PATCH'
+DELETE = 'DELETE'
+
+DEFAULT_REMOTE_PORT = 514
+DEFAULT_LOG_TYPE = 'log'
+
+TEST_KEYS = [
+ {
+ "remote_servers": {"host": ""}
+ }
+]
+TEST_KEYS_formatted_diff = [
+ {
+ "remote_servers": {"host": "", '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}
+ }
+]
+
+
+class Logging(ConfigBase):
+ """
+ The sonic_logging class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'logging',
+ ]
+
+ def __init__(self, module):
+ super(Logging, self).__init__(module)
+
+ def get_logging_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ logging_facts = facts['ansible_network_resources'].get('logging')
+ if not logging_facts:
+ return []
+ return logging_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = list()
+ commands = list()
+ requests = list()
+
+ existing_logging_facts = self.get_logging_facts()
+
+ commands, requests = self.set_config(existing_logging_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_logging_facts = self.get_logging_facts()
+
+ result['before'] = existing_logging_facts
+ if result['changed']:
+ result['after'] = changed_logging_facts
+
+ new_config = changed_logging_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_logging_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_logging_facts,
+ new_config,
+ self._module._verbosity)
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_logging_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ if want is None:
+ want = []
+
+ have = existing_logging_facts
+ resp = self.set_state(want, have)
+
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+
+ self.validate_want(want, state)
+ self.preprocess_want(want, state)
+
+ if state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(want, have)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
+
+ return commands, requests
+
+ def _state_merged(self, want, have):
+ """ The command generator when state is merged
+
+ :param want: the additive configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ diff = get_diff(want, have, TEST_KEYS)
+
+ commands = diff
+ requests = []
+ if commands:
+ requests = self.get_merge_requests(commands, have)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :param want: the objects from which the configuration should be removed
+ :param have: the current configuration as a dictionary
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ # Get a list of requested servers to delete that are not present in the current
+ # configuration on the device. This list can be used to filter out these
+ # unconfigured servers from the list of "delete" commands to be sent to the switch.
+ unconfigured = get_diff(want, have, TEST_KEYS)
+
+ want_none = {'remote_servers': None}
+ want_any = get_diff(want, want_none, TEST_KEYS)
+ # If want_any is None, delete all logging configuration
+
+ delete_all = False
+ if not want_any:
+ commands = have
+ delete_all = True
+ else:
+ if not unconfigured:
+ commands = want_any
+ else:
+ # Some of the servers requested for deletion are not in the current
+ # device configuration. Filter these out of the list to be used for sending
+ # "delete" commands to the device.
+ commands = get_diff(want_any, unconfigured, TEST_KEYS)
+
+ requests = []
+ if commands:
+ requests = self.get_delete_requests(commands, delete_all)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ replaced_config = self.get_replaced_config(have, want)
+ if 'remote_servers' in replaced_config:
+ replaced_config['remote_servers'].sort(key=self.get_host)
+ if 'remote_servers' in want:
+ want['remote_servers'].sort(key=self.get_host)
+
+ if replaced_config and replaced_config != want:
+ delete_all = False
+ del_requests = self.get_delete_requests(replaced_config, delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+ replaced_config = []
+
+ if not replaced_config and want:
+ add_commands = want
+ add_requests = self.get_merge_requests(add_commands, replaced_config)
+
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "replaced"))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ if 'remote_servers' in have:
+ have['remote_servers'].sort(key=self.get_host)
+ if 'remote_servers' in want:
+ want['remote_servers'].sort(key=self.get_host)
+
+ commands = []
+ requests = []
+
+ if have and have != want:
+ delete_all = True
+ del_requests = self.get_delete_requests(have, delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ add_commands = want
+ add_requests = self.get_merge_requests(add_commands, have)
+
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "overridden"))
+
+ return commands, requests
+
+ def get_host(self, remote_server):
+ return remote_server.get('host')
+
+ def search_config_servers(self, host, servers):
+
+ if servers is not None:
+ for server in servers:
+ if server['host'] == host:
+ return server
+ return []
+
+ def get_replaced_config(self, have, want):
+
+ replaced_config = dict()
+ replaced_servers = []
+ if 'remote_servers' in have and 'remote_servers' in want:
+ for server in want['remote_servers']:
+ replaced_server = self.search_config_servers(server['host'], have['remote_servers'])
+ if replaced_server:
+ replaced_servers.append(replaced_server)
+
+ replaced_config['remote_servers'] = replaced_servers
+ return replaced_config
+
+ def validate_want(self, want, state):
+
+ if state == 'deleted':
+
+ if 'remote_servers' in want and want['remote_servers'] is not None:
+ for server in want['remote_servers']:
+ source_interface_config = server.get('source_interface', None)
+ remote_port_config = server.get('remote_port', None)
+ message_type_config = server.get('message_type', None)
+ vrf_config = server.get('vrf', None)
+ if source_interface_config or remote_port_config or \
+ message_type_config or vrf_config:
+ err_msg = "Logging remote_server parameter(s) can not be deleted."
+ self._module.fail_json(msg=err_msg, code=405)
+
+ def preprocess_want(self, want, state):
+
+ if state == 'merged':
+ if 'remote_servers' in want and want['remote_servers'] is not None:
+ for server in want['remote_servers']:
+ if 'source_interface' in server and not server['source_interface']:
+ server.pop('source_interface', None)
+ else:
+ server['source_interface'] = \
+ get_normalize_interface_name(server['source_interface'], self._module)
+ if 'remote_port' in server and not server['remote_port']:
+ server.pop('remote_port', None)
+ if 'message_type' in server and not server['message_type']:
+ server.pop('message_type', None)
+ if 'vrf' in server and not server['vrf']:
+ server.pop('vrf', None)
+
+ if state == 'replaced' or state == 'overridden':
+ if 'remote_servers' in want and want['remote_servers'] is not None:
+ for server in want['remote_servers']:
+ if 'source_interface' in server and not server['source_interface']:
+ server.pop('source_interface', None)
+ else:
+ server['source_interface'] = \
+ get_normalize_interface_name(server['source_interface'], self._module)
+ if 'remote_port' in server and not server['remote_port']:
+ server['remote_port'] = DEFAULT_REMOTE_PORT
+ if 'message_type' in server and not server['message_type']:
+ server['message_type'] = DEFAULT_LOG_TYPE
+
+ def get_merge_requests(self, configs, have):
+
+ requests = []
+
+ servers_config = configs.get('remote_servers', None)
+ if servers_config:
+ servers_request = self.get_create_servers_requests(servers_config, have)
+ if servers_request:
+ requests.extend(servers_request)
+
+ return requests
+
+ def get_delete_requests(self, configs, delete_all):
+
+ requests = []
+
+ servers_config = configs.get('remote_servers', None)
+ if servers_config:
+ servers_request = []
+ if delete_all:
+ servers_request = self.get_delete_all_servers_requests()
+ else:
+ servers_request = self.get_delete_servers_requests(servers_config)
+
+ if servers_request:
+ requests.extend(servers_request)
+
+ return requests
+
+ def get_create_servers_requests(self, configs, have):
+
+ requests = []
+
+ # Create URL and payload
+ method = PATCH
+ url = 'data/openconfig-system:system/logging/remote-servers'
+ server_configs = []
+ for config in configs:
+ req_config = dict()
+ req_config['host'] = config['host']
+ if 'source_interface' in config:
+ req_config['source-interface'] = config['source_interface']
+ if 'message_type' in config:
+ req_config['message-type'] = config['message_type']
+ if 'remote_port' in config:
+ req_config['remote-port'] = config['remote_port']
+ if 'vrf' in config:
+ req_config['vrf-name'] = config['vrf']
+
+ server_host = config['host']
+ server_config = {"host": server_host, "config": req_config}
+ server_configs.append(server_config)
+
+ payload = {"openconfig-system:remote-servers": {"remote-server": server_configs}}
+ request = {"path": url, "method": method, "data": payload}
+ requests.append(request)
+
+ return requests
+
+ def get_delete_servers_requests(self, configs):
+
+ requests = []
+
+ # Create URL and payload
+ method = DELETE
+ for config in configs:
+ server_host = config['host']
+ url = 'data/openconfig-system:system/logging/remote-servers/remote-server={0}'.format(server_host)
+ request = {"path": url, "method": method}
+ requests.append(request)
+
+ return requests
+
+ def get_delete_all_servers_requests(self):
+
+ requests = []
+
+ # Create URL and payload
+ method = DELETE
+ url = 'data/openconfig-system:system/logging/remote-servers'
+ request = {"path": url, "method": method}
+ requests.append(request)
+
+ return requests
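To illustrate the payload shape built by get_create_servers_requests above, here is a minimal standalone sketch that maps the module's Ansible-facing keys to their openconfig-system leaves and wraps each host in a per-server 'config' container. The helper name and the sample server are illustrative assumptions.

def build_remote_server_payload(servers):
    key_map = {'source_interface': 'source-interface',
               'message_type': 'message-type',
               'remote_port': 'remote-port',
               'vrf': 'vrf-name'}
    entries = []
    for server in servers:
        config = {'host': server['host']}
        for ansible_key, oc_key in key_map.items():
            if ansible_key in server:
                config[oc_key] = server[ansible_key]
        entries.append({'host': server['host'], 'config': config})
    return {'openconfig-system:remote-servers': {'remote-server': entries}}

payload = build_remote_server_payload(
    [{'host': '10.11.0.2', 'remote_port': 5514, 'message_type': 'event', 'vrf': 'mgmt'}])
print(payload)

The resulting dictionary is then sent as a single PATCH to data/openconfig-system:system/logging/remote-servers, as in the module above.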
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/mac.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/mac.py
new file mode 100644
index 000000000..866ff3934
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mac/mac.py
@@ -0,0 +1,431 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_mac class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ update_states,
+ get_diff,
+ get_replaced_config,
+ send_requests
+)
+
+NETWORK_INSTANCE_PATH = '/data/openconfig-network-instance:network-instances/network-instance'
+PATCH = 'patch'
+DELETE = 'delete'
+TEST_KEYS = [
+ {'config': {'vrf_name': ''}},
+ {'mac_table_entries': {'mac_address': '', 'vlan_id': ''}}
+]
+
+
+class Mac(ConfigBase):
+ """
+ The sonic_mac class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'mac',
+ ]
+
+ def __init__(self, module):
+ super(Mac, self).__init__(module)
+
+ def get_mac_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ mac_facts = facts['ansible_network_resources'].get('mac')
+ if not mac_facts:
+ return []
+ return mac_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+ commands = []
+
+ existing_mac_facts = self.get_mac_facts()
+ commands, requests = self.set_config(existing_mac_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_mac_facts = self.get_mac_facts()
+
+ result['before'] = existing_mac_facts
+ if result['changed']:
+ result['after'] = changed_mac_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_mac_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_mac_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ state = self._module.params['state']
+
+ diff = get_diff(want, have, TEST_KEYS)
+
+ if state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(diff)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, diff)
+ return commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ if replaced_config:
+ self.sort_lists_in_config(replaced_config)
+ self.sort_lists_in_config(have)
+ is_delete_all = (replaced_config == have)
+ requests = self.get_delete_mac_requests(replaced_config, have, is_delete_all)
+ send_requests(self._module, requests)
+
+ commands = want
+ else:
+ commands = diff
+
+ requests = []
+
+ if commands:
+ requests = self.get_modify_mac_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ if have and have != want:
+ is_delete_all = True
+ requests = self.get_delete_mac_requests(have, None, is_delete_all)
+ send_requests(self._module, requests)
+ have = []
+
+ commands = []
+ requests = []
+
+ if not have and want:
+ commands = want
+ requests = self.get_modify_mac_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "overridden")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_merged(self, diff):
+ """ The command generator when state is merged
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = self.get_modify_mac_requests(commands)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ is_delete_all = False
+ # If want is None, delete all existing MAC configuration
+ if not want:
+ commands = have
+ is_delete_all = True
+ else:
+ commands = want
+
+ commands = self.remove_default_entries(commands)
+ requests = self.get_delete_mac_requests(commands, have, is_delete_all)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def get_modify_mac_requests(self, commands):
+
+ requests = []
+
+ if not commands:
+ return requests
+
+ for cmd in commands:
+ vrf_name = cmd.get('vrf_name', None)
+ mac = cmd.get('mac', {})
+ if mac:
+ aging_time = mac.get('aging_time', None)
+ dampening_interval = mac.get('dampening_interval', None)
+ dampening_threshold = mac.get('dampening_threshold', None)
+ mac_table_entries = mac.get('mac_table_entries', [])
+ fdb_dict = {}
+ dampening_cfg_dict = {}
+ if aging_time:
+ fdb_dict['config'] = {'mac-aging-time': aging_time}
+ if dampening_interval:
+ dampening_cfg_dict['interval'] = dampening_interval
+ if dampening_threshold:
+ dampening_cfg_dict['threshold'] = dampening_threshold
+ if mac_table_entries:
+ entry_list = []
+ entries_dict = {}
+ mac_table_dict = {}
+ for entry in mac_table_entries:
+ entry_dict = {}
+ entry_cfg_dict = {}
+ mac_address = entry.get('mac_address', None)
+ vlan_id = entry.get('vlan_id', None)
+ interface = entry.get('interface', None)
+ if mac_address:
+ entry_dict['mac-address'] = mac_address
+ entry_cfg_dict['mac-address'] = mac_address
+ if vlan_id:
+ entry_dict['vlan'] = vlan_id
+ entry_cfg_dict['vlan'] = vlan_id
+ if entry_cfg_dict:
+ entry_dict['config'] = entry_cfg_dict
+ if interface:
+ entry_dict['interface'] = {'interface-ref': {'config': {'interface': interface, 'subinterface': 0}}}
+ if entry_dict:
+ entry_list.append(entry_dict)
+ if entry_list:
+ entries_dict['entry'] = entry_list
+ if entries_dict:
+ mac_table_dict['entries'] = entries_dict
+ if mac_table_dict:
+ fdb_dict['mac-table'] = mac_table_dict
+ if fdb_dict:
+ url = '%s=%s/fdb' % (NETWORK_INSTANCE_PATH, vrf_name)
+ payload = {'openconfig-network-instance:fdb': fdb_dict}
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+ if dampening_cfg_dict:
+ url = '%s=%s/openconfig-mac-dampening:mac-dampening' % (NETWORK_INSTANCE_PATH, vrf_name)
+ payload = {'openconfig-mac-dampening:mac-dampening': {'config': dampening_cfg_dict}}
+ requests.append({'path': url, 'method': PATCH, 'data': payload})
+
+ return requests
+
+ def get_delete_mac_requests(self, commands, have, is_delete_all):
+ requests = []
+
+ for cmd in commands:
+ vrf_name = cmd.get('vrf_name', None)
+ if vrf_name and is_delete_all:
+ requests.extend(self.get_delete_all_mac_requests(vrf_name))
+ else:
+ mac = cmd.get('mac', {})
+ if mac:
+ aging_time = mac.get('aging_time', None)
+ dampening_interval = mac.get('dampening_interval', None)
+ dampening_threshold = mac.get('dampening_threshold', None)
+ mac_table_entries = mac.get('mac_table_entries', [])
+
+ for cfg in have:
+ cfg_vrf_name = cfg.get('vrf_name', None)
+ cfg_mac = cfg.get('mac', {})
+ if cfg_mac:
+ cfg_aging_time = cfg_mac.get('aging_time', None)
+ cfg_dampening_interval = cfg_mac.get('dampening_interval', None)
+ cfg_dampening_threshold = cfg_mac.get('dampening_threshold', None)
+ cfg_mac_table_entries = cfg_mac.get('mac_table_entries', [])
+
+ if vrf_name and vrf_name == cfg_vrf_name:
+ if aging_time and aging_time == cfg_aging_time:
+ requests.append(self.get_delete_fdb_cfg_attr(vrf_name, 'mac-aging-time'))
+ if dampening_interval and dampening_interval == cfg_dampening_interval:
+ requests.append(self.get_delete_mac_dampening_attr(vrf_name, 'interval'))
+ if dampening_threshold and dampening_threshold == cfg_dampening_threshold:
+ requests.append(self.get_delete_mac_dampening_attr(vrf_name, 'threshold'))
+
+ if mac_table_entries:
+ for entry in mac_table_entries:
+ mac_address = entry.get('mac_address', None)
+ vlan_id = entry.get('vlan_id', None)
+ interface = entry.get('interface', None)
+
+ if cfg_mac_table_entries:
+ for cfg_entry in cfg_mac_table_entries:
+ cfg_mac_address = cfg_entry.get('mac_address', None)
+ cfg_vlan_id = cfg_entry.get('vlan_id', None)
+ cfg_interface = cfg_entry.get('interface', None)
+ if mac_address and vlan_id and mac_address == cfg_mac_address and vlan_id == cfg_vlan_id:
+ if interface and interface == cfg_interface:
+ requests.append(self.get_delete_mac_table_intf(vrf_name, mac_address, vlan_id))
+ elif not interface:
+ requests.append(self.get_delete_mac_table_entry(vrf_name, mac_address, vlan_id))
+ return requests
+
+ def get_delete_all_mac_requests(self, vrf_name):
+ requests = []
+ url = '%s=%s/fdb' % (NETWORK_INSTANCE_PATH, vrf_name)
+ requests.append({'path': url, 'method': DELETE})
+ url = '%s=%s/openconfig-mac-dampening:mac-dampening' % (NETWORK_INSTANCE_PATH, vrf_name)
+ requests.append({'path': url, 'method': DELETE})
+
+ return requests
+
+ def get_delete_fdb_cfg_attr(self, vrf_name, attr):
+ url = '%s=%s/fdb/config/%s' % (NETWORK_INSTANCE_PATH, vrf_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mac_dampening_attr(self, vrf_name, attr):
+ url = '%s=%s/openconfig-mac-dampening:mac-dampening/config/%s' % (NETWORK_INSTANCE_PATH, vrf_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mac_table_entry(self, vrf_name, mac_address, vlan_id):
+ url = '%s=%s/fdb/mac-table/entries/entry=%s,%s' % (NETWORK_INSTANCE_PATH, vrf_name, mac_address, vlan_id)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mac_table_intf(self, vrf_name, mac_address, vlan_id):
+ url = '%s=%s/fdb/mac-table/entries/entry=%s,%s/interface' % (NETWORK_INSTANCE_PATH, vrf_name, mac_address, vlan_id)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_mac_vrf_name(self, vrf_name):
+ return vrf_name.get('vrf_name')
+
+ def sort_lists_in_config(self, config):
+ if config:
+ config.sort(key=self.get_mac_vrf_name)
+ for cfg in config:
+ if 'mac' in cfg and cfg['mac'] is not None:
+ if 'mac_table_entries' in cfg['mac'] and cfg['mac']['mac_table_entries'] is not None:
+ cfg['mac']['mac_table_entries'].sort(key=lambda x: (x['mac_address'], x['vlan_id']))
+
+ def remove_default_entries(self, data):
+ new_data = []
+
+ if not data:
+ return new_data
+
+ for conf in data:
+ new_conf = {}
+ vrf_name = conf.get('vrf_name', None)
+ mac = conf.get('mac', None)
+ if mac:
+ new_mac = {}
+ aging_time = mac.get('aging_time', None)
+ dampening_interval = mac.get('dampening_interval', None)
+ dampening_threshold = mac.get('dampening_threshold', None)
+ mac_table_entries = mac.get('mac_table_entries', None)
+
+ if aging_time and aging_time != 600:
+ new_mac['aging_time'] = aging_time
+ if dampening_interval and dampening_interval != 5:
+ new_mac['dampening_interval'] = dampening_interval
+ if dampening_threshold and dampening_threshold != 5:
+ new_mac['dampening_threshold'] = dampening_threshold
+ if mac_table_entries is not None:
+ new_mac['mac_table_entries'] = mac_table_entries
+ if new_mac:
+ new_conf['mac'] = new_mac
+ new_conf['vrf_name'] = vrf_name
+ if new_conf:
+ new_data.append(new_conf)
+
+ return new_data
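As a pointer for the FDB delete URLs used in this new mac module, the sketch below shows how an entry is addressed by its composite 'mac-address,vlan' key under the per-VRF network instance, and how appending '/interface' removes only the interface binding rather than the whole static entry. The helper and example values are hypothetical; the path constant mirrors the module.

NETWORK_INSTANCE_PATH = '/data/openconfig-network-instance:network-instances/network-instance'
DELETE = 'delete'

def mac_entry_delete_request(vrf_name, mac_address, vlan_id, interface_only=False):
    url = '%s=%s/fdb/mac-table/entries/entry=%s,%s' % (
        NETWORK_INSTANCE_PATH, vrf_name, mac_address, vlan_id)
    if interface_only:
        url += '/interface'   # unbind the interface but keep the MAC table entry
    return {'path': url, 'method': DELETE}

print(mac_entry_delete_request('default', '00:11:22:33:44:55', 100))
print(mac_entry_delete_request('default', '00:11:22:33:44:55', 100, interface_only=True))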
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mclag/mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mclag/mclag.py
index 88215e8fc..68c99f0ab 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mclag/mclag.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/mclag/mclag.py
@@ -14,16 +14,21 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
+
+import re
+from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties,
to_list
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
get_diff,
+ get_ranges_in_list,
get_normalize_interface_name,
normalize_interface_name
)
@@ -57,6 +62,17 @@ class Mclag(ConfigBase):
'mclag',
]
+ mclag_simple_attrs = set({
+ 'peer_address',
+ 'source_address',
+ 'peer_link',
+ 'system_mac',
+ 'keepalive',
+ 'session_timeout',
+ 'delay_restore',
+ 'gateway_mac'
+ })
+
def __init__(self, module):
super(Mclag, self).__init__(module)
@@ -123,6 +139,11 @@ class Mclag(ConfigBase):
vlans_list = unique_ip['vlans']
if vlans_list:
normalize_interface_name(vlans_list, self._module, 'vlan')
+ peer_gateway = want.get('peer_gateway', None)
+ if peer_gateway:
+ vlans_list = peer_gateway['vlans']
+ if vlans_list:
+ normalize_interface_name(vlans_list, self._module, 'vlan')
members = want.get('members', None)
if members:
portchannels_list = members['portchannels']
@@ -143,11 +164,13 @@ class Mclag(ConfigBase):
"""
state = self._module.params['state']
if state == 'deleted':
- commands = self._state_deleted(want, have)
+ commands, requests = self._state_deleted(want, have)
elif state == 'merged':
diff = get_diff(want, have, TEST_KEYS)
- commands = self._state_merged(want, have, diff)
- return commands
+ commands, requests = self._state_merged(want, have, diff)
+ elif state in ('replaced', 'overridden'):
+ commands, requests = self._state_replaced_overridden(want, have, state)
+ return commands, requests
def _state_merged(self, want, have, diff):
""" The command generator when state is merged
@@ -159,7 +182,21 @@ class Mclag(ConfigBase):
requests = []
commands = []
if diff:
- requests = self.get_create_mclag_request(want, diff)
+ # Obtain diff for VLAN ranges in unique_ip
+ if 'unique_ip' in diff and diff['unique_ip'] is not None and diff['unique_ip'].get('vlans'):
+ if 'unique_ip' in have and have['unique_ip'] is not None and have['unique_ip'].get('vlans'):
+ diff['unique_ip']['vlans'] = self.get_vlan_range_diff(diff['unique_ip']['vlans'], have['unique_ip']['vlans'])
+ if not diff['unique_ip']['vlans']:
+ diff.pop('unique_ip')
+
+ # Obtain diff for VLAN ranges in peer_gateway
+ if 'peer_gateway' in diff and diff['peer_gateway'] is not None and diff['peer_gateway'].get('vlans'):
+ if 'peer_gateway' in have and have['peer_gateway'] is not None and have['peer_gateway'].get('vlans'):
+ diff['peer_gateway']['vlans'] = self.get_vlan_range_diff(diff['peer_gateway']['vlans'], have['peer_gateway']['vlans'])
+ if not diff['peer_gateway']['vlans']:
+ diff.pop('peer_gateway')
+
+ requests = self.get_create_mclag_requests(want, diff)
if len(requests) > 0:
commands = update_states(diff, "merged")
return commands, requests
@@ -175,19 +212,159 @@ class Mclag(ConfigBase):
requests = []
if not want:
if have:
- requests = self.get_delete_all_mclag_domain_request()
+ requests = self.get_delete_all_mclag_domain_requests(have)
if len(requests) > 0:
commands = update_states(have, "deleted")
else:
+ del_unique_ip_vlans = []
+ del_peer_gateway_vlans = []
+ # Create list of VLANs to be deleted based on VLAN ranges in unique_ip
+ if 'unique_ip' in want and want['unique_ip'] is not None and want['unique_ip'].get('vlans'):
+ want_unique_ip = want.pop('unique_ip')
+ if 'unique_ip' in have and have['unique_ip'] is not None and have['unique_ip'].get('vlans'):
+ del_unique_ip_vlans = self.get_vlan_range_common(want_unique_ip['vlans'], have['unique_ip']['vlans'])
+
+ # Create list of VLANs to be deleted based on VLAN ranges in peer_gateway
+ if 'peer_gateway' in want and want['peer_gateway'] is not None and want['peer_gateway'].get('vlans'):
+ want_peer_gateway = want.pop('peer_gateway')
+ if 'peer_gateway' in have and have['peer_gateway'] is not None and have['peer_gateway'].get('vlans'):
+ del_peer_gateway_vlans = self.get_vlan_range_common(want_peer_gateway['vlans'], have['peer_gateway']['vlans'])
+
new_have = self.remove_default_entries(have)
d_diff = get_diff(want, new_have, TEST_KEYS, is_skeleton=True)
diff_want = get_diff(want, d_diff, TEST_KEYS, is_skeleton=True)
+
+ if del_unique_ip_vlans:
+ diff_want['unique_ip'] = {'vlans': del_unique_ip_vlans}
+ if del_peer_gateway_vlans:
+ diff_want['peer_gateway'] = {'vlans': del_peer_gateway_vlans}
+
if diff_want:
- requests = self.get_delete_mclag_attribute_request(want, diff_want)
+ requests = self.get_delete_mclag_attribute_requests(have['domain_id'], diff_want)
if len(requests) > 0:
commands = update_states(diff_want, "deleted")
return commands, requests
+ def _state_replaced_overridden(self, want, have, state):
+ """ The command generator when state is replaced/overridden
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+ if want and not have:
+ commands = [update_states(want, state)]
+ requests = self.get_create_mclag_requests(want, want)
+ elif not want and have:
+ commands = [update_states(have, 'deleted')]
+ requests = self.get_delete_all_mclag_domain_requests(have)
+ elif want and have:
+ add_command = {}
+ del_command = {}
+ delete_all = False
+
+ # If 'domain_id' is modified, delete all mclag configuration.
+ if want['domain_id'] != have['domain_id']:
+ del_command = have
+ add_command = want
+ delete_all = True
+ else:
+ have = have.copy()
+ want = want.copy()
+ delete_all_vlans = {
+ 'unique_ip': False,
+ 'peer_gateway': False
+ }
+
+ # Delete unspecified configurations when:
+ # 1) state is overridden.
+ # 2) state is replaced and configuration other than
+ # unique_ip, peer_gateway or members is specified.
+ delete_unspecified = True
+ if state == 'replaced' and not self.mclag_simple_attrs.intersection(remove_empties(want).keys()):
+ delete_unspecified = False
+
+ # Create lists of VLANs to be deleted and added based on VLAN ranges
+ for option in ('unique_ip', 'peer_gateway'):
+ have_cfg = {}
+ want_cfg = {}
+ # The options are removed from the dict to avoid
+ # comparing the VLAN ranges two more times using get_diff
+ if have.get(option) and have[option].get('vlans'):
+ have_cfg = have.pop(option)
+ if want.get(option) and 'vlans' in want[option]:
+ want_cfg = want.pop(option)
+
+ if want_cfg:
+ if have_cfg:
+ # Delete all VLANs if empty 'vlans' list is provided
+ if not want_cfg['vlans']:
+ delete_all_vlans[option] = True
+ del_command[option] = have_cfg
+ else:
+ have_vlans = set(self.get_vlan_id_list(have_cfg['vlans']))
+ want_vlans = set(self.get_vlan_id_list(want_cfg['vlans']))
+ if have_vlans.intersection(want_vlans):
+ del_command[option] = {'vlans': self.get_vlan_range_list(list(have_vlans - want_vlans))}
+ if not del_command[option]['vlans']:
+ del_command.pop(option)
+ add_command[option] = {'vlans': self.get_vlan_range_list(list(want_vlans - have_vlans))}
+ if not add_command[option]['vlans']:
+ add_command.pop(option)
+ else:
+ delete_all_vlans[option] = True
+ del_command[option] = have_cfg
+ add_command[option] = want_cfg
+ else:
+ if want_cfg['vlans']:
+ add_command[option] = want_cfg
+ else:
+ if have_cfg and delete_unspecified:
+ delete_all_vlans[option] = True
+ del_command[option] = have_cfg
+
+ del_diff = get_diff(self.remove_default_entries(have), want, TEST_KEYS)
+ for option in del_diff:
+ if not want.get(option):
+ if delete_unspecified:
+ del_command[option] = del_diff[option]
+ else:
+ # Delete portchannels that are not specified
+ if option == 'members' and want.get(option):
+ del_command[option] = del_diff[option]
+
+ # To update the 'gateway_mac' configuration on the device,
+ # delete the already configured value first.
+ if option == 'gateway_mac' and want.get(option):
+ del_command[option] = del_diff[option]
+
+ diff = get_diff(want, have, TEST_KEYS)
+ add_command.update(diff)
+
+ if del_command:
+ del_command['domain_id'] = have['domain_id']
+ commands.extend(update_states(del_command, 'deleted'))
+ if delete_all:
+ requests = self.get_delete_all_mclag_domain_requests(del_command)
+ else:
+ if any(delete_all_vlans.values()):
+ del_command = deepcopy(del_command)
+
+ # Set 'vlans' to None to delete all VLANs
+ for option in delete_all_vlans:
+ if delete_all_vlans[option]:
+ del_command[option]['vlans'] = None
+ requests = self.get_delete_mclag_attribute_requests(del_command['domain_id'], del_command)
+
+ if add_command:
+ add_command['domain_id'] = want['domain_id']
+ commands.extend(update_states(add_command, state))
+ requests.extend(self.get_create_mclag_requests(add_command, add_command))
+
+ return commands, requests
+
def remove_default_entries(self, data):
new_data = {}
if not data:
@@ -196,6 +373,7 @@ class Mclag(ConfigBase):
default_val_dict = {
'keepalive': 1,
'session_timeout': 30,
+ 'delay_restore': 300
}
for key, val in data.items():
if not (val is None or (key in default_val_dict and val == default_val_dict[key])):
@@ -203,9 +381,9 @@ class Mclag(ConfigBase):
return new_data
- def get_delete_mclag_attribute_request(self, want, command):
+ def get_delete_mclag_attribute_requests(self, domain_id, command):
requests = []
- url_common = 'data/openconfig-mclag:mclag/mclag-domains/mclag-domain=%s/config' % (want["domain_id"])
+ url_common = 'data/openconfig-mclag:mclag/mclag-domains/mclag-domain=%s/config' % (domain_id)
method = DELETE
if 'source_address' in command and command["source_address"] is not None:
url = url_common + '/source-address'
@@ -231,16 +409,30 @@ class Mclag(ConfigBase):
url = url_common + '/mclag-system-mac'
request = {'path': url, 'method': method}
requests.append(request)
+ if 'delay_restore' in command and command['delay_restore'] is not None:
+ url = url_common + '/delay-restore'
+ request = {'path': url, 'method': method}
+ requests.append(request)
+ if 'peer_gateway' in command and command['peer_gateway'] is not None:
+ if command['peer_gateway']['vlans'] is None:
+ request = {'path': 'data/openconfig-mclag:mclag/vlan-ifs/vlan-if', 'method': method}
+ requests.append(request)
+ elif command['peer_gateway']['vlans'] is not None:
+ vlan_id_list = self.get_vlan_id_list(command['peer_gateway']['vlans'])
+ for vlan in vlan_id_list:
+ peer_gateway_url = 'data/openconfig-mclag:mclag/vlan-ifs/vlan-if=Vlan{0}'.format(vlan)
+ request = {'path': peer_gateway_url, 'method': method}
+ requests.append(request)
if 'unique_ip' in command and command['unique_ip'] is not None:
if command['unique_ip']['vlans'] is None:
request = {'path': 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface', 'method': method}
requests.append(request)
elif command['unique_ip']['vlans'] is not None:
- for each in command['unique_ip']['vlans']:
- if each:
- unique_ip_url = 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=%s' % (each['vlan'])
- request = {'path': unique_ip_url, 'method': method}
- requests.append(request)
+ vlan_id_list = self.get_vlan_id_list(command['unique_ip']['vlans'])
+ for vlan in vlan_id_list:
+ unique_ip_url = 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan{0}'.format(vlan)
+ request = {'path': unique_ip_url, 'method': method}
+ requests.append(request)
if 'members' in command and command['members'] is not None:
if command['members']['portchannels'] is None:
request = {'path': 'data/openconfig-mclag:mclag/interfaces/interface', 'method': method}
@@ -251,17 +443,29 @@ class Mclag(ConfigBase):
portchannel_url = 'data/openconfig-mclag:mclag/interfaces/interface=%s' % (each['lag'])
request = {'path': portchannel_url, 'method': method}
requests.append(request)
+ if 'gateway_mac' in command and command['gateway_mac'] is not None:
+ request = {'path': 'data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac', 'method': method}
+ requests.append(request)
return requests
- def get_delete_all_mclag_domain_request(self):
+ def get_delete_all_mclag_domain_requests(self, have):
requests = []
path = 'data/openconfig-mclag:mclag/mclag-domains'
method = DELETE
+ if have.get('peer_gateway'):
+ request = {'path': 'data/openconfig-mclag:mclag/vlan-ifs/vlan-if', 'method': method}
+ requests.append(request)
+ if have.get('unique_ip'):
+ request = {'path': 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface', 'method': method}
+ requests.append(request)
+ if have.get('gateway_mac'):
+ request = {'path': 'data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac', 'method': method}
+ requests.append(request)
request = {'path': path, 'method': method}
requests.append(request)
return requests
- def get_create_mclag_request(self, want, commands):
+ def get_create_mclag_requests(self, want, commands):
requests = []
path = 'data/openconfig-mclag:mclag/mclag-domains/mclag-domain'
method = PATCH
@@ -269,6 +473,17 @@ class Mclag(ConfigBase):
if payload:
request = {'path': path, 'method': method, 'data': payload}
requests.append(request)
+ if 'gateway_mac' in commands and commands['gateway_mac'] is not None:
+ gateway_mac_path = 'data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac'
+ gateway_mac_method = PATCH
+ gateway_mac_payload = {
+ 'openconfig-mclag:mclag-gateway-mac': [{
+ 'gateway-mac': commands['gateway_mac'],
+ 'config': {'gateway-mac': commands['gateway_mac']}
+ }]
+ }
+ request = {'path': gateway_mac_path, 'method': gateway_mac_method, 'data': gateway_mac_payload}
+ requests.append(request)
if 'unique_ip' in commands and commands['unique_ip'] is not None:
if commands['unique_ip']['vlans'] and commands['unique_ip']['vlans'] is not None:
unique_ip_path = 'data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface'
@@ -276,6 +491,13 @@ class Mclag(ConfigBase):
unique_ip_payload = self.build_create_unique_ip_payload(commands['unique_ip']['vlans'])
request = {'path': unique_ip_path, 'method': unique_ip_method, 'data': unique_ip_payload}
requests.append(request)
+ if 'peer_gateway' in commands and commands['peer_gateway'] is not None:
+ if commands['peer_gateway']['vlans'] and commands['peer_gateway']['vlans'] is not None:
+ peer_gateway_path = 'data/openconfig-mclag:mclag/vlan-ifs/vlan-if'
+ peer_gateway_method = PATCH
+ peer_gateway_payload = self.build_create_peer_gateway_payload(commands['peer_gateway']['vlans'])
+ request = {'path': peer_gateway_path, 'method': peer_gateway_method, 'data': peer_gateway_payload}
+ requests.append(request)
if 'members' in commands and commands['members'] is not None:
if commands['members']['portchannels'] and commands['members']['portchannels'] is not None:
portchannel_path = 'data/openconfig-mclag:mclag/interfaces/interface'
@@ -299,6 +521,8 @@ class Mclag(ConfigBase):
temp['peer-link'] = str(commands['peer_link'])
if 'system_mac' in commands and commands['system_mac'] is not None:
temp['openconfig-mclag:mclag-system-mac'] = str(commands['system_mac'])
+ if 'delay_restore' in commands and commands['delay_restore'] is not None:
+ temp['delay-restore'] = commands['delay_restore']
mclag_dict = {}
if temp:
domain_id = {"domain-id": want["domain_id"]}
@@ -312,8 +536,18 @@ class Mclag(ConfigBase):
def build_create_unique_ip_payload(self, commands):
payload = {"openconfig-mclag:vlan-interface": []}
- for each in commands:
- payload['openconfig-mclag:vlan-interface'].append({"name": each['vlan'], "config": {"name": each['vlan'], "unique-ip-enable": "ENABLE"}})
+ vlan_id_list = self.get_vlan_id_list(commands)
+ for vlan in vlan_id_list:
+ vlan_name = 'Vlan{0}'.format(vlan)
+ payload['openconfig-mclag:vlan-interface'].append({"name": vlan_name, "config": {"name": vlan_name, "unique-ip-enable": "ENABLE"}})
+ return payload
+
+ def build_create_peer_gateway_payload(self, commands):
+ payload = {"openconfig-mclag:vlan-if": []}
+ vlan_id_list = self.get_vlan_id_list(commands)
+ for vlan in vlan_id_list:
+ vlan_name = 'Vlan{0}'.format(vlan)
+ payload['openconfig-mclag:vlan-if'].append({"name": vlan_name, "config": {"name": vlan_name, "peer-gateway-enable": "ENABLE"}})
return payload
def build_create_portchannel_payload(self, want, commands):
@@ -321,3 +555,63 @@ class Mclag(ConfigBase):
for each in commands:
payload['openconfig-mclag:interface'].append({"name": each['lag'], "config": {"name": each['lag'], "mclag-domain-id": want['domain_id']}})
return payload
+
+ def get_vlan_range_common(self, config_vlans, match_vlans):
+ """Returns the vlan ranges present in both 'config_vlans'
+ and 'match_vlans' in vlans spec format
+ """
+ if not config_vlans:
+ return []
+
+ if not match_vlans:
+ return []
+
+ config_vlans = self.get_vlan_id_list(config_vlans)
+ match_vlans = self.get_vlan_id_list(match_vlans)
+ return self.get_vlan_range_list(list(set(config_vlans).intersection(set(match_vlans))))
+
+ def get_vlan_range_diff(self, config_vlans, match_vlans):
+ """Returns the vlan ranges present only in 'config_vlans'
+ and not in 'match_vlans' in vlans spec format
+ """
+ if not config_vlans:
+ return []
+
+ if not match_vlans:
+ return config_vlans
+
+ config_vlans = self.get_vlan_id_list(config_vlans)
+ match_vlans = self.get_vlan_id_list(match_vlans)
+ return self.get_vlan_range_list(list(set(config_vlans) - set(match_vlans)))
+
+ @staticmethod
+ def get_vlan_id_list(vlan_range_list):
+ """Returns a list of all VLAN IDs specified in VLAN range list"""
+ vlan_id_list = []
+ if vlan_range_list:
+ for vlan_range in vlan_range_list:
+ vlan_val = vlan_range['vlan']
+ if '-' in vlan_val:
+ match = re.match(r'Vlan(\d+)-(\d+)', vlan_val)
+ if match:
+ vlan_id_list.extend(range(int(match.group(1)), int(match.group(2)) + 1))
+ else:
+ # Single VLAN ID
+ match = re.match(r'Vlan(\d+)', vlan_val)
+ if match:
+ vlan_id_list.append(int(match.group(1)))
+
+ return vlan_id_list
+
+ @staticmethod
+ def get_vlan_range_list(vlan_id_list):
+ """Returns a list of VLAN ranges for given list of VLAN IDs
+ in vlans spec format"""
+ vlan_range_list = []
+
+ if vlan_id_list:
+ vlan_id_list.sort()
+ for vlan_range in get_ranges_in_list(vlan_id_list):
+ vlan_range_list.append({'vlan': 'Vlan{0}'.format('-'.join(map(str, (vlan_range[0], vlan_range[-1])[:len(vlan_range)])))})
+
+ return vlan_range_list
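
For reference, the following standalone sketch (not part of the commit above) shows the round trip performed by the new get_vlan_id_list and get_vlan_range_list helpers: range strings such as 'Vlan2-4' are expanded to individual IDs for set arithmetic, then collapsed back into the vlans spec format. The grouping step below is an assumed stand-in for the collection's get_ranges_in_list utility.

import re
from itertools import groupby

def expand_vlan_ranges(vlan_range_list):
    """Expand [{'vlan': 'Vlan2-4'}, {'vlan': 'Vlan10'}] into [2, 3, 4, 10]."""
    vlan_ids = []
    for entry in vlan_range_list:
        match = re.match(r'Vlan(\d+)(?:-(\d+))?$', entry['vlan'])
        if match:
            start = int(match.group(1))
            end = int(match.group(2)) if match.group(2) else start
            vlan_ids.extend(range(start, end + 1))
    return vlan_ids

def compress_vlan_ids(vlan_ids):
    """Collapse [2, 3, 4, 10] back into [{'vlan': 'Vlan2-4'}, {'vlan': 'Vlan10'}]."""
    ranges = []
    # Group consecutive IDs: pairs whose (index - value) difference is constant form one run.
    for _, group in groupby(enumerate(sorted(vlan_ids)), lambda pair: pair[1] - pair[0]):
        members = [vlan for _, vlan in group]
        name = 'Vlan{0}'.format(members[0]) if len(members) == 1 \
            else 'Vlan{0}-{1}'.format(members[0], members[-1])
        ranges.append({'vlan': name})
    return ranges

assert expand_vlan_ranges([{'vlan': 'Vlan2-4'}, {'vlan': 'Vlan10'}]) == [2, 3, 4, 10]
assert compress_vlan_ids([2, 3, 4, 10]) == [{'vlan': 'Vlan2-4'}, {'vlan': 'Vlan10'}]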
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ntp/ntp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ntp/ntp.py
index a4fdc7e0a..b48fa54f6 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ntp/ntp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/ntp/ntp.py
@@ -14,8 +14,6 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
-
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
@@ -29,20 +27,30 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
get_diff,
+ get_replaced_config,
update_states,
- normalize_interface_name,
normalize_interface_name_list
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF,
+ get_new_config,
+ get_formatted_config_diff
+)
+
from ansible.module_utils.connection import ConnectionError
PATCH = 'PATCH'
DELETE = 'DELETE'
TEST_KEYS = [
- {
- "vrf": "", "enable_ntp_auth": "", "source_interfaces": "", "trusted_keys": "",
- "servers": {"address": ""}, "ntp_keys": {"key_id": ""}
- }
+ {"servers": {"address": ""}},
+ {"ntp_keys": {"key_id": ""}}
+]
+TEST_KEYS_formatted_diff = [
+ {'__default_ops': {'__delete_op': __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF}},
+ {"servers": {"address": "", '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {"ntp_keys": {"key_id": "", '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}}
]
@@ -106,6 +114,17 @@ class Ntp(ConfigBase):
if result['changed']:
result['after'] = changed_ntp_facts
+ new_config = changed_ntp_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_ntp_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_ntp_facts,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -119,7 +138,7 @@ class Ntp(ConfigBase):
"""
want = self._module.params['config']
if want is None:
- want = []
+ want = {}
have = existing_ntp_facts
@@ -145,6 +164,10 @@ class Ntp(ConfigBase):
commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(want, have)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
return commands, requests
@@ -160,6 +183,8 @@ class Ntp(ConfigBase):
commands = diff
requests = []
+
+ self.preprocess_merge_commands(commands, want)
if commands:
requests = self.get_merge_requests(commands, have)
@@ -207,6 +232,77 @@ class Ntp(ConfigBase):
return commands, requests
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ add_commands = []
+ if replaced_config:
+ self.sort_lists_in_config(replaced_config)
+ self.sort_lists_in_config(have)
+ delete_all = (replaced_config == have)
+ del_requests = self.get_delete_requests(replaced_config, delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+
+ add_commands = want
+ else:
+ diff = get_diff(want, have, TEST_KEYS)
+ add_commands = diff
+
+ if add_commands:
+ self.preprocess_merge_commands(add_commands, want)
+ add_requests = self.get_merge_requests(add_commands, have)
+
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "replaced"))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ commands = []
+ requests = []
+
+ if have and have != want:
+ delete_all = True
+ del_requests = self.get_delete_requests(have, delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ add_commands = want
+ add_requests = self.get_merge_requests(add_commands, have)
+
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "overridden"))
+
+ return commands, requests
+
def validate_want(self, want, state):
if state == 'deleted':
@@ -215,7 +311,9 @@ class Ntp(ConfigBase):
key_id_config = server.get('key_id', None)
minpoll_config = server.get('minpoll', None)
maxpoll_config = server.get('maxpoll', None)
- if key_id_config or minpoll_config or maxpoll_config:
+ prefer_config = server.get('prefer', None)
+ if key_id_config or minpoll_config or maxpoll_config or \
+ prefer_config is not None:
err_msg = "NTP server parameter(s) can not be deleted."
self._module.fail_json(msg=err_msg, code=405)
@@ -247,6 +345,52 @@ class Ntp(ConfigBase):
server.pop('minpoll')
if 'maxpoll' in server and not server['maxpoll']:
server.pop('maxpoll')
+ if 'prefer' in server and server['prefer'] is None:
+ server.pop('prefer')
+
+ if state == 'replaced' or state == 'overridden':
+ enable_auth_want = want.get('enable_ntp_auth', None)
+ if enable_auth_want is None:
+ want['enable_ntp_auth'] = False
+ if 'servers' in want and want['servers'] is not None:
+ for server in want['servers']:
+ if 'prefer' in server and server['prefer'] is None:
+ server['prefer'] = False
+
+ def search_servers(self, svr_address, servers):
+
+ found_server = dict()
+ if servers is not None:
+ for server in servers:
+ if server['address'] == svr_address:
+ found_server = server
+ return found_server
+
+ def preprocess_merge_commands(self, commands, want):
+
+ if 'servers' in commands and commands['servers'] is not None:
+ for server in commands['servers']:
+ if 'minpoll' in server and 'maxpoll' not in server:
+ want_server = dict()
+ if 'servers' in want:
+ want_server = self.search_servers(server['address'], want['servers'])
+
+ if want_server:
+ server['maxpoll'] = want_server['maxpoll']
+ else:
+ err_msg = "Internal error with NTP server maxpoll configuration."
+ self._module.fail_json(msg=err_msg, code=500)
+
+ if 'maxpoll' in server and 'minpoll' not in server:
+ want_server = dict()
+ if 'servers' in want:
+ want_server = self.search_servers(server['address'], want['servers'])
+
+ if want_server:
+ server['minpoll'] = want_server['minpoll']
+ else:
+ err_msg = "Internal error with NTP server minpoll configuration."
+ self._module.fail_json(msg=err_msg, code=500)
def get_merge_requests(self, configs, have):
@@ -448,18 +592,23 @@ class Ntp(ConfigBase):
# Create URL and payload
method = DELETE
- servers_config = configs.get('servers', None)
src_intf_config = configs.get('source_interfaces', None)
vrf_config = configs.get('vrf', None)
enable_auth_config = configs.get('enable_ntp_auth', None)
trusted_key_config = configs.get('trusted_keys', None)
- if servers_config or src_intf_config or vrf_config or \
+ if src_intf_config or vrf_config or \
trusted_key_config or enable_auth_config is not None:
url = 'data/openconfig-system:system/ntp'
request = {"path": url, "method": method}
requests.append(request)
+ servers_config = configs.get('servers', None)
+ if servers_config:
+ url = 'data/openconfig-system:system/ntp/servers'
+ request = {"path": url, "method": method}
+ requests.append(request)
+
keys_config = configs.get('ntp_keys', None)
if keys_config:
url = 'data/openconfig-system:system/ntp/ntp-keys'
@@ -546,3 +695,20 @@ class Ntp(ConfigBase):
requests.append(request)
return requests
+
+ def get_server_address(self, ntp_server):
+ return ntp_server.get('address')
+
+ def get_ntp_key_id(self, ntp_key):
+ return ntp_key.get('key_id')
+
+ def sort_lists_in_config(self, config):
+
+ if 'source_interfaces' in config and config['source_interfaces'] is not None:
+ config['source_interfaces'].sort()
+ if 'servers' in config and config['servers'] is not None:
+ config['servers'].sort(key=self.get_server_address)
+ if 'trusted_keys' in config and config['trusted_keys'] is not None:
+ config['trusted_keys'].sort()
+ if 'ntp_keys' in config and config['ntp_keys'] is not None:
+ config['ntp_keys'].sort(key=self.get_ntp_key_id)
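
As an aside, here is a minimal illustration (assumed example data, not taken from the commit) of what preprocess_merge_commands above guarantees: when a merge diff carries only one of minpoll/maxpoll for a server, the missing half is copied from the playbook's want entry so both values are always sent together.

def pair_poll_values(command_server, want_server):
    # Mirror of the pairing rule in preprocess_merge_commands (sketch only).
    if 'minpoll' in command_server and 'maxpoll' not in command_server:
        command_server['maxpoll'] = want_server['maxpoll']
    if 'maxpoll' in command_server and 'minpoll' not in command_server:
        command_server['minpoll'] = want_server['minpoll']
    return command_server

want = {'address': '10.11.0.1', 'minpoll': 5, 'maxpoll': 10}
diff_cmd = {'address': '10.11.0.1', 'minpoll': 5}   # only minpoll differs from 'have'
print(pair_poll_values(diff_cmd, want))
# {'address': '10.11.0.1', 'minpoll': 5, 'maxpoll': 10}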
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/pki.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/pki.py
new file mode 100644
index 000000000..163e59023
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/pki/pki.py
@@ -0,0 +1,563 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell EMC
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_pki class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ remove_empties,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import (
+ Facts,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ update_states,
+ get_diff,
+)
+
+from urllib.parse import quote
+
+
+TRUST_STORES_PATH = "data/openconfig-pki:pki/trust-stores"
+SECURITY_PROFILES_PATH = "data/openconfig-pki:pki/security-profiles"
+TRUST_STORE_PATH = "data/openconfig-pki:pki/trust-stores/trust-store"
+SECURITY_PROFILE_PATH = (
+ "data/openconfig-pki:pki/security-profiles/security-profile"
+)
+
+PATCH = "patch"
+DELETE = "delete"
+PUT = "put"
+TEST_KEYS = [
+ {"security_profiles": {"profile_name": ""}},
+ {"trust_stores": {"name": ""}},
+]
+
+
+class Pki(ConfigBase):
+ """
+ The sonic_pki class
+ """
+
+ gather_subset = [
+ "!all",
+ "!min",
+ ]
+
+ gather_network_resources = [
+ "pki",
+ ]
+
+ def get_pki_facts(self):
+ """Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(
+ self.gather_subset, self.gather_network_resources
+ )
+ pki_facts = facts["ansible_network_resources"].get("pki")
+ if not pki_facts:
+ return {}
+ return pki_facts
+
+ def execute_module(self):
+ """Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {"changed": False}
+ warnings = list()
+ commands = list()
+
+ existing_pki_facts = self.get_pki_facts()
+ commands, requests = self.set_config(existing_pki_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(
+ self._module, to_request(self._module, requests)
+ )
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result["changed"] = True
+ result["commands"] = commands
+
+ changed_pki_facts = self.get_pki_facts()
+
+ result["before"] = existing_pki_facts
+ if result["changed"]:
+ result["after"] = changed_pki_facts
+
+ result["warnings"] = warnings
+
+ return result
+
+ def set_config(self, existing_pki_facts):
+ """Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params["config"]
+ have = existing_pki_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ state = self._module.params["state"]
+ if not want:
+ want = {}
+
+ diff = get_diff(want, have, list(TEST_KEYS))
+
+ if state == "overridden":
+ commands, requests = self._state_overridden(want, have, diff)
+ elif state == "deleted":
+ commands, requests = self._state_deleted(want, have, diff)
+ elif state == "merged":
+ commands, requests = self._state_merged(want, have, diff)
+ elif state == "replaced":
+ commands, requests = self._state_replaced(want, have)
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ spdiff = sp_diff(want, have)
+ tsdiff = ts_diff(want, have)
+ commands = []
+ requests = []
+ have_dict = {
+ "security_profiles": {
+ sp.get("profile_name"): sp
+ for sp in (have.get("security_profiles") or [])
+ },
+ "trust_stores": {
+ ts.get("name"): ts for ts in (have.get("trust_stores") or [])
+ },
+ }
+ for ts in tsdiff:
+ requests.append(
+ {
+ "path": TRUST_STORE_PATH + "=" + ts.get("name"),
+ "method": PUT,
+ "data": mk_ts_config(ts),
+ }
+ )
+ commands.append(
+ update_states(
+ have_dict["trust_stores"][ts.get("name")], "replaced"
+ )
+ )
+ for sp in spdiff:
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH
+ + "="
+ + sp.get("profile_name"),
+ "method": PUT,
+ "data": mk_sp_config(sp),
+ }
+ )
+ commands.append(
+ update_states(
+ have_dict["security_profiles"][sp.get("profile_name")],
+ "replaced",
+ )
+ )
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+
+ commands = []
+ requests = []
+ want_tss = [ts.get("name") for ts in (want.get("trust_stores") or [])]
+ want_sps = [
+ sp.get("profile_name")
+ for sp in (want.get("security_profiles") or [])
+ ]
+ have_tss = [ts.get("name") for ts in (have.get("trust_stores") or [])]
+ have_sps = [
+ sp.get("profile_name")
+ for sp in (have.get("security_profiles") or [])
+ ]
+
+ have_dict = {
+ "security_profiles": {
+ sp.get("profile_name"): sp
+ for sp in (have.get("security_profiles") or [])
+ },
+ "trust_stores": {
+ ts.get("name"): ts for ts in (have.get("trust_stores") or [])
+ },
+ }
+ used_ts = []
+ for sp in have_sps:
+ if sp not in want_sps:
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH + "=" + sp,
+ "method": DELETE,
+ }
+ )
+ commands.append(
+ update_states(
+ have_dict["security_profiles"][sp], "deleted"
+ )
+ )
+ else:
+ ts_name = have_dict.get("security_profiles", {}).get(sp, {}).get("trust_store")
+ if ts_name and ts_name not in used_ts:
+ used_ts.append(ts_name)
+
+ for ts in have_tss:
+ if ts not in want_tss and ts not in used_ts:
+ requests.append(
+ {"path": TRUST_STORE_PATH + "=" + ts, "method": DELETE}
+ )
+ commands.append(
+ update_states(have_dict["trust_stores"][ts], "deleted")
+ )
+
+ for ts in want.get("trust_stores") or []:
+ if ts != have_dict["trust_stores"].get(ts.get("name")):
+ requests.append(
+ {
+ "path": TRUST_STORE_PATH + "=" + ts.get("name"),
+ "method": PUT,
+ "data": mk_ts_config(ts),
+ }
+ )
+ commands.append(update_states(ts, "overridden"))
+ for sp in want.get("security_profiles") or []:
+ if sp != have_dict["security_profiles"].get(
+ sp.get("profile_name")
+ ):
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH
+ + "="
+ + sp.get("profile_name"),
+ "method": PUT,
+ "data": mk_sp_config(sp),
+ }
+ )
+ commands.append(update_states(sp, "overridden"))
+
+ return commands, requests
+
+ def _state_merged(self, want, have, diff):
+ """The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff or {}
+ requests = []
+
+ for ts in commands.get("trust_stores") or []:
+ requests.append(
+ {
+ "path": TRUST_STORE_PATH,
+ "method": PATCH,
+ "data": mk_ts_config(ts),
+ }
+ )
+
+ for sp in commands.get("security_profiles") or []:
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH,
+ "method": PATCH,
+ "data": mk_sp_config(sp),
+ }
+ )
+
+ if commands and requests:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have, diff):
+ """The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+ current_ts = [
+ ts.get("name")
+ for ts in (have.get("trust_stores") or [])
+ if ts.get("name")
+ ]
+ current_sp = [
+ sp.get("profile_name")
+ for sp in (have.get("security_profiles") or [])
+ if sp.get("profile_name")
+ ]
+ if not want:
+ commands = have
+ for sp in current_sp:
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH + "=" + sp,
+ "method": DELETE,
+ }
+ )
+ for ts in current_ts:
+ requests.append(
+ {"path": TRUST_STORE_PATH + "=" + ts, "method": DELETE}
+ )
+ else:
+ commands = remove_empties(want)
+
+ for sp in commands.get("security_profiles") or []:
+ if sp.get("profile_name") in current_sp:
+ requests.extend(mk_sp_delete(sp, have))
+ for ts in commands.get("trust_stores") or []:
+ if ts.get("name") in current_ts:
+ requests.extend(mk_ts_delete(ts, have))
+
+ if commands and requests:
+ commands = update_states([commands], "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+
+def sp_diff(want, have):
+ hsps = {}
+ wsps = {}
+ dsps = []
+ for hsp in have.get("security_profiles") or []:
+ hsps[hsp.get("profile_name")] = hsp
+ for wsp in want.get("security_profiles") or []:
+ wsps[wsp.get("profile_name")] = wsp
+
+ for spn, sp in wsps.items():
+ dsp = dict(hsps.get(spn))
+ # Pop each leaf from dsp that is not in sp
+ for k, v in dsp.items():
+ if not isinstance(dsp.get(k), list) and not isinstance(
+ dsp.get(k), dict
+ ):
+ if k not in sp:
+ dsp.pop(k)
+ for k, v in sp.items():
+ if not isinstance(dsp.get(k), list) and not isinstance(
+ dsp.get(k), dict
+ ):
+ if dsp.get(k) != v:
+ dsp[k] = v
+ else:
+ if v is not None:
+ dsp[k] = v
+ if dsp != hsps.get(spn):
+ dsps.append(dsp)
+ return dsps
+
+
+def ts_diff(want, have):
+ htss = {}
+ wtss = {}
+ dtss = []
+ for hts in have.get("trust_stores") or []:
+ htss[hts.get("name")] = hts
+ for wts in want.get("trust_stores") or []:
+ wtss[wts.get("name")] = wts
+
+ for tsn, ts in wtss.items():
+ dts = dict(htss.get(tsn))
+ for k, v in ts.items():
+ if not isinstance(dts.get(k), list) and not isinstance(
+ dts.get(k), dict
+ ):
+ if dts.get(k) != v:
+ dts[k] = v
+ else:
+ if v is not None:
+ dts[k] = v
+ if dts != htss.get(tsn):
+ dtss.append(dts)
+ return dtss
+
+
+def mk_sp_config(indata):
+ outdata = {
+ k.replace("_", "-"): v for k, v in indata.items() if v is not None
+ }
+ output = {
+ "openconfig-pki:security-profile": [
+ {"profile-name": outdata.get("profile-name"), "config": outdata}
+ ]
+ }
+ return output
+
+
+def mk_ts_config(indata):
+ outdata = {
+ k.replace("_", "-"): v for k, v in indata.items() if v is not None
+ }
+ output = {
+ "openconfig-pki:trust-store": [
+ {"name": outdata.get("name"), "config": outdata}
+ ]
+ }
+ return output
+
+
+def mk_sp_delete(want_sp, have):
+ requests = []
+ cur_sp = None
+ del_sp = {}
+ for csp in have.get("security_profiles") or []:
+ if csp.get("profile_name") == want_sp.get("profile_name"):
+ cur_sp = csp
+ break
+ if cur_sp:
+ for k, v in want_sp.items():
+ if v is not None and k != "profile_name":
+ if v == cur_sp.get(k) or isinstance(v, list):
+ del_sp[k] = v
+ if len(del_sp) == 0 and len(want_sp) <= 1:
+ requests = [
+ {
+ "path": SECURITY_PROFILE_PATH
+ + "="
+ + want_sp.get("profile_name"),
+ "method": DELETE,
+ }
+ ]
+ else:
+ for k, v in del_sp.items():
+ if isinstance(v, list):
+ for li in v:
+ if li in (cur_sp.get(k) or []):
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH
+ + "="
+ + want_sp.get("profile_name")
+ + "/config/"
+ + k.replace("_", "-")
+ + "="
+ + quote(li, safe=""),
+ "method": DELETE,
+ }
+ )
+ else:
+ requests.append(
+ {
+ "path": SECURITY_PROFILE_PATH
+ + "="
+ + want_sp.get("profile_name")
+ + "/config/"
+ + k.replace("_", "-"),
+ "method": DELETE,
+ }
+ )
+ return requests
+
+
+def mk_ts_delete(want_ts, have):
+ requests = []
+ cur_ts = None
+ del_ts = {}
+ for cts in have.get("trust_stores") or []:
+ if cts.get("name") == want_ts.get("name"):
+ cur_ts = cts
+ break
+ if cur_ts:
+ for k, v in want_ts.items():
+ if v is not None and k != "name":
+ if v == cur_ts.get(k) or isinstance(v, list):
+ del_ts[k] = v
+ if len(del_ts) == 0 and len(want_ts) <= 1:
+ requests = [
+ {
+ "path": TRUST_STORE_PATH + "=" + want_ts.get("name"),
+ "method": DELETE,
+ }
+ ]
+ else:
+ for k, v in del_ts.items():
+ if isinstance(v, list):
+ for li in v:
+ if li in (cur_ts.get(k) or []):
+ requests.append(
+ {
+ "path": TRUST_STORE_PATH
+ + "="
+ + want_ts.get("name")
+ + "/config/"
+ + k.replace("_", "-")
+ + "="
+ + quote(li, safe=""),
+ "method": DELETE,
+ }
+ )
+ else:
+ requests.append(
+ {
+ "path": TRUST_STORE_PATH
+ + "="
+ + want_ts.get("name")
+ + "/config/"
+ + k.replace("_", "-"),
+ "method": DELETE,
+ }
+ )
+ return requests
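
A quick illustration of the payload shape built by mk_sp_config in the new pki module (the function body is copied from above; the profile and trust-store names are made-up examples): underscores in option keys become hyphens, and the result is wrapped in the openconfig-pki security-profile list.

def mk_sp_config(indata):
    # Convert snake_case module options to the hyphenated OpenConfig leaf names.
    outdata = {k.replace('_', '-'): v for k, v in indata.items() if v is not None}
    return {
        'openconfig-pki:security-profile': [
            {'profile-name': outdata.get('profile-name'), 'config': outdata}
        ]
    }

print(mk_sp_config({'profile_name': 'rest', 'trust_store': 'default'}))
# {'openconfig-pki:security-profile': [{'profile-name': 'rest',
#   'config': {'profile-name': 'rest', 'trust-store': 'default'}}]}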
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py
index 371019d04..654e34dee 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_breakout/port_breakout.py
@@ -17,6 +17,7 @@ from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.c
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
+ search_obj_in_list
)
from ansible.module_utils.connection import ConnectionError
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
@@ -33,7 +34,6 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
PATCH = 'patch'
DELETE = 'delete'
-POST = 'post'
class Port_breakout(ConfigBase):
@@ -152,6 +152,52 @@ class Port_breakout(ConfigBase):
return commands, requests
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = diff
+ requests = self.get_modify_port_breakout_requests(commands, have)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """ The command generator when state is merged
+
+ :param want: the additive configuration as a dictionary
+ :param obj_in_have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = []
+ requests = []
+
+ # Delete port-breakout configuration for interfaces that are not specified
+ for cfg in have:
+ if not search_obj_in_list(cfg['name'], want, 'name'):
+ commands.append(cfg)
+ requests.append(self.get_delete_single_port_breakout(cfg['name'], cfg))
+
+ if commands:
+ commands = update_states(commands, "deleted")
+
+ add_requests = self.get_modify_port_breakout_requests(diff, have)
+ if len(add_requests) > 0:
+ commands.extend(update_states(diff, "overridden"))
+ requests.extend(add_requests)
+
+ return commands, requests
+
def _state_deleted(self, want, have, diff):
""" The command generator when state is deleted
@@ -161,7 +207,7 @@ class Port_breakout(ConfigBase):
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
- # if want is none, then delete all the port_breakouti except admin
+ # if want is none, then delete all the port_breakout except admin
if not want:
commands = have
else:
@@ -215,27 +261,6 @@ class Port_breakout(ConfigBase):
requests.append(req)
return requests
- def get_default_port_breakout_modes(self):
- def_port_breakout_modes = []
- request = [{"path": "operations/sonic-port-breakout:breakout_capabilities", "method": POST}]
- try:
- response = edit_config(self._module, to_request(self._module, request))
- except ConnectionError as exc:
- self._module.fail_json(msg=str(exc), code=exc.code)
-
- raw_port_breakout_list = []
- if "sonic-port-breakout:output" in response[0][1]:
- raw_port_breakout_list = response[0][1].get("sonic-port-breakout:output", {}).get('caps', [])
-
- for port_breakout in raw_port_breakout_list:
- name = port_breakout.get('port', None)
- mode = port_breakout.get('defmode', None)
- if name and mode:
- if '[' in mode:
- mode = mode[:mode.index('[')]
- def_port_breakout_modes.append({'name': name, 'mode': mode})
- return def_port_breakout_modes
-
def get_delete_port_breakout_requests(self, commands, have):
requests = []
if not commands:
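
For clarity, a rough sketch (with made-up interface names) of the pruning step in the new port_breakout _state_overridden above: any interface carrying breakout configuration in have but absent from want is scheduled for deletion before the remaining diff is applied.

def interfaces_to_delete(want, have):
    # Collect 'have' entries whose interface name is not present in 'want'.
    wanted = {cfg['name'] for cfg in want or []}
    return [cfg for cfg in have or [] if cfg['name'] not in wanted]

have = [{'name': '1/10', 'mode': '4x25G'}, {'name': '1/11', 'mode': '1x100G'}]
want = [{'name': '1/10', 'mode': '4x25G'}]
print(interfaces_to_delete(want, have))   # [{'name': '1/11', 'mode': '1x100G'}]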
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_group/port_group.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_group/port_group.py
new file mode 100644
index 000000000..37281403e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/port_group/port_group.py
@@ -0,0 +1,380 @@
+#
+# -*- coding: utf-8 -*-
+# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_port_group class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+"""
+The use of natsort causes a sanity error because it is not available in the Python version currently used.
+When natsort becomes available, the code here and below that uses it will be applied.
+from natsort import (
+ natsorted,
+ ns
+)
+"""
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import (
+ Facts,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states,
+ remove_empties_from_list
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
+from ansible.module_utils.connection import ConnectionError
+
+GET = "get"
+PATCH = 'patch'
+DELETE = 'delete'
+url = 'data/openconfig-port-group:port-groups/port-group'
+
+TEST_KEYS = [
+ {
+ 'config': {'id': ''}
+ }
+]
+TEST_KEYS_formatted_diff = [
+ {'config': {'id': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}}
+]
+
+
+class Port_group(ConfigBase):
+ """
+ The sonic_port_group class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'port_group',
+ ]
+
+ pg_default_speeds_ready = False
+ pg_default_speeds = []
+
+ def __init__(self, module):
+ super(Port_group, self).__init__(module)
+
+ if not Port_group.pg_default_speeds_ready:
+ Port_group.pg_default_speeds = self.get_port_group_default_speed()
+ Port_group.pg_default_speeds_ready = True
+
+ def get_port_group_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ port_group_facts = facts['ansible_network_resources'].get('port_group')
+ if not port_group_facts:
+ return []
+ return port_group_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = list()
+
+ existing_port_group_facts = self.get_port_group_facts()
+ commands, requests = self.set_config(existing_port_group_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_port_group_facts = self.get_port_group_facts()
+
+ result['before'] = existing_port_group_facts
+ if result['changed']:
+ result['after'] = changed_port_group_facts
+
+ new_config = changed_port_group_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_port_group_facts,
+ TEST_KEYS_formatted_diff)
+ # See the above comment about natsort module
+ # new_config = natsorted(new_config, key=lambda x: x['id'])
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_port_group_facts,
+ new_config,
+ self._module._verbosity)
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_port_group_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_port_group_facts
+
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+
+ state = self._module.params['state']
+
+ diff = get_diff(want, have, TEST_KEYS)
+
+ tmp_want = remove_empties_from_list(want)
+ new_want = self.remove_empty_dict_from_list(tmp_want)
+
+ new_diff = self.remove_empty_dict_from_list(diff)
+
+ if state == 'overridden':
+ commands, requests = self._state_overridden(new_want, have, new_diff)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(new_want, have, new_diff)
+ elif state == 'merged':
+ commands, requests = self._state_merged(new_want, have, new_diff)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(new_want, have, new_diff)
+
+ return commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = diff
+ requests = []
+ if commands:
+ requests = self.build_merge_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ new_want = self.patch_want_with_default(want)
+ commands = get_diff(new_want, have, TEST_KEYS)
+ requests = []
+ if commands:
+ requests = self.build_merge_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_merged(self, want, have, diff):
+ """ The command generator when state is merged
+
+ :param want: the additive configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = []
+ if commands:
+ requests = self.build_merge_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have, diff):
+ """ The command generator when state is deleted
+
+ :param want: the objects from which the configuration should be removed
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ # if want is none, then delete all the port groups
+
+ if not want:
+ tmp_commands = have
+ else:
+ tmp_commands = want
+ tmp_commands = self.preprocess_delete_commands(tmp_commands, have)
+
+ commands = get_diff(tmp_commands, Port_group.pg_default_speeds, TEST_KEYS)
+
+ requests = []
+ if commands:
+ requests = self.build_delete_requests(commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def search_port_groups(self, id, pgs):
+
+ found_pg = dict()
+ if pgs is not None:
+ for pg in pgs:
+ if pg['id'] == id:
+ found_pg = pg
+ return found_pg
+
+ def preprocess_delete_commands(self, commands, have):
+ new_commands = []
+ for cmd in commands:
+ pg_id = cmd['id']
+ pg = self.search_port_groups(pg_id, have)
+ if pg:
+ new_cmd = {'id': pg_id, 'speed': pg['speed']}
+ new_commands.append(new_cmd)
+
+ return new_commands
+
+ def remove_empty_dict_from_list(self, dict_list):
+ new_dict_list = []
+ if dict_list:
+ for dictt in dict_list:
+ if dictt:
+ new_dict_list.append(dictt)
+
+ return new_dict_list
+
+ def build_delete_requests(self, confs):
+ requests = []
+
+ for conf in confs:
+ pg_id = conf['id']
+ method = DELETE
+ pg_url = (url + '=%s/config/speed') % (pg_id)
+ request = {"path": pg_url, "method": method}
+ requests.append(request)
+
+ return requests
+
+ def build_merge_requests(self, confs):
+ requests = []
+ pgs = []
+ for conf in confs:
+ pg_id = conf['id']
+ if 'speed' in conf:
+ pg_conf = {'id': pg_id, 'speed': 'openconfig-if-ethernet:' + conf['speed']}
+ pg = {'id': pg_id, 'config': pg_conf}
+ pgs.append(pg)
+
+ if pgs:
+ payload = {"openconfig-port-group:port-group": pgs}
+ method = PATCH
+ pg_url = url
+ request = {"path": pg_url, "method": method, "data": payload}
+ requests.append(request)
+
+ return requests
+
+ def patch_want_with_default(self, want):
+ new_want = list()
+ for dpg in Port_group.pg_default_speeds:
+ pg_id = dpg['id']
+ pg = self.search_port_groups(pg_id, want)
+ if pg:
+ new_pg = {'id': pg_id, 'speed': pg['speed']}
+ else:
+ new_pg = {'id': pg_id, 'speed': dpg['speed']}
+
+ new_want.append(new_pg)
+ return new_want
+
+ def get_port_group_default_speed(self):
+ """Get all the port group default speeds"""
+
+ pgs_request = [{"path": "data/openconfig-port-group:port-groups/port-group", "method": GET}]
+ try:
+ pgs_response = edit_config(self._module, to_request(self._module, pgs_request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ pgs_config = []
+ if "openconfig-port-group:port-group" in pgs_response[0][1]:
+ pgs_config = pgs_response[0][1].get("openconfig-port-group:port-group", [])
+
+ pgs_dft_speeds = []
+ for pg_config in pgs_config:
+ pg_state = dict()
+ if 'state' in pg_config:
+ pg_state['id'] = pg_config['id']
+ dft_speed_str = pg_config['state'].get('default-speed', None)
+ if dft_speed_str:
+ pg_state['speed'] = dft_speed_str.split(":", 1)[-1]
+ pgs_dft_speeds.append(pg_state)
+
+ return pgs_dft_speeds
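
A short, illustrative note on the speed normalization used by the new port_group module (the example speed value is assumed): default speeds returned by the REST API carry the OpenConfig identity prefix, which get_port_group_default_speed strips with split(':', 1)[-1], and build_merge_requests adds back when constructing payloads.

raw_default = 'openconfig-if-ethernet:SPEED_10GB'    # as returned by the REST GET
normalized = raw_default.split(':', 1)[-1]           # stored in facts: 'SPEED_10GB'
payload_value = 'openconfig-if-ethernet:' + normalized
print(normalized, payload_value == raw_default)      # SPEED_10GB True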
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py
index d5c36d3e2..f4dc71214 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/prefix_lists/prefix_lists.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -40,7 +40,7 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
TEST_KEYS = [
{"config": {"afi": "", "name": ""}},
- {"prefixes": {"action": "", "ge": "", "le": "", "prefix": "", "sequence": ""}}
+ {"prefixes": {"ge": "", "le": "", "prefix": "", "sequence": ""}}
]
DELETE = "delete"
@@ -149,6 +149,10 @@ openconfig-routing-policy-ext:extended-prefixes/extended-prefix={},{},{}'
commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(diff)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(diff)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
ret_commands = commands
return ret_commands, requests
@@ -188,6 +192,51 @@ openconfig-routing-policy-ext:extended-prefixes/extended-prefix={},{},{}'
commands = []
return commands, requests
+ def _state_replaced(self, diff):
+ """ The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = diff
+ requests = self.get_modify_prefix_lists_requests(commands)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ if have and have != want:
+ del_requests = self.get_delete_all_prefix_list_cfg_requests()
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ mod_commands = want
+ mod_requests = self.get_modify_prefix_lists_requests(mod_commands)
+
+ if len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "overridden"))
+
+ return commands, requests
+
def get_modify_prefix_lists_requests(self, commands):
'''Traverse the input list of configuration "modify" commands obtained
from parsing the input playbook parameters. For each command,
@@ -456,3 +505,13 @@ openconfig-routing-policy-ext:extended-prefixes/extended-prefix={},{},{}'
prefix_net['prefixlen'] = int(prefix_val.split("/")[1])
return prefix_net
+
+ def sort_lists_in_config(self, config):
+ if config:
+ config.sort(key=self.get_name)
+ for cfg in config:
+ if 'prefixes' in cfg and cfg['prefixes']:
+ cfg['prefixes'].sort(key=lambda x: (x['sequence'], x['action'], x['prefix']))
+
+ def get_name(self, name):
+ return name.get('name')
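
As a small worked example (prefix data invented), the sorting added above for the overridden state makes the have != want comparison order-insensitive: prefix lists are sorted by name and their prefixes by (sequence, action, prefix).

config = [{
    'name': 'pfx1',
    'afi': 'ipv4',
    'prefixes': [
        {'sequence': 20, 'action': 'deny', 'prefix': '10.2.0.0/16'},
        {'sequence': 10, 'action': 'permit', 'prefix': '10.1.0.0/16'},
    ],
}]
config.sort(key=lambda cfg: cfg['name'])
for cfg in config:
    cfg['prefixes'].sort(key=lambda x: (x['sequence'], x['action'], x['prefix']))
print([p['sequence'] for p in config[0]['prefixes']])   # [10, 20]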
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py
index dfa65482f..264ffa014 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/radius_server/radius_server.py
@@ -27,14 +27,25 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
get_diff,
+ get_replaced_config,
normalize_interface_name,
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF,
+ get_new_config,
+ get_formatted_config_diff
+)
PATCH = 'patch'
DELETE = 'delete'
TEST_KEYS = [
{'host': {'name': ''}},
]
+TEST_KEYS_formatted_diff = [
+ {'__default_ops': {'__delete_op': __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF}},
+ {'host': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
class Radius_server(ConfigBase):
@@ -91,6 +102,17 @@ class Radius_server(ConfigBase):
if result['changed']:
result['after'] = changed_radius_server_facts
+ new_config = changed_radius_server_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_radius_server_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_radius_server_facts,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -180,6 +202,67 @@ class Radius_server(ConfigBase):
return commands, requests
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ add_commands = []
+ if replaced_config:
+ del_requests = self.get_delete_radius_server_requests(replaced_config, have)
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+ add_commands = want
+ else:
+ add_commands = diff
+
+ if add_commands:
+ add_requests = self.get_modify_radius_server_requests(add_commands, have)
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "replaced"))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ r_diff = get_diff(have, want, TEST_KEYS)
+ if have and (diff or r_diff):
+ del_requests = self.get_delete_radius_server_requests(have, have)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ want_commands = want
+ want_requests = self.get_modify_radius_server_requests(want_commands, have)
+
+ if len(want_requests) > 0:
+ requests.extend(want_requests)
+ commands.extend(update_states(want_commands, "overridden"))
+
+ return commands, requests
+
def get_radius_global_payload(self, conf):
payload = {}
global_cfg = {}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/route_maps/route_maps.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/route_maps/route_maps.py
new file mode 100644
index 000000000..0b40c30f2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/route_maps/route_maps.py
@@ -0,0 +1,2354 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_route_maps class
+The code in this file compares the current configuration (as a dict)
+to the configuration provided (as a dict) based on the contents of the
+currently executing playbook. The result of the comparison and the end state
+requested by the executing playbook are used to determine the command set
+necessary to bring the current configuration to its desired end-state.
+The resulting commands are then transmitted to the target device.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+ validate_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts \
+ import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils \
+ import (
+ get_diff,
+ update_states,
+ remove_empties_from_list,
+ get_normalize_interface_name,
+ check_required
+ )
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+
+TEST_KEYS = [
+ {"config": {"map_name": "", "sequence_num": ""}}
+]
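+# Note: with these test keys, a 'want' entry and a 'have' entry are treated as the
+# same route map statement when both 'map_name' and 'sequence_num' match; the
+# remaining attributes of matched entries are then compared individually.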
+
+DELETE = "delete"
+PATCH = "patch"
+
+
+class Route_maps(ConfigBase):
+ """
+ The sonic_route_maps class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'route_maps',
+ ]
+
+ route_maps_uri = 'data/openconfig-routing-policy:routing-policy/policy-definitions'
+ route_map_uri = route_maps_uri + '/policy-definition={0}'
+ route_map_stmt_uri = route_map_uri + '/statements/statement={1}'
+ route_map_stmt_base_uri = route_map_uri + '/statements/statement={1}/'
+ route_maps_data_path = 'openconfig-routing-policy:policy-definitions'
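+    # Illustrative expansion (hypothetical values): route_map_stmt_uri.format('rm1', '100')
+    # yields 'data/openconfig-routing-policy:routing-policy/policy-definitions'
+    # '/policy-definition=rm1/statements/statement=100'.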
+
+ set_community_rest_names = {
+ 'additive': 'openconfig-routing-policy-ext:ADDITIVE',
+ 'local_as': 'openconfig-bgp-types:NO_EXPORT_SUBCONFED',
+ 'no_advertise': 'openconfig-bgp-types:NO_ADVERTISE',
+ 'no_export': 'openconfig-bgp-types:NO_EXPORT',
+ 'no_peer': 'openconfig-bgp-types:NOPEER',
+ 'none': 'openconfig-bgp-types:NONE'
+ }
+
+ set_extcomm_rest_names = {
+ 'rt': 'route-target:',
+ 'soo': 'route-origin:'
+ }
+
+ def __init__(self, module):
+ super(Route_maps, self).__init__(module)
+
+ def get_route_maps_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset,
+ self.gather_network_resources)
+ route_maps_facts = facts['ansible_network_resources'].get('route_maps')
+ if not route_maps_facts:
+ return []
+ return route_maps_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = list()
+
+ existing_route_maps_facts = self.get_route_maps_facts()
+ commands, requests = self.set_config(existing_route_maps_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.errno)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_route_maps_facts = self.get_route_maps_facts()
+
+ result['before'] = existing_route_maps_facts
+ if result['changed']:
+ result['after'] = changed_route_maps_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_route_maps_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ if want:
+ want = self.validate_and_normalize_config(want)
+ else:
+ want = []
+
+ have = existing_route_maps_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ state = self._module.params['state']
+ if state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(want, have)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ # Delete replaced groupings
+ commands = deepcopy(want)
+ requests = self.get_delete_replaced_groupings(commands, have)
+ if not requests:
+ commands = []
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+
+ if requests:
+ modify_have = []
+ else:
+ modify_have = have
+
+ # Apply the commands from the playbook
+ diff = get_diff(want, modify_have, TEST_KEYS)
+ merged_commands = diff
+
+ replaced_requests = self.get_modify_route_maps_requests(merged_commands, want, modify_have)
+ requests.extend(replaced_requests)
+ if merged_commands and len(replaced_requests) > 0:
+ merged_commands = update_states(merged_commands, "replaced")
+ commands.extend(merged_commands)
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ if not want:
+ return commands, requests
+
+ # Determine if there is any configuration specified in the playbook
+ # that is not contained in the current configuration.
+ diff_requested = get_diff(want, have, TEST_KEYS)
+
+ # Determine if there is anything already configured that is not
+ # specified in the playbook.
+ diff_unwanted = get_diff(have, want, TEST_KEYS)
+
+ # Idempotency check: If the configuration already matches the
+ # requested configuration with no extra attributes, no
+ # commands should be executed on the device.
+ if not diff_requested and not diff_unwanted:
+ return commands, requests
+
+ # Delete all current route map configuration
+ commands = have
+ requests = self.get_delete_all_route_map_cfg_request()
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+
+ # Apply the commands from the playbook
+ merged_commands = want
+ overridden_requests = self.get_modify_route_maps_requests(merged_commands, want, [])
+ requests.extend(overridden_requests)
+ if merged_commands and len(overridden_requests) > 0:
+ merged_commands = update_states(merged_commands, "overridden")
+ commands.extend(merged_commands)
+ return commands, requests
+
+ def _state_merged(self, want, have):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ diff = get_diff(want, have, TEST_KEYS)
+ commands = diff
+ requests = self.get_modify_route_maps_requests(commands, want, have)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ requests = []
+ if not have or have == []:
+ commands = []
+ elif not want or want == []:
+ commands = have
+ requests = self.get_delete_all_route_map_cfg_request()
+ else:
+ commands = want
+ requests = self.get_delete_route_maps_requests(have, commands)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def get_modify_route_maps_requests(self, commands, want, have):
+ '''Traverse the input list of configuration "modify" commands
+ obtained from parsing the input playbook parameters. For each
+ command, create a route map configuration REST API to modify the route
+ map specified by the current command.'''
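+        # Illustrative request shape: all modified route maps are folded into a single
+        # PATCH request on route_maps_uri whose payload has the form
+        # {'openconfig-routing-policy:policy-definitions':
+        #     {'policy-definition': [<one entry per modified route map>]}}.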
+
+ requests = []
+ if not commands:
+ return requests
+
+ # Create URL and payload
+ route_maps_payload_list = []
+ route_maps_payload_dict = {'policy-definition': route_maps_payload_list}
+ for command in commands:
+ if command.get('action') is None:
+ self.insert_route_map_cmd_action(command, want)
+ route_map_payload = self.get_modify_single_route_map_request(command, have)
+ if route_map_payload:
+ route_maps_payload_list.append(route_map_payload)
+
+ # Note: This is consistent with current CLI behavior, but should be
+ # revisited if and when the SONiC REST implementation is enhanced
+ # for the "match peer" attribute.
+ self.route_map_remove_configured_match_peer(route_map_payload, have, requests)
+
+ route_maps_data = {self.route_maps_data_path: route_maps_payload_dict}
+ request = {'path': self.route_maps_uri, 'method': PATCH, 'data': route_maps_data}
+ requests.append(request)
+ return requests
+
+ def insert_route_map_cmd_action(self, command, want):
+ '''Insert the "action" value into the specified "command" if it is not
+ already present. This dictionary member will not be present in the
+ command obtained from the "diff" utility if it is unchanged from its
+ currently configured value because it is not a "difference" in the
+ configuration requested by the playbook versus the current
+ configuration. It is, however, needed in order to create the
+ appropriate REST API for modifying other attributes in the route map.'''
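+        # Illustrative example: if a playbook changes only the 'set' attributes of an
+        # existing statement, the diff omits 'action'; it is copied back here from the
+        # matching 'want' entry so that the generated payload can still populate the
+        # required 'policy-result' (ACCEPT_ROUTE/REJECT_ROUTE) value.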
+
+ conf_map_name = command.get('map_name', None)
+ conf_seq_num = command.get('sequence_num', None)
+ if not conf_map_name or not conf_seq_num:
+ return
+
+ conf_action = command.get('action', None)
+ if conf_action:
+ return
+
+ # Find the corresponding route map statement in the "want" dict
+ # list and insert it into the current "command" dict.
+ matching_map_in_want = self.get_matching_map(conf_map_name, conf_seq_num, want)
+ if matching_map_in_want:
+ conf_action = matching_map_in_want.get('action')
+ if conf_action is not None:
+ command['action'] = conf_action
+
+ def get_modify_single_route_map_request(self, command, have):
+ '''Create and return the appropriate set of route map REST API attributes
+ to modify the route map configuration specified by the current "command".'''
+
+ request = {}
+ if not command:
+ return request
+
+ conf_map_name = command.get('map_name', None)
+ conf_action = command.get('action', None)
+ conf_seq_num = command.get('sequence_num', None)
+ if not conf_map_name or not conf_action or not conf_seq_num:
+ return request
+
+ req_seq_num = str(conf_seq_num)
+
+ if conf_action == 'permit':
+ req_action = 'ACCEPT_ROUTE'
+ elif conf_action == 'deny':
+ req_action = 'REJECT_ROUTE'
+ else:
+ return request
+
+ # Create a "blank" template for the request
+ route_map_request = {
+ 'name': conf_map_name,
+ 'config': {'name': conf_map_name},
+ 'statements': {
+ 'statement': [
+ {
+ 'name': req_seq_num,
+ 'config': {
+ 'name': req_seq_num
+ },
+ 'actions': {
+ 'config': {
+ 'policy-result': req_action
+ }
+ }
+ }
+ ]
+ }
+ }
+
+ route_map_statement = route_map_request['statements']['statement'][0]
+
+ self.get_route_map_modify_match_attr(command, route_map_statement)
+ self.get_route_map_modify_set_attr(command, route_map_statement, have)
+ self.get_route_map_modify_call_attr(command, route_map_statement)
+
+ return route_map_request
+
+ def get_route_map_modify_match_attr(self, command, route_map_statement):
+        '''In the dict specified by the input route_map_statement parameter,
+ provide REST API definitions of all "match" attributes contained in the
+ user input command dict specified by the "command" input parameter
+ to this function.'''
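+        # Illustrative output (hypothetical set name 'AS_LIST_1'): a playbook entry
+        # 'match: {as_path: AS_LIST_1}' produces
+        # conditions['openconfig-bgp-policy:bgp-conditions']['match-as-path-set'] =
+        #     {'config': {'as-path-set': 'AS_LIST_1', 'match-set-options': 'ANY'}}.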
+
+ match_top = command.get('match')
+ if not match_top:
+ return
+
+ route_map_statement['conditions'] = {}
+
+ #
+ # Handle configuration for BGP policy "match" conditions
+ # ------------------------------------------------------
+ route_map_statement['conditions']['openconfig-bgp-policy:bgp-conditions'] = {}
+ route_map_match_bgp_policy = \
+ route_map_statement['conditions']['openconfig-bgp-policy:bgp-conditions']
+
+ # Handle match as_path
+ if match_top.get('as_path'):
+ route_map_match_bgp_policy['match-as-path-set'] = {
+ 'config': {
+ 'as-path-set': match_top['as_path'],
+ 'match-set-options': 'ANY'
+ }
+ }
+ # Handle match evpn
+ if match_top.get('evpn'):
+ route_map_match_bgp_policy['openconfig-policy-ext:match-evpn-set'] = \
+ {'config': {}}
+ route_map_match_bgp_evpn = \
+ route_map_match_bgp_policy[
+ 'openconfig-policy-ext:match-evpn-set']['config']
+ if match_top['evpn'].get('default_route') is not None:
+ boolval = self.yaml_bool_to_python_bool(match_top['evpn']['default_route'])
+ route_map_match_bgp_evpn['default-type5-route'] = boolval
+ if match_top['evpn'].get('route_type'):
+ route_type_rest_name = ('openconfig-bgp-policy-ext:' +
+ match_top['evpn']['route_type'].upper())
+ route_map_match_bgp_evpn['route-type'] = route_type_rest_name
+ if match_top['evpn'].get('vni'):
+ route_map_match_bgp_evpn['vni-number'] = match_top['evpn']['vni']
+ if not route_map_match_bgp_evpn:
+ route_map_match_bgp_policy.pop('openconfig-policy-ext:match-evpn-set')
+
+ # Handle BGP policy match configuration under the "config" dictionary
+ route_map_match_bgp_policy['config'] = {}
+ if match_top.get('local_preference'):
+ route_map_match_bgp_policy['config']['local-pref-eq'] = \
+ match_top['local_preference']
+ if match_top.get('metric'):
+ route_map_match_bgp_policy['config']['med-eq'] = match_top['metric']
+ if match_top.get('origin'):
+ route_map_match_bgp_policy['config']['origin-eq'] = match_top['origin'].upper()
+ if match_top.get('community'):
+ route_map_match_bgp_policy['config']['community-set'] = match_top['community']
+ if match_top.get('ext_comm'):
+ route_map_match_bgp_policy['config']['ext-community-set'] = match_top['ext_comm']
+ if match_top.get('ip') and match_top['ip'].get('next_hop'):
+ route_map_match_bgp_policy[
+ 'config']['openconfig-bgp-policy-ext:next-hop-set'] = match_top['ip']['next_hop']
+ if not route_map_match_bgp_policy['config']:
+ route_map_match_bgp_policy.pop('config')
+
+ if not route_map_match_bgp_policy:
+ route_map_statement['conditions'].pop('openconfig-bgp-policy:bgp-conditions')
+
+ # Handle match interface
+ if match_top.get('interface'):
+ route_map_statement['conditions']['match-interface'] = {
+ 'config': {'interface': match_top['interface']}
+ }
+
+ # Handle match IP address/prefix
+ if match_top.get('ip') and match_top['ip'].get('address'):
+ route_map_statement['conditions']['match-prefix-set'] = {
+ 'config': {
+ 'prefix-set': match_top['ip']['address'],
+ 'match-set-options': 'ANY'
+ }
+ }
+
+ # Handle match IPv6 address/prefix
+ if match_top.get('ipv6') and match_top['ipv6'].get('address'):
+ if not route_map_statement['conditions'].get('match-prefix-set'):
+ route_map_statement['conditions']['match-prefix-set'] = {
+ 'config': {
+ 'openconfig-routing-policy-ext:ipv6-prefix-set': match_top[
+ 'ipv6']['address'], 'match-set-options': 'ANY'
+ }
+ }
+ else:
+ route_map_statement[
+ 'conditions']['match-prefix-set']['config'][
+ 'openconfig-routing-policy-ext:ipv6-prefix-set'] = \
+ match_top['ipv6']['address']
+
+ # Handle match peer
+ if match_top.get('peer'):
+ peer_list = list(match_top['peer'].values())
+ route_map_statement['conditions']['match-neighbor-set'] = {
+ 'config': {
+ 'openconfig-routing-policy-ext:address': peer_list
+ }
+ }
+
+ # Handle match source protocol
+ if match_top.get('source_protocol'):
+ rest_protocol_name = ''
+ if match_top['source_protocol'] in ('bgp', 'ospf', 'static'):
+ rest_protocol_name = ('openconfig-policy-types:' +
+ match_top['source_protocol'].upper())
+ elif match_top['source_protocol'] == 'connected':
+ rest_protocol_name = 'openconfig-policy-types:DIRECTLY_CONNECTED'
+
+ route_map_statement['conditions']['config'] = \
+ {'install-protocol-eq': rest_protocol_name}
+
+ # Handle match source VRF
+ if match_top.get('source_vrf'):
+ route_map_statement[
+ 'conditions'][
+ 'openconfig-routing-policy-ext:match-src-network-instance'
+ ] = {'config': {'name': match_top['source_vrf']}}
+
+ # Handle match tag
+ if match_top.get('tag'):
+ route_map_statement['conditions']['match-tag-set'] = {
+ 'config': {
+ 'openconfig-routing-policy-ext:tag-value': [match_top['tag']]
+ }
+ }
+
+ def get_route_map_modify_set_attr(self, command, route_map_statement, have):
+        '''In the dict specified by the input route_map_statement parameter,
+ provide REST API definitions of all "set" attributes contained in the
+ user input command dict specified by the "command" input parameter
+ to this function.'''
+
+ cmd_set_top = command.get('set')
+ if not cmd_set_top:
+ return
+
+ # Get the current configuration (if any) for this route map statement
+ cfg_set_top = {}
+ conf_map_name = command.get('map_name')
+ conf_seq_num = command.get('sequence_num')
+ cmd_rmap_have = self.get_matching_map(conf_map_name, conf_seq_num, have)
+ if cmd_rmap_have:
+ cfg_set_top = cmd_rmap_have.get('set')
+
+ route_map_actions = route_map_statement['actions']
+
+ # Handle configuration for BGP policy "set" conditions
+ # ----------------------------------------------------
+ route_map_actions['openconfig-bgp-policy:bgp-actions'] = {}
+ route_map_bgp_actions = \
+ route_map_actions['openconfig-bgp-policy:bgp-actions'] = {}
+ # Handle 'set' AS path prepend
+ if cmd_set_top.get('as_path_prepend'):
+ route_map_bgp_actions['set-as-path-prepend'] = {
+ 'config': {
+ 'openconfig-routing-policy-ext:asn-list': cmd_set_top['as_path_prepend']
+ }
+ }
+
+        # Handle 'set' community list delete
+ if cmd_set_top.get('comm_list_delete'):
+ route_map_bgp_actions['set-community-delete'] = {
+ 'config': {
+ 'community-set-delete': cmd_set_top['comm_list_delete']
+ }
+ }
+
+ # Handle 'set' community
+ if cmd_set_top.get('community'):
+ route_map_bgp_actions['set-community'] = {
+ 'config': {
+ 'method': 'INLINE',
+ 'options': 'ADD'
+ },
+ 'inline': {
+ 'config': {
+ 'communities': []
+ }
+ }
+ }
+
+ rmap_set_communities_cfg = \
+ route_map_bgp_actions['set-community']['inline']['config']['communities']
+
+ if cmd_set_top['community'].get('community_number'):
+
+                # Abort the playbook if the community "none" attribute is configured.
+ if cfg_set_top:
+ if (cfg_set_top.get('community') and
+ cfg_set_top['community'].get('community_attributes') and
+ 'none' in cfg_set_top['community']['community_attributes']):
+ self._module.fail_json(
+ msg='\nPlaybook aborted: The route map "set" community '
+ '"none" attribute is configured.\n\nPlease remove '
+ 'the conflicting configuration to configure other '
+ 'community "set" attributes.\n')
+
+ comm_num_list = cmd_set_top['community']['community_number']
+
+ for comm_num in comm_num_list:
+ rmap_set_communities_cfg.append(comm_num)
+
+ if cmd_set_top['community'].get('community_attributes'):
+ comm_attr_list = []
+ comm_attr_list = cmd_set_top['community']['community_attributes']
+ if 'none' in comm_attr_list:
+ # Verify that no other community attributes are being requested
+ # at the same time as the "none" attribute and that no
+ # community attributes are currently configured. Abort the
+ # playbook execution if these conditions are not met.
+ if len(comm_attr_list) > 1 or rmap_set_communities_cfg:
+ self._module.fail_json(
+                            msg='\nPlaybook aborted: The route map "set" community "none" '
+ 'attribute cannot be configured when other "set" community '
+ 'attributes are requested or configured.\n\n'
+                                'Please revise the playbook to configure the "none" '
+ 'attribute.\n')
+
+ # Abort the playbook if other Community "set" attributes are
+ # currently configured.
+ if cfg_set_top:
+ if (cfg_set_top.get('community') and
+ (cfg_set_top['community'].get('community_number') or
+ (cfg_set_top['community'].get('community_attributes') and
+ 'none' not in cfg_set_top['community']['community_attributes']))):
+ self._module.fail_json(
+ msg='\nPlaybook aborted: The route map "set" community "none" '
+                                        'attribute cannot be configured when other "set" community '
+ 'attributes are requested or configured.\n\n'
+ 'Please remove the conflicting configuration to '
+                                        'configure the "none" attribute.\n')
+
+ # Proceed with configuring 'none' if the validity checks passed.
+ rmap_set_communities_cfg.append('openconfig-bgp-types:NONE')
+ else:
+
+                # Abort the playbook if the community "none" attribute is configured.
+ if cfg_set_top:
+ if (cfg_set_top.get('community') and
+ cfg_set_top['community'].get('community_attributes') and
+ 'none' in cfg_set_top['community']['community_attributes']):
+ self._module.fail_json(
+                            msg='\nPlaybook aborted: The route map "set" community "none" attribute is '
+ 'configured.\n\n'
+ 'Please remove the conflicting configuration to configure '
+ 'other community "set" attributes.\n')
+
+ comm_attr_rest_name = {
+ 'local_as': 'openconfig-bgp-types:NO_EXPORT_SUBCONFED',
+ 'no_advertise': 'openconfig-bgp-types:NO_ADVERTISE',
+ 'no_export': 'openconfig-bgp-types:NO_EXPORT',
+ 'no_peer': 'openconfig-bgp-types:NOPEER',
+ 'additive': 'openconfig-routing-policy-ext:ADDITIVE'
+ }
+
+ for comm_attr in comm_attr_list:
+ rmap_set_communities_cfg.append(comm_attr_rest_name[comm_attr])
+
+ # Handle set extcommunity
+ if cmd_set_top.get('extcommunity'):
+ route_map_bgp_actions['set-ext-community'] = {
+ 'config': {
+ 'method': 'INLINE',
+ 'options': 'ADD'
+ },
+ 'inline': {
+ 'config': {
+ 'communities': []
+ }
+ }
+ }
+
+ rmap_set_extcommunities_cfg = \
+ route_map_bgp_actions['set-ext-community']['inline']['config']['communities']
+
+ if cmd_set_top['extcommunity'].get('rt'):
+ rt_list = cmd_set_top['extcommunity']['rt']
+
+ for rt_val in rt_list:
+ rmap_set_extcommunities_cfg.append("route-target:" + rt_val)
+
+ if cmd_set_top['extcommunity'].get('soo'):
+ soo_list = cmd_set_top['extcommunity']['soo']
+
+ for soo in soo_list:
+ rmap_set_extcommunities_cfg.append("route-origin:" + soo)
+
+ #
+ # Handle configuration for BGP policy "set" conditions
+ # to be located within the "config" sub-dictionary
+ # ----------------------------------------------------
+ route_map_bgp_actions['config'] = {}
+ route_map_bgp_actions_cfg = \
+ route_map_actions['openconfig-bgp-policy:bgp-actions']['config']
+
+ # Handle set IP next hop.
+ if cmd_set_top.get('ip_next_hop'):
+ route_map_bgp_actions_cfg['set-next-hop'] = cmd_set_top['ip_next_hop']
+
+ # Handle set IPv6 next hop.
+ if cmd_set_top.get('ipv6_next_hop'):
+ if cmd_set_top['ipv6_next_hop'].get('global_addr'):
+ route_map_bgp_actions_cfg['set-ipv6-next-hop-global'] = \
+ cmd_set_top['ipv6_next_hop']['global_addr']
+ if cmd_set_top['ipv6_next_hop'].get('prefer_global') is not None:
+ boolval = \
+ self.yaml_bool_to_python_bool(cmd_set_top['ipv6_next_hop']['prefer_global'])
+ route_map_bgp_actions_cfg['set-ipv6-next-hop-prefer-global'] = boolval
+
+ # Handle set local preference.
+ if cmd_set_top.get('local_preference'):
+ route_map_bgp_actions_cfg['set-local-pref'] = cmd_set_top['local_preference']
+
+ # Handle set metric
+ if cmd_set_top.get('metric'):
+ route_map_actions['metric-action'] = {'config': {}}
+ route_map_metric_actions = route_map_actions['metric-action']['config']
+
+ if cmd_set_top['metric'].get('value'):
+ route_map_metric_actions['metric'] = cmd_set_top['metric']['value']
+ route_map_metric_actions['action'] = \
+ 'openconfig-routing-policy:METRIC_SET_VALUE'
+ route_map_bgp_actions_cfg['set-med'] = cmd_set_top['metric']['value']
+ elif cmd_set_top['metric'].get('rtt_action'):
+ if cmd_set_top['metric']['rtt_action'] == 'set':
+ route_map_metric_actions['action'] = \
+ 'openconfig-routing-policy:METRIC_SET_RTT'
+ elif cmd_set_top['metric']['rtt_action'] == 'add':
+ route_map_metric_actions['action'] = \
+ 'openconfig-routing-policy:METRIC_ADD_RTT'
+ elif cmd_set_top['metric']['rtt_action'] == 'subtract':
+ route_map_metric_actions['action'] = \
+ 'openconfig-routing-policy:METRIC_SUBTRACT_RTT'
+
+ if not route_map_metric_actions:
+ route_map_actions.pop('metric-action')
+
+ # Handle set origin
+ if cmd_set_top.get('origin'):
+ route_map_bgp_actions_cfg['set-route-origin'] = cmd_set_top['origin'].upper()
+
+ # Handle set weight
+ if cmd_set_top.get('weight'):
+ route_map_bgp_actions_cfg['set-weight'] = cmd_set_top['weight']
+
+ @staticmethod
+ def get_route_map_modify_call_attr(command, route_map_statement):
+        '''In the dict specified by the input route_map_statement parameter,
+ provide REST API definitions of the "call" attribute (if present)
+ contained in the user input command dict specified by the "command"
+ input parameter to this function.'''
+
+ call_val = command.get('call')
+ if not call_val:
+ return
+
+ if not route_map_statement.get('conditions'):
+ route_map_statement['conditions'] = {'config': {}}
+ elif not route_map_statement['conditions'].get('config'):
+ route_map_statement['conditions']['config'] = {}
+ route_map_statement['conditions']['config']['call-policy'] = call_val
+
+ def get_delete_all_route_map_cfg_request(self):
+ '''Append to the input list of REST API requests the REST API to
+ Delete all route map configuration'''
+ requests = [{'path': self.route_maps_uri, 'method': DELETE}]
+ return requests
+
+ def get_delete_one_route_map_cfg(self, conf_map_name, requests):
+ '''Append to the input list of REST API requests the REST API to
+ delete all configuration for the specified route map.'''
+
+ delete_rmap_path = self.route_map_uri.format(conf_map_name)
+ request = {'path': delete_rmap_path, 'method': DELETE}
+ requests.append(request)
+
+ def get_delete_route_map_stmt_cfg(self, command, requests):
+ '''Append to the input list of REST API requests the REST API to
+ delete all configuration for the route map "statement" (route
+ map sub-section) specified by the combination of the route
+ map name and "statement" sequence number in the input
+ "command" dict.'''
+ conf_map_name = command.get('map_name')
+ conf_seq_num = command.get('sequence_num')
+ req_seq_num = str(conf_seq_num)
+
+ delete_rmap_stmt_path = self.route_map_stmt_uri.format(conf_map_name, req_seq_num)
+ request = {'path': delete_rmap_stmt_path, 'method': DELETE}
+ requests.append(request)
+
+ def get_delete_route_maps_requests(self, have, commands):
+ '''Traverse the input list of configuration "delete" commands obtained
+ from parsing the input playbook parameters. For each command,
+ create and return the appropriate set of REST API requests to delete
+ the appropriate elements from the route map specified by the current command.'''
+
+ requests = []
+ if commands:
+ for command in commands:
+ # Create requests for "eligible" attributes within the current route
+ # map statement. The content of the "command" object, on return from
+ # execution has only the subset of currently configured attributes
+ # within the full group of requested attributes for deletion from
+ # this route map statement.
+ self.get_delete_single_route_map_requests(have, command, requests)
+ return requests
+
+ def get_delete_single_route_map_requests(self, have, command, requests):
+        '''Append to the input list of REST API requests the route map REST APIs
+        needed to delete the eligible requested attributes from the route map
+        configuration specified by the current "command".'''
+
+ if not command:
+ return
+
+ # Validate the current command.
+ conf_map_name = command.get('map_name', None)
+ if not conf_map_name:
+ command = {}
+ return
+ conf_seq_num = command.get('sequence_num', None)
+ if not conf_seq_num:
+ if self.any_rmap_inst_in_have(conf_map_name, have):
+ self.get_delete_one_route_map_cfg(conf_map_name, requests)
+ return
+
+ # Get the current configuration (if any) for this route map statement
+ cmd_rmap_have = self.get_matching_map(conf_map_name, conf_seq_num, have)
+ if not cmd_rmap_have:
+ command = {}
+ return
+
+ # Check for route map statement deletion before proceeding further.
+ cmd_match_top = command.get('match')
+ if cmd_match_top:
+ cmd_match_top = command['match']
+
+ cmd_set_top = command.get('set')
+ if cmd_set_top:
+ cmd_set_top = command['set']
+
+ if not cmd_match_top and not cmd_set_top:
+ self.get_delete_route_map_stmt_cfg(command, requests)
+ return
+
+ # Proceed with validity checking and execution
+ conf_action = command.get('action', None)
+ if not conf_action:
+ self._module.fail_json(
+                msg="\nThe 'action' attribute is required, but is absent "
+ "for route map {0} sequence number {1}\n".format(
+ conf_map_name, conf_seq_num))
+
+ if conf_action not in ('permit', 'deny'):
+ self._module.fail_json(
+                msg="\nInvalid 'action' attribute value {0} for "
+ "route map {1} sequence number {2}\n".format(
+ conf_action, conf_map_name, conf_seq_num))
+ command = {}
+ return
+
+ if cmd_match_top:
+ self.get_route_map_delete_match_attr(command, cmd_rmap_have, requests)
+ if cmd_set_top:
+ self.get_route_map_delete_set_attr(command, cmd_rmap_have, requests)
+ if command:
+ self.get_route_map_delete_call_attr(command, cmd_rmap_have, requests)
+
+ return
+
+ @staticmethod
+ def get_matching_map(conf_map_name, conf_seq_num, input_list):
+ '''In the input list of command or configuration dicts, find the route map
+ configuration "statement" (if it exists) for the specified map name
+ and sequence number.'''
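+        # Illustrative usage (hypothetical values): get_matching_map('rm1', 100, have)
+        # returns the entry in 'have' whose 'map_name' is 'rm1' and whose
+        # 'sequence_num' is 100, or {} when no such statement exists.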
+ for cfg_route_map in input_list:
+ if cfg_route_map.get('map_name') and cfg_route_map.get('sequence_num'):
+ if (cfg_route_map['map_name'] == conf_map_name and
+ cfg_route_map.get('sequence_num') == conf_seq_num):
+ return cfg_route_map
+
+ return {}
+
+ @staticmethod
+ def any_rmap_inst_in_have(conf_map_name, have):
+ '''In the current configuration on the target device, determine if there
+ is at least one configuration "statement" for the specified route map name
+ from the input playbook request.'''
+ for cfg_route_map in have:
+ if cfg_route_map.get('map_name'):
+ if cfg_route_map['map_name'] == conf_map_name:
+ return True
+
+ return False
+
+ def get_route_map_delete_match_attr(self, command, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST APIs needed
+ for deletion of all eligible "match" attributes contained in the
+ user input command dict specified by the "command" input parameter
+ to this function. Modify the contents of the "command" object to
+ remove any attributes that are not currently configured. These
+ attributes are not "eligible" for deletion and no REST API "request"
+ is generated for them.'''
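+        # Illustrative example (hypothetical tag value): deleting 'match: {tag: 20}'
+        # when 20 is the currently configured tag value produces a DELETE on
+        # .../conditions/match-tag-set/config/openconfig-routing-policy-ext:tag-value;
+        # otherwise 'tag' is simply dropped from the command.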
+
+ conf_map_name = command['map_name']
+ conf_seq_num = command['sequence_num']
+ req_seq_num = str(conf_seq_num)
+
+ match_top = command.get('match')
+ if not match_top:
+ return
+ match_keys = match_top.keys()
+
+ cfg_match_top = cmd_rmap_have.get('match')
+ if not cfg_match_top:
+ command.pop('match')
+ return
+ cfg_match_keys = cfg_match_top.keys()
+
+ match_both_keys = set(match_keys).intersection(cfg_match_keys)
+
+ # Remove any requested deletion items that aren't configured
+ match_pop_keys = set(match_keys).difference(match_both_keys)
+ for key in match_pop_keys:
+ match_top.pop(key)
+ if not match_top or not match_both_keys:
+ command.pop('match')
+ return
+
+ # Handle configuration for BGP policy "match" conditions
+ self.get_route_map_delete_match_bgp(command, match_both_keys, cmd_rmap_have, requests)
+ if not command.get('match'):
+ if 'match' in command:
+ command.pop('match')
+ return
+
+ # Handle generic top level match attributes.
+ generic_match_rest_attr = {
+ 'interface': 'match-interface',
+ 'source_vrf': 'openconfig-routing-policy-ext:match-src-network-instance',
+ 'tag': 'match-tag-set/config/openconfig-routing-policy-ext:tag-value',
+ 'source_protocol': 'config/install-protocol-eq'
+ }
+
+ match_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name, req_seq_num) +
+ 'conditions/')
+
+ for key in generic_match_rest_attr:
+ if key in match_both_keys and match_top[key] == cfg_match_top[key]:
+ request_uri = match_delete_req_base + generic_match_rest_attr[key]
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ elif key in match_top:
+ match_top.pop(key)
+ if not match_top:
+ command.pop('match')
+ return
+
+ # Handle match peer
+ peer_str = ''
+ if 'peer' in match_both_keys:
+ if (match_top['peer'].get('interface') and cfg_match_top['peer'].get('interface') and
+ match_top['peer']['interface'] == cfg_match_top['peer']['interface']):
+ peer_str = match_top['peer']['interface']
+ elif (match_top['peer'].get('ip') and cfg_match_top['peer'].get('ip') and
+ match_top['peer']['ip'] == cfg_match_top['peer']['ip']):
+ peer_str = match_top['peer']['ip']
+ elif (match_top['peer'].get('ipv6') and cfg_match_top['peer'].get('ipv6') and
+ match_top['peer']['ipv6'] == cfg_match_top['peer']['ipv6']):
+ peer_str = match_top['peer']['ipv6']
+ else:
+ match_top.pop('peer')
+ if not match_top:
+ command.pop('match')
+ return
+
+ if peer_str:
+ request_uri = (match_delete_req_base +
+ 'match-neighbor-set/config/'
+ 'openconfig-routing-policy-ext:address={0}'.format(peer_str))
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+
+ elif 'peer' in match_top:
+ match_top.pop('peer')
+ if not match_top:
+ command.pop('match')
+ return
+
+ # Handle match IP address/prefix
+ if ('ip' in match_both_keys and match_top['ip'].get('address') and
+ match_top['ip']['address'] == cfg_match_top['ip'].get('address')):
+ request_uri = match_delete_req_base + 'match-prefix-set/config/prefix-set'
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ elif 'ip' in match_top:
+ match_top.pop('ip')
+ if not match_top:
+ command.pop('match')
+ return
+
+ # Handle match IPv6 address/prefix
+ if ('ipv6' in match_both_keys and match_top['ipv6'].get('address') and
+ match_top['ipv6']['address'] == cfg_match_top['ipv6'].get('address')):
+ ipv6_attr_name = \
+ 'match-prefix-set/config/openconfig-routing-policy-ext:ipv6-prefix-set'
+ request_uri = (match_delete_req_base + ipv6_attr_name)
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ elif 'ipv6' in match_top:
+ match_top.pop('ipv6')
+ if not match_top:
+ command.pop('match')
+ return
+
+ def get_route_map_delete_match_bgp(self, command, match_both_keys, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST APIs needed
+ for deletion of all eligible "match" attributes defined within the
+ BGP match conditions section of the openconfig routing-policy
+ definitions for "policy-definitions" (route maps).'''
+
+ conf_map_name = command.get('map_name', None)
+ conf_seq_num = command.get('sequence_num', None)
+ req_seq_num = str(conf_seq_num)
+ match_top = command['match']
+ cfg_match_top = cmd_rmap_have.get('match')
+ route_map_stmt_base_uri_fmt = self.route_map_stmt_base_uri.format(conf_map_name,
+ req_seq_num)
+ bgp_match_delete_req_base = (route_map_stmt_base_uri_fmt +
+ 'conditions/openconfig-bgp-policy:bgp-conditions/')
+
+        # Handle BGP match items within the "config" sub-tree in the openconfig REST API definitions.
+ self.get_route_map_delete_match_bgp_cfg(command, match_both_keys, cmd_rmap_have, requests)
+
+ # Handle as_path
+ if 'as_path' in match_both_keys and match_top['as_path'] == cfg_match_top['as_path']:
+ request_uri = bgp_match_delete_req_base + 'match-as-path-set'
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ elif match_top.get('as_path'):
+ match_top.pop('as_path')
+
+ # Handle match evpn
+ if 'evpn' in match_both_keys:
+ evpn_cfg_delete_base = \
+ bgp_match_delete_req_base + 'openconfig-bgp-policy-ext:match-evpn-set/config/'
+ evpn_attrs = match_top['evpn']
+ evpn_match_keys = evpn_attrs.keys()
+ evpn_rest_attr = {
+ 'default_route': 'default-type5-route',
+ 'route_type': 'route-type',
+ 'vni': 'vni-number'
+ }
+ pop_list = []
+ for key in evpn_match_keys:
+ if (key not in cfg_match_top['evpn'] or
+ evpn_attrs[key] != cfg_match_top['evpn'][key]):
+ pop_list.append(key)
+ else:
+ request_uri = evpn_cfg_delete_base + evpn_rest_attr[key]
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ for key in pop_list:
+ match_top['evpn'].pop(key)
+ if not match_top['evpn']:
+ match_top.pop('evpn')
+
+ def get_route_map_delete_match_bgp_cfg(self, command, match_both_keys, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST APIs needed
+ for deletion of all eligible "match" attributes defined within the
+ BGP match conditions 'config' section of the openconfig routing-policy
+ definitions for "policy-definitions" (route maps).'''
+
+ match_top = command['match']
+ cfg_match_top = cmd_rmap_have.get('match')
+ conf_map_name = command['map_name']
+ conf_seq_num = command['sequence_num']
+ req_seq_num = str(conf_seq_num)
+ bgp_keys = {'metric', 'origin', 'local_preference', 'community', 'ext_comm', 'ip'}
+ delete_bgp_keys = bgp_keys.intersection(match_both_keys)
+ if not delete_bgp_keys:
+ return
+ delete_bgp_attrs = []
+ bgp_match_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name,
+ req_seq_num) +
+ 'conditions/openconfig-bgp-policy:bgp-conditions/config/')
+
+ # Check for IP next hop deletion. This is a special case because "next_hop" is
+ # a level below "ip" in the argspec hierarchy. If 'ip' is the only key in
+ # delete_bgp_keys, and IP next hop deletion is not required, there is no
+ # BGP condition match attribute deletion required.
+ if 'ip' in delete_bgp_keys:
+ if not match_top['ip'].get('next_hop') or not cfg_match_top['ip'].get('next_hop'):
+ delete_bgp_keys.remove('ip')
+ if 'next_hop' in match_top['ip']:
+ match_top['ip'].pop('next_hop')
+ if not match_top['ip']:
+ match_top.pop('ip')
+ if not match_top:
+ command.pop('match')
+ return
+
+ if not delete_bgp_keys:
+ return
+ else:
+ if match_top['ip']['next_hop'] == cfg_match_top['ip']['next_hop']:
+ request_uri = (bgp_match_delete_req_base +
+ 'openconfig-bgp-policy-ext:next-hop-set')
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ else:
+ match_top['ip'].pop('next_hop')
+ if not match_top['ip']:
+ match_top.pop('ip')
+ if not match_top:
+ command.pop('match')
+ return
+
+ delete_bgp_keys.remove('ip')
+ if not delete_bgp_keys:
+ return
+
+ # Check for deletion of other BGP match attributes.
+ bgp_rest_attr = {
+ 'community': 'community-set',
+ 'ext_comm': 'ext-community-set',
+ 'local_preference': 'local-pref-eq',
+ 'metric': 'med-eq',
+ 'origin': 'origin-eq'
+ }
+ for key in delete_bgp_keys:
+ if match_top[key] == cfg_match_top[key]:
+ bgp_rest_attr_key = bgp_rest_attr[key]
+ delete_bgp_attrs.append(bgp_rest_attr_key)
+ else:
+ match_top.pop(key)
+ if not match_top:
+ command.pop('match')
+ return
+
+ if not delete_bgp_attrs:
+ return
+
+ # Create requests for deletion of the eligible BGP match attributes.
+ for attr in delete_bgp_attrs:
+ request_uri = bgp_match_delete_req_base + attr
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+
+ def get_route_map_delete_set_attr(self, command, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST APIs needed
+ for deletion of all eligible "set" attributes contained in the
+ user input command dict specified by the "command" input parameter
+ to this function. Modify the contents of the "command" object to
+ remove any attributes that are not currently configured. These
+ attributes are not "eligible" for deletion and no REST API "request"
+ is generated for them.'''
+
+ cmd_set_top = command.get('set')
+ if not cmd_set_top:
+ return
+ set_keys = cmd_set_top.keys()
+
+ cfg_set_top = cmd_rmap_have.get('set')
+ if not cfg_set_top:
+ command.pop('set')
+ return
+ cfg_set_keys = cfg_set_top.keys()
+
+ set_both_keys = set(set_keys).intersection(cfg_set_keys)
+ if not set_both_keys:
+ command.pop('set')
+ return
+
+ conf_map_name = command['map_name']
+ conf_seq_num = command['sequence_num']
+ req_seq_num = str(conf_seq_num)
+ set_delete_base = (self.route_map_stmt_base_uri.format(conf_map_name,
+ req_seq_num) + 'actions/')
+
+ # Handle configuration for BGP policy "set" conditions
+ self.get_route_map_delete_set_bgp(command, set_both_keys, cmd_rmap_have, requests)
+ cmd_set_top = command.get('set')
+ if not cmd_set_top:
+ command.pop('set')
+ return
+
+ # Handle metric "set" attributes.
+ if 'metric' in set_both_keys:
+ set_delete_metric_base = set_delete_base + 'metric-action/config'
+ if cmd_set_top['metric'].get('rtt_action'):
+ if cmd_set_top['metric']['rtt_action'] == cfg_set_top['metric'].get('rtt_action'):
+ request_uri = set_delete_metric_base
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ else:
+ cmd_set_top.pop('metric')
+ if not cmd_set_top:
+ command.pop('set')
+ elif cmd_set_top['metric'].get('value'):
+ set_delete_bgp_base = set_delete_base + 'openconfig-bgp-policy:bgp-actions/'
+ if cmd_set_top['metric']['value'] == cfg_set_top['metric'].get('value'):
+ request = {'path': set_delete_metric_base, 'method': DELETE}
+ requests.append(request)
+ request = {
+ 'path': set_delete_bgp_base + 'config/set-med',
+ 'method': DELETE
+ }
+ requests.append(request)
+
+ else:
+ cmd_set_top.pop('metric')
+ if not cmd_set_top:
+ command.pop('set')
+ else:
+ # 'metric' is not in set_both_keys
+ if cmd_set_top.get('metric'):
+ cmd_set_top.pop('metric')
+ if not cmd_set_top:
+ command.pop('set')
+ return
+
+ def get_route_map_delete_set_bgp(self, command, set_both_keys, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST APIs needed
+ for deletion of all eligible "set" attributes defined within the
+ BGP "set" conditions section of the openconfig routing-policy
+ definitions for "policy-definitions" (route maps).'''
+
+ cmd_set_top = command['set']
+ cfg_set_top = cmd_rmap_have.get('set')
+ conf_map_name = command['map_name']
+ conf_seq_num = command['sequence_num']
+ req_seq_num = str(conf_seq_num)
+ bgp_set_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name, req_seq_num) +
+ 'actions/openconfig-bgp-policy:bgp-actions/')
+
+ # Handle BGP "set" items within the "config" sub-tree in the openconfig REST API definitons.
+ self.get_route_map_delete_set_bgp_cfg(command, set_both_keys, cmd_rmap_have, requests)
+
+ # Handle as_path_prepend
+ if ('as_path_prepend' in set_both_keys and
+ cmd_set_top['as_path_prepend'] == cfg_set_top['as_path_prepend']):
+ request_uri = bgp_set_delete_req_base + 'set-as-path-prepend'
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ else:
+ if cmd_set_top.get('as_path_prepend'):
+ cmd_set_top.pop('as_path_prepend')
+ if not cmd_set_top:
+ return
+
+ # Handle the "community list delete" (comm_list_delete) attribute
+ if ('comm_list_delete' in set_both_keys and
+ cmd_set_top['comm_list_delete'] == cfg_set_top['comm_list_delete']):
+ request_uri = bgp_set_delete_req_base + 'set-community-delete'
+ request = {'path': request_uri, 'method': DELETE}
+ requests.append(request)
+ else:
+ if cmd_set_top.get('comm_list_delete'):
+ cmd_set_top.pop('comm_list_delete')
+ if not cmd_set_top:
+ return
+
+ # Handle "set community": Handle named attributes first, then handle community numbers
+ if 'community' not in set_both_keys:
+ if cmd_set_top.get('community'):
+ cmd_set_top.pop('community')
+ if not cmd_set_top:
+ return
+ else:
+ community_attr_remove_list = []
+ set_community_delete_attrs = []
+ if cmd_set_top['community'].get('community_attributes'):
+ if cfg_set_top['community'].get('community_attributes'):
+ # Append eligible entries to the delete list. Remember which entries
+ # are ineligible.
+ for community_attr in cmd_set_top['community']['community_attributes']:
+ if community_attr in cfg_set_top['community']['community_attributes']:
+ community_rest_name = self.set_community_rest_names[community_attr]
+ set_community_delete_attrs.append(community_rest_name)
+ else:
+ community_attr_remove_list.append(community_attr)
+
+ # Delete ineligible entries from the command list.
+ for community_attr in community_attr_remove_list:
+ cmd_set_top['community']['community_attributes'].remove(community_attr)
+ if not cmd_set_top['community']['community_attributes']:
+ cmd_set_top['community'].pop('community_attributes')
+ else:
+ # No community attribute entries are configured. Pop the corresponding
+ # commands from the command list.
+ cmd_set_top['community'].pop('community_attributes')
+
+ if not cmd_set_top['community']:
+ cmd_set_top.pop('community')
+ if not cmd_set_top:
+ return
+
+ # Handle deletion of "set" community numbers.
+ if cmd_set_top.get('community') and cmd_set_top['community'].get('community_number'):
+ community_number_remove_list = []
+ if cfg_set_top['community'].get('community_number'):
+ # Append eligible entries to the delete list. Remember which entries
+ # are ineligible.
+ for community_number in cmd_set_top['community']['community_number']:
+ if community_number in cfg_set_top['community']['community_number']:
+ set_community_delete_attrs.append(community_number)
+ else:
+ community_number_remove_list.append(community_number)
+
+ # Delete ineligible entries from the command list.
+ for community_number in community_number_remove_list:
+ cmd_set_top['community']['community_number'].remove(community_number)
+ if not cmd_set_top['community']['community_number']:
+ cmd_set_top['community'].pop('community_number')
+ else:
+ # If no community number entries are configured, pop the entire
+ # community number command dict.
+ cmd_set_top['community'].pop('community_number')
+
+ if not cmd_set_top['community']:
+ cmd_set_top.pop('community')
+ if not cmd_set_top:
+ return
+
+ # Format and enqueue a request to delete eligible community attributes
+ if set_community_delete_attrs:
+ bgp_set_delete_community_uri = bgp_set_delete_req_base + 'set-community'
+ bgp_set_delete_comm_payload = \
+ {'openconfig-bgp-policy:set-community': {}}
+ bgp_set_delete_comm_payload_contents = \
+ bgp_set_delete_comm_payload['openconfig-bgp-policy:set-community']
+ bgp_set_delete_comm_payload_contents['config'] = \
+ {'method': 'INLINE', 'options': 'REMOVE'}
+ bgp_set_delete_comm_payload_contents['inline'] = \
+ {'config': {'communities': set_community_delete_attrs}}
+
+ request = {
+ 'path': bgp_set_delete_community_uri,
+ 'method': PATCH,
+ 'data': bgp_set_delete_comm_payload
+ }
+ requests.append(request)
+
+ # Handle set "extended community" deletion
+ if 'extcommunity' not in set_both_keys:
+ if cmd_set_top.get('extcommunity'):
+ cmd_set_top.pop('extcommunity')
+ if not cmd_set_top:
+ return
+ else:
+ set_extcommunity_delete_attrs = []
+
+ for extcomm_type in self.set_extcomm_rest_names:
+ ext_comm_number_remove_list = []
+ if cmd_set_top['extcommunity'].get(extcomm_type):
+ if cfg_set_top['extcommunity'].get(extcomm_type):
+ # Append eligible entries to the delete list. Remember which entries
+ # are ineligible.
+ for extcomm_number in cmd_set_top['extcommunity'][extcomm_type]:
+ if extcomm_number in cfg_set_top['extcommunity'][extcomm_type]:
+ set_extcommunity_delete_attrs.append(
+ self.set_extcomm_rest_names[extcomm_type] + extcomm_number)
+ else:
+ ext_comm_number_remove_list.append(extcomm_number)
+
+ # Delete ineligible entries from the command list.
+ for extcomm_number in ext_comm_number_remove_list:
+ cmd_set_top['extcommunity'][extcomm_type].remove(extcomm_number)
+ if not cmd_set_top['extcommunity'][extcomm_type]:
+ cmd_set_top['extcommunity'].pop(extcomm_type)
+ else:
+ # If no extcommunity entries of this type are configured,
+ # pop the entire extcommunity command sub-dict for this type.
+ cmd_set_top['extcommunity'].pop(extcomm_type)
+
+ if not cmd_set_top['extcommunity']:
+ cmd_set_top.pop('extcommunity')
+ if not cmd_set_top:
+ return
+
+ # Format and enqueue a request to delete eligible extcommunity attributes
+ if set_extcommunity_delete_attrs:
+ bgp_set_delete_extcomm_uri = bgp_set_delete_req_base + 'set-ext-community'
+ bgp_set_delete_extcomm_payload = \
+ {'openconfig-bgp-policy:set-ext-community': {}}
+ bgp_set_delete_comm_payload_contents = \
+ bgp_set_delete_extcomm_payload['openconfig-bgp-policy:set-ext-community']
+ bgp_set_delete_comm_payload_contents['config'] = \
+ {'method': 'INLINE', 'options': 'REMOVE'}
+ bgp_set_delete_comm_payload_contents['inline'] = \
+ {'config': {'communities': set_extcommunity_delete_attrs}}
+
+ request = {
+ 'path': bgp_set_delete_extcomm_uri,
+ 'method': PATCH,
+ 'data': bgp_set_delete_extcomm_payload
+ }
+ requests.append(request)
+
+ def get_route_map_delete_set_bgp_cfg(self, command, set_both_keys, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST APIs needed
+ for deletion of all eligible "set" attributes defined within the
+ BGP set conditions 'config' section of the openconfig routing-policy
+ definitions for "policy-definitions" (route maps).'''
+
+ cmd_set_top = command['set']
+
+ cfg_set_top = cmd_rmap_have.get('set')
+ conf_map_name = command['map_name']
+ conf_seq_num = command['sequence_num']
+ req_seq_num = str(conf_seq_num)
+ bgp_set_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name, req_seq_num) +
+ 'actions/openconfig-bgp-policy:bgp-actions/config/')
+ # Note: Although 'metric' (REST API 'set-med') is in this REST API configuration
+ # group, it is handled separately as part of deleting the top level, functionally
+ # related 'metric-action' attribute.
+ bgp_cfg_keys = {'ip_next_hop', 'origin', 'local_preference', 'ipv6_next_hop', 'weight'}
+ delete_bgp_keys = bgp_cfg_keys.intersection(set_both_keys)
+ if not delete_bgp_keys:
+ for bgp_key in bgp_cfg_keys:
+ if bgp_key in cmd_set_top:
+ cmd_set_top.pop(bgp_key)
+ return
+
+ delete_bgp_attrs = []
+
+ # Handle the special case of ipv6_next_hop
+ if 'ipv6_next_hop' in delete_bgp_keys:
+ delete_bgp_keys.remove('ipv6_next_hop')
+ ipv6_next_hop_rest_names = {
+ 'global_addr': 'set-ipv6-next-hop-global',
+ 'prefer_global': 'set-ipv6-next-hop-prefer-global'
+ }
+ for ipv6_next_hop_key in ipv6_next_hop_rest_names:
+ if cmd_set_top['ipv6_next_hop'].get(ipv6_next_hop_key) is not None:
+ if (cmd_set_top['ipv6_next_hop'][ipv6_next_hop_key] ==
+ cfg_set_top['ipv6_next_hop'].get(ipv6_next_hop_key)):
+ delete_bgp_attrs.append(ipv6_next_hop_rest_names[ipv6_next_hop_key])
+ else:
+ cmd_set_top['ipv6_next_hop'].pop(ipv6_next_hop_key)
+ if not cmd_set_top['ipv6_next_hop']:
+ cmd_set_top.pop('ipv6_next_hop')
+ if not cmd_set_top:
+ return
+
+ if not delete_bgp_keys and not delete_bgp_attrs:
+ return
+
+ # Handle other BGP "config" attributes
+ bgp_cfg_rest_names = {
+ 'ip_next_hop': 'set-next-hop',
+ 'local_preference': 'set-local-pref',
+ 'origin': 'set-route-origin',
+ 'weight': 'set-weight'
+ }
+
+ for bgp_cfg_key in bgp_cfg_rest_names:
+ if bgp_cfg_key in delete_bgp_keys:
+ if cmd_set_top[bgp_cfg_key] == cfg_set_top[bgp_cfg_key]:
+ delete_bgp_attrs.append(bgp_cfg_rest_names[bgp_cfg_key])
+ else:
+ cmd_set_top.pop(bgp_cfg_key)
+
+ if not cmd_set_top:
+ command.pop('set')
+ return
+
+ for delete_bgp_attr in delete_bgp_attrs:
+ del_set_bgp_cfg_uri = bgp_set_delete_req_base + delete_bgp_attr
+ request = {'path': del_set_bgp_cfg_uri, 'method': DELETE}
+ requests.append(request)
+
+ def get_route_map_delete_call_attr(self, command, cmd_rmap_have, requests):
+ '''Append to the input list of REST API requests the REST API needed
+ for deletion of the "call" attribute if this attribute it contained in the
+ user input command dict specified by the "command" input parameter
+ to this function and it is currently configured. Modify the contents of
+ the "command" object to remove the "call" attribute if it is not currently
+ configured.'''
+
+ if not command.get('call'):
+ return
+
+ if not command['call'] == cmd_rmap_have.get('call'):
+ command.pop('call')
+ return
+
+ conf_map_name = command['map_name']
+ req_seq_num = str(command['sequence_num'])
+
+ call_delete_req_uri = \
+ (self.route_map_stmt_base_uri.format(
+ conf_map_name, req_seq_num) + 'conditions/config/call-policy')
+ request = {'path': call_delete_req_uri, 'method': DELETE}
+ requests.append(request)
+
+ @staticmethod
+ def yaml_bool_to_python_bool(yaml_bool):
+ '''Convert the input YAML bool value to a Python bool value'''
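+        # Illustrative mapping: None -> False, False -> False, True -> True; any other
+        # truthy value supplied to this helper is also mapped to True.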
+ boolval = False
+ if yaml_bool is None:
+ boolval = False
+ elif yaml_bool:
+ boolval = True
+
+ return boolval
+
+ def route_map_remove_configured_match_peer(self, route_map_payload, have, requests):
+ '''If a route map "match peer" condition is configured in the route map
+ statement corresponding to the incoming route map update request
+ specified by the "route_map_payload" input parameter, equeue a REST API request
+ to delete it.'''
+
+ if (route_map_payload['statements']['statement'][0].get('conditions') and
+ route_map_payload['statements']['statement'][0]
+ ['conditions'].get('match-neighbor-set')):
+ peer = self.match_peer_configured(route_map_payload, have)
+ if peer:
+ request = self.create_match_peer_delete_request(route_map_payload, peer)
+ if request:
+ requests.append(request)
+
+ def match_peer_configured(self, route_map_payload, have):
+ '''Determine if the "match peer ..." condition is already configured for the
+ route map statement corresponding to the incoming route map update request
+ specified by the "route_map_payload" input parameter. Return the peer string
+ if a "match peer" condition is already configured. Otherwise, return an empty
+        string.'''
+
+ if not route_map_payload or not have:
+ return ''
+
+ conf_map_name = route_map_payload.get('name')
+ conf_seq_num = (route_map_payload['statements']['statement'][0]['name'])
+ if not conf_map_name or not conf_seq_num:
+ return ''
+
+ # Get the current configuration (if any) for this route map statement
+ cmd_rmap_have = self.get_matching_map(conf_map_name, int(conf_seq_num), have)
+ if (not cmd_rmap_have or not cmd_rmap_have.get('match') or
+ not cmd_rmap_have['match'].get('peer')):
+ return ''
+
+ peer_dict = cmd_rmap_have['match']['peer']
+ if peer_dict.get('interface'):
+ peer_str = peer_dict['interface']
+ elif peer_dict.get('ip'):
+ peer_str = peer_dict['ip']
+ elif peer_dict.get('ipv6'):
+ peer_str = peer_dict['ipv6']
+ else:
+ return ''
+
+ return peer_str
+
+ def create_match_peer_delete_request(self, route_map_payload, peer_str):
+ '''Create a request to delete the current "match peer" configuration for the
+ route map statement corresponding to the incoming route map update request
+ specified by the "route_map_payload," input parameter. Return the created request.'''
+
+ if not route_map_payload:
+ return {}
+
+ conf_map_name = route_map_payload.get('name')
+ conf_seq_num = route_map_payload['statements']['statement'][0]['name']
+ if not conf_map_name or not conf_seq_num:
+ return {}
+ match_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name, conf_seq_num) +
+ 'conditions/')
+
+ request_uri = (match_delete_req_base +
+ 'match-neighbor-set/config/'
+ 'openconfig-routing-policy-ext:address={0}'.format(peer_str))
+ request = {'path': request_uri, 'method': DELETE}
+ return request
+
+ def get_delete_replaced_groupings(self, commands, have):
+ '''For each of the route maps specified in the "commands" input list,
+ create requests to delete any existing route map configuration
+ groupings for which modified attribute requests are specified.'''
+
+ requests = []
+ for command in commands:
+ self.get_delete_one_map_replaced_groupings(command, have, requests)
+ return requests
+
+ def get_delete_one_map_replaced_groupings(self, command, have, requests):
+ '''For the route map specified by the input "command", create requests
+ to delete any existing route map configuration groupings for which
+ modified attribute requests are specified'''
+
+ if not command:
+ return {}
+
+ conf_map_name = command.get('map_name', None)
+ conf_seq_num = command.get('sequence_num', None)
+ if not conf_map_name or not conf_seq_num:
+ return {}
+
+ # Get the current configuration (if any) for this route map
+ cmd_rmap_have = self.get_matching_map(conf_map_name, conf_seq_num, have)
+
+ # If there's nothing configured for this route map, there's nothing
+ # to delete.
+ if not cmd_rmap_have:
+ command = {}
+ return command
+
+ self.get_delete_route_map_replaced_match_groupings(command, cmd_rmap_have, requests)
+ replaced_set_group_requests = []
+ self.get_delete_route_map_replaced_set_groupings(command, cmd_rmap_have,
+ replaced_set_group_requests)
+ if replaced_set_group_requests:
+ requests.extend(replaced_set_group_requests)
+
+ # Note: Because the "call" route map attribute is a "flat" attribute, not
+ # a dictionary, no "pre-delete" is required for this branch of the route map
+ # argspec for handling of "replaced" state
+
+ return command
+
+ def get_delete_route_map_replaced_match_groupings(self, command, cmd_rmap_have, requests):
+ '''For the route map specified by the input "command", create requests
+ to delete any existing route map "match" configuration groupings for which
+ modified attribute requests are specified'''
+
+ if not command.get('match'):
+ return
+
+ conf_map_name = command.get('map_name', None)
+ conf_seq_num = command.get('sequence_num', None)
+ req_seq_num = str(conf_seq_num)
+
+ cmd_match_top = command['match']
+ cfg_match_top = cmd_rmap_have.get('match')
+
+ # If there are no 'match' attributes configured for this route map,
+ # there's nothing to delete.
+ if not cfg_match_top:
+ command.pop('match')
+ return
+
+ match_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name, req_seq_num) +
+ 'conditions/')
+
+ # Obtain the set of "match" keys for which changes have been requested and
+ # the set of "match" keys that are currently configured.
+ cmd_match_keys = cmd_match_top.keys()
+ cfg_match_keys = cfg_match_top.keys()
+
+ peer_str = ''
+ if 'peer' in cfg_match_keys:
+ peer_dict = cfg_match_top['peer']
+ # Only one peer key at a time can be configured.
+ peer_key = list(peer_dict.keys())[0]
+ peer_str = peer_dict[peer_key]
+
+ bgp_match_delete_req_base = match_delete_req_base + 'openconfig-bgp-policy:bgp-conditions/'
+ match_top_level_keys = [
+ 'as_path',
+ 'community',
+ 'ext_comm',
+ 'interface',
+ 'ipv6',
+ 'local_preference',
+ 'metric',
+ 'origin',
+ 'peer',
+ 'source_protocol',
+ 'source_vrf',
+ 'tag'
+ ]
+
+ match_multi_level_keys = [
+ 'evpn',
+ 'ip',
+ ]
+
+ match_uri_attr = {
+ 'as_path': bgp_match_delete_req_base + 'match-as-path-set',
+ 'community': bgp_match_delete_req_base + 'config/community-set',
+ 'evpn': bgp_match_delete_req_base + 'openconfig-bgp-policy-ext:match-evpn-set/config/',
+ 'ext_comm': bgp_match_delete_req_base + 'config/ext-community-set',
+ 'interface': match_delete_req_base + 'match-interface',
+ 'ip': {
+ 'address': match_delete_req_base + 'match-prefix-set/config/prefix-set',
+ 'next_hop': (bgp_match_delete_req_base +
+ 'config/openconfig-bgp-policy-ext:next-hop-set')
+ },
+ 'ipv6': (match_delete_req_base +
+ 'match-prefix-set/config/openconfig-routing-policy-ext:ipv6-prefix-set'),
+ 'local_preference': bgp_match_delete_req_base + 'config/local-pref-eq',
+ 'metric': bgp_match_delete_req_base + 'config/med-eq',
+ 'origin': bgp_match_delete_req_base + 'config/origin-eq',
+ 'peer': (match_delete_req_base +
+ 'match-neighbor-set/config/'
+ 'openconfig-routing-policy-ext:address={0}'.format(peer_str)),
+ 'source_protocol': match_delete_req_base + 'config/install-protocol-eq',
+ 'source_vrf': (match_delete_req_base +
+ 'openconfig-routing-policy-ext:match-src-network-instance'),
+ 'tag': (match_delete_req_base +
+ 'match-tag-set/config/openconfig-routing-policy-ext:tag-value')
+ }
+
+ # Remove all appropriate "match" configuration for this route map if any of the
+ # following criteria are met: (See the note below regarding what configuration
+ # is "appropriate"for deletion.)
+ #
+ # 1) Any top level attribute is specified with a value different from its current
+ # configured value.
+ # 2) Any top level attribute is specified that is not currently configured.
+ # 3) The set of top level attributes specified does not include all currently
+ # configured attributes (regardless of whether the specified values for
+ # these attributes are the same as the ones currently configured).
+ # (Note: Although the IPv6 attribute is defined as a nested dictionary
+ # to allow for future expansion, it is handled here as a top level
+ # attribute because it currently has only one member.)
+ #
+ # When deletion has been triggered, an attribute is deleted only if it is
+ # not present at all in the requested configuration. (If it is present in
+ # the requested configuration, the "merge" phase of the "replaced" state
+ # operation will modify it as needed, so it doesn't need to be explicitly
+ # deleted during the "deletion" phase.)
+ #
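+ # Illustrative example (hypothetical values): with
+ #   cmd_match_top = {'metric': 20, 'origin': 'igp'} and
+ #   cfg_match_top = {'metric': 10, 'tag': 5},
+ # the symmetric difference of the top level key sets is {'origin', 'tag'}
+ # (criteria 2 and 3) and the common key 'metric' has differing values
+ # (criterion 1), so deletion of eligible "match" configuration is triggered.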
+ cfg_top_level_key_set = set(cfg_match_keys).intersection(set(match_top_level_keys))
+ cmd_top_level_key_set = set(cmd_match_keys).intersection(set(match_top_level_keys))
+ symmetric_diff_set = cmd_top_level_key_set.symmetric_difference(cfg_top_level_key_set)
+ intersection_diff_set = cmd_top_level_key_set.intersection(cfg_top_level_key_set)
+ cmd_delete_dict = {}
+ if (cmd_top_level_key_set and symmetric_diff_set or
+ (any(keyname for keyname in intersection_diff_set if
+ cmd_match_top[keyname] != cfg_match_top[keyname]))):
+
+ # Deletion has been triggered. First, delete all appropriate top level
+ # attributes.
+ self.delete_replaced_dict_config(
+ cfg_key_set=cfg_top_level_key_set,
+ cmd_key_set=cmd_top_level_key_set,
+ cfg_parent_dict=cfg_match_top,
+ uri_attr=match_uri_attr,
+ uri_dict_key='cfg_dict_member_key',
+ deletion_dict=cmd_delete_dict,
+ requests=requests)
+
+ # Next, delete all appropriate sub dictionary attributes.
+ match_dict_deletions = {}
+ for match_key in match_multi_level_keys:
+ cfg_key_set = {}
+ cmd_key_set = {}
+ if match_key in cfg_match_top:
+ cfg_key_set = set(cfg_match_top[match_key].keys())
+ if match_key in cfg_match_top:
+ cmd_key_set = ([])
+ if cmd_match_top.get(match_key):
+ cmd_key_set = set(cmd_match_top[match_key].keys())
+ match_dict_deletions[match_key] = {}
+ match_dict_deletions_subdict = match_dict_deletions[match_key]
+ self.delete_replaced_dict_config(
+ cfg_key_set=cfg_key_set,
+ cmd_key_set=cmd_key_set,
+ cfg_parent_dict=cfg_match_top[match_key],
+ uri_attr=match_uri_attr,
+ uri_dict_key=match_key,
+ deletion_dict=match_dict_deletions_subdict,
+ requests=requests)
+
+ # Update the dict specifying deleted commands
+ command.pop('match')
+ if cmd_delete_dict:
+ command['match'] = cmd_delete_dict
+ command['match'].update(match_dict_deletions)
+ return
+
+ # If no top level attribute changes were requested, check for changes in
+ # dictionaries nested below the top level.
+ # -----------------------------------------------------------------------
+ match_key_deletions = {}
+ for match_key in match_multi_level_keys:
+ if match_key in cmd_match_top:
+ if match_key in cfg_match_top:
+ cmd_key_set = set((cmd_match_top[match_key].keys()))
+ cfg_key_set = set(cfg_match_top[match_key].keys())
+ symmetric_diff_set = cmd_key_set.symmetric_difference(cfg_key_set)
+ intersection_diff_set = cmd_key_set.intersection(cfg_key_set)
+ if (symmetric_diff_set or
+ (any(keyname for keyname in intersection_diff_set if
+ cmd_match_top[match_key][keyname] !=
+ cfg_match_top[match_key][keyname]))):
+
+ match_key_deletions[match_key] = {}
+ match_key_deletions_subdict = match_key_deletions[match_key]
+ self.delete_replaced_dict_config(
+ cfg_key_set=cfg_key_set,
+ cmd_key_set=cmd_key_set,
+ cfg_parent_dict=cfg_match_top[match_key],
+ uri_attr=match_uri_attr,
+ uri_dict_key=match_key,
+ deletion_dict=match_key_deletions_subdict,
+ requests=requests)
+
+ command.pop('match')
+ if match_key_deletions:
+ command['match'] = match_key_deletions
+
+ @staticmethod
+ def delete_replaced_dict_config(**in_args):
+ ''' Create and enqueue deletion requests for the appropriate attributes in the
+ configuration dictionary specified by "cfg_parent_dict". Update the input
+ deletion_dict with the deleted attributes.
+ The input 'in_args' is assumed to contain the following keyword arguments:
+
+ cfg_key_set: The set of currently configured keys for the target dict
+
+ cmd_key_set: The set of currently requested update keys for the target dict
+
+ cfg_parent_dict: The configured dictionary containing the input key set
+
+ uri_attr: a dictionary specifying REST URIs keyed by argspec keys
+
+ uri_dict_key: The key for the top level attribute to be used for URI lookup. If set
+ to the string value 'cfg_dict_member_key', the current value of 'cfg_dict_member_key'
+ is used. Otherwise, the specified value is used directly.
+
+ deletion_dict: a dictionary containing attributes deleted from the parent dict
+
+ requests: The list of REST API requests for the executing playbook section
+ '''
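+ # Illustrative example (hypothetical values): called with
+ #   cfg_key_set={'origin', 'tag'}, cmd_key_set={'origin'},
+ #   uri_dict_key='cfg_dict_member_key',
+ # only 'tag' (configured but absent from the requested update) is processed:
+ # a DELETE request for uri_attr['tag'] is enqueued, and {'tag': <configured value>}
+ # is added to deletion_dict; 'origin' is left for the subsequent merge phase.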
+
+ # Set the default uri_key value.
+ uri_key = in_args['uri_dict_key']
+
+ # Iterate through members of the parent dict.
+ for cfg_dict_member_key in in_args['cfg_key_set'].difference(in_args['cmd_key_set']):
+ cfg_dict_member_val = in_args['cfg_parent_dict'][cfg_dict_member_key]
+ if in_args['uri_dict_key'] == 'cfg_dict_member_key':
+ uri_key = cfg_dict_member_key
+ uri = in_args['uri_attr'][uri_key]
+ in_args['deletion_dict'].update(
+ {cfg_dict_member_key: cfg_dict_member_val})
+ if isinstance(uri, dict):
+ for member_key in uri:
+ if in_args['cfg_parent_dict'].get(member_key) is not None:
+ request = {'path': uri[member_key],
+ 'method': DELETE}
+ in_args['requests'].append(request)
+ elif isinstance(uri, list):
+ for set_uri_item in uri:
+ request = {'path': set_uri_item, 'method': DELETE}
+ else:
+ request = {'path': uri, 'method': DELETE}
+ in_args['requests'].append(request)
+
+ def get_delete_route_map_replaced_set_groupings(self, command, cmd_rmap_have,
+ requests):
+ '''For the route map specified by the input "command", create requests
+ to delete any existing route map "set" configuration groupings for which
+ modified attribute requests are specified'''
+
+ if not command.get('set'):
+ return
+
+ conf_map_name = command.get('map_name', None)
+ conf_seq_num = command.get('sequence_num', None)
+ req_seq_num = str(conf_seq_num)
+
+ cmd_set_top = command['set']
+ cfg_set_top = cmd_rmap_have.get('set')
+
+ # If there are no 'set' attributes configured for this route map,
+ # there's nothing to delete.
+ if not cfg_set_top:
+ command.pop('set')
+ return
+
+ set_delete_req_base = (self.route_map_stmt_base_uri.format(conf_map_name, req_seq_num) +
+ 'actions/')
+ bgp_set_delete_req_base = set_delete_req_base + 'openconfig-bgp-policy:bgp-actions/'
+
+ # Obtain the set of "set" keys for which changes have been requested and the set
+ # of keys currently configured.
+ cmd_set_keys = cmd_set_top.keys()
+ cfg_set_keys = cfg_set_top.keys()
+
+ metric_uri = ''
+ if 'metric' in cfg_set_top:
+ if cfg_set_top['metric'].get('rtt_action'):
+ metric_uri = set_delete_req_base + 'metric-action/config'
+ elif cfg_set_top['metric'].get('value'):
+ metric_uri = [set_delete_req_base + 'metric-action/config',
+ bgp_set_delete_req_base + 'config/set-med']
+ # Top level keys. Note: Although "metric" is defined as a dictionary, it
+ # is handled as a "top level" attribute because it can contain
+ # only one configured member (either an rtt_action or a "value").
+ set_top_level_keys = [
+ 'as_path_prepend',
+ 'comm_list_delete',
+ 'ip_next_hop',
+ 'local_preference',
+ 'metric',
+ 'origin',
+ 'weight',
+ ]
+
+ set_uri_attr = {
+ 'as_path_prepend': bgp_set_delete_req_base + 'set-as-path-prepend',
+ 'comm_list_delete': bgp_set_delete_req_base + 'set-community-delete',
+ 'community': bgp_set_delete_req_base + 'set-community',
+ 'extcommunity': bgp_set_delete_req_base + 'set-ext-community',
+ 'ip_next_hop': bgp_set_delete_req_base + 'config/set-next-hop',
+ 'ipv6_next_hop': {
+ 'global_addr': bgp_set_delete_req_base + 'config/set-ipv6-next-hop-global',
+ 'prefer_global': bgp_set_delete_req_base + 'config/set-ipv6-next-hop-prefer-global'
+ },
+ 'local_preference': bgp_set_delete_req_base + 'config/set-local-pref',
+ 'metric': metric_uri,
+ 'origin': bgp_set_delete_req_base + 'config/set-route-origin',
+ 'weight': bgp_set_delete_req_base + 'config/set-weight'
+ }
+
+ # Remove all appropriate "set" configuration for this route map if any of the
+ # following criteria are met: (See the note below regarding what configuration
+ # is "appropriate"for deletion.)
+ #
+ # 1) Any top level attribute is specified with a value different from its current
+ # configured value.
+ # 2) Any top level attribute is specified that is not currently configured.
+ # 3) The set of top level attributes specified does not include all currently
+ # configured attributes (regardless of whether the specified values for
+ # these attributes are the same as the ones currently configured).
+ # (Note: Although the IPv6 attribute is defined as a nested dictionary
+ # to allow for future expansion, it is handled here as a top level
+ # attribute because it currently has only one member.)
+ #
+ # When deletion has been triggered, an attribute is deleted only if it is
+ # not present at all in the requested configuration. (If it is present in
+ # the requested configuration, the "merge" phase of the "replaced" state
+ # operation will modify it as needed, so it doesn't need to be explicitly
+ # deleted during the "deletion" phase.)
+ #
+ # Handle top level attributes first. If top level attribute deletion is
+ # triggered, proceed with deletion of dictionaries and lists below the
+ # top level.
+ cfg_top_level_key_set = set(cfg_set_keys).intersection(set(set_top_level_keys))
+ cmd_top_level_key_set = set(cmd_set_keys).intersection(set(set_top_level_keys))
+ cmd_nested_level_key_set = set(cmd_set_keys).difference(set_top_level_keys)
+ symmetric_diff_set = cmd_top_level_key_set.symmetric_difference(cfg_top_level_key_set)
+ intersection_diff_set = cmd_top_level_key_set.intersection(cfg_top_level_key_set)
+ cmd_delete_dict = {}
+ if (cmd_top_level_key_set and symmetric_diff_set or
+ (any(keyname for keyname in intersection_diff_set if
+ cmd_set_top[keyname] != cfg_set_top[keyname]))):
+ # Deletion has been triggered. First, delete all appropriate top level
+ # attributes.
+ self.delete_replaced_dict_config(
+ cfg_key_set=cfg_top_level_key_set,
+ cmd_key_set=cmd_top_level_key_set,
+ cfg_parent_dict=cfg_set_top,
+ uri_attr=set_uri_attr,
+ uri_dict_key='cfg_dict_member_key',
+ deletion_dict=cmd_delete_dict,
+ requests=requests)
+
+ # Save nested command "set" items and refresh top level command "set" items.
+ cmd_set_nested = {}
+ for nested_key in cmd_nested_level_key_set:
+ if command['set'].get(nested_key) is not None:
+ cmd_set_nested[nested_key] = command['set'][nested_key]
+
+ command.pop('set')
+ if cmd_delete_dict:
+ command['set'] = cmd_delete_dict
+ if cmd_set_nested:
+ if not command.get('set'):
+ command['set'] = {}
+ command['set'].update(cmd_set_nested)
+ if not command.get('set'):
+ command['set'] = {}
+ cmd_set_top = command['set']
+
+ # Proceed with deletion of dictionaries and lists below the top level.
+ # ---------------------------------------------------------------------
+
+ dict_delete_requests = []
+
+ # Check for deletion of set "community" lists. Delete the items in
+ # the currently configured list if it exists. As an optimization,
+ # avoid deleting list items that will be replaced by the received
+ # command.
+
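+ # Illustrative example (hypothetical values): if the configured community_number
+ # list is ['101:101', '202:202'] and the received command re-specifies only
+ # '101:101', only '202:202' is added to the deletion list below.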
+ set_community_delete_attrs = []
+ if 'community' not in cfg_set_top:
+ if command['set'].get('community'):
+ command['set'].pop('community')
+ if command['set'] is None:
+ command.pop('set')
+ return
+ else:
+ set_community_number_deletions = []
+ if 'community_number' in cfg_set_top['community']:
+
+ # Delete eligible configured community numbers.
+ cfg_community_number_set = set(cfg_set_top['community']['community_number'])
+ cmd_community_number_set = ([])
+ if cmd_set_top.get('community') and 'community_number' in cmd_set_top['community']:
+ cmd_community_number_set = set(cmd_set_top['community']['community_number'])
+ command['set']['community'].pop('community_number')
+
+ for cfg_community_number in cfg_community_number_set.difference(cmd_community_number_set):
+ set_community_delete_attrs.append(cfg_community_number)
+ set_community_number_deletions.append(cfg_community_number)
+
+ if set_community_number_deletions:
+ # Update the list of deleted community numbers in the "command" dict.
+ if not cmd_set_top.get('community'):
+ command['set']['community'] = {}
+ command['set']['community']['community_number'] = set_community_number_deletions
+
+ set_community_attributes_deletions = []
+ if 'community_attributes' in cfg_set_top['community']:
+
+ # Delete eligible configured community attributes.
+ cfg_community_attributes_set = set(cfg_set_top['community']['community_attributes'])
+ cmd_community_attributes_set = ([])
+ if cmd_set_top.get('community') and 'community_attributes' in cmd_set_top['community']:
+ cmd_community_attributes_set = set(cmd_set_top['community']['community_attributes'])
+ command['set']['community'].pop('community_attributes')
+
+ for cfg_community_attribute in cfg_community_attributes_set.difference(cmd_community_attributes_set):
+ set_community_delete_attrs.append(self.set_community_rest_names[cfg_community_attribute])
+ set_community_attributes_deletions.append(cfg_community_attribute)
+
+ if set_community_attributes_deletions:
+ # Update the list of deleted community attributes in the "command" dict.
+ if not cmd_set_top.get('community'):
+ command['set']['community'] = {}
+ command['set']['community']['community_attributes'] = set_community_attributes_deletions
+
+ if command['set'].get('community') is not None and not command['set']['community']:
+ command['set'].pop('community')
+
+ # Format and enqueue a request to delete eligible community attributes
+ if set_community_delete_attrs:
+ bgp_set_delete_community_uri = bgp_set_delete_req_base + 'set-community'
+ bgp_set_delete_comm_payload = \
+ {'openconfig-bgp-policy:set-community': {}}
+ bgp_set_delete_comm_payload_contents = \
+ bgp_set_delete_comm_payload['openconfig-bgp-policy:set-community']
+ bgp_set_delete_comm_payload_contents['config'] = \
+ {'method': 'INLINE', 'options': 'REMOVE'}
+ bgp_set_delete_comm_payload_contents['inline'] = \
+ {'config': {'communities': set_community_delete_attrs}}
+
+ request = {
+ 'path': bgp_set_delete_community_uri,
+ 'method': PATCH,
+ 'data': bgp_set_delete_comm_payload
+ }
+ dict_delete_requests.append(request)
+
+ # Check for deletion of set "extcommunity" lists. Delete the items in
+ # the currently configured list if it exists. As an optimization,
+ # avoid deleting list items that will be replaced by the received
+ # command.
+ set_extcommunity_delete_attrs = []
+
+ if 'extcommunity' not in cfg_set_top:
+ if command['set'].get('extcommunity'):
+ command['set'].pop('extcommunity')
+ if command['set'] is None:
+ command.pop('set')
+ return
+ else:
+ for extcomm_type in self.set_extcomm_rest_names:
+ set_extcommunity_delete_attrs_type = []
+ if extcomm_type in cfg_set_top['extcommunity']:
+ # Delete eligible configured extcommunity list items for this
+ # extcommunity list
+ cfg_extcommunity_list_set = set(cfg_set_top['extcommunity'][extcomm_type])
+ cmd_extcommunity_list_set = ([])
+ if cmd_set_top.get('extcommunity') and extcomm_type in cmd_set_top['extcommunity']:
+ cmd_extcommunity_list_set = set(cmd_set_top['extcommunity'][extcomm_type])
+ command['set']['extcommunity'].pop(extcomm_type)
+ for extcomm_number in cfg_extcommunity_list_set.difference(cmd_extcommunity_list_set):
+ set_extcommunity_delete_attrs.append(
+ self.set_extcomm_rest_names[extcomm_type] +
+ extcomm_number)
+ set_extcommunity_delete_attrs_type.append(extcomm_number)
+
+ if set_extcommunity_delete_attrs_type:
+ # Update the list of deleted extcommunity list items of this type
+ # in the "command" dict.
+ if not cmd_set_top.get('extcommunity'):
+ command['set']['extcommunity'] = {}
+ command['set']['extcommunity'][extcomm_type] = set_extcommunity_delete_attrs_type
+
+ if command['set'].get('extcommunity') is not None and not command['set']['extcommunity']:
+ command['set'].pop('extcommunity')
+
+ # Format and enqueue a request to delete eligible extcommunity attributes
+ if set_extcommunity_delete_attrs:
+ bgp_set_delete_extcomm_uri = bgp_set_delete_req_base + 'set-ext-community'
+ bgp_set_delete_extcomm_payload = \
+ {'openconfig-bgp-policy:set-ext-community': {}}
+ bgp_set_delete_comm_payload_contents = \
+ bgp_set_delete_extcomm_payload[
+ 'openconfig-bgp-policy:set-ext-community']
+ bgp_set_delete_comm_payload_contents['config'] = \
+ {'method': 'INLINE', 'options': 'REMOVE'}
+ bgp_set_delete_comm_payload_contents['inline'] = \
+ {'config': {'communities': set_extcommunity_delete_attrs}}
+
+ request = {
+ 'path': bgp_set_delete_extcomm_uri,
+ 'method': PATCH,
+ 'data': bgp_set_delete_extcomm_payload
+ }
+ dict_delete_requests.append(request)
+
+ # Check for deletion of ipv6_next_hop attributes. Delete the attributes
+ # in the currently configured ipv6_next_hop dict list if they exist.
+ # As an optimization, avoid deleting attributes that will be replaced
+ # by the received command.
+ ipv6_next_hop_deleted_members = {}
+ if 'ipv6_next_hop' not in cfg_set_top:
+ if command['set'].get('ipv6_next_hop'):
+ command['set'].pop('ipv6_next_hop')
+ if command['set'] is None:
+ command.pop('set')
+ return
+ else:
+ # Delete eligible configured ipv6_next_hop members.
+ cfg_ipv6_next_hop_key_set = set(cfg_set_top['ipv6_next_hop'].keys())
+ cmd_ipv6_next_hop_key_set = ([])
+ if cmd_set_top.get('ipv6_next_hop'):
+ cmd_ipv6_next_hop_key_set = set(cmd_set_top['ipv6_next_hop'].keys())
+ command['set'].pop('ipv6_next_hop')
+
+ set_uri = set_uri_attr['ipv6_next_hop']
+ for ipv6_next_hop_key in cfg_ipv6_next_hop_key_set.difference(cmd_ipv6_next_hop_key_set):
+ ipv6_next_hop_deleted_members[ipv6_next_hop_key] = \
+ cfg_set_top['ipv6_next_hop'][ipv6_next_hop_key]
+ request = {'path': set_uri[ipv6_next_hop_key], 'method': DELETE}
+ dict_delete_requests.append(request)
+
+ if ipv6_next_hop_deleted_members:
+ # Update the list of deleted ipv6_next_hop attributes in the "command" dict.
+ if not cmd_set_top.get('ipv6_next_hop'):
+ command['set']['ipv6_next_hop'] = {}
+ command['set']['ipv6_next_hop'] = ipv6_next_hop_deleted_members
+
+ if dict_delete_requests:
+ requests.extend(dict_delete_requests)
+
+ return
+
+ # If no top level attribute changes were requested, check for changes in
+ # dictionaries nested below the top level.
+ # -----------------------------------------------------------------------
+
+ # Check for replacement of set "community" lists. Delete the items in
+ # the currently configured list if it exists and any items for that
+ # list are specified in the received command.
+ dict_delete_requests = []
+ set_community_delete_attrs = []
+ if 'community' in cmd_set_top:
+ if 'community' not in cfg_set_top:
+ command['set'].pop('community')
+ if command['set'] is None:
+ command.pop('set')
+ return
+ else:
+ if 'community_number' in cmd_set_top['community']:
+ set_community_number_deletions = []
+ if 'community_number' in cfg_set_top['community']:
+ symmetric_diff_set = \
+ (set(cmd_set_top['community']['community_number']).symmetric_difference(
+ set(cfg_set_top['community']['community_number'])))
+ if symmetric_diff_set:
+ for community_number in cfg_set_top['community']['community_number']:
+ if (community_number not in cmd_set_top['community']
+ ['community_number']):
+ set_community_delete_attrs.append(community_number)
+ set_community_number_deletions.append(community_number)
+ command['set']['community'].pop('community_number')
+ if set_community_delete_attrs:
+ command['set']['community']['community_number'] = \
+ set_community_number_deletions
+
+ if 'community_attributes' in cmd_set_top['community']:
+ set_community_named_attr_deletions = []
+ if 'community_attributes' in cfg_set_top['community']:
+ symmetric_diff_set = \
+ (set(cmd_set_top[
+ 'community']['community_attributes']).symmetric_difference(
+ set(cfg_set_top['community']['community_attributes'])))
+ if symmetric_diff_set:
+ cfg_set_top_comm_attr = cfg_set_top['community']['community_attributes']
+ for community_attr in cfg_set_top_comm_attr:
+ if (community_attr not in cmd_set_top['community']
+ ['community_attributes']):
+ set_community_delete_attrs.append(
+ self.set_community_rest_names[community_attr])
+ set_community_named_attr_deletions.append(community_attr)
+ command['set']['community'].pop('community_attributes')
+ if set_community_named_attr_deletions:
+ command['set']['community']['community_attributes'] = \
+ set_community_named_attr_deletions
+ if command['set']['community'] is None:
+ command['set'].pop('community')
+
+ # Format and enqueue a request to delete eligible community attributes
+ if set_community_delete_attrs:
+ bgp_set_delete_community_uri = bgp_set_delete_req_base + 'set-community'
+ bgp_set_delete_comm_payload = \
+ {'openconfig-bgp-policy:set-community': {}}
+ bgp_set_delete_comm_payload_contents = \
+ bgp_set_delete_comm_payload['openconfig-bgp-policy:set-community']
+ bgp_set_delete_comm_payload_contents['config'] = \
+ {'method': 'INLINE', 'options': 'REMOVE'}
+ bgp_set_delete_comm_payload_contents['inline'] = \
+ {'config': {'communities': set_community_delete_attrs}}
+
+ request = {
+ 'path': bgp_set_delete_community_uri,
+ 'method': PATCH,
+ 'data': bgp_set_delete_comm_payload
+ }
+ dict_delete_requests.append(request)
+
+ # Check for replacement of set "extcommunity" lists. Delete any items in
+ # the currently configured list if the corresponding item is not
+ # specified in the received command.
+ set_extcommunity_delete_attrs = []
+ if 'extcommunity' in cmd_set_top:
+ if 'extcommunity' not in cfg_set_top:
+ command['set'].pop('extcommunity')
+ else:
+ for extcomm_type in self.set_extcomm_rest_names:
+ set_extcommunity_delete_attrs_type = []
+ if cmd_set_top['extcommunity'].get(extcomm_type):
+ if extcomm_type in cfg_set_top['extcommunity']:
+ symmetric_diff_set = \
+ (set(
+ cmd_set_top['extcommunity'][extcomm_type]).symmetric_difference(
+ set(cfg_set_top['extcommunity'][extcomm_type])))
+ if symmetric_diff_set:
+ # Append eligible entries to the delete list.
+ for extcomm_number in cfg_set_top['extcommunity'][extcomm_type]:
+ if (extcomm_number not in
+ cmd_set_top['extcommunity'][extcomm_type]):
+ set_extcommunity_delete_attrs.append(
+ self.set_extcomm_rest_names[extcomm_type] +
+ extcomm_number)
+ set_extcommunity_delete_attrs_type.append(extcomm_number)
+ # Replace the requested extcommunity numbers for this type with the list of
+ # deleted extcommunity numbers (if any) for this type.
+ command['set']['extcommunity'].pop(extcomm_type)
+ if set_extcommunity_delete_attrs_type:
+ command['set']['extcommunity'][extcomm_type] = \
+ set_extcommunity_delete_attrs_type
+
+ if command['set']['extcommunity'] is None:
+ command['set'].pop('extcommunity')
+
+ # Format and enqueue a request to delete eligible extcommunity attributes
+ if set_extcommunity_delete_attrs:
+ bgp_set_delete_extcomm_uri = bgp_set_delete_req_base + 'set-ext-community'
+ bgp_set_delete_extcomm_payload = \
+ {'openconfig-bgp-policy:set-ext-community': {}}
+ bgp_set_delete_comm_payload_contents = \
+ bgp_set_delete_extcomm_payload[
+ 'openconfig-bgp-policy:set-ext-community']
+ bgp_set_delete_comm_payload_contents['config'] = \
+ {'method': 'INLINE', 'options': 'REMOVE'}
+ bgp_set_delete_comm_payload_contents['inline'] = \
+ {'config': {'communities': set_extcommunity_delete_attrs}}
+
+ request = {
+ 'path': bgp_set_delete_extcomm_uri,
+ 'method': PATCH,
+ 'data': bgp_set_delete_extcomm_payload
+ }
+ dict_delete_requests.append(request)
+
+ # If the "replaced" command set includes ipv6_next_hop attributes that
+ # differ from the currently configured attributes, delete
+ # ipv6_next_hop configuration, if it exists, for any ipv6_next hop
+ # attributes that are not specified in the received command.
+ if 'ipv6_next_hop' in cmd_set_top:
+ ipv6_next_hop_deleted_members = {}
+ if 'ipv6_next_hop' in cfg_set_top:
+ symmetric_diff_set = \
+ (set(cmd_set_top['ipv6_next_hop'].keys()).symmetric_difference(
+ set(cfg_set_top['ipv6_next_hop'].keys())))
+ intersection_diff_set = \
+ (set(cmd_set_top['ipv6_next_hop'].keys()).intersection(
+ set(cfg_set_top['ipv6_next_hop'].keys())))
+ if (symmetric_diff_set or
+ (any(keyname for keyname in intersection_diff_set if
+ cmd_set_top['ipv6_next_hop'][keyname] !=
+ cfg_set_top['ipv6_next_hop'][keyname]))):
+ set_uri = set_uri_attr['ipv6_next_hop']
+ for member_key in set_uri:
+ if (cfg_set_top['ipv6_next_hop'].get(member_key) is not None and
+ cmd_set_top['ipv6_next_hop'].get(member_key) is None):
+ ipv6_next_hop_deleted_members[member_key] = \
+ cfg_set_top['ipv6_next_hop'][member_key]
+ request = {'path': set_uri[member_key], 'method': DELETE}
+ dict_delete_requests.append(request)
+ command['set'].pop('ipv6_next_hop')
+ if ipv6_next_hop_deleted_members:
+ command['set']['ipv6_next_hop'] = ipv6_next_hop_deleted_members
+
+ if dict_delete_requests:
+ requests.extend(dict_delete_requests)
+
+ def validate_and_normalize_config(self, input_config_list):
+ '''For each input route map dict in the input_config_list list,
+ remove empty entries, validate the contents of the dict against the
+ argspec constraints for route maps, and convert input interface names to
+ the format required for the currently configured interface naming
+ mode.'''
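+ # Illustrative example (hypothetical value, assuming get_normalize_interface_name
+ # maps short names to the device's interface naming mode): a match interface of
+ # 'eth1' could be rewritten to a canonical name such as 'Ethernet1'.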
+ updated_config_list = remove_empties_from_list(input_config_list)
+ validate_config(self._module.argument_spec, {'config': updated_config_list})
+
+ # - Verify that parameters required for most "states" are present in
+ # each dict in the input list.
+ # - Check for interface names in the input configuration and
+ # perform any needed reformatting of the names.
+ for route_map in updated_config_list:
+
+ # Verify the presence of a "sequence number" and "action" value
+ # for all states other than "deleted"
+ if self._module.params['state'] != 'deleted':
+ check_required(self._module, ['action', 'sequence_num'], route_map, ['config'])
+
+ # Check for interface names requiring re-formatting.
+ if not route_map.get('match'):
+ continue
+
+ if route_map['match'].get('interface'):
+ intf_name = route_map['match']['interface']
+ updated_intf_name = get_normalize_interface_name(intf_name, self._module)
+ route_map['match']['interface'] = updated_intf_name
+
+ if route_map['match'].get('peer') and route_map['match']['peer'].get('interface'):
+ intf_name = route_map['match']['peer']['interface']
+ updated_intf_name = get_normalize_interface_name(intf_name, self._module)
+ route_map['match']['peer']['interface'] = updated_intf_name
+
+ return updated_config_list
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py
index 047357470..c3d62d852 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/static_routes/static_routes.py
@@ -28,6 +28,12 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
get_diff,
+ get_replaced_config,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
)
network_instance_path = '/data/openconfig-network-instance:network-instances/network-instance'
@@ -41,6 +47,61 @@ TEST_KEYS = [
{'next_hops': {'index': ''}},
]
+is_delete_all = False
+
+
+def __derive_static_route_next_hop_config_key_match_op(key_set, command, exist_conf):
+ bh = command['index'].get('blackhole', None)
+ itf = command['index'].get('interface', None)
+ nv = command['index'].get('nexthop_vrf', None)
+ nh = command['index'].get('next_hop', None)
+ conf_bh = exist_conf['index'].get('blackhole', None)
+ conf_itf = exist_conf['index'].get('interface', None)
+ conf_nv = exist_conf['index'].get('nexthop_vrf', None)
+ conf_nh = exist_conf['index'].get('next_hop', None)
+
+ if bh == conf_bh and itf == conf_itf and nv == conf_nv and nh == conf_nh:
+ return True
+ else:
+ return False
+
+
+def __derive_static_route_next_hop_config_delete_op(key_set, command, exist_conf):
+ new_conf = []
+
+ if is_delete_all:
+ return True, new_conf
+
+ metric = command.get('metric', None)
+ tag = command.get('tag', None)
+ track = command.get('track', None)
+
+ if metric is None and tag is None and track is None:
+ return True, new_conf
+
+ new_conf = exist_conf
+
+ conf_metric = new_conf.get('metric', None)
+ conf_tag = new_conf.get('tag', None)
+ conf_track = new_conf.get('track', None)
+
+ if metric == conf_metric:
+ new_conf['metric'] = None
+ if tag == conf_tag:
+ new_conf['tag'] = None
+ if track == conf_track:
+ new_conf['track'] = None
+
+ return True, new_conf
+
+
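+# Illustrative example (hypothetical values): if a 'deleted' command for a next hop
+# specifies only {'index': {...}, 'metric': 30} and the matching existing next hop
+# has metric 30 and tag 5, the delete op above clears 'metric' in the derived
+# configuration but leaves 'tag' intact; with no metric/tag/track given, the whole
+# next hop entry is treated as deleted.
+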
+TEST_KEYS_formatted_diff = [
+ {'config': {'vrf_name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'static_list': {'prefix': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+ {'next_hops': {'index': '', '__delete_op': __derive_static_route_next_hop_config_delete_op,
+ '__key_match_op': __derive_static_route_next_hop_config_key_match_op}}
+]
+
class Static_routes(ConfigBase):
"""
@@ -97,6 +158,21 @@ class Static_routes(ConfigBase):
if result['changed']:
result['after'] = changed_static_routes_facts
+ new_config = changed_static_routes_facts
+ old_config = existing_static_routes_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_static_routes_facts,
+ TEST_KEYS_formatted_diff)
+ self.post_process_generated_config(new_config)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ self.sort_lists_in_config(new_config)
+ self.sort_lists_in_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -132,10 +208,14 @@ class Static_routes(ConfigBase):
if state == 'deleted':
commands, requests = self._state_deleted(want, have, diff)
elif state == 'merged':
- commands, requests = self._state_merged(want, have, diff)
+ commands, requests = self._state_merged(diff)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, diff)
return commands, requests
- def _state_merged(self, want, have, diff):
+ def _state_merged(self, diff):
""" The command generator when state is merged
:rtype: A list
@@ -143,7 +223,7 @@ class Static_routes(ConfigBase):
the current configuration
"""
commands = diff
- requests = self.get_modify_static_routes_requests(commands, have)
+ requests = self.get_modify_static_routes_requests(commands)
if commands and len(requests) > 0:
commands = update_states(commands, "merged")
@@ -159,6 +239,7 @@ class Static_routes(ConfigBase):
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
+ global is_delete_all
is_delete_all = False
# if want is none, then delete ALL
if not want:
@@ -176,7 +257,73 @@ class Static_routes(ConfigBase):
return commands, requests
- def get_modify_static_routes_requests(self, commands, have):
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ global is_delete_all
+
+ commands = []
+ requests = []
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ if have and have != want:
+ is_delete_all = True
+ del_requests = self.get_delete_static_routes_requests(have, None, is_delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ mod_commands = want
+ mod_requests = self.get_modify_static_routes_requests(mod_commands)
+
+ if len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "overridden"))
+
+ return commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ global is_delete_all
+
+ commands = []
+ requests = []
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ mod_commands = []
+ if replaced_config:
+ self.sort_lists_in_config(replaced_config)
+ self.sort_lists_in_config(have)
+ is_delete_all = (replaced_config == have)
+ del_requests = self.get_delete_static_routes_requests(replaced_config, have, is_delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+ mod_commands = want
+ else:
+ mod_commands = diff
+
+ if mod_commands:
+ mod_requests = self.get_modify_static_routes_requests(mod_commands)
+
+ if len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "replaced"))
+
+ return commands, requests
+
+ def get_modify_static_routes_requests(self, commands):
requests = []
if not commands:
@@ -208,7 +355,7 @@ class Static_routes(ConfigBase):
idx = self.generate_index(index)
if idx:
next_hop_cfg['index'] = idx
- if blackhole:
+ if blackhole is not None:
next_hop_cfg['blackhole'] = blackhole
if nexthop_vrf:
next_hop_cfg['network-instance'] = nexthop_vrf
@@ -342,3 +489,33 @@ class Static_routes(ConfigBase):
request = {'path': url, 'method': DELETE}
return request
+
+ def sort_lists_in_config(self, config):
+ if config:
+ config.sort(key=self.get_vrf_name)
+ for cfg in config:
+ if 'static_list' in cfg and cfg['static_list']:
+ cfg['static_list'].sort(key=self.get_prefix)
+ for rt in cfg['static_list']:
+ if 'next_hops' in rt and rt['next_hops']:
+ rt['next_hops'].sort(key=lambda x: (x['index'].get('blackhole', None) is not None,
+ x['index'].get('interface', None) is not None,
+ x['index'].get('nexthop_vrf', None) is not None,
+ x['index'].get('next_hop', None) is not None))
+
+ def get_vrf_name(self, vrf_name):
+ return vrf_name.get('vrf_name')
+
+ def get_prefix(self, prefix):
+ return prefix.get('prefix')
+
+ def post_process_generated_config(self, configs):
+ for conf in configs[:]:
+ sls = conf.get('static_list', [])
+ if sls:
+ for sl in sls[:]:
+ if not sl.get('next_hops', []):
+ sls.remove(sl)
+
+ if not conf.get('static_list', []):
+ configs.remove(conf)
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/stp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/stp.py
new file mode 100644
index 000000000..031c794ae
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/stp/stp.py
@@ -0,0 +1,1404 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_stp class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+from copy import deepcopy
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ update_states,
+ get_ranges_in_list,
+ get_diff,
+ remove_empties,
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+PATCH = 'patch'
+DELETE = 'delete'
+TEST_KEYS = [
+ {'interfaces': {'intf_name': ''}},
+ {'mst_instances': {'mst_id': ''}},
+ {'pvst': {'vlan_id': ''}},
+ {'rapid_pvst': {'vlan_id': ''}},
+]
+STP_PATH = 'data/openconfig-spanning-tree:stp'
+stp_map = {
+ True: 'EDGE_ENABLE',
+ False: 'EDGE_DISABLE',
+ 'mst': 'MSTP',
+ 'pvst': 'PVST',
+ 'rapid_pvst': 'RAPID_PVST',
+ 'point-to-point': 'P2P',
+ 'shared': 'SHARED',
+ 'loop': 'LOOP',
+ 'root': 'ROOT',
+ 'none': 'NONE'
+}
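+# Illustrative example: a playbook value of 'enabled_protocol: mst' is sent to the
+# device as the OpenConfig value 'MSTP', and 'edge_port: true' as 'EDGE_ENABLE',
+# via the stp_map lookups used in the request builders below.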
+
+
+class Stp(ConfigBase):
+ """
+ The sonic_stp class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'stp',
+ ]
+
+ def __init__(self, module):
+ super(Stp, self).__init__(module)
+
+ def get_stp_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ stp_facts = facts['ansible_network_resources'].get('stp')
+ if not stp_facts:
+ return []
+ return stp_facts
+
+ def execute_module(self):
+ """ Execute the module
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = []
+ commands = []
+
+ existing_stp_facts = self.get_stp_facts()
+ commands, requests = self.set_config(existing_stp_facts)
+ if commands and len(requests) > 0:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_stp_facts = self.get_stp_facts()
+
+ result['before'] = existing_stp_facts
+ if result['changed']:
+ result['after'] = changed_stp_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_stp_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = self._module.params['config']
+ have = existing_stp_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ state = self._module.params['state']
+ diff = get_diff(want, have, TEST_KEYS)
+
+ if state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(diff, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, diff)
+ return commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ mod_commands = []
+ replaced_config, requests = self.get_replaced_config(want, have)
+
+ if replaced_config:
+ commands.extend(update_states(replaced_config, "deleted"))
+ mod_commands = want
+ else:
+ mod_commands = diff
+
+ if mod_commands:
+ mod_requests = self.get_modify_stp_requests(mod_commands, have)
+ if len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "replaced"))
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ del_commands = get_diff(have, want, TEST_KEYS)
+ self.remove_default_entries(del_commands)
+ del_commands = remove_empties(del_commands)
+
+ if del_commands:
+ is_delete_all = True
+ del_requests = self.get_delete_stp_requests(del_commands, have, is_delete_all)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = {}
+
+ if not have and want:
+ mod_commands = want
+ mod_requests = self.get_modify_stp_requests(mod_commands, have)
+
+ if len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "overridden"))
+ return commands, requests
+
+ def _state_merged(self, diff, have):
+ """ The command generator when state is merged
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = self.get_modify_stp_requests(commands, have)
+ commands = remove_empties(commands)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "merged")
+ else:
+ commands = []
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ is_delete_all = False
+ want = remove_empties(want)
+
+ if not want:
+ commands = deepcopy(have)
+ is_delete_all = True
+ else:
+ commands = deepcopy(want)
+
+ self.remove_default_entries(commands)
+ commands = remove_empties(commands)
+ requests = self.get_delete_stp_requests(commands, have, is_delete_all)
+
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "deleted")
+ else:
+ commands = []
+ return commands, requests
+
+ def get_modify_stp_requests(self, commands, have):
+ requests = []
+
+ if not commands:
+ return requests
+
+ global_request = self.get_modify_stp_global_request(commands, have)
+ interfaces_request = self.get_modify_stp_interfaces_request(commands)
+ mstp_requests = self.get_modify_stp_mstp_request(commands, have)
+ pvst_request = self.get_modify_stp_pvst_request(commands)
+ rapid_pvst_request = self.get_modify_stp_rapid_pvst_request(commands)
+
+ if global_request:
+ requests.append(global_request)
+ if interfaces_request:
+ requests.append(interfaces_request)
+ if mstp_requests:
+ requests.append(mstp_requests)
+ if pvst_request:
+ requests.append(pvst_request)
+ if rapid_pvst_request:
+ requests.append(rapid_pvst_request)
+
+ return requests
+
+ def get_modify_stp_global_request(self, commands, have):
+ request = None
+
+ if not commands:
+ return request
+
+ stp_global = commands.get('global', None)
+ if stp_global:
+ global_dict = {}
+ config_dict = {}
+ enabled_protocol = stp_global.get('enabled_protocol', None)
+ loop_guard = stp_global.get('loop_guard', None)
+ bpdu_filter = stp_global.get('bpdu_filter', None)
+ disabled_vlans = stp_global.get('disabled_vlans', None)
+ root_guard_timeout = stp_global.get('root_guard_timeout', None)
+ portfast = stp_global.get('portfast', None)
+ hello_time = stp_global.get('hello_time', None)
+ max_age = stp_global.get('max_age', None)
+ fwd_delay = stp_global.get('fwd_delay', None)
+ bridge_priority = stp_global.get('bridge_priority', None)
+
+ if enabled_protocol:
+ config_dict['enabled-protocol'] = [stp_map[enabled_protocol]]
+ if loop_guard is not None:
+ config_dict['loop-guard'] = loop_guard
+ if bpdu_filter is not None:
+ config_dict['bpdu-filter'] = bpdu_filter
+ if disabled_vlans:
+ if have:
+ cfg_stp_global = have.get('global', None)
+ if cfg_stp_global:
+ cfg_disabled_vlans = cfg_stp_global.get('disabled_vlans', None)
+ if cfg_disabled_vlans:
+ disabled_vlans = self.get_vlans_diff(disabled_vlans, cfg_disabled_vlans)
+ if not disabled_vlans:
+ commands['global'].pop('disabled_vlans')
+ if disabled_vlans:
+ config_dict['openconfig-spanning-tree-ext:disabled-vlans'] = self.convert_vlans_list(disabled_vlans)
+ if root_guard_timeout:
+ config_dict['openconfig-spanning-tree-ext:rootguard-timeout'] = root_guard_timeout
+ if portfast is not None and enabled_protocol == 'pvst':
+ config_dict['openconfig-spanning-tree-ext:portfast'] = portfast
+ elif portfast:
+ self._module.fail_json(msg='Portfast only configurable for pvst protocol.')
+ if hello_time:
+ config_dict['openconfig-spanning-tree-ext:hello-time'] = hello_time
+ if max_age:
+ config_dict['openconfig-spanning-tree-ext:max-age'] = max_age
+ if fwd_delay:
+ config_dict['openconfig-spanning-tree-ext:forwarding-delay'] = fwd_delay
+ if bridge_priority:
+ config_dict['openconfig-spanning-tree-ext:bridge-priority'] = bridge_priority
+ if config_dict:
+ global_dict['config'] = config_dict
+ url = '%s/global' % (STP_PATH)
+ payload = {'openconfig-spanning-tree:global': global_dict}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_modify_stp_interfaces_request(self, commands):
+ request = None
+ interfaces = commands.get('interfaces', None)
+
+ if interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_dict = {}
+ config_dict = {}
+ intf_name = intf.get('intf_name', None)
+ edge_port = intf.get('edge_port', None)
+ link_type = intf.get('link_type', None)
+ guard = intf.get('guard', None)
+ bpdu_guard = intf.get('bpdu_guard', None)
+ bpdu_filter = intf.get('bpdu_filter', None)
+ portfast = intf.get('portfast', None)
+ uplink_fast = intf.get('uplink_fast', None)
+ shutdown = intf.get('shutdown', None)
+ cost = intf.get('cost', None)
+ port_priority = intf.get('port_priority', None)
+ stp_enable = intf.get('stp_enable', None)
+
+ if intf_name:
+ config_dict['name'] = intf_name
+ if edge_port is not None:
+ config_dict['edge-port'] = stp_map[edge_port]
+ if link_type:
+ config_dict['link-type'] = stp_map[link_type]
+ if guard:
+ config_dict['guard'] = stp_map[guard]
+ if bpdu_guard is not None:
+ config_dict['bpdu-guard'] = bpdu_guard
+ if bpdu_filter is not None:
+ config_dict['bpdu-filter'] = bpdu_filter
+ if portfast is not None:
+ config_dict['openconfig-spanning-tree-ext:portfast'] = portfast
+ if uplink_fast is not None:
+ config_dict['openconfig-spanning-tree-ext:uplink-fast'] = uplink_fast
+ if shutdown is not None:
+ config_dict['openconfig-spanning-tree-ext:bpdu-guard-port-shutdown'] = shutdown
+ if cost:
+ config_dict['openconfig-spanning-tree-ext:cost'] = cost
+ if port_priority:
+ config_dict['openconfig-spanning-tree-ext:port-priority'] = port_priority
+ if stp_enable is not None:
+ config_dict['openconfig-spanning-tree-ext:spanning-tree-enable'] = stp_enable
+ if config_dict:
+ intf_dict['name'] = intf_name
+ intf_dict['config'] = config_dict
+ intf_list.append(intf_dict)
+ if intf_list:
+ url = '%s/interfaces' % (STP_PATH)
+ payload = {'openconfig-spanning-tree:interfaces': {'interface': intf_list}}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_modify_stp_mstp_request(self, commands, have):
+ request = None
+
+ if not commands:
+ return request
+
+ mstp = commands.get('mstp', None)
+
+ if mstp:
+ mstp_dict = {}
+ config_dict = {}
+ mst_name = mstp.get('mst_name', None)
+ revision = mstp.get('revision', None)
+ max_hop = mstp.get('max_hop', None)
+ hello_time = mstp.get('hello_time', None)
+ max_age = mstp.get('max_age', None)
+ fwd_delay = mstp.get('fwd_delay', None)
+ mst_instances = mstp.get('mst_instances', None)
+
+ if mst_name:
+ config_dict['name'] = mst_name
+ if revision:
+ config_dict['revision'] = revision
+ if max_hop:
+ config_dict['max-hop'] = max_hop
+ if hello_time:
+ config_dict['hello-time'] = hello_time
+ if max_age:
+ config_dict['max-age'] = max_age
+ if fwd_delay:
+ config_dict['forwarding-delay'] = fwd_delay
+ if mst_instances:
+ mst_inst_list = []
+ pop_list = []
+ for mst in mst_instances:
+ mst_inst_dict = {}
+ mst_cfg_dict = {}
+ mst_index = mst_instances.index(mst)
+ mst_id = mst.get('mst_id', None)
+ bridge_priority = mst.get('bridge_priority', None)
+ interfaces = mst.get('interfaces', None)
+ vlans = mst.get('vlans', None)
+
+ if mst_id:
+ mst_cfg_dict['mst-id'] = mst_id
+ if bridge_priority:
+ mst_cfg_dict['bridge-priority'] = bridge_priority
+ if interfaces:
+ intf_list = self.get_interfaces_list(interfaces)
+ if intf_list:
+ mst_inst_dict['interfaces'] = {'interface': intf_list}
+ if vlans:
+ if have:
+ cfg_mstp = have.get('mstp', None)
+ if cfg_mstp:
+ cfg_mst_instances = cfg_mstp.get('mst_instances', None)
+ if cfg_mst_instances:
+ for cfg_mst in cfg_mst_instances:
+ cfg_mst_id = cfg_mst.get('mst_id', None)
+ cfg_vlans = cfg_mst.get('vlans', None)
+
+ if mst_id == cfg_mst_id and cfg_vlans:
+ vlans = self.get_vlans_diff(vlans, cfg_vlans)
+ if not vlans:
+ pop_list.insert(0, mst_index)
+ if vlans:
+ mst_cfg_dict['vlan'] = self.convert_vlans_list(vlans)
+ if mst_cfg_dict:
+ mst_inst_dict['mst-id'] = mst_id
+ mst_inst_dict['config'] = mst_cfg_dict
+ if mst_inst_dict:
+ mst_inst_list.append(mst_inst_dict)
+ if pop_list:
+ for i in pop_list:
+ commands['mstp']['mst_instances'][i].pop('vlans')
+ if mst_inst_list:
+ mstp_dict['mst-instances'] = {'mst-instance': mst_inst_list}
+
+ if config_dict:
+ mstp_dict['config'] = config_dict
+
+ if mstp_dict:
+ url = '%s/mstp' % (STP_PATH)
+ payload = {'openconfig-spanning-tree:mstp': mstp_dict}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_modify_stp_pvst_request(self, commands):
+ request = None
+ pvst = commands.get('pvst', None)
+
+ if pvst:
+ vlans_list = self.get_vlans_list(pvst)
+ if vlans_list:
+ url = '%s/openconfig-spanning-tree-ext:pvst' % (STP_PATH)
+ payload = {'openconfig-spanning-tree-ext:pvst': {'vlans': vlans_list}}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_modify_stp_rapid_pvst_request(self, commands):
+ request = None
+ rapid_pvst = commands.get('rapid_pvst', None)
+
+ if rapid_pvst:
+ vlans_list = self.get_vlans_list(rapid_pvst)
+ if vlans_list:
+ url = '%s/rapid-pvst' % (STP_PATH)
+ payload = {'openconfig-spanning-tree:rapid-pvst': {'vlan': vlans_list}}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+
+ return request
+
+ def get_vlans_list(self, data):
+ vlans_list = []
+
+ for vlan in data:
+ vlans_dict = {}
+ config_dict = {}
+ vlan_id = vlan.get('vlan_id', None)
+ hello_time = vlan.get('hello_time', None)
+ max_age = vlan.get('max_age', None)
+ fwd_delay = vlan.get('fwd_delay', None)
+ bridge_priority = vlan.get('bridge_priority', None)
+ interfaces = vlan.get('interfaces', None)
+
+ if vlan_id:
+ config_dict['vlan-id'] = vlan_id
+ if hello_time:
+ config_dict['hello-time'] = hello_time
+ if max_age:
+ config_dict['max-age'] = max_age
+ if fwd_delay:
+ config_dict['forwarding-delay'] = fwd_delay
+ if bridge_priority:
+ config_dict['bridge-priority'] = bridge_priority
+ if interfaces:
+ intf_list = self.get_interfaces_list(interfaces)
+ if intf_list:
+ vlans_dict['interfaces'] = {'interface': intf_list}
+ if config_dict:
+ vlans_dict['vlan-id'] = vlan_id
+ vlans_dict['config'] = config_dict
+ if vlans_dict:
+ vlans_list.append(vlans_dict)
+
+ return vlans_list
+
+ def get_interfaces_list(self, interfaces):
+ intf_list = []
+ for intf in interfaces:
+ intf_dict = {}
+ intf_cfg_dict = {}
+ intf_name = intf.get('intf_name', None)
+ cost = intf.get('cost', None)
+ port_priority = intf.get('port_priority', None)
+
+ if intf_name:
+ intf_cfg_dict['name'] = intf_name
+ if cost:
+ intf_cfg_dict['cost'] = cost
+ if port_priority:
+ intf_cfg_dict['port-priority'] = port_priority
+ if intf_cfg_dict:
+ intf_dict['name'] = intf_name
+ intf_dict['config'] = intf_cfg_dict
+ intf_list.append(intf_dict)
+
+ return intf_list
+
+ def get_vlans_common(self, vlans, cfg_vlans):
+ """Returns the vlan ranges that are common in the want and have
+ vlans lists
+ """
+ vlans = self.get_vlan_id_list(vlans)
+ cfg_vlans = self.get_vlan_id_list(cfg_vlans)
+ return self.get_vlan_range_list(list(set(vlans).intersection(set(cfg_vlans))))
+
+ def get_vlans_diff(self, vlans, cfg_vlans):
+ """Returns the vlan ranges present only in the want vlans list
+ and not in the have vlans list
+ """
+ vlans = self.get_vlan_id_list(vlans)
+ cfg_vlans = self.get_vlan_id_list(cfg_vlans)
+ return self.get_vlan_range_list(list(set(vlans) - set(cfg_vlans)))
+
+ @staticmethod
+ def get_vlan_id_list(vlans):
+ """Returns a list of all VLAN IDs specified in a vlans list"""
+ vlan_id_list = []
+
+ if vlans:
+ for vlan_val in vlans:
+ if '-' in vlan_val or '..' in vlan_val:
+ start, end = re.split(r'-|\.\.', vlan_val)
+ vlan_id_list.extend(range(int(start), int(end) + 1))
+ else:
+ # Single VLAN ID
+ vlan_id_list.append(int(vlan_val))
+
+ return vlan_id_list
+
+ @staticmethod
+ def get_vlan_range_list(vlan_id_list):
+ """Returns the vlans list for a given list of VLAN IDs"""
+ vlan_range_list = []
+
+ if vlan_id_list:
+ vlan_id_list.sort()
+ for vlan_range in get_ranges_in_list(vlan_id_list):
+ vlan_range_list.append('-'.join(map(str, (vlan_range[0], vlan_range[-1])[:len(vlan_range)])))
+
+ return vlan_range_list
+
+ def convert_vlans_list(self, vlans):
+ converted_vlans = []
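+ # Rewrite range strings with the '..' separator used in the PATCH payload;
+ # a single-character entry is treated as a lone VLAN ID, e.g. ['5', '10-12'] -> [5, '10..12'].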
+
+ for vlan in vlans:
+ if len(vlan) == 1:
+ converted_vlans.append(int(vlan))
+ else:
+ converted_vlans.append(vlan.replace('-', '..'))
+
+ return converted_vlans
+
+ def get_delete_stp_requests(self, commands, have, is_delete_all):
+ requests = []
+
+ if not commands:
+ return requests
+
+ if is_delete_all:
+ requests.append(self.get_delete_all_stp_request())
+ else:
+ requests.extend(self.get_delete_stp_mstp_requests(commands, have))
+ requests.extend(self.get_delete_stp_pvst_requests(commands, have))
+ requests.extend(self.get_delete_stp_rapid_pvst_requests(commands, have))
+ requests.extend(self.get_delete_stp_interfaces_requests(commands, have))
+ requests.extend(self.get_delete_stp_global_requests(commands, have))
+
+ return requests
+
+ def get_delete_stp_global_requests(self, commands, have):
+ requests = []
+
+ stp_global = commands.get('global', None)
+ if stp_global:
+ enabled_protocol = stp_global.get('enabled_protocol', None)
+ loop_guard = stp_global.get('loop_guard', None)
+ bpdu_filter = stp_global.get('bpdu_filter', None)
+ disabled_vlans = stp_global.get('disabled_vlans', None)
+ root_guard_timeout = stp_global.get('root_guard_timeout', None)
+ portfast = stp_global.get('portfast', None)
+ hello_time = stp_global.get('hello_time', None)
+ max_age = stp_global.get('max_age', None)
+ fwd_delay = stp_global.get('fwd_delay', None)
+ bridge_priority = stp_global.get('bridge_priority', None)
+
+ cfg_stp_global = have.get('global', None)
+ if cfg_stp_global:
+ cfg_enabled_protocol = cfg_stp_global.get('enabled_protocol', None)
+ cfg_loop_guard = cfg_stp_global.get('loop_guard', None)
+ cfg_bpdu_filter = cfg_stp_global.get('bpdu_filter', None)
+ cfg_disabled_vlans = cfg_stp_global.get('disabled_vlans', None)
+ cfg_root_guard_timeout = cfg_stp_global.get('root_guard_timeout', None)
+ cfg_portfast = cfg_stp_global.get('portfast', None)
+ cfg_hello_time = cfg_stp_global.get('hello_time', None)
+ cfg_max_age = cfg_stp_global.get('max_age', None)
+ cfg_fwd_delay = cfg_stp_global.get('fwd_delay', None)
+ cfg_bridge_priority = cfg_stp_global.get('bridge_priority', None)
+
+ # Default loop_guard is false, don't delete if false
+ if loop_guard and loop_guard == cfg_loop_guard:
+ requests.append(self.get_delete_stp_global_attr('loop-guard'))
+ # Default bpdu_filter is false, don't delete if false
+ if bpdu_filter and bpdu_filter == cfg_bpdu_filter:
+ requests.append(self.get_delete_stp_global_attr('bpdu-filter'))
+ if disabled_vlans and cfg_disabled_vlans:
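+ # Only ranges configured on the device (common to want and have) are deleted;
+ # ranges are rewritten with '..' and joined with a percent-encoded comma for the URI.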
+ disabled_vlans_to_delete = self.get_vlans_common(disabled_vlans, cfg_disabled_vlans)
+ for i, vlan in enumerate(disabled_vlans_to_delete):
+ if '-' in vlan:
+ disabled_vlans_to_delete[i] = vlan.replace('-', '..')
+ if disabled_vlans_to_delete:
+ encoded_vlans = '%2C'.join(disabled_vlans_to_delete)
+ attr = 'openconfig-spanning-tree-ext:disabled-vlans=%s' % (encoded_vlans)
+ requests.append(self.get_delete_stp_global_attr(attr))
+ else:
+ commands['global'].pop('disabled_vlans')
+ if root_guard_timeout:
+ if root_guard_timeout == cfg_root_guard_timeout:
+ requests.append(self.get_delete_stp_global_attr('openconfig-spanning-tree-ext:rootguard-timeout'))
+ else:
+ commands['global'].pop('root_guard_timeout')
+ # Default portfast is false, don't delete if false
+ if portfast and portfast == cfg_portfast:
+ requests.append(self.get_delete_stp_global_attr('openconfig-spanning-tree-ext:portfast'))
+ if hello_time and hello_time == cfg_hello_time:
+ requests.append(self.get_delete_stp_global_attr('openconfig-spanning-tree-ext:hello-time'))
+ if max_age and max_age == cfg_max_age:
+ requests.append(self.get_delete_stp_global_attr('openconfig-spanning-tree-ext:max-age'))
+ if fwd_delay and fwd_delay == cfg_fwd_delay:
+ requests.append(self.get_delete_stp_global_attr('openconfig-spanning-tree-ext:forwarding-delay'))
+ if bridge_priority and bridge_priority == cfg_bridge_priority:
+ requests.append(self.get_delete_stp_global_attr('openconfig-spanning-tree-ext:bridge-priority'))
+ if enabled_protocol:
+ if enabled_protocol == cfg_enabled_protocol:
+ requests.append(self.get_delete_stp_global_attr('enabled-protocol'))
+ else:
+ commands['global'].pop('enabled_protocol')
+
+ return requests
+
+ def get_delete_stp_interfaces_requests(self, commands, have):
+ requests = []
+
+ interfaces = commands.get('interfaces', None)
+ if interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_dict = {}
+ intf_name = intf.get('intf_name', None)
+ edge_port = intf.get('edge_port', None)
+ link_type = intf.get('link_type', None)
+ guard = intf.get('guard', None)
+ bpdu_guard = intf.get('bpdu_guard', None)
+ bpdu_filter = intf.get('bpdu_filter', None)
+ portfast = intf.get('portfast', None)
+ uplink_fast = intf.get('uplink_fast', None)
+ shutdown = intf.get('shutdown', None)
+ cost = intf.get('cost', None)
+ port_priority = intf.get('port_priority', None)
+ stp_enable = intf.get('stp_enable', None)
+
+ cfg_interfaces = have.get('interfaces', None)
+ if cfg_interfaces:
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ cfg_edge_port = cfg_intf.get('edge_port', None)
+ cfg_link_type = cfg_intf.get('link_type', None)
+ cfg_guard = cfg_intf.get('guard', None)
+ cfg_bpdu_guard = cfg_intf.get('bpdu_guard', None)
+ cfg_bpdu_filter = cfg_intf.get('bpdu_filter', None)
+ cfg_portfast = cfg_intf.get('portfast', None)
+ cfg_uplink_fast = cfg_intf.get('uplink_fast', None)
+ cfg_shutdown = cfg_intf.get('shutdown', None)
+ cfg_cost = cfg_intf.get('cost', None)
+ cfg_port_priority = cfg_intf.get('port_priority', None)
+ cfg_stp_enable = cfg_intf.get('stp_enable', None)
+
+ if intf_name and intf_name == cfg_intf_name:
+ # Default edge_port is false, don't delete if false
+ if edge_port and edge_port == cfg_edge_port:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'edge-port'))
+ intf_dict.update({'intf_name': intf_name, 'edge_port': edge_port})
+ if link_type and link_type == cfg_link_type:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'link-type'))
+ intf_dict.update({'intf_name': intf_name, 'link_type': link_type})
+ if guard and guard == cfg_guard:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'guard'))
+ intf_dict.update({'intf_name': intf_name, 'guard': guard})
+ # Default bpdu_guard is false, don't delete if false
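+ # bpdu-guard (like uplink-fast, shutdown and spanning-tree-enable below) is
+ # reset by patching its default value back instead of issuing a REST DELETE.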
+ if bpdu_guard and bpdu_guard == cfg_bpdu_guard:
+ url = '%s/interfaces/interface=%s/config/bpdu-guard' % (STP_PATH, intf_name)
+ payload = {'openconfig-spanning-tree:bpdu-guard': False}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+ requests.append(request)
+ intf_dict.update({'intf_name': intf_name, 'bpdu_guard': bpdu_guard})
+ # Default bpdu_filter is false, don't delete if false
+ if bpdu_filter and bpdu_filter == cfg_bpdu_filter:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'bpdu-filter'))
+ intf_dict.update({'intf_name': intf_name, 'bpdu_filter': bpdu_filter})
+ # Default portfast is false, don't delete if false
+ if portfast and portfast == cfg_portfast:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'openconfig-spanning-tree-ext:portfast'))
+ intf_dict.update({'intf_name': intf_name, 'portfast': portfast})
+ # Default uplink_fast is false, don't delete if false
+ if uplink_fast and uplink_fast == cfg_uplink_fast:
+ url = '%s/interfaces/interface=%s/config/openconfig-spanning-tree-ext:uplink-fast' % (STP_PATH, intf_name)
+ payload = {'openconfig-spanning-tree-ext:uplink-fast': False}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+ requests.append(request)
+ intf_dict.update({'intf_name': intf_name, 'uplink_fast': uplink_fast})
+ # Default shutdown is false, don't delete if false
+ if shutdown and shutdown == cfg_shutdown:
+ url = '%s/interfaces/interface=%s/config/openconfig-spanning-tree-ext:bpdu-guard-port-shutdown' % (STP_PATH, intf_name)
+ payload = {'openconfig-spanning-tree-ext:bpdu-guard-port-shutdown': False}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+ requests.append(request)
+ intf_dict.update({'intf_name': intf_name, 'shutdown': shutdown})
+ if cost and cost == cfg_cost:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'openconfig-spanning-tree-ext:cost'))
+ intf_dict.update({'intf_name': intf_name, 'cost': cost})
+ if port_priority and port_priority == cfg_port_priority:
+ requests.append(self.get_delete_stp_interface_attr(intf_name, 'openconfig-spanning-tree-ext:port-priority'))
+ intf_dict.update({'intf_name': intf_name, 'port_priority': port_priority})
+ # Default stp_enable is true, don't delete if true
+ if stp_enable is False and stp_enable == cfg_stp_enable:
+ url = '%s/interfaces/interface=%s/config/openconfig-spanning-tree-ext:spanning-tree-enable' % (STP_PATH, intf_name)
+ payload = {'openconfig-spanning-tree-ext:spanning-tree-enable': True}
+ request = {'path': url, 'method': PATCH, 'data': payload}
+ requests.append(request)
+ intf_dict.update({'intf_name': intf_name, 'stp_enable': stp_enable})
+ if (edge_port is None and not link_type and not guard and bpdu_guard is None and bpdu_filter is None and portfast is None and
+ uplink_fast is None and shutdown is None and not cost and not port_priority and stp_enable is None):
+ requests.append(self.get_delete_stp_interface(intf_name))
+ intf_dict.update({'intf_name': intf_name})
+ if intf_dict:
+ intf_list.append(intf_dict)
+ if intf_list:
+ commands['interfaces'] = intf_list
+ else:
+ commands.pop('interfaces')
+
+ return requests
+
+ def get_delete_stp_mstp_requests(self, commands, have):
+ requests = []
+
+ mstp = commands.get('mstp', None)
+ if mstp:
+ mst_name = mstp.get('mst_name', None)
+ revision = mstp.get('revision', None)
+ max_hop = mstp.get('max_hop', None)
+ hello_time = mstp.get('hello_time', None)
+ max_age = mstp.get('max_age', None)
+ fwd_delay = mstp.get('fwd_delay', None)
+ mst_instances = mstp.get('mst_instances', None)
+
+ cfg_mstp = have.get('mstp', None)
+ if cfg_mstp:
+ cfg_mst_name = cfg_mstp.get('mst_name', None)
+ cfg_revision = cfg_mstp.get('revision', None)
+ cfg_max_hop = cfg_mstp.get('max_hop', None)
+ cfg_hello_time = cfg_mstp.get('hello_time', None)
+ cfg_max_age = cfg_mstp.get('max_age', None)
+ cfg_fwd_delay = cfg_mstp.get('fwd_delay', None)
+ cfg_mst_instances = cfg_mstp.get('mst_instances', None)
+
+ if mst_name:
+ if mst_name == cfg_mst_name:
+ requests.append(self.get_delete_stp_mstp_cfg_attr('name'))
+ else:
+ commands['mstp'].pop('mst_name')
+ if revision:
+ if revision == cfg_revision:
+ requests.append(self.get_delete_stp_mstp_cfg_attr('revision'))
+ else:
+ commands['mstp'].pop('revision')
+ if max_hop:
+ if max_hop == cfg_max_hop:
+ requests.append(self.get_delete_stp_mstp_cfg_attr('max-hop'))
+ else:
+ commands['mstp'].pop('max_hop')
+ if hello_time:
+ if hello_time == cfg_hello_time:
+ requests.append(self.get_delete_stp_mstp_cfg_attr('hello-time'))
+ else:
+ commands['mstp'].pop('hello_time')
+ if max_age:
+ if max_age == cfg_max_age:
+ requests.append(self.get_delete_stp_mstp_cfg_attr('max-age'))
+ else:
+ commands['mstp'].pop('max_age')
+ if fwd_delay:
+ if fwd_delay == cfg_fwd_delay:
+ requests.append(self.get_delete_stp_mstp_cfg_attr('forwarding-delay'))
+ else:
+ commands['mstp'].pop('fwd_delay')
+ if mst_instances:
+ mst_inst_list = []
+ for mst in mst_instances:
+ mst_inst_dict = {}
+ mst_id = mst.get('mst_id', None)
+ bridge_priority = mst.get('bridge_priority', None)
+ interfaces = mst.get('interfaces', None)
+ vlans = mst.get('vlans', None)
+ if cfg_mst_instances:
+ for cfg_mst in cfg_mst_instances:
+ cfg_mst_id = cfg_mst.get('mst_id', None)
+ cfg_bridge_priority = cfg_mst.get('bridge_priority', None)
+ cfg_interfaces = cfg_mst.get('interfaces', None)
+ cfg_vlans = cfg_mst.get('vlans', None)
+
+ if mst_id == cfg_mst_id:
+ if bridge_priority and bridge_priority == cfg_bridge_priority:
+ requests.append(self.get_delete_mst_inst_cfg_attr(mst_id, 'bridge-priority'))
+ mst_inst_dict.update({'mst_id': mst_id, 'bridge_priority': bridge_priority})
+ if interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_dict = {}
+ intf_name = intf.get('intf_name', None)
+ cost = intf.get('cost', None)
+ port_priority = intf.get('port_priority', None)
+
+ if cfg_interfaces:
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ cfg_cost = cfg_intf.get('cost', None)
+ cfg_port_priority = cfg_intf.get('port_priority', None)
+
+ if intf_name == cfg_intf_name:
+ if cost and cost == cfg_cost:
+ requests.append(self.get_delete_mst_intf_cfg_attr(mst_id, intf_name, 'cost'))
+ intf_dict.update({'intf_name': intf_name, 'cost': cost})
+ if port_priority and port_priority == cfg_port_priority:
+ requests.append(self.get_delete_mst_intf_cfg_attr(mst_id, intf_name, 'port-priority'))
+ intf_dict.update({'intf_name': intf_name, 'port_priority': port_priority})
+ if not cost and not port_priority:
+ requests.append(self.get_delete_mst_intf(mst_id, intf_name))
+ intf_dict.update({'intf_name': intf_name})
+ if intf_dict:
+ intf_list.append(intf_dict)
+ if intf_list:
+ mst_inst_dict.update({'mst_id': mst_id, 'interfaces': intf_list})
+
+ if vlans and cfg_vlans:
+ vlans_to_delete = self.get_vlans_common(vlans, cfg_vlans)
+ cmd_vlans = deepcopy(vlans_to_delete)
+ for i, vlan in enumerate(vlans_to_delete):
+ if '-' in vlan:
+ vlans_to_delete[i] = vlan.replace('-', '..')
+ if vlans_to_delete:
+ encoded_vlans = '%2C'.join(vlans_to_delete)
+ attr = 'vlan=%s' % (encoded_vlans)
+ requests.append(self.get_delete_mst_inst_cfg_attr(mst_id, attr))
+ mst_inst_dict.update({'mst_id': mst_id, 'vlans': cmd_vlans})
+ if not bridge_priority and not vlans and not interfaces:
+ requests.append(self.get_delete_mst_inst(mst_id))
+ mst_inst_dict.update({'mst_id': mst_id})
+ if mst_inst_dict:
+ mst_inst_list.append(mst_inst_dict)
+ if mst_inst_list:
+ commands['mstp']['mst_instances'] = mst_inst_list
+ else:
+ commands['mstp'].pop('mst_instances')
+ if not commands['mstp']:
+ commands.pop('mstp')
+
+ return requests
+
+ def get_delete_stp_pvst_requests(self, commands, have):
+ requests = []
+
+ pvst = commands.get('pvst', None)
+ if pvst:
+ vlans_list = []
+ for vlan in pvst:
+ vlans_dict = {}
+ vlan_id = vlan.get('vlan_id', None)
+ hello_time = vlan.get('hello_time', None)
+ max_age = vlan.get('max_age', None)
+ fwd_delay = vlan.get('fwd_delay', None)
+ bridge_priority = vlan.get('bridge_priority', None)
+ interfaces = vlan.get('interfaces', [])
+
+ cfg_pvst = have.get('pvst', None)
+ if cfg_pvst:
+ for cfg_vlan in cfg_pvst:
+ cfg_vlan_id = cfg_vlan.get('vlan_id', None)
+ cfg_hello_time = cfg_vlan.get('hello_time', None)
+ cfg_max_age = cfg_vlan.get('max_age', None)
+ cfg_fwd_delay = cfg_vlan.get('fwd_delay', None)
+ cfg_bridge_priority = cfg_vlan.get('bridge_priority', None)
+ cfg_interfaces = cfg_vlan.get('interfaces', [])
+
+ if vlan_id == cfg_vlan_id:
+ if hello_time and hello_time == cfg_hello_time:
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(vlan_id, 'hello-time'))
+ vlans_dict.update({'vlan_id': vlan_id, 'hello_time': hello_time})
+ if max_age and max_age == cfg_max_age:
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(vlan_id, 'max-age'))
+ vlans_dict.update({'vlan_id': vlan_id, 'max_age': max_age})
+ if fwd_delay and fwd_delay == cfg_fwd_delay:
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(vlan_id, 'forwarding-delay'))
+ vlans_dict.update({'vlan_id': vlan_id, 'fwd_delay': fwd_delay})
+ if bridge_priority and bridge_priority == cfg_bridge_priority:
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(vlan_id, 'bridge-priority'))
+ vlans_dict.update({'vlan_id': vlan_id, 'bridge_priority': bridge_priority})
+
+ if interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_dict = {}
+ intf_name = intf.get('intf_name', None)
+ cost = intf.get('cost', None)
+ port_priority = intf.get('port_priority', None)
+
+ if cfg_interfaces:
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ cfg_cost = cfg_intf.get('cost', None)
+ cfg_port_priority = cfg_intf.get('port_priority', None)
+
+ if intf_name == cfg_intf_name:
+ if cost and cost == cfg_cost:
+ requests.append(self.get_delete_pvst_intf_cfg_attr(vlan_id, intf_name, 'cost'))
+ intf_dict.update({'intf_name': intf_name, 'cost': cost})
+ if port_priority and port_priority == cfg_port_priority:
+ requests.append(self.get_delete_pvst_intf_cfg_attr(vlan_id, intf_name, 'port-priority'))
+ intf_dict.update({'intf_name': intf_name, 'port_priority': port_priority})
+ if not cost and not port_priority:
+ requests.append(self.get_delete_pvst_intf(vlan_id, intf_name))
+ intf_dict.update({'intf_name': intf_name})
+ if intf_dict:
+ intf_list.append(intf_dict)
+ if intf_list:
+ vlans_dict.update({'vlan_id': vlan_id, 'interfaces': intf_list})
+ if vlans_dict:
+ vlans_list.append(vlans_dict)
+ if vlans_list:
+ commands['pvst'] = vlans_list
+ else:
+ commands.pop('pvst')
+
+ return requests
+
+ def get_delete_stp_rapid_pvst_requests(self, commands, have):
+ requests = []
+
+ rapid_pvst = commands.get('rapid_pvst', None)
+ if rapid_pvst:
+ vlans_list = []
+ for vlan in rapid_pvst:
+ vlans_dict = {}
+ vlan_id = vlan.get('vlan_id', None)
+ hello_time = vlan.get('hello_time', None)
+ max_age = vlan.get('max_age', None)
+ fwd_delay = vlan.get('fwd_delay', None)
+ bridge_priority = vlan.get('bridge_priority', None)
+ interfaces = vlan.get('interfaces', [])
+
+ cfg_rapid_pvst = have.get('rapid_pvst', None)
+ if cfg_rapid_pvst:
+ for cfg_vlan in cfg_rapid_pvst:
+ cfg_vlan_id = cfg_vlan.get('vlan_id', None)
+ cfg_hello_time = cfg_vlan.get('hello_time', None)
+ cfg_max_age = cfg_vlan.get('max_age', None)
+ cfg_fwd_delay = cfg_vlan.get('fwd_delay', None)
+ cfg_bridge_priority = cfg_vlan.get('bridge_priority', None)
+ cfg_interfaces = cfg_vlan.get('interfaces', [])
+
+ if vlan_id == cfg_vlan_id:
+ if hello_time and hello_time == cfg_hello_time:
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(vlan_id, 'hello-time'))
+ vlans_dict.update({'vlan_id': vlan_id, 'hello_time': hello_time})
+ if max_age and max_age == cfg_max_age:
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(vlan_id, 'max-age'))
+ vlans_dict.update({'vlan_id': vlan_id, 'max_age': max_age})
+ if fwd_delay and fwd_delay == cfg_fwd_delay:
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(vlan_id, 'forwarding-delay'))
+ vlans_dict.update({'vlan_id': vlan_id, 'fwd_delay': fwd_delay})
+ if bridge_priority and bridge_priority == cfg_bridge_priority:
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(vlan_id, 'bridge-priority'))
+ vlans_dict.update({'vlan_id': vlan_id, 'bridge_priority': bridge_priority})
+
+ if interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_dict = {}
+ intf_name = intf.get('intf_name', None)
+ cost = intf.get('cost', None)
+ port_priority = intf.get('port_priority', None)
+
+ if cfg_interfaces:
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ cfg_cost = cfg_intf.get('cost', None)
+ cfg_port_priority = cfg_intf.get('port_priority', None)
+
+ if intf_name == cfg_intf_name:
+ if cost and cost == cfg_cost:
+ requests.append(self.get_delete_rapid_pvst_intf_cfg_attr(vlan_id, intf_name, 'cost'))
+ intf_dict.update({'intf_name': intf_name, 'cost': cost})
+ if port_priority and port_priority == cfg_port_priority:
+ requests.append(self.get_delete_rapid_pvst_intf_cfg_attr(vlan_id, intf_name, 'port-priority'))
+ intf_dict.update({'intf_name': intf_name, 'port_priority': port_priority})
+ if not cost and not port_priority:
+ requests.append(self.get_delete_rapid_pvst_intf(vlan_id, intf_name))
+ intf_dict.update({'intf_name': intf_name})
+ if intf_dict:
+ intf_list.append(intf_dict)
+ if intf_list:
+ vlans_dict.update({'vlan_id': vlan_id, 'interfaces': intf_list})
+ if vlans_dict:
+ vlans_list.append(vlans_dict)
+ if vlans_list:
+ commands['rapid_pvst'] = vlans_list
+ else:
+ commands.pop('rapid_pvst')
+
+ return requests
+
+ def get_delete_all_stp_request(self):
+ request = {'path': STP_PATH, 'method': DELETE}
+
+ return request
+
+ def get_delete_stp_global_attr(self, attr):
+ url = '%s/global/config/%s' % (STP_PATH, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_stp_interface(self, intf_name):
+ url = '%s/interfaces/interface=%s' % (STP_PATH, intf_name)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_stp_interface_attr(self, intf_name, attr):
+ url = '%s/interfaces/interface=%s/config/%s' % (STP_PATH, intf_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_stp_mstp_cfg_attr(self, attr):
+ url = '%s/mstp/config/%s' % (STP_PATH, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mst_inst(self, mst_id):
+ url = '%s/mstp/mst-instances/mst-instance=%s' % (STP_PATH, mst_id)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mst_inst_cfg_attr(self, mst_id, attr):
+ url = '%s/mstp/mst-instances/mst-instance=%s/config/%s' % (STP_PATH, mst_id, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mst_intf(self, mst_id, intf_name):
+ url = '%s/mstp/mst-instances/mst-instance=%s/interfaces/interface=%s' % (STP_PATH, mst_id, intf_name)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_mst_intf_cfg_attr(self, mst_id, intf_name, attr):
+ url = '%s/mstp/mst-instances/mst-instance=%s/interfaces/interface=%s/config/%s' % (STP_PATH, mst_id, intf_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_pvst_vlan_cfg_attr(self, vlan_id, attr):
+ url = '%s/openconfig-spanning-tree-ext:pvst/vlans=%s/config/%s' % (STP_PATH, vlan_id, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_pvst_intf(self, vlan_id, intf_name):
+ url = '%s/openconfig-spanning-tree-ext:pvst/vlans=%s/interfaces/interface=%s' % (STP_PATH, vlan_id, intf_name)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_pvst_intf_cfg_attr(self, vlan_id, intf_name, attr):
+ url = '%s/openconfig-spanning-tree-ext:pvst/vlans=%s/interfaces/interface=%s/config/%s' % (STP_PATH, vlan_id, intf_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_rapid_pvst_vlan_cfg_attr(self, vlan_id, attr):
+ url = '%s/rapid-pvst/vlan=%s/config/%s' % (STP_PATH, vlan_id, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_rapid_pvst_intf(self, vlan_id, intf_name):
+ url = '%s/rapid-pvst/vlan=%s/interfaces/interface=%s' % (STP_PATH, vlan_id, intf_name)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def get_delete_rapid_pvst_intf_cfg_attr(self, vlan_id, intf_name, attr):
+ url = '%s/rapid-pvst/vlan=%s/interfaces/interface=%s/config/%s' % (STP_PATH, vlan_id, intf_name, attr)
+ request = {'path': url, 'method': DELETE}
+
+ return request
+
+ def remove_default_entries(self, data):
+ stp_global = data.get('global', None)
+ interfaces = data.get('interfaces', None)
+
+ if stp_global:
+ loop_guard = stp_global.get('loop_guard', None)
+ bpdu_filter = stp_global.get('bpdu_filter', None)
+ portfast = stp_global.get('portfast', None)
+ hello_time = stp_global.get('hello_time', None)
+ max_age = stp_global.get('max_age', None)
+ fwd_delay = stp_global.get('fwd_delay', None)
+ bridge_priority = stp_global.get('bridge_priority', None)
+
+ if loop_guard is False:
+ stp_global.pop('loop_guard')
+ if bpdu_filter is False:
+ stp_global.pop('bpdu_filter')
+ if portfast is False:
+ stp_global.pop('portfast')
+ if hello_time == 2:
+ stp_global.pop('hello_time')
+ if max_age == 20:
+ stp_global.pop('max_age')
+ if fwd_delay == 15:
+ stp_global.pop('fwd_delay')
+ if bridge_priority == 32768:
+ stp_global.pop('bridge_priority')
+ if not stp_global:
+ data.pop('global')
+
+ if interfaces:
+ for intf in interfaces:
+ edge_port = intf.get('edge_port', None)
+ bpdu_guard = intf.get('bpdu_guard', None)
+ bpdu_filter = intf.get('bpdu_filter', None)
+ portfast = intf.get('portfast', None)
+ uplink_fast = intf.get('uplink_fast', None)
+ shutdown = intf.get('shutdown', None)
+ stp_enable = intf.get('stp_enable', None)
+
+ if edge_port is False:
+ intf.pop('edge_port')
+ if bpdu_guard is False:
+ intf.pop('bpdu_guard')
+ if bpdu_filter is False:
+ intf.pop('bpdu_filter')
+ if portfast is False:
+ intf.pop('portfast')
+ if uplink_fast is False:
+ intf.pop('uplink_fast')
+ if shutdown is False:
+ intf.pop('shutdown')
+ if stp_enable:
+ intf.pop('stp_enable')
+
+ def get_replaced_config(self, want, have):
+ config_dict = {}
+ requests = []
+ stp_global = want.get('global', None)
+ new_have = deepcopy(have)
+ self.remove_default_entries(new_have)
+ new_have = remove_empties(new_have)
+ cfg_stp_global = new_have.get('global', None)
+
+ if stp_global and cfg_stp_global and stp_global != cfg_stp_global:
+ requests.append(self.get_delete_all_stp_request())
+ return have, requests
+
+ interfaces = want.get('interfaces', None)
+ cfg_interfaces = have.get('interfaces', None)
+ if interfaces and cfg_interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_name = intf.get('intf_name', None)
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ if intf_name == cfg_intf_name:
+ if intf != cfg_intf:
+ intf_list.append(cfg_intf)
+ requests.append(self.get_delete_stp_interface(cfg_intf_name))
+ if intf_list:
+ config_dict['interfaces'] = intf_list
+
+ mstp = want.get('mstp', None)
+ cfg_mstp = have.get('mstp', None)
+ if mstp and cfg_mstp:
+ mst_name = mstp.get('mst_name', None)
+ revision = mstp.get('revision', None)
+ max_hop = mstp.get('max_hop', None)
+ hello_time = mstp.get('hello_time', None)
+ max_age = mstp.get('max_age', None)
+ fwd_delay = mstp.get('fwd_delay', None)
+ mst_instances = mstp.get('mst_instances', None)
+
+ cfg_mst_name = cfg_mstp.get('mst_name', None)
+ cfg_revision = cfg_mstp.get('revision', None)
+ cfg_max_hop = cfg_mstp.get('max_hop', None)
+ cfg_hello_time = cfg_mstp.get('hello_time', None)
+ cfg_max_age = cfg_mstp.get('max_age', None)
+ cfg_fwd_delay = cfg_mstp.get('fwd_delay', None)
+ cfg_mst_instances = cfg_mstp.get('mst_instances', None)
+
+ if ((mst_name and mst_name != cfg_mst_name) or (revision and revision != cfg_revision) or (max_hop and max_hop != cfg_max_hop) or
+ (hello_time and hello_time != cfg_hello_time) or (max_age and max_age != cfg_max_age) or
+ (fwd_delay and fwd_delay != cfg_fwd_delay)):
+ config_dict['mstp'] = cfg_mstp
+ requests.append({'path': '%s/mstp/config' % STP_PATH, 'method': DELETE})
+ requests.append({'path': '%s/mstp/mst-instances' % STP_PATH, 'method': DELETE})
+ else:
+ if mst_instances and cfg_mst_instances:
+ mst_inst_list = []
+ for mst in mst_instances:
+ mst_id = mst.get('mst_id', None)
+ bridge_priority = mst.get('bridge_priority', None)
+ vlans = mst.get('vlans', None)
+ if vlans:
+ vlans.sort()
+ interfaces = mst.get('interfaces', None)
+ for cfg_mst in cfg_mst_instances:
+ cfg_mst_id = cfg_mst.get('mst_id', None)
+ cfg_bridge_priority = cfg_mst.get('bridge_priority', None)
+ cfg_vlans = cfg_mst.get('vlans', None)
+ if cfg_vlans:
+ cfg_vlans.sort()
+ cfg_interfaces = cfg_mst.get('interfaces', None)
+
+ if mst_id == cfg_mst_id:
+ if ((bridge_priority and bridge_priority != cfg_bridge_priority) or (vlans and vlans != cfg_vlans)):
+ mst_inst_list.append(cfg_mst)
+ requests.append(self.get_delete_mst_inst(cfg_mst_id))
+ else:
+ if interfaces and cfg_interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_name = intf.get('intf_name', None)
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ if intf_name == cfg_intf_name:
+ if intf != cfg_intf:
+ intf_list.append(cfg_intf)
+ mst_inst_list.append({'mst_id': cfg_mst_id, 'interfaces': intf_list})
+ requests.append(self.get_delete_mst_intf(cfg_mst_id, cfg_intf_name))
+ if mst_inst_list:
+ config_dict['mstp'] = {'mst_instances': mst_inst_list}
+
+ pvst = want.get('pvst', None)
+ cfg_pvst = have.get('pvst', None)
+ if pvst and cfg_pvst:
+ vlans_list, vlans_requests = self.get_replaced_vlans_list(pvst, cfg_pvst, 'pvst')
+ if vlans_list:
+ config_dict['pvst'] = vlans_list
+ requests.extend(vlans_requests)
+
+ rapid_pvst = want.get('rapid_pvst', None)
+ cfg_rapid_pvst = have.get('rapid_pvst', None)
+ if rapid_pvst and cfg_rapid_pvst:
+ vlans_list, vlans_requests = self.get_replaced_vlans_list(rapid_pvst, cfg_rapid_pvst, 'rapid_pvst')
+ if vlans_list:
+ config_dict['rapid_pvst'] = vlans_list
+ requests.extend(vlans_requests)
+
+ return config_dict, requests
+
+ def get_replaced_vlans_list(self, want_data, have_data, protocol):
+ vlans_list = []
+ requests = []
+ for vlan in want_data:
+ vlan_id = vlan.get('vlan_id', None)
+ hello_time = vlan.get('hello_time', None)
+ max_age = vlan.get('max_age', None)
+ fwd_delay = vlan.get('fwd_delay', None)
+ bridge_priority = vlan.get('bridge_priority', None)
+ interfaces = vlan.get('interfaces', None)
+
+ for cfg_vlan in have_data:
+ cfg_vlan_id = cfg_vlan.get('vlan_id', None)
+ cfg_hello_time = cfg_vlan.get('hello_time', None)
+ cfg_max_age = cfg_vlan.get('max_age', None)
+ cfg_fwd_delay = cfg_vlan.get('fwd_delay', None)
+ cfg_bridge_priority = cfg_vlan.get('bridge_priority', None)
+ cfg_interfaces = cfg_vlan.get('interfaces', None)
+
+ if vlan_id == cfg_vlan_id:
+ if ((hello_time and hello_time != cfg_hello_time) or (max_age and max_age != cfg_max_age) or
+ (fwd_delay and fwd_delay != cfg_fwd_delay) or (bridge_priority and bridge_priority != cfg_bridge_priority)):
+ vlans_list.append(cfg_vlan)
+
+ if cfg_hello_time:
+ if protocol == 'pvst':
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(cfg_vlan_id, 'hello-time'))
+ elif protocol == 'rapid_pvst':
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(cfg_vlan_id, 'hello-time'))
+ if cfg_max_age:
+ if protocol == 'pvst':
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(cfg_vlan_id, 'max-age'))
+ elif protocol == 'rapid_pvst':
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(cfg_vlan_id, 'max-age'))
+ if cfg_fwd_delay:
+ if protocol == 'pvst':
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(cfg_vlan_id, 'forwarding-delay'))
+ elif protocol == 'rapid_pvst':
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(cfg_vlan_id, 'forwarding-delay'))
+ if cfg_bridge_priority:
+ if protocol == 'pvst':
+ requests.append(self.get_delete_pvst_vlan_cfg_attr(cfg_vlan_id, 'bridge-priority'))
+ elif protocol == 'rapid_pvst':
+ requests.append(self.get_delete_rapid_pvst_vlan_cfg_attr(cfg_vlan_id, 'bridge-priority'))
+ if cfg_interfaces:
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ if protocol == 'pvst':
+ requests.append(self.get_delete_pvst_intf(cfg_vlan_id, cfg_intf_name))
+ elif protocol == 'rapid_pvst':
+ requests.append(self.get_delete_rapid_pvst_intf(cfg_vlan_id, cfg_intf_name))
+
+ else:
+ if interfaces and cfg_interfaces:
+ intf_list = []
+ for intf in interfaces:
+ intf_name = intf.get('intf_name', None)
+ for cfg_intf in cfg_interfaces:
+ cfg_intf_name = cfg_intf.get('intf_name', None)
+ if intf_name == cfg_intf_name:
+ if intf != cfg_intf:
+ intf_list.append(cfg_intf)
+ vlans_list.append({'vlan_id': cfg_vlan_id, 'interfaces': intf_list})
+ if protocol == 'pvst':
+ requests.append(self.get_delete_pvst_intf(cfg_vlan_id, cfg_intf_name))
+ elif protocol == 'rapid_pvst':
+ requests.append(self.get_delete_rapid_pvst_intf(cfg_vlan_id, cfg_intf_name))
+
+ return vlans_list, requests
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py
index 21d575a1f..50225718b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/system/system.py
@@ -26,17 +26,45 @@ from ansible_collections.ansible.netcommon.plugins.module_utils.network.common i
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
+ send_requests,
get_diff,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
to_request,
edit_config
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ get_new_config,
+ get_formatted_config_diff
+)
PATCH = 'patch'
DELETE = 'delete'
+def __derive_system_config_delete_op(key_set, command, exist_conf):
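+ # Registered in TEST_KEYS_formatted_diff and used by get_new_config() to simulate a
+ # delete: each deleted leaf falls back to its default (hostname 'sonic', native
+ # interface naming, anycast enabled) rather than disappearing from the generated config.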
+ new_conf = exist_conf
+
+ if 'hostname' in command:
+ new_conf['hostname'] = 'sonic'
+ if 'interface_naming' in command:
+ new_conf['interface_naming'] = 'native'
+ if 'anycast_address' in command and 'anycast_address' in new_conf:
+ if 'ipv4' in command['anycast_address']:
+ new_conf['anycast_address']['ipv4'] = True
+ if 'ipv6' in command['anycast_address']:
+ new_conf['anycast_address']['ipv6'] = True
+ if 'mac_address' in command['anycast_address']:
+ new_conf['anycast_address']['mac_address'] = None
+
+ return True, new_conf
+
+
+TEST_KEYS_formatted_diff = [
+ {'__default_ops': {'__delete_op': __derive_system_config_delete_op}},
+]
+
+
class System(ConfigBase):
"""
The sonic_system class
@@ -90,6 +118,17 @@ class System(ConfigBase):
if result['changed']:
result['after'] = changed_system_facts
+ new_config = changed_system_facts
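+ # In check mode nothing is pushed to the device, so the post-change view is
+ # simulated from the generated commands instead of being read back as facts.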
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_system_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_system_facts,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -127,6 +166,11 @@ class System(ConfigBase):
elif state == 'merged':
diff = get_diff(want, have)
commands = self._state_merged(want, have, diff)
+ elif state == 'overridden':
+ commands = self._state_overridden(want, have)
+ elif state == 'replaced':
+ commands = self._state_replaced(want, have)
+
return commands
def _state_merged(self, want, have, diff):
@@ -142,6 +186,7 @@ class System(ConfigBase):
requests = self.get_create_system_request(want, diff)
if len(requests) > 0:
commands = update_states(diff, "merged")
+
return commands, requests
def _state_deleted(self, want, have):
@@ -167,6 +212,70 @@ class System(ConfigBase):
requests = self.get_delete_all_system_request(diff_want)
if len(requests) > 0:
commands = update_states(diff_want, "deleted")
+
+ return commands, requests
+
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ new_want = self.patch_want_with_default(want, ac_address_only=True)
+ replaced_config = self.get_replaced_config(have, new_want)
+ if replaced_config:
+ requests = self.get_delete_all_system_request(replaced_config)
+ send_requests(self._module, requests)
+ commands = new_want
+ else:
+ diff = get_diff(new_want, have)
+ commands = diff
+ if not commands:
+ commands = []
+
+ requests = []
+
+ if commands:
+ requests = self.get_create_system_request(have, commands)
+
+ if len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ new_want = self.patch_want_with_default(want)
+ if have and have != new_want:
+ requests = self.get_delete_all_system_request(have)
+ send_requests(self._module, requests)
+ have = []
+
+ commands = []
+ requests = []
+
+ if not have and new_want:
+ commands = new_want
+ requests = self.get_create_system_request(have, commands)
+ if len(requests) > 0:
+ commands = update_states(commands, "overridden")
+ else:
+ commands = []
+
return commands, requests
def get_create_system_request(self, want, commands):
@@ -190,8 +299,9 @@ class System(ConfigBase):
return requests
def build_create_hostname_payload(self, commands):
- payload = {"openconfig-system:config": {}}
+ payload = {}
if "hostname" in commands and commands["hostname"]:
+ payload = {"openconfig-system:config": {}}
payload['openconfig-system:config'].update({"hostname": commands["hostname"]})
return payload
@@ -221,6 +331,63 @@ class System(ConfigBase):
payload["sonic-sag:SAG_GLOBAL_LIST"].append(temp)
return payload
+ def patch_want_with_default(self, want, ac_address_only=False):
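+ # Fill unspecified attributes of 'want' with their device defaults so that
+ # replaced/overridden comparisons run against complete dictionaries;
+ # ac_address_only=True restricts the defaults to the anycast_address subtree.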
+ new_want = {}
+ if want is None:
+ if ac_address_only:
+ new_want = {'anycast_address': {'ipv4': True, 'ipv6': True, 'mac_address': None}}
+ else:
+ new_want = {'hostname': 'sonic', 'interface_naming': 'native',
+ 'anycast_address': {'ipv4': True, 'ipv6': True, 'mac_address': None}}
+ else:
+ new_want = want.copy()
+ new_anycast = {}
+ anycast = want.get('anycast_address', None)
+ if not anycast:
+ new_anycast = {'ipv4': True, 'ipv6': True, 'mac_address': None}
+ else:
+ new_anycast = anycast.copy()
+ ipv4 = anycast.get("ipv4", None)
+ if ipv4 is None:
+ new_anycast["ipv4"] = True
+ ipv6 = anycast.get("ipv6", None)
+ if ipv6 is None:
+ new_anycast["ipv6"] = True
+ mac = anycast.get("mac_address", None)
+ if mac is None:
+ new_anycast["mac_address"] = None
+ new_want["anycast_address"] = new_anycast
+
+ if not ac_address_only:
+ hostname = want.get('hostname', None)
+ if hostname is None:
+ new_want["hostname"] = 'sonic'
+ intf_name = want.get('interface_naming', None)
+ if intf_name is None:
+ new_want["interface_naming"] = 'native'
+ return new_want
+
+ def get_replaced_config(self, have, want):
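+ # Return the slice of 'have' that must be deleted before applying 'want':
+ # a hostname or interface_naming change returns the whole existing config,
+ # while an anycast_address change returns only that subtree.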
+
+ replaced_config = dict()
+
+ h_hostname = have.get('hostname', None)
+ w_hostname = want.get('hostname', None)
+ if (h_hostname != w_hostname) and w_hostname:
+ replaced_config = have.copy()
+ return replaced_config
+ h_intf_name = have.get('interface_naming', None)
+ w_intf_name = want.get('interface_naming', None)
+ if (h_intf_name != w_intf_name) and w_intf_name:
+ replaced_config = have.copy()
+ return replaced_config
+ h_ac_addr = have.get('anycast_address', None)
+ w_ac_addr = want.get('anycast_address', None)
+ if (h_ac_addr != w_ac_addr) and w_ac_addr:
+ replaced_config['anycast_address'] = h_ac_addr
+ return replaced_config
+ return replaced_config
+
def remove_default_entries(self, data):
new_data = {}
if not data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py
index 498fcbe28..e376fc82a 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/tacacs_server/tacacs_server.py
@@ -27,14 +27,25 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
update_states,
get_diff,
+ get_replaced_config,
get_normalize_interface_name,
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF,
+ get_new_config,
+ get_formatted_config_diff
+)
PATCH = 'patch'
DELETE = 'delete'
TEST_KEYS = [
{'host': {'name': ''}},
]
+TEST_KEYS_formatted_diff = [
+ {'__default_ops': {'__delete_op': __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF}},
+ {'host': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
class Tacacs_server(ConfigBase):
@@ -91,6 +102,17 @@ class Tacacs_server(ConfigBase):
if result['changed']:
result['after'] = changed_tacacs_server_facts
+ new_config = changed_tacacs_server_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_tacacs_server_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_tacacs_server_facts,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -180,6 +202,67 @@ class Tacacs_server(ConfigBase):
return commands, requests
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+
+ add_commands = []
+ if replaced_config:
+ del_requests = self.get_delete_tacacs_server_requests(replaced_config, have)
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+ add_commands = want
+ else:
+ add_commands = diff
+
+ if add_commands:
+ add_requests = self.get_modify_tacacs_server_requests(add_commands, have)
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "replaced"))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
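+ # The reverse diff catches attributes present on the device but absent from
+ # 'want'; if either direction differs, the existing servers are deleted and
+ # the wanted configuration is merged back in.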
+ r_diff = get_diff(have, want, TEST_KEYS)
+ if have and (diff or r_diff):
+ del_requests = self.get_delete_tacacs_server_requests(have, have)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ want_commands = want
+ want_requests = self.get_modify_tacacs_server_requests(want_commands, have)
+
+ if len(want_requests) > 0:
+ requests.extend(want_requests)
+ commands.extend(update_states(want_commands, "overridden"))
+
+ return commands, requests
+
def get_tacacs_global_payload(self, conf):
payload = {}
global_cfg = {}
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py
index 73398cf74..9c79cd0e4 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/users/users.py
@@ -26,14 +26,21 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
edit_config
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
- dict_to_set,
update_states,
get_diff,
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
from ansible.module_utils.connection import ConnectionError
PATCH = 'patch'
DELETE = 'delete'
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
class Users(ConfigBase):
@@ -83,7 +90,7 @@ class Users(ConfigBase):
except ConnectionError as exc:
try:
json_obj = json.loads(str(exc).replace("'", '"'))
- if json_obj and type(json_obj) is dict and 401 == json_obj['code']:
+ if json_obj and isinstance(json_obj, dict) and 401 == json_obj['code']:
auth_error = True
warnings.append("Unable to get after configs as password got changed for current user")
else:
@@ -101,6 +108,19 @@ class Users(ConfigBase):
if result['changed']:
result['after'] = changed_users_facts
+ new_config = changed_users_facts
+ old_config = existing_users_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_users_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+ if self._module._diff:
+ self.sort_lists_in_config(new_config)
+ self.sort_lists_in_config(old_config)
+ result['diff'] = get_formatted_config_diff(old_config,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -133,8 +153,7 @@ class Users(ConfigBase):
want = []
new_want = [{'name': conf['name'], 'role': conf['role']} for conf in want]
- new_have = [{'name': conf['name'], 'role': conf['role']} for conf in have]
- new_diff = get_diff(new_want, new_have)
+ new_diff = get_diff(new_want, have)
diff = []
for cfg in new_diff:
@@ -187,7 +206,7 @@ class Users(ConfigBase):
:returns: the commands necessary to remove the current configuration
of the provided objects
"""
- # if want is none, then delete all the usersi except admin
+ # if want is none, then delete all the users except admin
if not want:
commands = have
else:
@@ -202,6 +221,65 @@ class Users(ConfigBase):
return commands, requests
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is merged
+
+ :param want: the additive configuration as a dictionary
+ :param obj_in_have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to replace the current configuration
+ wit the provided configuration
+ """
+ self.validate_new_users(want, have)
+
+ commands = diff
+ requests = self.get_modify_users_requests(commands, have)
+ if commands and len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+ new_want = [{'name': conf['name'], 'role': conf['role']} for conf in want]
+ new_have = []
+ for conf in have:
+ # Exclude admin user from new_have if it isn't present in new_want
+ if conf['name'] == 'admin' and not any(cfg['name'] == 'admin' for cfg in new_want):
+ continue
+ else:
+ new_have.append({'name': conf['name'], 'role': conf['role']})
+
+ if diff or new_want != new_have:
+ # Delete all users except admin
+ del_requests = self.get_delete_users_requests(have, have)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ # Merge want configuration
+ mod_commands = want
+ mod_requests = self.get_modify_users_requests(mod_commands, have)
+
+ if mod_commands and len(mod_requests) > 0:
+ requests.extend(mod_requests)
+ commands.extend(update_states(mod_commands, "overridden"))
+
+ return commands, requests
+
def get_pwd(self, pw):
clear_pwd = hashed_pwd = ""
pwd = pw.replace("\\", "")
@@ -281,7 +359,7 @@ class Users(ConfigBase):
if not commands:
return requests
- # Skip the asmin user in 'deleted' state. we cannot delete all users
+ # Skip the admin user in 'deleted' state. we cannot delete all users
admin_usr = None
for conf in commands:
@@ -297,3 +375,7 @@ class Users(ConfigBase):
if admin_usr:
commands.remove(admin_usr)
return requests
+
+ def sort_lists_in_config(self, config):
+ if config:
+ config.sort(key=lambda x: x['name'])
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tests/interface_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tests/interface_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/vlan_mapping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/vlan_mapping.py
new file mode 100644
index 000000000..acb72db17
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlan_mapping/vlan_mapping.py
@@ -0,0 +1,517 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic_vlan_mapping class
+It is in this file where the current configuration (as dict)
+is compared to the provided configuration (as dict) and the command set
+necessary to bring the current configuration to its desired end-state is
+created
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
+ ConfigBase,
+)
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ to_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff,
+ update_states,
+ remove_empties_from_list,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+TEST_KEYS = [
+ {'config': {'name': ''}},
+ {'mapping': {'service_vlan': '', 'dot1q_tunnel': ''}},
+]
+
+
+class Vlan_mapping(ConfigBase):
+ """
+ The sonic_vlan_mapping class
+ """
+
+ gather_subset = [
+ '!all',
+ '!min',
+ ]
+
+ gather_network_resources = [
+ 'vlan_mapping',
+ ]
+
+ def __init__(self, module):
+ super(Vlan_mapping, self).__init__(module)
+
+ def get_vlan_mapping_facts(self):
+ """ Get the 'facts' (the current configuration)
+
+ :rtype: A dictionary
+ :returns: The current configuration as a dictionary
+ """
+ facts, _warnings = Facts(self._module).get_facts(self.gather_subset, self.gather_network_resources)
+ vlan_mapping_facts = facts['ansible_network_resources'].get('vlan_mapping')
+ if not vlan_mapping_facts:
+ return []
+ return vlan_mapping_facts
+
+ def execute_module(self):
+ """ Execute the module
+
+ :rtype: A dictionary
+ :returns: The result from module execution
+ """
+ result = {'changed': False}
+ warnings = list()
+
+ existing_vlan_mapping_facts = self.get_vlan_mapping_facts()
+ commands, requests = self.set_config(existing_vlan_mapping_facts)
+ if commands:
+ if not self._module.check_mode:
+ try:
+ edit_config(self._module, to_request(self._module, requests))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ result['changed'] = True
+ result['commands'] = commands
+
+ changed_vlan_mapping_facts = self.get_vlan_mapping_facts()
+
+ result['before'] = existing_vlan_mapping_facts
+ if result['changed']:
+ result['after'] = changed_vlan_mapping_facts
+
+ result['warnings'] = warnings
+ return result
+
+ def set_config(self, existing_vlan_mapping_facts):
+ """ Collect the configuration from the args passed to the module,
+ collect the current configuration (as a dict from facts)
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ want = remove_empties_from_list(self._module.params['config'])
+ have = existing_vlan_mapping_facts
+ resp = self.set_state(want, have)
+ return to_list(resp)
+
+ def set_state(self, want, have):
+ """ Select the appropriate function based on the state provided
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ state = self._module.params['state']
+ have = self.convert_vlan_ids_range(have)
+ want = self.convert_vlan_ids_range(want)
+ diff = get_diff(want, have, TEST_KEYS)
+
+ if state == 'overridden':
+ commands, requests = self._state_overridden(want, have, diff)
+ elif state == 'deleted':
+ commands, requests = self._state_deleted(want, have)
+ elif state == 'merged':
+ commands, requests = self._state_merged(want, have, diff)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have, diff)
+
+ ret_commands = remove_empties_from_list(commands)
+ return ret_commands, requests
+
+ def _state_replaced(self, want, have, diff):
+ """ The command generator when state is replaced
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ requests = []
+ commands = []
+ commands_del = []
+
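+ # Existing mappings whose attributes differ from the requested config are deleted first,
+ # then re-created from 'want' below.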
+ commands_del = self.get_replaced_delete_list(want, have)
+
+ if commands_del:
+ commands.extend(update_states(commands_del, "deleted"))
+
+ requests_del = self.get_delete_vlan_mapping_requests(commands_del, have, is_delete_all=True)
+ if requests_del:
+ requests.extend(requests_del)
+
+ if diff or commands_del:
+ requests_rep = self.get_create_vlan_mapping_requests(want, have)
+ if len(requests_rep):
+ requests.extend(requests_rep)
+ commands = update_states(want, "replaced")
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_overridden(self, want, have, diff):
+ """ The command generator when state is overridden
+
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ commands_del = get_diff(have, want, TEST_KEYS)
+ if commands_del:
+ requests_del = self.get_delete_vlan_mapping_requests(commands_del, have, is_delete_all=True)
+ requests.extend(requests_del)
+ commands_del = update_states(commands_del, "deleted")
+ commands.extend(commands_del)
+
+ commands_over = diff
+ if diff:
+ requests_over = self.get_create_vlan_mapping_requests(commands_over, have)
+ requests.extend(requests_over)
+ commands_over = update_states(commands_over, "overridden")
+ commands.extend(commands_over)
+
+ return commands, requests
+
+ def _state_merged(self, want, have, diff):
+ """ The command generator when state is merged
+
+ :rtype: A list
+ :returns: the commands necessary to merge the provided into
+ the current configuration
+ """
+ commands = diff
+ requests = self.get_create_vlan_mapping_requests(commands, have)
+
+ if commands and len(requests):
+ commands = update_states(commands, 'merged')
+ else:
+ commands = []
+
+ return commands, requests
+
+ def _state_deleted(self, want, have):
+ """ The command generator when state is deleted
+
+ :rtype: A list
+ :returns: the commands necessary to remove the current configuration
+ of the provided objects
+ """
+ commands = []
+ requests = []
+ is_delete_all = False
+
+ if not want:
+ commands = have
+ is_delete_all = True
+ else:
+ commands = want
+
+ requests.extend(self.get_delete_vlan_mapping_requests(commands, have, is_delete_all))
+
+ if len(requests) == 0:
+ commands = []
+
+ if commands:
+ commands = update_states(commands, 'deleted')
+
+ return commands, requests
+
+ def get_replaced_delete_list(self, commands, have):
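+ # Return the entries in 'commands' whose service_vlan already exists in 'have' but whose
+ # priority, inner_vlan, dot1q_tunnel or vlan_ids differ; these must be deleted before re-creation.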
+ matched = []
+
+ for cmd in commands:
+ name = cmd.get('name', None)
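+ # Interface names containing '/' are percent-encoded ('%2f') before being embedded in REST URIs.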
+ interface_name = name.replace('/', '%2f')
+ mapping_list = cmd.get('mapping', [])
+
+ matched_interface_name = None
+ matched_mapping_list = []
+ for existing in have:
+ have_name = existing.get('name', None)
+ have_interface_name = have_name.replace('/', '%2f')
+ have_mapping_list = existing.get('mapping', [])
+ if interface_name == have_interface_name:
+ matched_interface_name = have_interface_name
+ matched_mapping_list = have_mapping_list
+
+ if mapping_list and matched_mapping_list:
+ returned_mapping_list = []
+ for mapping in mapping_list:
+ service_vlan = mapping.get('service_vlan', None)
+
+ for matched_mapping in matched_mapping_list:
+ matched_service_vlan = matched_mapping.get('service_vlan', None)
+
+ if matched_service_vlan and service_vlan:
+ if matched_service_vlan == service_vlan:
+ priority = mapping.get('priority', None)
+ have_priority = matched_mapping.get('priority', None)
+ inner_vlan = mapping.get('inner_vlan', None)
+ have_inner_vlan = matched_mapping.get('inner_vlan', None)
+ dot1q_tunnel = mapping.get('dot1q_tunnel', False)
+ have_dot1q_tunnel = matched_mapping.get('dot1q_tunnel', False)
+ vlan_ids = mapping.get('vlan_ids', [])
+ have_vlan_ids = matched_mapping.get('vlan_ids', [])
+
+ if priority != have_priority:
+ returned_mapping_list.append(mapping)
+ elif inner_vlan != have_inner_vlan:
+ returned_mapping_list.append(mapping)
+ elif dot1q_tunnel != have_dot1q_tunnel:
+ returned_mapping_list.append(mapping)
+ elif sorted(vlan_ids) != sorted(have_vlan_ids):
+ returned_mapping_list.append(mapping)
+
+ if returned_mapping_list:
+ matched.append({'name': interface_name, 'mapping': returned_mapping_list})
+
+ return matched
+
+ def get_delete_vlan_mapping_requests(self, commands, have, is_delete_all):
+ """ Get list of requests to delete vlan mapping configurations
+ for all interfaces specified by the commands
+ """
+ url = "data/openconfig-interfaces:interfaces/interface={}/openconfig-interfaces-ext:mapped-vlans/mapped-vlan={}"
+ priority_url = "/ingress-mapping/config/mapped-vlan-priority"
+ vlan_ids_url = "/match/single-tagged/config/vlan-ids={}"
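+ # Example with hypothetical values: url.format("Ethernet8", 100) targets mapped-vlan 100 on Ethernet8.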
+ method = "DELETE"
+ requests = []
+
+ # Delete all vlan mappings
+ if is_delete_all:
+ for cmd in commands:
+ name = cmd.get('name', None)
+ interface_name = name.replace('/', '%2f')
+ mapping_list = cmd.get('mapping', [])
+
+ if mapping_list:
+ for mapping in mapping_list:
+ service_vlan = mapping.get('service_vlan', None)
+ path = url.format(interface_name, service_vlan)
+ request = {"path": path, "method": method}
+ requests.append(request)
+
+ return requests
+
+ else:
+ for cmd in commands:
+ name = cmd.get('name', None)
+ interface_name = name.replace('/', '%2f')
+ mapping_list = cmd.get('mapping', [])
+
+ # Checks if there is an interface matching the delete command
+ have_interface_name = None
+ have_mapping_list = []
+ for tmp in have:
+ tmp_name = tmp.get('name', None)
+ tmp_interface_name = tmp_name.replace('/', '%2f')
+ tmp_mapping_list = tmp.get('mapping', [])
+ if interface_name == tmp_interface_name:
+ have_interface_name = tmp_interface_name
+ have_mapping_list = tmp_mapping_list
+
+ # Delete part or all of single mapping
+ if mapping_list:
+ for mapping in mapping_list:
+ service_vlan = mapping.get('service_vlan', None)
+ vlan_ids = mapping.get('vlan_ids', None)
+ priority = mapping.get('priority', None)
+
+ # Checks if there is a vlan mapping matching the delete command
+ have_service_vlan = None
+ have_vlan_ids = None
+ have_priority = None
+ for have_mapping in have_mapping_list:
+ if have_mapping.get('service_vlan', None) == service_vlan:
+ have_service_vlan = have_mapping.get('service_vlan', None)
+ have_vlan_ids = have_mapping.get('vlan_ids', None)
+ have_priority = have_mapping.get('priority', None)
+
+ if service_vlan and have_service_vlan:
+ if vlan_ids or priority:
+ # Delete priority
+ if priority and have_priority:
+ path = url.format(interface_name, service_vlan) + priority_url
+ request = {"path": path, "method": method}
+ requests.append(request)
+ # Delete vlan ids
+ if vlan_ids and have_vlan_ids:
+ vlan_ids_str = ""
+ same_vlan_ids_list = self.get_vlan_ids_diff(vlan_ids, have_vlan_ids, same=True)
+ if same_vlan_ids_list:
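+ # Build the vlan-ids query value: range notation uses ".." (e.g. "10..20") and multiple
+ # entries are joined with "%2C", the URL-encoded comma.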
+ for vlan in same_vlan_ids_list:
+ if vlan_ids_str:
+ vlan_ids_str = vlan_ids_str + "%2C" + vlan.replace("-", "..")
+ else:
+ vlan_ids_str = vlan.replace("-", "..")
+ path = url.format(interface_name, service_vlan) + vlan_ids_url.format(vlan_ids_str)
+ request = {"path": path, "method": method}
+ requests.append(request)
+ # Delete entire mapping
+ else:
+ path = url.format(interface_name, service_vlan)
+ request = {"path": path, "method": method}
+ requests.append(request)
+ # Delete all mappings in an interface
+ else:
+ if have_mapping_list:
+ for mapping in have_mapping_list:
+ service_vlan = mapping.get('service_vlan', None)
+ path = url.format(interface_name, service_vlan)
+ request = {"path": path, "method": method}
+ requests.append(request)
+
+ return requests
+
+ def get_create_vlan_mapping_requests(self, commands, have):
+ """ Get list of requests to create/modify vlan mapping configurations
+ for all interfaces specified by the commands
+ """
+ requests = []
+ if not commands:
+ return requests
+
+ for cmd in commands:
+ name = cmd.get('name', None)
+ interface_name = name.replace('/', '%2f')
+ mapping_list = cmd.get('mapping', [])
+
+ if mapping_list:
+ for mapping in mapping_list:
+ requests.append(self.get_create_vlan_mapping_request(interface_name, mapping))
+ return requests
+
+ def get_create_vlan_mapping_request(self, interface_name, mapping):
+ url = "data/openconfig-interfaces:interfaces/interface={}/openconfig-interfaces-ext:mapped-vlans"
+ body = {}
+ method = "PATCH"
+ match_data = None
+
+ service_vlan = mapping.get('service_vlan', None)
+ priority = mapping.get('priority', None)
+ vlan_ids = mapping.get('vlan_ids', [])
+ dot1q_tunnel = mapping.get('dot1q_tunnel', None)
+ inner_vlan = mapping.get('inner_vlan', None)
+
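+ # dot1q_tunnel False => VLAN translation: SWAP the tag on both ingress and egress.
+ # dot1q_tunnel True => QinQ tunnelling: PUSH the service VLAN on ingress, POP it on egress.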
+ if not dot1q_tunnel:
+ if len(vlan_ids) > 1:
+ raise Exception("When dot1q-tunnel is false only one VLAN ID can be passed to the vlan_ids list")
+ if not vlan_ids and priority:
+ match_data = None
+ elif vlan_ids:
+ if inner_vlan:
+ match_data = {'double-tagged': {'config': {'inner-vlan-id': inner_vlan, 'outer-vlan-id': int(vlan_ids[0])}}}
+ else:
+ match_data = {'single-tagged': {'config': {'vlan-ids': [int(vlan_ids[0])]}}}
+ if priority:
+ ing_data = {'config': {'vlan-stack-action': 'SWAP', 'mapped-vlan-priority': priority}}
+ egr_data = {'config': {'vlan-stack-action': 'SWAP', 'mapped-vlan-priority': priority}}
+ else:
+ ing_data = {'config': {'vlan-stack-action': 'SWAP'}}
+ egr_data = {'config': {'vlan-stack-action': 'SWAP'}}
+ else:
+ if inner_vlan:
+ raise Exception("Inner vlan can only be passed when dot1q_tunnel is false")
+ if not vlan_ids and priority:
+ match_data = None
+ elif vlan_ids:
+ vlan_ids_list = []
+ for vlan in vlan_ids:
+ vlan_ids_list.append(int(vlan))
+ match_data = {'single-tagged': {'config': {'vlan-ids': vlan_ids_list}}}
+ if priority:
+ ing_data = {'config': {'vlan-stack-action': 'PUSH', 'mapped-vlan-priority': priority}}
+ egr_data = {'config': {'vlan-stack-action': 'POP', 'mapped-vlan-priority': priority}}
+ else:
+ ing_data = {'config': {'vlan-stack-action': 'PUSH'}}
+ egr_data = {'config': {'vlan-stack-action': 'POP'}}
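+ # The 'match' container is omitted when no VLAN match criteria were supplied (priority-only update).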
+ if match_data:
+ body = {'openconfig-interfaces-ext:mapped-vlans': {'mapped-vlan': [
+ {'vlan-id': service_vlan,
+ 'config': {'vlan-id': service_vlan},
+ 'match': match_data,
+ 'ingress-mapping': ing_data,
+ 'egress-mapping': egr_data}
+ ]}}
+ else:
+ body = {'openconfig-interfaces-ext:mapped-vlans': {'mapped-vlan': [
+ {'vlan-id': service_vlan,
+ 'config': {'vlan-id': service_vlan},
+ 'ingress-mapping': ing_data,
+ 'egress-mapping': egr_data}
+ ]}}
+
+ request = {"path": url.format(interface_name), "method": method, "data": body}
+ return request
+
+ def get_vlan_ids_diff(self, vlan_ids, have_vlan_ids, same):
+ """ Takes two vlan id lists and finds the difference.
+ :param vlan_ids: list of vlan ids to check for differences
+ :param have_vlan_ids: list of vlan ids to compare against
+ :param same: if True, return the list of shared values instead of the difference
+ :rtype: list(str)
+ """
+ results = []
+
+ for vlan_id in vlan_ids:
+ if same:
+ if vlan_id in have_vlan_ids:
+ results.append(vlan_id)
+ else:
+ if vlan_id not in have_vlan_ids:
+ results.append(vlan_id)
+
+ return results
+
+ def vlanIdsRangeStr(self, vlanList):
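+ # Expand range notation into individual IDs, e.g. ["10-12", "20"] -> ["10", "11", "12", "20"]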
+ rangeList = []
+ for vid in vlanList:
+ if "-" in vid:
+ vidList = vid.split("-")
+ lower = int(vidList[0])
+ upper = int(vidList[1])
+ for i in range(lower, upper + 1):
+ rangeList.append(str(i))
+ else:
+ rangeList.append(vid)
+ return rangeList
+
+ def convert_vlan_ids_range(self, config):
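+ # Expand range notation in every mapping's vlan_ids list so that diffs compare individual VLAN IDs.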
+
+ interface_index = 0
+ for conf in config:
+ name = conf.get('name', None)
+ interface_name = name.replace('/', '%2f')
+ mapping_list = conf.get('mapping', [])
+
+ mapping_index = 0
+ if mapping_list:
+ for mapping in mapping_list:
+ vlan_ids = mapping.get('vlan_ids', None)
+
+ if vlan_ids:
+ config[interface_index]['mapping'][mapping_index]['vlan_ids'] = self.vlanIdsRangeStr(vlan_ids)
+ mapping_index = mapping_index + 1
+ interface_index = interface_index + 1
+
+ return config
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py
index 404051074..0a0b105a7 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vlans/vlans.py
@@ -14,17 +14,17 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import json
-
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
+ search_obj_in_list,
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts import Facts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
get_diff,
+ get_replaced_config,
update_states,
remove_empties_from_list,
)
@@ -35,23 +35,20 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
-from ansible.module_utils._text import to_native
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
from ansible.module_utils.connection import ConnectionError
-import traceback
-
-LIB_IMP_ERR = None
-ERR_MSG = None
-try:
- import jinja2
- HAS_LIB = True
-except Exception as e:
- HAS_LIB = False
- ERR_MSG = to_native(e)
- LIB_IMP_ERR = traceback.format_exc()
+
TEST_KEYS = [
{'config': {'vlan_id': ''}},
]
+TEST_KEYS_formatted_diff = [
+ {'config': {'vlan_id': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+]
class Vlans(ConfigBase):
@@ -109,6 +106,18 @@ class Vlans(ConfigBase):
if result['changed']:
result['after'] = changed_vlans_facts
+ new_config = changed_vlans_facts
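+ # In check mode no requests were sent; derive the prospective configuration from the generated commands instead.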
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_vlans_facts,
+ TEST_KEYS_formatted_diff)
+ new_config.sort(key=lambda x: x['vlan_id'])
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_vlans_facts,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -121,7 +130,7 @@ class Vlans(ConfigBase):
to the desired configuration
"""
want = remove_empties_from_list(self._module.params['config'])
- have = existing_vlans_facts
+ have = remove_empties_from_list(existing_vlans_facts)
resp = self.set_state(want, have)
return to_list(resp)
@@ -157,7 +166,29 @@ class Vlans(ConfigBase):
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
- return self._state_merged(want, have, diff)
+ commands = []
+ requests = []
+
+ replaced_config = get_replaced_config(want, have, TEST_KEYS)
+ replaced_vlans = []
+ for config in replaced_config:
+ vlan_obj = search_obj_in_list(config['vlan_id'], want, 'vlan_id')
+ if vlan_obj and vlan_obj.get('description', None) is None:
+ replaced_vlans.append(config)
+
+ if replaced_vlans:
+ del_requests = self.get_delete_vlans_requests(replaced_vlans, False)
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+
+ if diff:
+ rep_commands = diff
+ rep_requests = self.get_create_vlans_requests(rep_commands)
+ if len(rep_requests) > 0:
+ requests.extend(rep_requests)
+ commands.extend(update_states(rep_commands, "replaced"))
+
+ return commands, requests
def _state_overridden(self, want, have, diff):
""" The command generator when state is overridden
@@ -166,20 +197,41 @@ class Vlans(ConfigBase):
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
- ret_requests = list()
- commands = list()
- vlans_to_delete = get_diff(have, want, TEST_KEYS)
- if vlans_to_delete:
- delete_vlans_requests = self.get_delete_vlans_requests(vlans_to_delete)
- ret_requests.extend(delete_vlans_requests)
- commands.extend(update_states(vlans_to_delete, "deleted"))
+ commands = []
+ requests = []
+
+ r_diff = get_diff(have, want, TEST_KEYS)
+ if not diff and not r_diff:
+ return commands, requests
+
+ del_vlans = []
+ del_descr_vlans = []
+ for config in r_diff:
+ vlan_obj = search_obj_in_list(config['vlan_id'], want, 'vlan_id')
+ if vlan_obj:
+ if vlan_obj.get('description', None) is None:
+ del_descr_vlans.append(config)
+ else:
+ del_vlans.append(config)
+
+ if del_vlans:
+ del_requests = self.get_delete_vlans_requests(del_vlans, True)
+ requests.extend(del_requests)
+ commands.extend(update_states(del_vlans, "deleted"))
+
+ if del_descr_vlans:
+ del_requests = self.get_delete_vlans_requests(del_descr_vlans, False)
+ requests.extend(del_requests)
+ commands.extend(update_states(del_descr_vlans, "deleted"))
if diff:
- vlans_to_create_requests = self.get_create_vlans_requests(diff)
- ret_requests.extend(vlans_to_create_requests)
- commands.extend(update_states(diff, "merged"))
+ ovr_commands = diff
+ ovr_requests = self.get_create_vlans_requests(ovr_commands)
+ if len(ovr_requests) > 0:
+ requests.extend(ovr_requests)
+ commands.extend(update_states(ovr_commands, "overridden"))
- return commands, ret_requests
+ return commands, requests
def _state_merged(self, want, have, diff):
""" The command generator when state is merged
@@ -204,16 +256,18 @@ class Vlans(ConfigBase):
"""
commands = list()
# if want is none, then delete all the vlans
+ delete_vlan = False
if not want:
commands = have
+ delete_vlan = True
else: # delete specific vlans
commands = get_diff(want, diff, TEST_KEYS)
- requests = self.get_delete_vlans_requests(commands)
+ requests = self.get_delete_vlans_requests(commands, delete_vlan)
commands = update_states(commands, "deleted")
return commands, requests
- def get_delete_vlans_requests(self, configs):
+ def get_delete_vlans_requests(self, configs, delete_vlan=False):
requests = []
if not configs:
return requests
@@ -223,7 +277,7 @@ class Vlans(ConfigBase):
for vlan in configs:
vlan_id = vlan.get("vlan_id")
description = vlan.get("description")
- if description:
+ if description and not delete_vlan:
path = self.get_delete_vlan_config_attr(vlan_id, "description")
else:
path = url.format(vlan_id)
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vrfs/vrfs.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vrfs/vrfs.py
index 83deb0ecb..2a07e6456 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vrfs/vrfs.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vrfs/vrfs.py
@@ -14,6 +14,7 @@ created
from __future__ import absolute_import, division, print_function
__metaclass__ = type
+from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.cfg.base import (
ConfigBase,
)
@@ -30,12 +31,22 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
update_states,
normalize_interface_name
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.formatted_diff_utils import (
+ __DELETE_CONFIG_IF_NO_SUBCONFIG,
+ get_new_config,
+ get_formatted_config_diff
+)
from ansible.module_utils.connection import ConnectionError
PATCH = 'patch'
DELETE = 'DELETE'
+MGMT_VRF_NAME = 'mgmt'
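+# The management VRF gets special handling below; it is excluded from blanket delete operations.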
TEST_KEYS = [
- {'interfaces': {'name': ''}},
+ {'interfaces': {'name': ''}}
+]
+TEST_KEYS_formatted_diff = [
+ {'config': {'name': ''}},
+ {'interfaces': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}}
]
@@ -97,6 +108,17 @@ class Vrfs(ConfigBase):
if result['changed']:
result['after'] = changed_vrf_interfaces_facts
+ new_config = changed_vrf_interfaces_facts
+ if self._module.check_mode:
+ result.pop('after', None)
+ new_config = get_new_config(commands, existing_vrf_interfaces_facts,
+ TEST_KEYS_formatted_diff)
+ result['after(generated)'] = new_config
+
+ if self._module._diff:
+ result['diff'] = get_formatted_config_diff(existing_vrf_interfaces_facts,
+ new_config,
+ self._module._verbosity)
result['warnings'] = warnings
return result
@@ -137,6 +159,10 @@ class Vrfs(ConfigBase):
commands, requests = self._state_deleted(want, have)
elif state == 'merged':
commands, requests = self._state_merged(want, have, diff)
+ elif state == 'overridden':
+ commands, requests = self._state_overridden(want, have)
+ elif state == 'replaced':
+ commands, requests = self._state_replaced(want, have)
return commands, requests
@@ -172,7 +198,7 @@ class Vrfs(ConfigBase):
"""
# if want is none, then delete all the vrfs
if not want:
- commands = have
+ commands = self.preprocess_mgmt_vrf_for_deleted(have)
self.delete_all_flag = True
else:
commands = want
@@ -180,7 +206,7 @@ class Vrfs(ConfigBase):
requests = []
if commands:
- requests = self.get_delete_vrf_interface_requests(commands, have, want)
+ requests = self.get_delete_vrf_interface_requests(commands, have)
if len(requests) > 0:
commands = update_states(commands, "deleted")
@@ -189,7 +215,76 @@ class Vrfs(ConfigBase):
return commands, requests
- def get_delete_vrf_interface_requests(self, configs, have, want):
+ def _state_replaced(self, want, have):
+ """ The command generator when state is replaced
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ commands = []
+ requests = []
+
+ replaced_config = self.get_replaced_config(have, want)
+ self.sort_config(replaced_config)
+ self.sort_config(want)
+
+ if replaced_config and replaced_config != want:
+ self.delete_all_flag = False
+ del_requests = self.get_delete_vrf_interface_requests(replaced_config, have, 'replaced')
+ requests.extend(del_requests)
+ commands.extend(update_states(replaced_config, "deleted"))
+ replaced_config = []
+
+ if not replaced_config and want:
+ add_commands = want
+ add_requests = self.get_create_requests(add_commands, have)
+
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "replaced"))
+
+ return commands, requests
+
+ def _state_overridden(self, want, have):
+ """ The command generator when state is overridden
+
+ :param want: the desired configuration as a dictionary
+ :param have: the current configuration as a dictionary
+ :param diff: the difference between want and have
+ :rtype: A list
+ :returns: the commands necessary to migrate the current configuration
+ to the desired configuration
+ """
+ self.sort_config(have)
+ self.sort_config(want)
+
+ commands = []
+ requests = []
+
+ if have and have != want:
+ want, have = self.preprocess_mgmt_vrf_for_overridden(want, have)
+
+ self.delete_all_flag = True
+ del_requests = self.get_delete_vrf_interface_requests(have, have)
+ requests.extend(del_requests)
+ commands.extend(update_states(have, "deleted"))
+ have = []
+
+ if not have and want:
+ add_commands = want
+ add_requests = self.get_create_requests(add_commands, have)
+
+ if len(add_requests) > 0:
+ requests.extend(add_requests)
+ commands.extend(update_states(add_commands, "overridden"))
+
+ return commands, requests
+
+ def get_delete_vrf_interface_requests(self, configs, have, state=None):
requests = []
if not configs:
return requests
@@ -211,21 +306,29 @@ class Vrfs(ConfigBase):
continue
# if members are not mentioned, delete the vrf name
- if (self._module.params['state'] == 'deleted' and self.delete_all_flag) or empty_flag:
+ adjusted_delete_all_flag = name != MGMT_VRF_NAME and self.delete_all_flag
+ adjusted_empty_flag = empty_flag
+ if state == 'replaced':
+ adjusted_empty_flag = empty_flag and name != MGMT_VRF_NAME
+
+ if adjusted_delete_all_flag or adjusted_empty_flag:
url = 'data/openconfig-network-instance:network-instances/network-instance={0}'.format(name)
request = {"path": url, "method": method}
requests.append(request)
else:
- matched_members = matched.get('members', None)
-
- if matched_members:
- matched_intf = matched_members.get('interfaces', None)
- if matched_intf:
- for del_mem in matched_intf:
- url = 'data/openconfig-network-instance:network-instances/'
- url = url + 'network-instance={0}/interfaces/interface={1}'.format(name, del_mem['name'])
- request = {"path": url, "method": method}
- requests.append(request)
+ have_members = matched.get('members', None)
+ conf_members = conf.get('members', None)
+
+ if have_members:
+ have_intf = have_members.get('interfaces', None)
+ conf_intf = conf_members.get('interfaces', None)
+ if conf_intf:
+ for del_mem in conf_intf:
+ if del_mem in have_intf:
+ url = 'data/openconfig-network-instance:network-instances/'
+ url = url + 'network-instance={0}/interfaces/interface={1}'.format(name, del_mem['name'])
+ request = {"path": url, "method": method}
+ requests.append(request)
return requests
@@ -301,3 +404,62 @@ class Vrfs(ConfigBase):
network_inst_payload["openconfig-network-instance:interface"].append(member_payload)
return network_inst_payload
+
+ def get_vrf_name(self, vrf):
+ return vrf.get('name')
+
+ def get_interface_name(self, intf):
+ return intf.get('name')
+
+ def sort_config(self, conf):
+ if conf:
+ conf.sort(key=self.get_vrf_name)
+ for vrf in conf:
+ if vrf.get('members', None) and vrf['members'].get('interfaces', None):
+ vrf['members']['interfaces'].sort(key=self.get_interface_name)
+
+ def get_replaced_config(self, have, want):
+
+ replaced_vrfs = []
+ for vrf in want:
+ vrf_name = vrf['name']
+ have_vrf = next((h_vrf for h_vrf in have if h_vrf['name'] == vrf_name), None)
+ if have_vrf:
+ replaced_vrfs.append(have_vrf)
+
+ return replaced_vrfs
+
+ def preprocess_mgmt_vrf_for_deleted(self, have):
+ new_have = have
+ conf = next((vrf for vrf in new_have if vrf['name'] == MGMT_VRF_NAME), None)
+ if conf:
+ new_have = deepcopy(have)
+ new_have.remove(conf)
+ return new_have
+
+ def preprocess_mgmt_vrf_for_overridden(self, want, have):
+ new_want = deepcopy(want)
+ new_have = deepcopy(have)
+ h_conf = next((vrf for vrf in new_have if vrf['name'] == MGMT_VRF_NAME), None)
+ if h_conf:
+ conf = next((vrf for vrf in new_want if vrf['name'] == MGMT_VRF_NAME), None)
+ if conf:
+ mv_intfs = []
+ if conf.get('members', None) and conf['members'].get('interfaces', None):
+ mv_intfs = conf['members'].get('interfaces', [])
+
+ h_mv_intfs = []
+ if h_conf.get('members', None) and h_conf['members'].get('interfaces', None):
+ h_mv_intfs = h_conf['members'].get('interfaces', [])
+
+ mv_intfs.sort(key=lambda x: x['name'])
+ h_mv_intfs.sort(key=lambda x: x['name'])
+ if mv_intfs == h_mv_intfs:
+ new_want.remove(conf)
+ new_have.remove(h_conf)
+ elif not h_mv_intfs:
+ new_have.remove(h_conf)
+ else:
+ new_have.remove(h_conf)
+
+ return new_want, new_have
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vxlans/vxlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vxlans/vxlans.py
index d44adcedf..0a87c98c8 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vxlans/vxlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/config/vxlans/vxlans.py
@@ -26,7 +26,9 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
get_diff,
- update_states
+ update_states,
+ get_replaced_config,
+ send_requests
)
from ansible.module_utils.connection import ConnectionError
@@ -124,7 +126,7 @@ class Vxlans(ConfigBase):
diff = get_diff(want, have, test_keys)
if state == 'overridden':
- commands, requests = self._state_overridden(want, have, diff)
+ commands, requests = self._state_overridden(want, have)
elif state == 'deleted':
commands, requests = self._state_deleted(want, have, diff)
elif state == 'merged':
@@ -141,57 +143,63 @@ class Vxlans(ConfigBase):
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
+ requests = []
+ replaced_config = get_replaced_config(want, have, test_keys)
+
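+ # Overlapping existing entries are deleted right away (requests are sent immediately here),
+ # then the full 'want' configuration is re-created.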
+ if replaced_config:
+ self.sort_lists_in_config(replaced_config)
+ self.sort_lists_in_config(have)
+ is_delete_all = (replaced_config == have)
+ if is_delete_all:
+ requests = self.get_delete_all_vxlan_request(have)
+ else:
+ requests = self.get_delete_vxlan_request(replaced_config, have)
+
+ send_requests(self._module, requests)
+ commands = want
+ else:
+ commands = diff
requests = []
- commands = []
- commands_del = get_diff(have, want, test_keys)
- requests_del = []
- if commands_del:
- requests_del = self.get_delete_vxlan_request(commands_del, have)
- if requests_del:
- requests.extend(requests_del)
- commands_del = update_states(commands_del, "deleted")
- commands.extend(commands_del)
-
- commands_rep = diff
- requests_rep = []
- if commands_rep:
- requests_rep = self.get_create_vxlans_request(commands_rep, have)
- if requests_rep:
- requests.extend(requests_rep)
- commands_rep = update_states(commands_rep, "replaced")
- commands.extend(commands_rep)
+ if commands:
+ requests = self.get_create_vxlans_request(commands, have)
+ if len(requests) > 0:
+ commands = update_states(commands, "replaced")
+ else:
+ commands = []
+ else:
+ commands = []
return commands, requests
- def _state_overridden(self, want, have, diff):
+ def _state_overridden(self, want, have):
""" The command generator when state is overridden
:rtype: A list
:returns: the commands necessary to migrate the current configuration
to the desired configuration
"""
- requests = []
+ self.sort_lists_in_config(want)
+ self.sort_lists_in_config(have)
+
+ if have and have != want:
+ requests = self.get_delete_all_vxlan_request(have)
+ send_requests(self._module, requests)
+
+ have = []
+
commands = []
+ requests = []
+
+ if not have and want:
+ commands = want
+ requests = self.get_create_vxlans_request(commands, have)
- commands_del = get_diff(have, want)
- requests_del = []
- if commands_del:
- requests_del = self.get_delete_vxlan_request(commands_del, have)
- if requests_del:
- requests.extend(requests_del)
- commands_del = update_states(commands_del, "deleted")
- commands.extend(commands_del)
-
- commands_over = diff
- requests_over = []
- if commands_over:
- requests_over = self.get_create_vxlans_request(commands_over, have)
- if requests_over:
- requests.extend(requests_over)
- commands_over = update_states(commands_over, "overridden")
- commands.extend(commands_over)
+ if len(requests) > 0:
+ commands = update_states(commands, "overridden")
+ else:
+ commands = []
return commands, requests
@@ -271,6 +279,7 @@ class Vxlans(ConfigBase):
vlan_map_requests = []
src_ip_requests = []
primary_ip_requests = []
+ evpn_nvo_requests = []
tunnel_requests = []
# Need to delete in reverse order of creation.
@@ -282,6 +291,7 @@ class Vxlans(ConfigBase):
vrf_map_list = conf.get('vrf_map', [])
src_ip = conf.get('source_ip', None)
primary_ip = conf.get('primary_ip', None)
+ evpn_nvo = conf.get('evpn_nvo', None)
if vrf_map_list:
vrf_map_requests.extend(self.get_delete_vrf_map_request(conf, conf, name, vrf_map_list))
@@ -291,6 +301,8 @@ class Vxlans(ConfigBase):
src_ip_requests.extend(self.get_delete_src_ip_request(conf, conf, name, src_ip))
if primary_ip:
primary_ip_requests.extend(self.get_delete_primary_ip_request(conf, conf, name, primary_ip))
+ if evpn_nvo:
+ evpn_nvo_requests.extend(self.get_delete_evpn_request(conf, conf, evpn_nvo))
tunnel_requests.extend(self.get_delete_tunnel_request(conf, conf, name))
if vrf_map_requests:
@@ -301,6 +313,8 @@ class Vxlans(ConfigBase):
requests.extend(src_ip_requests)
if primary_ip_requests:
requests.extend(primary_ip_requests)
+ if evpn_nvo_requests:
+ requests.extend(evpn_nvo_requests)
if tunnel_requests:
requests.extend(tunnel_requests)
@@ -315,6 +329,7 @@ class Vxlans(ConfigBase):
vrf_map_requests = []
vlan_map_requests = []
src_ip_requests = []
+ evpn_nvo_requests = []
primary_ip_requests = []
tunnel_requests = []
@@ -325,6 +340,7 @@ class Vxlans(ConfigBase):
name = conf['name']
src_ip = conf.get('source_ip', None)
+ evpn_nvo = conf.get('evpn_nvo', None)
primary_ip = conf.get('primary_ip', None)
vlan_map_list = conf.get('vlan_map', None)
vrf_map_list = conf.get('vrf_map', None)
@@ -342,7 +358,7 @@ class Vxlans(ConfigBase):
is_delete_full = False
if (name and vlan_map_list is None and vrf_map_list is None and
- src_ip is None and primary_ip is None):
+ src_ip is None and evpn_nvo is None and primary_ip is None):
is_delete_full = True
vrf_map_list = matched.get("vrf_map", [])
vlan_map_list = matched.get("vlan_map", [])
@@ -364,7 +380,8 @@ class Vxlans(ConfigBase):
have_vlan_map_count -= len(temp_vlan_map_requests)
if src_ip:
src_ip_requests.extend(self.get_delete_src_ip_request(conf, matched, name, src_ip))
-
+ if evpn_nvo:
+ evpn_nvo_requests.extend(self.get_delete_evpn_request(conf, matched, evpn_nvo))
if primary_ip:
primary_ip_requests.extend(self.get_delete_primary_ip_request(conf, matched, name, primary_ip))
if is_delete_full:
@@ -376,6 +393,8 @@ class Vxlans(ConfigBase):
requests.extend(vlan_map_requests)
if src_ip_requests:
requests.extend(src_ip_requests)
+ if evpn_nvo_requests:
+ requests.extend(evpn_nvo_requests)
if primary_ip_requests:
requests.extend(primary_ip_requests)
if tunnel_requests:
@@ -399,7 +418,7 @@ class Vxlans(ConfigBase):
payload = self.build_create_tunnel_payload(conf)
request = {"path": url, "method": PATCH, "data": payload}
requests.append(request)
- if conf.get('source_ip', None):
+ if conf.get('evpn_nvo', None):
requests.append(self.get_create_evpn_request(conf))
return requests
@@ -502,12 +521,23 @@ class Vxlans(ConfigBase):
payload_url = dict({"sonic-vrf:vni": vrf_map['vni']})
return payload_url
- def get_delete_evpn_request(self, conf):
+ def get_delete_evpn_request(self, conf, matched, del_evpn_nvo):
# Create URL and payload
- url = "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST={evpn_nvo}".format(evpn_nvo=conf['evpn_nvo'])
- request = {"path": url, "method": DELETE}
+ requests = []
- return request
+ url = "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST={evpn_nvo}"
+
+ is_change_needed = False
+ if matched:
+ matched_evpn_nvo = matched.get('evpn_nvo', None)
+ if matched_evpn_nvo and matched_evpn_nvo == del_evpn_nvo:
+ is_change_needed = True
+
+ if is_change_needed:
+ request = {"path": url.format(evpn_nvo=conf['evpn_nvo']), "method": DELETE}
+ requests.append(request)
+
+ return requests
def get_delete_tunnel_request(self, conf, matched, name):
# Create URL and payload
@@ -530,9 +560,7 @@ class Vxlans(ConfigBase):
if matched_source_ip and matched_source_ip == del_source_ip:
is_change_needed = True
- # Delete the EVPN NVO if the source_ip address is being deleted.
if is_change_needed:
- requests.append(self.get_delete_evpn_request(conf))
request = {"path": url.format(name=name), "method": DELETE}
requests.append(request)
@@ -604,3 +632,18 @@ class Vxlans(ConfigBase):
requests.append(request)
return requests
+
+ def sort_lists_in_config(self, config):
+ if config:
+ config.sort(key=self.get_name)
+ for cfg in config:
+ if 'vlan_map' in cfg and cfg['vlan_map']:
+ cfg['vlan_map'].sort(key=self.get_vni)
+ if 'vrf_map' in cfg and cfg['vrf_map']:
+ cfg['vrf_map'].sort(key=self.get_vni)
+
+ def get_name(self, name):
+ return name.get('name')
+
+ def get_vni(self, vni):
+ return vni.get('vni')
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/aaa.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/aaa.py
index 5a7bd05c9..541a5805e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/aaa.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/aaa/aaa.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tests/lag_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tests/lag_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/acl_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/acl_interfaces.py
new file mode 100644
index 000000000..ec2973d84
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/acl_interfaces/acl_interfaces.py
@@ -0,0 +1,148 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic acl_interfaces fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.acl_interfaces.acl_interfaces import Acl_interfacesArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+
+class Acl_interfacesFacts(object):
+ """ The sonic acl_interfaces fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Acl_interfacesArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for acl_interfaces
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ if not data:
+ acl_interfaces_configs = self.get_acl_interfaces()
+
+ objs = []
+ for interface_config in acl_interfaces_configs.items():
+ obj = self.render_config(self.generated_spec, interface_config)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('acl_interfaces', None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['acl_interfaces'] = params['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config['name'] = conf[0]
+ config['access_groups'] = []
+
+ acls = {'mac': [], 'ipv4': [], 'ipv6': []}
+ for acl in conf[1]:
+ acl_type = acl.pop('type')
+ if acl_type in ('ACL_L2', 'openconfig-acl:ACL_L2'):
+ acls['mac'].append(acl)
+ elif acl_type in ('ACL_IPV4', 'openconfig-acl:ACL_IPV4'):
+ acls['ipv4'].append(acl)
+ elif acl_type in ('ACL_IPV6', 'openconfig-acl:ACL_IPV6'):
+ acls['ipv6'].append(acl)
+
+ for acl_type, acl_list in acls.items():
+ if acl_list:
+ config['access_groups'].append({
+ 'type': acl_type,
+ 'acls': acl_list
+ })
+
+ return config
+
+ def get_acl_interfaces(self):
+ """Get all interface access-group configurations available in chassis"""
+ acl_interfaces_path = 'data/openconfig-acl:acl/interfaces'
+ method = 'GET'
+ request = [{'path': acl_interfaces_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ acl_interfaces = []
+ if response[0][1].get('openconfig-acl:interfaces'):
+ acl_interfaces = response[0][1]['openconfig-acl:interfaces'].get('interface', [])
+
+ acl_interfaces_configs = {}
+ for interface in acl_interfaces:
+ acls_list = []
+
+ ingress_acls = interface.get('ingress-acl-sets', {}).get('ingress-acl-set', [])
+ for acl in ingress_acls:
+ if acl.get('config'):
+ acls_list.append({
+ 'name': acl['config']['set-name'],
+ 'type': acl['config']['type'],
+ 'direction': 'in'
+ })
+
+ egress_acls = interface.get('egress-acl-sets', {}).get('egress-acl-set', [])
+ for acl in egress_acls:
+ if acl.get('config'):
+ acls_list.append({
+ 'name': acl['config']['set-name'],
+ 'type': acl['config']['type'],
+ 'direction': 'out'
+ })
+
+ acl_interfaces_configs[interface['id']] = acls_list
+
+ return acl_interfaces_configs
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/bfd.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/bfd.py
new file mode 100644
index 000000000..b8786947d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bfd/bfd.py
@@ -0,0 +1,236 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic bfd fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bfd.bfd import BfdArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+
+class BfdFacts(object):
+ """ The sonic bfd fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = BfdArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for bfd
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+
+ if not data:
+ bfd_cfg = self.get_bfd_config(self._module)
+ data = self.update_bfd(bfd_cfg)
+ objs = self.render_config(self.generated_spec, data)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['bfd'] = params['config']
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def update_bfd(self, data):
+ bfd_dict = {}
+ if data:
+ bfd_dict['profiles'] = self.update_profiles(data)
+ bfd_dict['single_hops'] = self.update_single_hops(data)
+ bfd_dict['multi_hops'] = self.update_multi_hops(data)
+
+ return bfd_dict
+
+ def update_profiles(self, data):
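+ # Translate openconfig-bfd-ext profile leaves into module argspec keys
+ # (for example, 'desired-minimum-tx-interval' maps to 'transmit_interval').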
+ all_profiles = []
+ bfd_profile = data.get('openconfig-bfd-ext:bfd-profile', None)
+ if bfd_profile:
+ profile_list = bfd_profile.get('profile', None)
+ if profile_list:
+ for profile in profile_list:
+ profile_dict = {}
+ profile_name = profile['profile-name']
+ config = profile['config']
+ enabled = config.get('enabled', None)
+ transmit_interval = config.get('desired-minimum-tx-interval', None)
+ receive_interval = config.get('required-minimum-receive', None)
+ detect_multiplier = config.get('detection-multiplier', None)
+ passive_mode = config.get('passive-mode', None)
+ min_ttl = config.get('minimum-ttl', None)
+ echo_interval = config.get('desired-minimum-echo-receive', None)
+ echo_mode = config.get('echo-active', None)
+
+ if profile_name:
+ profile_dict['profile_name'] = profile_name
+ if enabled is not None:
+ profile_dict['enabled'] = enabled
+ if transmit_interval:
+ profile_dict['transmit_interval'] = transmit_interval
+ if receive_interval:
+ profile_dict['receive_interval'] = receive_interval
+ if detect_multiplier:
+ profile_dict['detect_multiplier'] = detect_multiplier
+ if passive_mode is not None:
+ profile_dict['passive_mode'] = passive_mode
+ if min_ttl:
+ profile_dict['min_ttl'] = min_ttl
+ if echo_interval:
+ profile_dict['echo_interval'] = echo_interval
+ if echo_mode is not None:
+ profile_dict['echo_mode'] = echo_mode
+ if profile_dict:
+ all_profiles.append(profile_dict)
+
+ return all_profiles
+
+ def update_single_hops(self, data):
+ all_single_hops = []
+ bfd_single_hop = data.get('openconfig-bfd-ext:bfd-shop-sessions', None)
+ if bfd_single_hop:
+ single_hop_list = bfd_single_hop.get('single-hop', None)
+ if single_hop_list:
+ for hop in single_hop_list:
+ single_hop_dict = {}
+ remote_address = hop['remote-address']
+ vrf = hop['vrf']
+ interface = hop['interface']
+ local_address = hop['local-address']
+ config = hop['config']
+ enabled = config.get('enabled', None)
+ transmit_interval = config.get('desired-minimum-tx-interval', None)
+ receive_interval = config.get('required-minimum-receive', None)
+ detect_multiplier = config.get('detection-multiplier', None)
+ passive_mode = config.get('passive-mode', None)
+ echo_interval = config.get('desired-minimum-echo-receive', None)
+ echo_mode = config.get('echo-active', None)
+ profile_name = config.get('profile-name', None)
+
+ if remote_address:
+ single_hop_dict['remote_address'] = remote_address
+ if vrf:
+ single_hop_dict['vrf'] = vrf
+ if interface:
+ single_hop_dict['interface'] = interface
+ if local_address:
+ single_hop_dict['local_address'] = local_address
+ if enabled is not None:
+ single_hop_dict['enabled'] = enabled
+ if transmit_interval:
+ single_hop_dict['transmit_interval'] = transmit_interval
+ if receive_interval:
+ single_hop_dict['receive_interval'] = receive_interval
+ if detect_multiplier:
+ single_hop_dict['detect_multiplier'] = detect_multiplier
+ if passive_mode is not None:
+ single_hop_dict['passive_mode'] = passive_mode
+ if echo_interval:
+ single_hop_dict['echo_interval'] = echo_interval
+ if echo_mode is not None:
+ single_hop_dict['echo_mode'] = echo_mode
+ if profile_name:
+ single_hop_dict['profile_name'] = profile_name
+ if single_hop_dict:
+ all_single_hops.append(single_hop_dict)
+
+ return all_single_hops
+
+ def update_multi_hops(self, data):
+ all_multi_hops = []
+ bfd_multi_hop = data.get('openconfig-bfd-ext:bfd-mhop-sessions', None)
+ if bfd_multi_hop:
+ multi_hop_list = bfd_multi_hop.get('multi-hop', None)
+ if multi_hop_list:
+ for hop in multi_hop_list:
+ multi_hop_dict = {}
+ remote_address = hop['remote-address']
+ vrf = hop['vrf']
+ local_address = hop['local-address']
+ config = hop['config']
+ enabled = config.get('enabled', None)
+ transmit_interval = config.get('desired-minimum-tx-interval', None)
+ receive_interval = config.get('required-minimum-receive', None)
+ detect_multiplier = config.get('detection-multiplier', None)
+ passive_mode = config.get('passive-mode', None)
+ min_ttl = config.get('minimum-ttl', None)
+ profile_name = config.get('profile-name', None)
+
+ if remote_address:
+ multi_hop_dict['remote_address'] = remote_address
+ if vrf:
+ multi_hop_dict['vrf'] = vrf
+ if local_address:
+ multi_hop_dict['local_address'] = local_address
+ if enabled is not None:
+ multi_hop_dict['enabled'] = enabled
+ if transmit_interval:
+ multi_hop_dict['transmit_interval'] = transmit_interval
+ if receive_interval:
+ multi_hop_dict['receive_interval'] = receive_interval
+ if detect_multiplier:
+ multi_hop_dict['detect_multiplier'] = detect_multiplier
+ if passive_mode is not None:
+ multi_hop_dict['passive_mode'] = passive_mode
+ if min_ttl:
+ multi_hop_dict['min_ttl'] = min_ttl
+ if profile_name:
+ multi_hop_dict['profile_name'] = profile_name
+ if multi_hop_dict:
+ all_multi_hops.append(multi_hop_dict)
+
+ return all_multi_hops
+
+ def get_bfd_config(self, module):
+ bfd_cfg = None
+ get_bfd_path = '/data/openconfig-bfd:bfd'
+ request = {'path': get_bfd_path, 'method': 'get'}
+
+ try:
+ response = edit_config(module, to_request(module, request))
+ if 'openconfig-bfd:bfd' in response[0][1]:
+ bfd_cfg = response[0][1].get('openconfig-bfd:bfd', None)
+ except ConnectionError as exc:
+ module.fail_json(msg=str(exc), code=exc.code)
+ return bfd_cfg
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py
index c86b53c2a..7ecf253e7 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp/bgp.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -49,6 +48,7 @@ class BgpFacts(object):
'admin_max_med': ['max-med', 'admin-max-med-val'],
'max_med_on_startup_timer': ['max-med', 'time'],
'max_med_on_startup_med_val': ['max-med', 'max-med-val'],
+ 'rt_delay': 'route-map-process-delay'
}
def __init__(self, module, subspec='config', options='options'):
@@ -92,8 +92,8 @@ class BgpFacts(object):
ansible_facts['ansible_network_resources'].pop('bgp', None)
facts = {}
if objs:
- params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)})
- facts['bgp'] = params['config']
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['bgp'] = remove_empties_from_list(params['config'])
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py
index fd37533e4..511fb024a 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_af/bgp_af.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -64,6 +63,10 @@ class Bgp_afFacts(object):
'network': ['network-config', 'network'],
'dampening': ['route-flap-damping', 'config', 'enabled'],
'route_advertise_list': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:route-advertise', 'route-advertise-list'],
+ 'rd': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'route-distinguisher'],
+ 'rt_in': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'import-rts'],
+ 'rt_out': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:config', 'export-rts'],
+ 'vnis': ['l2vpn-evpn', 'openconfig-bgp-evpn-ext:vnis', 'vni']
}
af_redis_params_map = {
@@ -104,6 +107,7 @@ class Bgp_afFacts(object):
self.update_max_paths(data)
self.update_network(data)
self.update_route_advertise_list(data)
+ self.update_vnis(data)
bgp_redis_data = get_all_bgp_af_redistribute(self._module, vrf_list, self.af_redis_params_map)
self.update_redis_data(data, bgp_redis_data)
self.update_afis(data)
@@ -119,8 +123,8 @@ class Bgp_afFacts(object):
ansible_facts['ansible_network_resources'].pop('bgp_af', None)
facts = {}
if objs:
- params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)})
- facts['bgp_af'] = params['config']
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['bgp_af'] = remove_empties_from_list(params['config'])
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
@@ -241,6 +245,38 @@ class Bgp_afFacts(object):
rt_adv_lst.append(rt_adv_dict)
af['route_advertise_list'] = rt_adv_lst
+ def update_vnis(self, data):
+ for conf in data:
+ afs = conf.get('address_family', [])
+ if afs:
+ for af in afs:
+ vnis = af.get('vnis', None)
+ if vnis:
+ vnis_list = []
+ for vni in vnis:
+ vni_dict = {}
+ vni_config = vni['config']
+ vni_number = vni_config.get('vni-number', None)
+ vni_adv_gw = vni_config.get('advertise-default-gw', None)
+ vni_adv_svi = vni_config.get('advertise-svi-ip', None)
+ vni_rd = vni_config.get('route-distinguisher', None)
+ vni_rt_in = vni_config.get('import-rts', [])
+ vni_rt_out = vni_config.get('export-rts', [])
+ if vni_number:
+ vni_dict['vni_number'] = vni_number
+ if vni_adv_gw is not None:
+ vni_dict['advertise_default_gw'] = vni_adv_gw
+ if vni_adv_svi is not None:
+ vni_dict['advertise_svi_ip'] = vni_adv_svi
+ if vni_rd:
+ vni_dict['rd'] = vni_rd
+ if vni_rt_in:
+ vni_dict['rt_in'] = vni_rt_in
+ if vni_rt_out:
+ vni_dict['rt_out'] = vni_rt_out
+ vnis_list.append(vni_dict)
+ af['vnis'] = vnis_list
+
def normalize_af_redis_params(self, af):
norm_af = list()
for e_af in af:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py
index 822db22a4..31cada350 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_as_paths/bgp_as_paths.py
@@ -11,7 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -73,8 +72,6 @@ class Bgp_as_pathsFacts(object):
else:
result['permit'] = False
as_path_list_configs.append(result)
- # with open('/root/ansible_log.log', 'a+') as fp:
- # fp.write('as_path_list: ' + str(as_path_list_configs) + '\n')
return as_path_list_configs
def populate_facts(self, connection, ansible_facts, data=None):
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py
index ffa294221..ff4827e61 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_communities/bgp_communities.py
@@ -11,7 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -67,26 +66,37 @@ class Bgp_communitiesFacts(object):
match = member_config['match-set-options']
permit_str = member_config.get('openconfig-bgp-policy-ext:action', None)
members = member_config.get("community-member", [])
- result['name'] = name
+ result['name'] = str(name)
result['match'] = match
+ result['members'] = None
+ result['permit'] = False
if permit_str and permit_str == 'PERMIT':
result['permit'] = True
- else:
- result['permit'] = False
if members:
result['type'] = 'expanded' if 'REGEX' in members[0] else 'standard'
+ if result['type'] == 'expanded':
+ members = [':'.join(i.split(':')[1:]) for i in members]
+ members.sort()
+ result['members'] = {'regex': members}
else:
- result['type'] = ''
- if result['type'] == 'expanded':
- members = [':'.join(i.split(':')[1:]) for i in members]
- result['local_as'] = True if "NO_EXPORT_SUBCONFED" in members else False
- result['no_advertise'] = True if "NO_ADVERTISE" in members else False
- result['no_export'] = True if "NO_EXPORT" in members else False
- result['no_peer'] = True if "NOPEER" in members else False
- result['members'] = {'regex': members}
+ result['type'] = 'standard'
+
+ if result['type'] == 'standard':
+ result['local_as'] = None
+ result['no_advertise'] = None
+ result['no_export'] = None
+ result['no_peer'] = None
+ for i in members:
+ if "NO_EXPORT_SUBCONFED" in i:
+ result['local_as'] = True
+ elif "NO_ADVERTISE" in i:
+ result['no_advertise'] = True
+ elif "NO_EXPORT" in i:
+ result['no_export'] = True
+ elif "NOPEER" in i:
+ result['no_peer'] = True
+
bgp_communities_configs.append(result)
- # with open('/root/ansible_log.log', 'a+') as fp:
- # fp.write('bgp_communities: ' + str(bgp_communities_configs) + '\n')
return bgp_communities_configs
def populate_facts(self, connection, ansible_facts, data=None):
@@ -129,17 +139,5 @@ class Bgp_communitiesFacts(object):
:rtype: dictionary
:returns: The generated config
"""
- config = deepcopy(spec)
- try:
- config['name'] = str(conf['name'])
- config['members'] = conf['members']
- config['match'] = conf['match']
- config['type'] = conf['type']
- config['permit'] = conf['permit']
- except TypeError:
- config['name'] = None
- config['members'] = None
- config['match'] = None
- config['type'] = None
- config['permit'] = None
- return utils.remove_empties(config)
+
+ return conf
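A small standalone sketch of how the reworked facts code classifies community members as expanded or standard; the member strings are assumed examples.

# Illustrative only: expanded lists carry a 'REGEX:' prefix that is stripped and
# the remaining expressions are sorted; standard lists keep members as None and
# the module instead derives the well-known flags (local_as, no_advertise, ...).
def classify_members(members):
    result = {'members': None, 'type': 'standard'}
    if members and 'REGEX' in members[0]:
        regex = sorted(':'.join(m.split(':')[1:]) for m in members)
        result.update(type='expanded', members={'regex': regex})
    return result

print(classify_members(['REGEX:100:.*']))          # {'members': {'regex': ['100:.*']}, 'type': 'expanded'}
print(classify_members(['NO_EXPORT', '65000:1']))  # {'members': None, 'type': 'standard'}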
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py
index b1d7c4ad0..814a25d11 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_ext_communities/bgp_ext_communities.py
@@ -11,7 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -69,34 +68,38 @@ class Bgp_ext_communitiesFacts(object):
match = member_config['match-set-options']
permit_str = member_config.get('openconfig-bgp-policy-ext:action', None)
members = member_config.get("ext-community-member", [])
- result['name'] = name
+ result['name'] = str(name)
result['match'] = match.lower()
-
+ result['members'] = dict()
+ result['type'] = 'standard'
+ result['permit'] = False
if permit_str and permit_str == 'PERMIT':
result['permit'] = True
+ if members:
+ result['type'] = 'expanded' if 'REGEX' in members[0] else 'standard'
+ if result['type'] == 'expanded':
+ members = [':'.join(i.split(':')[1:]) for i in members]
+ members_list = list(map(str, members))
+ members_list.sort()
+ result['members'] = {'regex': members_list}
else:
- result['permit'] = False
-
- result['members'] = dict()
- rt = list()
- soo = list()
- regex = list()
- for member in members:
- if member.startswith('route-target'):
- rt.append(':'.join(member.split(':')[1:]))
- elif member.startswith('route-origin'):
- soo.append(':'.join(member.split(':')[1:]))
- elif member.startswith('REGEX'):
- regex.append(':'.join(member.split(':')[1:]))
-
- result['type'] = 'standard'
- if regex and len(regex) > 0:
- result['type'] = 'expanded'
- result['members']['regex'] = regex
- if rt and len(rt) > 0:
- result['members']['route_target'] = rt
- if soo and len(soo) > 0:
- result['members']['route_origin'] = soo
+ rt = list()
+ soo = list()
+ for member in members:
+ if member.startswith('route-origin'):
+ soo.append(':'.join(member.split(':')[1:]))
+ else:
+ rt.append(':'.join(member.split(':')[1:]))
+ route_target_list = list(map(str, rt))
+ route_origin_list = list(map(str, soo))
+ route_target_list.sort()
+ route_origin_list.sort()
+
+ if route_target_list and len(route_target_list) > 0:
+ result['members']['route_target'] = route_target_list
+
+ if route_origin_list and len(route_origin_list) > 0:
+ result['members']['route_origin'] = route_origin_list
bgp_extcommunities_configs.append(result)
@@ -142,17 +145,5 @@ class Bgp_ext_communitiesFacts(object):
:rtype: dictionary
:returns: The generated config
"""
- config = deepcopy(spec)
- try:
- config['name'] = str(conf['name'])
- config['members'] = conf['members']
- config['match'] = conf['match']
- config['type'] = conf['type']
- config['permit'] = conf['permit']
- except TypeError:
- config['name'] = None
- config['members'] = None
- config['match'] = None
- config['type'] = None
- config['permit'] = None
- return utils.remove_empties(config)
+
+ return conf
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py
index 903b93de1..687420991 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors/bgp_neighbors.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -110,8 +109,8 @@ class Bgp_neighborsFacts(object):
ansible_facts['ansible_network_resources'].pop('bgp_neighbors', None)
facts = {}
if objs:
- params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)})
- facts['bgp_neighbors'] = params['config']
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['bgp_neighbors'] = remove_empties_from_list(params['config'])
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py
index 26119b61c..8f034bb2a 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/bgp_neighbors_af/bgp_neighbors_af.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -121,7 +120,6 @@ class Bgp_neighbors_afFacts(object):
ipv4_unicast = norm_nei_af.get('ipv4_unicast', None)
ipv6_unicast = norm_nei_af.get('ipv6_unicast', None)
- l2vpn_evpn = norm_nei_af.get('l2vpn_evpn', None)
if ipv4_unicast:
if 'config' in ipv4_unicast:
ip_afi = update_bgp_nbr_pg_ip_afi_dict(ipv4_unicast['config'])
@@ -142,12 +140,6 @@ class Bgp_neighbors_afFacts(object):
if prefix_limit:
norm_nei_af['prefix_limit'] = prefix_limit
norm_nei_af.pop('ipv6_unicast')
- elif l2vpn_evpn:
- if 'config' in l2vpn_evpn:
- prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(l2vpn_evpn['config'])
- if prefix_limit:
- norm_nei_af['prefix_limit'] = prefix_limit
- norm_nei_af.pop('l2vpn_evpn')
norm_neighbor_afs.append(norm_nei_af)
if norm_neighbor_afs:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tests/logging_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tests/logging_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/copp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/copp.py
new file mode 100644
index 000000000..52a01d3d1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/copp/copp.py
@@ -0,0 +1,127 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic copp fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.copp.copp import CoppArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+class CoppFacts(object):
+ """ The sonic copp fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = CoppArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+        """ Populate the facts for copp
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+
+ if not data:
+ copp_cfg = self.get_copp_config(self._module)
+ data = self.update_copp_groups(copp_cfg)
+ objs = self.render_config(self.generated_spec, data)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['copp'] = params['config']
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def update_copp_groups(self, data):
+ config_dict = {}
+ all_copp_groups = []
+ if data:
+ copp_groups = data.get('copp-groups', None)
+ if copp_groups:
+ copp_group_list = copp_groups.get('copp-group', None)
+ if copp_group_list:
+ for group in copp_group_list:
+ group_dict = {}
+ copp_name = group['name']
+ config = group['config']
+ trap_priority = config.get('trap-priority', None)
+ trap_action = config.get('trap-action', None)
+ queue = config.get('queue', None)
+ cir = config.get('cir', None)
+ cbs = config.get('cbs', None)
+
+ if copp_name:
+ group_dict['copp_name'] = copp_name
+ if trap_priority:
+ group_dict['trap_priority'] = trap_priority
+ if trap_action:
+ group_dict['trap_action'] = trap_action
+ if queue:
+ group_dict['queue'] = queue
+ if cir:
+ group_dict['cir'] = cir
+ if cbs:
+ group_dict['cbs'] = cbs
+ if group_dict:
+ all_copp_groups.append(group_dict)
+ if all_copp_groups:
+ config_dict['copp_groups'] = all_copp_groups
+
+ return config_dict
+
+ def get_copp_config(self, module):
+ copp_cfg = None
+ get_copp_path = '/data/openconfig-copp-ext:copp'
+ request = {'path': get_copp_path, 'method': 'get'}
+
+ try:
+ response = edit_config(module, to_request(module, request))
+ if 'openconfig-copp-ext:copp' in response[0][1]:
+ copp_cfg = response[0][1].get('openconfig-copp-ext:copp', None)
+ except ConnectionError as exc:
+ module.fail_json(msg=str(exc), code=exc.code)
+ return copp_cfg
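A standalone sketch of the flattening performed by update_copp_groups; the REST payload shape below is an assumption based on the code above, not a captured response.

# Illustrative only: flatten an assumed /data/openconfig-copp-ext:copp payload
# into the copp_groups facts list.
sample = {
    'copp-groups': {
        'copp-group': [
            {'name': 'copp-system-arp',
             'config': {'trap-priority': 10, 'trap-action': 'trap',
                        'queue': 10, 'cir': '6000', 'cbs': '6000'}}
        ]
    }
}

def flatten(data):
    groups = []
    for group in data.get('copp-groups', {}).get('copp-group', []):
        cfg = group.get('config', {})
        entry = {'copp_name': group['name']}
        for src, dst in (('trap-priority', 'trap_priority'),
                         ('trap-action', 'trap_action'),
                         ('queue', 'queue'), ('cir', 'cir'), ('cbs', 'cbs')):
            if cfg.get(src):
                entry[dst] = cfg[src]
        groups.append(entry)
    return {'copp_groups': groups} if groups else {}

print(flatten(sample))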
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/dhcp_relay.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/dhcp_relay.py
new file mode 100644
index 000000000..70f78dc24
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_relay/dhcp_relay.py
@@ -0,0 +1,208 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic dhcp_relay fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.dhcp_relay.dhcp_relay import Dhcp_relayArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+SELECT_VALUE_TO_BOOL = {
+ 'ENABLE': True,
+ 'DISABLE': False
+}
+
+
+class Dhcp_relayFacts(object):
+ """ The sonic dhcp_relay fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Dhcp_relayArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for dhcp_relay
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ if not data:
+ dhcp_relay_configs = self.get_dhcp_relay()
+ dhcpv6_relay_configs = self.get_dhcpv6_relay()
+
+ all_relay_configs = {}
+ for intf_name, dhcp_relay_config in dhcp_relay_configs.items():
+ all_relay_configs[intf_name] = {}
+ all_relay_configs[intf_name]['ipv4'] = dhcp_relay_config
+
+ for intf_name, dhcpv6_relay_config in dhcpv6_relay_configs.items():
+ if all_relay_configs.get(intf_name):
+ all_relay_configs[intf_name]['ipv6'] = dhcpv6_relay_config
+ else:
+ all_relay_configs[intf_name] = {}
+ all_relay_configs[intf_name]['ipv6'] = dhcpv6_relay_config
+
+ objs = []
+ for relay_config in all_relay_configs.items():
+ obj = self.render_config(self.generated_spec, relay_config)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('dhcp_relay', None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['dhcp_relay'] = utils.remove_empties({'config': params['config']})['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config['name'] = conf[0]
+
+ if conf[1].get('ipv4'):
+ ipv4_dict = conf[1]['ipv4']
+ if ipv4_dict.get('policy_action'):
+ ipv4_dict['policy_action'] = ipv4_dict['policy_action'].lower()
+
+ ipv4_dict['link_select'] = SELECT_VALUE_TO_BOOL.get(ipv4_dict['link_select'])
+ ipv4_dict['vrf_select'] = SELECT_VALUE_TO_BOOL.get(ipv4_dict['vrf_select'])
+
+ config['ipv4'] = ipv4_dict
+ else:
+ config.pop('ipv4')
+
+ if conf[1].get('ipv6'):
+ ipv6_dict = conf[1]['ipv6']
+ ipv6_dict['vrf_select'] = SELECT_VALUE_TO_BOOL.get(ipv6_dict['vrf_select'])
+
+ config['ipv6'] = ipv6_dict
+ else:
+ config.pop('ipv6')
+
+ return config
+
+ def get_dhcp_relay(self):
+ """Get all DHCP relay configurations available in chassis"""
+ dhcp_relay_interfaces_path = 'data/openconfig-relay-agent:relay-agent/dhcp'
+ method = 'GET'
+ request = [{'path': dhcp_relay_interfaces_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ dhcp_relay_interfaces = []
+ if (response[0][1].get('openconfig-relay-agent:dhcp')
+ and response[0][1]['openconfig-relay-agent:dhcp'].get('interfaces')):
+ dhcp_relay_interfaces = response[0][1]['openconfig-relay-agent:dhcp']['interfaces'].get('interface', [])
+
+ dhcp_relay_configs = {}
+ for interface in dhcp_relay_interfaces:
+ ipv4_dict = {}
+ server_addresses = []
+
+ config = interface.get('config', {})
+ for address in config.get('helper-address', []):
+ temp = {}
+ temp['address'] = address
+ server_addresses.append(temp)
+ ipv4_dict['server_addresses'] = server_addresses
+
+ ipv4_dict['max_hop_count'] = config.get('openconfig-relay-agent-ext:max-hop-count')
+ ipv4_dict['policy_action'] = config.get('openconfig-relay-agent-ext:policy-action')
+ ipv4_dict['source_interface'] = config.get('openconfig-relay-agent-ext:src-intf')
+ ipv4_dict['vrf_name'] = config.get('openconfig-relay-agent-ext:vrf')
+
+ opt_config = interface.get('agent-information-option', {}).get('config', {})
+ ipv4_dict['circuit_id'] = opt_config.get('circuit-id')
+ ipv4_dict['link_select'] = opt_config.get('openconfig-relay-agent-ext:link-select')
+ ipv4_dict['vrf_select'] = opt_config.get('openconfig-relay-agent-ext:vrf-select')
+
+ dhcp_relay_configs[interface['id']] = ipv4_dict
+
+ return dhcp_relay_configs
+
+ def get_dhcpv6_relay(self):
+ """Get all DHCPv6 relay configurations available in chassis"""
+ dhcpv6_relay_interfaces_path = 'data/openconfig-relay-agent:relay-agent/dhcpv6'
+ method = 'GET'
+ request = [{'path': dhcpv6_relay_interfaces_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ dhcpv6_relay_interfaces = []
+ if (response[0][1].get('openconfig-relay-agent:dhcpv6')
+ and response[0][1]['openconfig-relay-agent:dhcpv6'].get('interfaces')):
+ dhcpv6_relay_interfaces = response[0][1]['openconfig-relay-agent:dhcpv6']['interfaces'].get('interface', [])
+
+ dhcpv6_relay_configs = {}
+ for interface in dhcpv6_relay_interfaces:
+ ipv6_dict = {}
+ server_addresses = []
+
+ config = interface.get('config', {})
+ for address in config.get('helper-address', []):
+ temp = {}
+ temp['address'] = address
+ server_addresses.append(temp)
+ ipv6_dict['server_addresses'] = server_addresses
+
+ ipv6_dict['max_hop_count'] = config.get('openconfig-relay-agent-ext:max-hop-count')
+ ipv6_dict['source_interface'] = config.get('openconfig-relay-agent-ext:src-intf')
+ ipv6_dict['vrf_name'] = config.get('openconfig-relay-agent-ext:vrf')
+
+ opt_config = interface.get('options', {}).get('config', {})
+ ipv6_dict['vrf_select'] = opt_config.get('openconfig-relay-agent-ext:vrf-select')
+
+ dhcpv6_relay_configs[interface['id']] = ipv6_dict
+
+ return dhcpv6_relay_configs
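A short sketch of the ENABLE/DISABLE-to-boolean mapping applied by render_config above; the input values are assumed examples.

# Illustrative only: how the openconfig-relay-agent-ext select leaves become booleans.
SELECT_VALUE_TO_BOOL = {'ENABLE': True, 'DISABLE': False}

ipv4_dict = {'policy_action': 'DISCARD', 'link_select': 'ENABLE', 'vrf_select': 'DISABLE'}
ipv4_dict['policy_action'] = ipv4_dict['policy_action'].lower()
ipv4_dict['link_select'] = SELECT_VALUE_TO_BOOL.get(ipv4_dict['link_select'])
ipv4_dict['vrf_select'] = SELECT_VALUE_TO_BOOL.get(ipv4_dict['vrf_select'])
print(ipv4_dict)   # {'policy_action': 'discard', 'link_select': True, 'vrf_select': False}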
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tests/qos_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tests/qos_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/dhcp_snooping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/dhcp_snooping.py
new file mode 100644
index 000000000..c0464c8f1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/dhcp_snooping/dhcp_snooping.py
@@ -0,0 +1,213 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic dhcp_snooping fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.dhcp_snooping.dhcp_snooping import Dhcp_snoopingArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+class Dhcp_snoopingFacts(object):
+ """ The sonic dhcp_snooping fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Dhcp_snoopingArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for dhcp_snooping
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ if not data:
+ data = self.get_dhcp_snooping()
+
+ obj = self.render_config(self.generated_spec, data)
+
+ ansible_facts['ansible_network_resources'].pop('dhcp_snooping', None)
+ facts = {}
+ if obj:
+ params = utils.validate_config(self.argument_spec, {'config': obj})
+ params_cleaned = {'config': utils.remove_empties(params['config'])}
+ facts['dhcp_snooping'] = params_cleaned['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def get_dhcp_snooping(self):
+ config = {}
+
+ config['top_level'] = self.get_dhcp_snooping_top_level()
+ config['binding'] = self.get_dhcp_snooping_binding()
+
+ return config
+
+ def get_dhcp_snooping_top_level(self):
+ """Get all DHCP snooping configurations available in chassis"""
+ dhcp_snooping_path = 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ method = 'GET'
+ request = [{'path': dhcp_snooping_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ config = {}
+ if (response[0][1].get('openconfig-dhcp-snooping:dhcp-snooping')):
+ config = response[0][1].get('openconfig-dhcp-snooping:dhcp-snooping')
+
+ return config
+
+ def get_dhcp_snooping_binding(self):
+ dhcp_binding_snooping_path = 'data/openconfig-dhcp-snooping:dhcp-snooping-binding'
+ method = 'GET'
+ request = [{'path': dhcp_binding_snooping_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ config = {}
+ if (response[0][1].get('openconfig-dhcp-snooping:dhcp-snooping-binding')):
+ config = response[0][1].get('openconfig-dhcp-snooping:dhcp-snooping-binding')
+
+ return config
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+
+ v4 = {'afi': 'ipv4'}
+ v6 = {'afi': 'ipv6'}
+ config['afis'] = [v4, v6]
+
+ # Start with the top-level config from the device.
+ top_level = conf.get('top_level', {})
+
+ # Transform the "config" dict from the top-level device config.
+ deviceConfig = top_level.get('config', {})
+
+ v4_enabled = deviceConfig.get('dhcpv4-admin-enable', None)
+ if v4_enabled:
+ v4['enabled'] = True
+ else:
+ v4['enabled'] = False
+ v6_enabled = deviceConfig.get('dhcpv6-admin-enable', None)
+ if v6_enabled:
+ v6['enabled'] = True
+ else:
+ v6['enabled'] = False
+
+ v4_verify_mac = deviceConfig.get('dhcpv4-verify-mac-address', None)
+ if v4_verify_mac is False:
+ v4['verify_mac'] = False
+ else:
+ v4['verify_mac'] = True
+ v6_verify_mac = deviceConfig.get('dhcpv6-verify-mac-address', None)
+ if v6_verify_mac is False:
+ v6['verify_mac'] = False
+ else:
+ v6['verify_mac'] = True
+
+ # Transform the "state" dict from the top-level device config.
+ state = top_level.get('state', {})
+
+ v4_vlans = state.get('dhcpv4-snooping-vlan', [])
+ if len(v4_vlans) > 0:
+ v4['vlans'] = v4_vlans
+ v6_vlans = state.get('dhcpv6-snooping-vlan', [])
+ if len(v6_vlans) > 0:
+ v6['vlans'] = v6_vlans
+
+ STANDARD_ETH = "Eth"
+ PC = 'PortChannel'
+ v4_trusted_intf = state.get('dhcpv4-trusted-intf', [])
+ if len(v4_trusted_intf) > 0:
+ v4['trusted'] = []
+ for intfName in v4_trusted_intf:
+ intf = {}
+ if intfName.startswith(STANDARD_ETH) or intfName.startswith(PC):
+ intf['intf_name'] = intfName
+ else:
+ continue
+ v4['trusted'].append(intf)
+ v6_trusted_intf = state.get('dhcpv6-trusted-intf', [])
+ if len(v6_trusted_intf) > 0:
+ v6['trusted'] = []
+ for intfName in v6_trusted_intf:
+ intf = {}
+ if intfName.startswith(STANDARD_ETH) or intfName.startswith(PC):
+ intf['intf_name'] = intfName
+ else:
+ continue
+ v6['trusted'].append(intf)
+
+ # Transform the binding config from the device.
+ binding = conf.get('binding', {})
+ binding_list_container = binding.get('dhcp-snooping-binding-entry-list', {})
+ binding_list = binding_list_container.get('dhcp-snooping-binding-list', [])
+ if len(binding_list) > 0:
+ v4_entries = []
+ v6_entries = []
+ for entry in binding_list:
+ binding = {}
+ binding['mac_addr'] = entry['mac']
+ binding['ip_addr'] = entry['state']['ipaddress']
+ binding['intf_name'] = entry['state']['intf']
+ binding['vlan_id'] = entry['state']['vlan']
+ if entry['iptype'] == 'ipv4':
+ v4_entries.append(binding)
+ elif entry['iptype'] == 'ipv6':
+ v6_entries.append(binding)
+ if len(v4_entries) > 0:
+ v4['source_bindings'] = v4_entries
+ if len(v6_entries) > 0:
+ v6['source_bindings'] = v6_entries
+
+ return config
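A sketch of the binding-entry conversion done at the end of render_config; the entry below is an assumed example of a dhcp-snooping-binding-list element.

# Illustrative only: one binding entry mapped to the source_bindings facts format.
entry = {
    'mac': '00:11:22:33:44:55',
    'iptype': 'ipv4',
    'state': {'ipaddress': '192.0.2.10', 'intf': 'Ethernet4', 'vlan': 'Vlan100'},
}
binding = {
    'mac_addr': entry['mac'],
    'ip_addr': entry['state']['ipaddress'],
    'intf_name': entry['state']['intf'],
    'vlan_id': entry['state']['vlan'],
}
print(binding)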
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/facts.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/facts.py
index 75622632a..dbe597448 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/facts.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/facts.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -32,6 +32,7 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.mclag.mclag import MclagFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.prefix_lists.prefix_lists import Prefix_listsFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vlan_mapping.vlan_mapping import Vlan_mappingFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vrfs.vrfs import VrfsFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vxlans.vxlans import VxlansFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.users.users import UsersFacts
@@ -42,6 +43,21 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.radius_server.radius_server import Radius_serverFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.static_routes.static_routes import Static_routesFacts
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.ntp.ntp import NtpFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.logging.logging import LoggingFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.pki.pki import PkiFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.ip_neighbor.ip_neighbor import Ip_neighborFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.port_group.port_group import Port_groupFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.dhcp_relay.dhcp_relay import Dhcp_relayFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.dhcp_snooping.dhcp_snooping import Dhcp_snoopingFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.acl_interfaces.acl_interfaces import Acl_interfacesFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l2_acls.l2_acls import L2_aclsFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l3_acls.l3_acls import L3_aclsFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.lldp_global.lldp_global import Lldp_globalFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.mac.mac import MacFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bfd.bfd import BfdFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.copp.copp import CoppFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.route_maps.route_maps import Route_mapsFacts
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.stp.stp import StpFacts
FACT_LEGACY_SUBSETS = {}
FACT_RESOURCE_SUBSETS = dict(
@@ -59,6 +75,7 @@ FACT_RESOURCE_SUBSETS = dict(
bgp_ext_communities=Bgp_ext_communitiesFacts,
mclag=MclagFacts,
prefix_lists=Prefix_listsFacts,
+ vlan_mapping=Vlan_mappingFacts,
vrfs=VrfsFacts,
vxlans=VxlansFacts,
users=UsersFacts,
@@ -69,6 +86,21 @@ FACT_RESOURCE_SUBSETS = dict(
radius_server=Radius_serverFacts,
static_routes=Static_routesFacts,
ntp=NtpFacts,
+ logging=LoggingFacts,
+ pki=PkiFacts,
+ ip_neighbor=Ip_neighborFacts,
+ port_group=Port_groupFacts,
+ dhcp_relay=Dhcp_relayFacts,
+ dhcp_snooping=Dhcp_snoopingFacts,
+ acl_interfaces=Acl_interfacesFacts,
+ l2_acls=L2_aclsFacts,
+ l3_acls=L3_aclsFacts,
+ lldp_global=Lldp_globalFacts,
+ mac=MacFacts,
+ bfd=BfdFacts,
+ copp=CoppFacts,
+ route_maps=Route_mapsFacts,
+ stp=StpFacts
)
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/interfaces.py
index a36b5d3c0..7ce15fe1b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/interfaces/interfaces.py
@@ -1,6 +1,6 @@
#
# -*- coding: utf-8 -*-
-# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# © Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -59,6 +58,7 @@ class InterfacesFacts(object):
if "openconfig-interfaces:interfaces" in response[0][1]:
all_interfaces = response[0][1].get("openconfig-interfaces:interfaces", {})
+
return all_interfaces['interface']
def populate_facts(self, connection, ansible_facts, data=None):
@@ -94,8 +94,8 @@ class InterfacesFacts(object):
if objs:
facts['interfaces'] = []
params = utils.validate_config(self.argument_spec, {'config': objs})
- if params:
- facts['interfaces'].extend(params['config'])
+ for cfg in params['config']:
+ facts['interfaces'].append(utils.remove_empties(cfg))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
@@ -115,7 +115,7 @@ class InterfacesFacts(object):
def transform_config(self, conf):
exist_cfg = conf['config']
- trans_cfg = None
+ trans_cfg = dict()
is_loop_back = False
name = conf['name']
@@ -125,16 +125,29 @@ class InterfacesFacts(object):
if pos > 0:
name = name[0:pos]
- if not (is_loop_back and self.is_loop_back_already_esist(name)) and (name != "eth0"):
- trans_cfg = dict()
+ if not (is_loop_back and self.is_loop_back_already_exist(name)) and (name != "eth0") and (name != "Management0"):
trans_cfg['name'] = name
if is_loop_back:
self.update_loop_backs(name)
else:
trans_cfg['enabled'] = exist_cfg['enabled'] if exist_cfg.get('enabled') is not None else True
- trans_cfg['description'] = exist_cfg['description'] if exist_cfg.get('description') else ""
+ trans_cfg['description'] = exist_cfg.get('description')
trans_cfg['mtu'] = exist_cfg['mtu'] if exist_cfg.get('mtu') else 9100
+ if name.startswith('Eth') and 'openconfig-if-ethernet:ethernet' in conf:
+ if conf['openconfig-if-ethernet:ethernet'].get('config', None):
+ eth_conf = conf['openconfig-if-ethernet:ethernet']['config']
+ if 'auto-negotiate' in eth_conf:
+ trans_cfg['auto_negotiate'] = eth_conf['auto-negotiate']
+ trans_cfg['speed'] = eth_conf['port-speed'].split(':', 1)[-1]
+ if 'openconfig-if-ethernet-ext2:advertised-speed' in eth_conf:
+ adv_speed_str = eth_conf['openconfig-if-ethernet-ext2:advertised-speed']
+ if adv_speed_str != '':
+ trans_cfg['advertised_speed'] = adv_speed_str.split(",")
+ trans_cfg['advertised_speed'].sort()
+ if 'openconfig-if-ethernet-ext2:port-fec' in eth_conf:
+ trans_cfg['fec'] = eth_conf['openconfig-if-ethernet-ext2:port-fec'].split(':', 1)[-1]
+
return trans_cfg
def reset_loop_backs(self):
@@ -143,5 +156,5 @@ class InterfacesFacts(object):
def update_loop_backs(self, loop_back):
self.loop_backs += "{0},".format(loop_back)
- def is_loop_back_already_esist(self, loop_back):
+ def is_loop_back_already_exist(self, loop_back):
return (",{0},".format(loop_back) in self.loop_backs)
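A standalone sketch of the Ethernet attribute parsing newly added to transform_config; the eth_conf values are assumed examples of the openconfig-if-ethernet config leaves.

# Illustrative only: derive speed, advertised_speed, auto_negotiate and fec facts.
eth_conf = {
    'auto-negotiate': True,
    'port-speed': 'openconfig-if-ethernet:SPEED_100GB',
    'openconfig-if-ethernet-ext2:advertised-speed': '40000,100000',
    'openconfig-if-ethernet-ext2:port-fec': 'openconfig-if-ethernet-ext2:FEC_RS',
}

trans_cfg = {}
if 'auto-negotiate' in eth_conf:
    trans_cfg['auto_negotiate'] = eth_conf['auto-negotiate']
trans_cfg['speed'] = eth_conf['port-speed'].split(':', 1)[-1]
adv_speed_str = eth_conf.get('openconfig-if-ethernet-ext2:advertised-speed', '')
if adv_speed_str != '':
    trans_cfg['advertised_speed'] = sorted(adv_speed_str.split(','))
if 'openconfig-if-ethernet-ext2:port-fec' in eth_conf:
    trans_cfg['fec'] = eth_conf['openconfig-if-ethernet-ext2:port-fec'].split(':', 1)[-1]
print(trans_cfg)
# {'auto_negotiate': True, 'speed': 'SPEED_100GB',
#  'advertised_speed': ['100000', '40000'], 'fec': 'FEC_RS'}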
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ip_neighbor/ip_neighbor.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ip_neighbor/ip_neighbor.py
new file mode 100644
index 000000000..4c077c43f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ip_neighbor/ip_neighbor.py
@@ -0,0 +1,126 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic ip_neighbor fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.ip_neighbor.ip_neighbor import Ip_neighborArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+GET = "get"
+
+
+class Ip_neighborFacts(object):
+ """ The sonic ip_neighbor fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Ip_neighborArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for ip_neighbor
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+            # Fetch the IP neighbor global configuration from the device
+ data = self.get_ip_neighbor_global()
+
+ objs = self.render_config(self.generated_spec, data)
+
+ ansible_facts['ansible_network_resources'].pop('ip_neighbor', None)
+
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['ip_neighbor'] = params['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def get_ip_neighbor_global(self):
+ """Get IP neighbor global configurations"""
+
+ config_path = "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ config_request = [{"path": config_path, "method": GET}]
+ config_response = []
+
+ ip_neigh_glb_conf = dict()
+
+ try:
+ config_response = edit_config(self._module, to_request(self._module, config_request))
+ except ConnectionError as exc:
+ if re.search("code.*404", str(exc)):
+ # 'code': 404, 'error-message': 'Resource not found'
+ return ip_neigh_glb_conf
+ else:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ config = dict()
+ if 'openconfig-neighbor:config' in config_response[0][1]:
+ config = config_response[0][1].get('openconfig-neighbor:config', {})
+
+ if "ipv4-arp-timeout" in config:
+ ip_neigh_glb_conf["ipv4_arp_timeout"] = config["ipv4-arp-timeout"]
+
+ if "ipv4-drop-neighbor-aging-time" in config:
+ ip_neigh_glb_conf["ipv4_drop_neighbor_aging_time"] = config["ipv4-drop-neighbor-aging-time"]
+
+ if "ipv6-drop-neighbor-aging-time" in config:
+ ip_neigh_glb_conf["ipv6_drop_neighbor_aging_time"] = config["ipv6-drop-neighbor-aging-time"]
+
+ if "ipv6-nd-cache-expiry" in config:
+ ip_neigh_glb_conf["ipv6_nd_cache_expiry"] = config["ipv6-nd-cache-expiry"]
+
+ if "num-local-neigh" in config:
+ ip_neigh_glb_conf["num_local_neigh"] = config["num-local-neigh"]
+
+ return ip_neigh_glb_conf
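For the leaves handled above, the renaming in get_ip_neighbor_global amounts to replacing hyphens with underscores; a minimal sketch with two assumed leaves follows.

# Illustrative only: equivalent key renaming for an assumed openconfig-neighbor:config payload.
config = {'ipv4-arp-timeout': 180, 'ipv6-nd-cache-expiry': 600}
facts = {key.replace('-', '_'): value for key, value in config.items()}
print(facts)   # {'ipv4_arp_timeout': 180, 'ipv6_nd_cache_expiry': 600}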
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/l2_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/l2_acls.py
new file mode 100644
index 000000000..5644cf876
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_acls/l2_acls.py
@@ -0,0 +1,236 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic l2_acls fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l2_acls.l2_acls import L2_aclsArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+ETHERTYPE_FORMAT = '0x{:04x}'
+
+action_payload_to_value_map = {
+ 'ACCEPT': 'permit',
+ 'DISCARD': 'discard',
+ 'DO_NOT_NAT': 'do-not-nat',
+ 'DROP': 'deny',
+ 'TRANSIT': 'transit',
+}
+ethertype_payload_to_protocol_map = {
+ '0x0800': 'ipv4',
+ '0x0806': 'arp',
+ '0x86dd': 'ipv6',
+ 'ETHERTYPE_ARP': 'arp',
+ 'ETHERTYPE_IPV4': 'ipv4',
+ 'ETHERTYPE_IPV6': 'ipv6'
+}
+ethertype_payload_to_value_map = {
+ 'ETHERTYPE_LLDP': '0x88cc',
+ 'ETHERTYPE_MPLS': '0x8847',
+ 'ETHERTYPE_ROCE': '0x8915'
+}
+pcp_value_to_traffic_map = {
+ 0: 'be',
+ 1: 'bk',
+ 2: 'ee',
+ 3: 'ca',
+ 4: 'vi',
+ 5: 'vo',
+ 6: 'ic',
+ 7: 'nc'
+}
+
+
+class L2_aclsFacts(object):
+ """ The sonic l2_acls fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = L2_aclsArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for l2_acls
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ if not data:
+ l2_acls_configs = self.get_l2_acls()
+
+ objs = []
+ for l2_acl_config in l2_acls_configs:
+ obj = self.render_config(self.generated_spec, l2_acl_config)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('l2_acls', None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['l2_acls'] = utils.remove_empties({'config': params['config']})['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config['name'] = conf['name']
+ config['remark'] = conf['remark']
+ config['rules'] = conf['rules']
+
+ for rule in config['rules']:
+ if ":" in rule['action']:
+ rule['action'] = rule['action'].split(":")[-1]
+ rule['action'] = action_payload_to_value_map[rule['action']]
+
+ rule['source'] = {}
+ rule['destination'] = {}
+ if rule.get('l2') is None:
+ rule['source']['any'] = True
+ rule['destination']['any'] = True
+ continue
+
+ l2_config = rule.pop('l2')
+ if l2_config.get('source-mac') and l2_config.get('source-mac-mask'):
+ if l2_config['source-mac-mask'].lower() == 'ff:ff:ff:ff:ff:ff':
+ rule['source']['host'] = l2_config['source-mac'].lower()
+ else:
+ rule['source']['address'] = l2_config['source-mac'].lower()
+ rule['source']['address_mask'] = l2_config['source-mac-mask'].lower()
+ elif l2_config.get('source-mac'):
+ rule['source']['host'] = l2_config['source-mac'].lower()
+ else:
+ rule['source']['any'] = True
+
+ if l2_config.get('destination-mac') and l2_config.get('destination-mac-mask'):
+ if l2_config['destination-mac-mask'].lower() == 'ff:ff:ff:ff:ff:ff':
+ rule['destination']['host'] = l2_config['destination-mac'].lower()
+ else:
+ rule['destination']['address'] = l2_config['destination-mac'].lower()
+ rule['destination']['address_mask'] = l2_config['destination-mac-mask'].lower()
+ elif l2_config.get('destination-mac'):
+ rule['destination']['host'] = l2_config['destination-mac'].lower()
+ else:
+ rule['destination']['any'] = True
+
+ if l2_config.get('ethertype'):
+ ethertype = l2_config['ethertype']
+ rule['ethertype'] = {}
+ if isinstance(ethertype, str):
+ ethertype = ethertype.split(':')[-1]
+ if ethertype in ethertype_payload_to_protocol_map:
+ rule['ethertype'][ethertype_payload_to_protocol_map[ethertype]] = True
+ else:
+ rule['ethertype']['value'] = ethertype_payload_to_value_map[ethertype]
+ else:
+ ethertype = ETHERTYPE_FORMAT.format(ethertype)
+ if ethertype in ethertype_payload_to_protocol_map:
+ rule['ethertype'][ethertype_payload_to_protocol_map[ethertype]] = True
+ else:
+ rule['ethertype']['value'] = ethertype
+
+ if l2_config.get('openconfig-acl-ext:vlanid'):
+ rule['vlan_id'] = l2_config['openconfig-acl-ext:vlanid']
+ if l2_config.get('openconfig-acl-ext:vlan-tag-format') == 'openconfig-acl-ext:MULTI_TAGGED':
+ rule['vlan_tag_format'] = {'multi_tagged': True}
+
+ if l2_config.get('openconfig-acl-ext:dei') is not None:
+ rule['dei'] = l2_config['openconfig-acl-ext:dei']
+
+ if l2_config.get('openconfig-acl-ext:pcp') is not None:
+ rule['pcp'] = {}
+ if l2_config.get('openconfig-acl-ext:pcp-mask') is not None:
+ rule['pcp']['value'] = l2_config['openconfig-acl-ext:pcp']
+ rule['pcp']['mask'] = l2_config['openconfig-acl-ext:pcp-mask']
+ else:
+ rule['pcp']['traffic_type'] = pcp_value_to_traffic_map[l2_config['openconfig-acl-ext:pcp']]
+
+ return config
+
+ def get_l2_acls(self):
+ """Get all l2 acl configurations available in chassis"""
+ acls_path = 'data/openconfig-acl:acl/acl-sets'
+ method = 'GET'
+ request = [{'path': acls_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ acls = []
+ if response[0][1].get('openconfig-acl:acl-sets'):
+ acls = response[0][1]['openconfig-acl:acl-sets'].get('acl-set', [])
+
+ l2_acls_configs = []
+ for acl in acls:
+ acl_config = {}
+ acl_rules = []
+
+ config = acl['config']
+ if config.get('type') not in ('ACL_L2', 'openconfig-acl:ACL_L2'):
+ continue
+
+ acl_config['name'] = config['name']
+ acl_config['remark'] = config.get('description')
+ acl_config['rules'] = acl_rules
+
+ acl_entries = acl.get('acl-entries', {}).get('acl-entry', [])
+ for acl_entry in acl_entries:
+ acl_rule = {}
+
+ acl_entry_config = acl_entry['config']
+ acl_rule['sequence_num'] = acl_entry_config['sequence-id']
+ acl_rule['remark'] = acl_entry_config.get('description')
+
+ acl_rule['action'] = acl_entry['actions']['config']['forwarding-action']
+ acl_rule['l2'] = acl_entry.get('l2', {}).get('config', {})
+
+ acl_rules.append(acl_rule)
+
+ l2_acls_configs.append(acl_config)
+
+ return l2_acls_configs
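A standalone sketch of the ethertype normalization used in render_config, reusing the two lookup tables defined above; the sample inputs are assumed.

# Illustrative only: map an openconfig l2 ethertype value to the argspec form.
ETHERTYPE_FORMAT = '0x{:04x}'
ethertype_payload_to_protocol_map = {
    '0x0800': 'ipv4', '0x0806': 'arp', '0x86dd': 'ipv6',
    'ETHERTYPE_ARP': 'arp', 'ETHERTYPE_IPV4': 'ipv4', 'ETHERTYPE_IPV6': 'ipv6'
}
ethertype_payload_to_value_map = {
    'ETHERTYPE_LLDP': '0x88cc', 'ETHERTYPE_MPLS': '0x8847', 'ETHERTYPE_ROCE': '0x8915'
}

def ethertype_to_spec(ethertype):
    spec = {}
    if isinstance(ethertype, str):
        ethertype = ethertype.split(':')[-1]
        if ethertype in ethertype_payload_to_protocol_map:
            spec[ethertype_payload_to_protocol_map[ethertype]] = True
        else:
            spec['value'] = ethertype_payload_to_value_map[ethertype]
    else:
        value = ETHERTYPE_FORMAT.format(ethertype)
        if value in ethertype_payload_to_protocol_map:
            spec[ethertype_payload_to_protocol_map[value]] = True
        else:
            spec['value'] = value
    return spec

print(ethertype_to_spec('openconfig-packet-match-types:ETHERTYPE_LLDP'))  # {'value': '0x88cc'}
print(ethertype_to_spec(0x0806))                                          # {'arp': True}
print(ethertype_to_spec(0x9100))                                          # {'value': '0x9100'}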
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py
index 07d7f97dd..78d5b002e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l2_interfaces/l2_interfaces.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -47,8 +46,8 @@ class L2_interfacesFacts(object):
self.generated_spec = utils.generate_dict(facts_argument_spec)
- def vlan_range_to_list(self, in_range):
- range_bounds = in_range.split('-')
+ def vlan_range_to_list(self, in_range, range_str):
+ range_bounds = in_range.split(range_str)
range_bottom = int(range_bounds[0])
range_top = int(range_bounds[1]) + 1
vlan_list = list(range(range_bottom, range_top))
@@ -79,15 +78,22 @@ class L2_interfacesFacts(object):
new_det['trunk'] = {}
new_det['trunk']['allowed_vlans'] = []
- # Save trunk vlans as a list of single vlan dicts: Convert
- # any ranges to lists of individual vlan dicts and merge
- # each resulting "range list" onto the main list for the
- # interface.
+ # Save trunk vlans and vlan ranges as a list of single vlan dicts:
+ # Convert single vlan values to strings and convert any ranges
+ # to the argspec range format. (This block assumes that any string
+ # value received is a range, using either ".." or "-" as a
+ # separator between the boundaries of the range. It also assumes
+ # that any non-string value received is an integer specifying a
+ # single vlan.)
for vlan in open_cfg_vlan['config'].get('trunk-vlans'):
- if isinstance(vlan, str) and '-' in vlan:
- new_det['trunk']['allowed_vlans'].extend(self.vlan_range_to_list(vlan))
+ vlan_argspec = ''
+ if isinstance(vlan, str):
+ vlan_argspec = vlan.replace('"', '')
+ if '..' in vlan_argspec:
+ vlan_argspec = vlan_argspec.replace('..', '-')
else:
- new_det['trunk']['allowed_vlans'].append({'vlan': vlan})
+ vlan_argspec = str(vlan)
+ new_det['trunk']['allowed_vlans'].append({'vlan': vlan_argspec})
l2_interfaces.append(new_det)
return l2_interfaces
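A minimal sketch of the new trunk-vlan normalization shown above; the sample values are assumed (integers for single VLANs, quoted ".." or "-" strings for ranges).

# Illustrative only: convert trunk-vlans values to the argspec format.
def to_argspec_vlan(vlan):
    if isinstance(vlan, str):
        return {'vlan': vlan.replace('"', '').replace('..', '-')}
    return {'vlan': str(vlan)}

print([to_argspec_vlan(v) for v in [10, '"20..30"', '40-50']])
# [{'vlan': '10'}, {'vlan': '20-30'}, {'vlan': '40-50'}]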
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/l3_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/l3_acls.py
new file mode 100644
index 000000000..799064c9b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_acls/l3_acls.py
@@ -0,0 +1,322 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic l3_acls fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible.module_utils.connection import ConnectionError
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l3_acls.l3_acls import L3_aclsArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+IPV4_HOST_MASK = '/32'
+IPV6_HOST_MASK = '/128'
+L4_PORT_START = 0
+L4_PORT_END = 65535
+
+action_payload_to_value_map = {
+ 'ACCEPT': 'permit',
+ 'DISCARD': 'discard',
+ 'DO_NOT_NAT': 'do-not-nat',
+ 'DROP': 'deny',
+ 'TRANSIT': 'transit',
+}
+protocol_payload_to_value_map = {
+ 'IP_ICMP': 'icmp',
+ 'IP_IGMP': 2,
+ 'IP_TCP': 'tcp',
+ 'IP_UDP': 'udp',
+ 'IP_RSVP': 46,
+ 'IP_GRE': 47,
+ 'IP_AUTH': 51,
+ 'IP_PIM': 103,
+ 'IP_L2TP': 115
+}
+protocol_number_to_name_map = {
+ 1: 'icmp',
+ 6: 'tcp',
+ 17: 'udp',
+ 58: 'icmpv6'
+}
+dscp_value_to_name_map = {
+ 0: 'default',
+ 8: 'cs1',
+ 16: 'cs2',
+ 24: 'cs3',
+ 32: 'cs4',
+ 40: 'cs5',
+ 48: 'cs6',
+ 56: 'cs7',
+ 10: 'af11',
+ 12: 'af12',
+ 14: 'af13',
+ 18: 'af21',
+ 20: 'af22',
+ 22: 'af23',
+ 26: 'af31',
+ 28: 'af32',
+ 30: 'af33',
+ 34: 'af41',
+ 36: 'af42',
+ 38: 'af43',
+ 46: 'ef',
+ 44: 'voice_admit'
+}
+
+
+class L3_aclsFacts(object):
+ """ The sonic l3_acls fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = L3_aclsArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for l3_acls
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ if not data:
+ l3_acls_configs = self.get_l3_acls()
+
+ objs = []
+ for l3_acl_config in l3_acls_configs:
+ obj = self.render_config(self.generated_spec, l3_acl_config)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('l3_acls', None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['l3_acls'] = utils.remove_empties({'config': params['config']})['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config['address_family'] = conf['address_family']
+ config['acls'] = conf['acls']
+ is_ipv4 = bool(config['address_family'] == 'ipv4')
+
+ for acl in config['acls']:
+ for rule in acl['rules']:
+ rule['source'] = {}
+ rule['destination'] = {}
+ rule['protocol'] = {}
+ rule['protocol_options'] = {}
+
+ if ":" in rule['action']:
+ rule['action'] = rule['action'].split(":")[-1]
+ rule['action'] = action_payload_to_value_map[rule['action']]
+
+ l2_config = rule.pop('l2', None)
+ l3_config = rule.pop('l3', None)
+ l4_config = rule.pop('l4', None)
+ if l3_config is None:
+ if is_ipv4:
+ rule['protocol']['name'] = 'ip'
+ else:
+ rule['protocol']['name'] = 'ipv6'
+
+ rule['source']['any'] = True
+ rule['destination']['any'] = True
+ continue
+
+ protocol = l3_config.get('protocol')
+ if protocol is not None:
+ if isinstance(protocol, str):
+ protocol = protocol.replace('openconfig-packet-match-types:', '')
+ protocol = protocol_payload_to_value_map[protocol]
+ if isinstance(protocol, str):
+ rule['protocol']['name'] = protocol
+ else:
+ rule['protocol']['number'] = protocol
+ else:
+ protocol = protocol_number_to_name_map.get(protocol, protocol)
+ if isinstance(protocol, str):
+ rule['protocol']['name'] = protocol
+ else:
+ rule['protocol']['number'] = protocol
+ else:
+ if is_ipv4:
+ rule['protocol']['name'] = 'ip'
+ else:
+ rule['protocol']['name'] = 'ipv6'
+
+ rule['source'] = self._convert_ip_addr_to_spec_fmt(l3_config.get('source-address'), is_ipv4)
+ rule['destination'] = self._convert_ip_addr_to_spec_fmt(l3_config.get('destination-address'), is_ipv4)
+ if protocol in ('tcp', 'udp'):
+ rule['source']['port_number'] = self._convert_l4_port_to_spec_fmt(l4_config.get('source-port'))
+ rule['destination']['port_number'] = self._convert_l4_port_to_spec_fmt(l4_config.get('destination-port'))
+
+ if protocol in ('icmp', 'icmpv6'):
+ rule['protocol_options'][protocol] = {
+ 'code': l4_config.get('openconfig-acl-ext:icmp-code'),
+ 'type': l4_config.get('openconfig-acl-ext:icmp-type')
+ }
+ elif protocol == 'tcp':
+ rule['protocol_options']['tcp'] = {}
+ if l4_config.get('openconfig-acl-ext:tcp-session-established'):
+ rule['protocol_options']['tcp']['established'] = True
+ else:
+ for flag in l4_config.get('tcp-flags', []):
+ flag = flag.split(':')[-1].replace('TCP_', '').lower()
+ rule['protocol_options']['tcp'][flag] = True
+
+ dscp = l3_config.get('dscp')
+ if dscp in dscp_value_to_name_map:
+ rule['dscp'] = {dscp_value_to_name_map[dscp]: True}
+ else:
+ rule['dscp'] = {'value': dscp}
+
+ rule['vlan_id'] = l2_config.get('openconfig-acl-ext:vlanid')
+
+ return config
+
+ def get_l3_acls(self):
+ """Get all l3 acl configurations available in chassis"""
+ acls_path = 'data/openconfig-acl:acl/acl-sets'
+ method = 'GET'
+ request = [{'path': acls_path, 'method': method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ acls = []
+ if response[0][1].get('openconfig-acl:acl-sets'):
+ acls = response[0][1]['openconfig-acl:acl-sets'].get('acl-set', [])
+
+ ipv4_acls_configs = []
+ ipv6_acls_configs = []
+ for acl in acls:
+ is_ipv4 = False
+ acl_config = {}
+ acl_rules = []
+
+ config = acl['config']
+ if config.get('type') in ('ACL_IPV4', 'openconfig-acl:ACL_IPV4'):
+ is_ipv4 = True
+ elif config.get('type') in ('ACL_IPV6', 'openconfig-acl:ACL_IPV6'):
+ is_ipv4 = False
+ else:
+ continue
+
+ acl_config['name'] = config['name']
+ acl_config['remark'] = config.get('description')
+ acl_config['rules'] = acl_rules
+
+ acl_entries = acl.get('acl-entries', {}).get('acl-entry', [])
+ for acl_entry in acl_entries:
+ acl_rule = {}
+
+ acl_entry_config = acl_entry['config']
+ acl_rule['sequence_num'] = acl_entry_config['sequence-id']
+ acl_rule['remark'] = acl_entry_config.get('description')
+
+ acl_rule['action'] = acl_entry['actions']['config']['forwarding-action']
+ acl_rule['l2'] = acl_entry.get('l2', {}).get('config', {})
+ if is_ipv4:
+ acl_rule['l3'] = acl_entry.get('ipv4', {}).get('config', {})
+ else:
+ acl_rule['l3'] = acl_entry.get('ipv6', {}).get('config', {})
+ acl_rule['l4'] = acl_entry.get('transport', {}).get('config', {})
+
+ acl_rules.append(acl_rule)
+
+ if is_ipv4:
+ ipv4_acls_configs.append(acl_config)
+ else:
+ ipv6_acls_configs.append(acl_config)
+
+ l3_acls_configs = []
+ if ipv4_acls_configs:
+ l3_acls_configs.append({'address_family': 'ipv4', 'acls': ipv4_acls_configs})
+ if ipv6_acls_configs:
+ l3_acls_configs.append({'address_family': 'ipv6', 'acls': ipv6_acls_configs})
+
+ return l3_acls_configs
+
+ @staticmethod
+ def _convert_ip_addr_to_spec_fmt(ip_addr, is_ipv4=False):
+ spec_fmt = {}
+ if ip_addr is not None:
+ ip_addr = ip_addr.lower()
+ if is_ipv4:
+ host_mask = IPV4_HOST_MASK
+ else:
+ host_mask = IPV6_HOST_MASK
+
+ if ip_addr.endswith(host_mask):
+ spec_fmt['host'] = ip_addr.replace(host_mask, '')
+ else:
+ spec_fmt['prefix'] = ip_addr
+ else:
+ spec_fmt['any'] = True
+
+ return spec_fmt
+
+ @staticmethod
+ def _convert_l4_port_to_spec_fmt(l4_port):
+ spec_fmt = {}
+ if l4_port is not None:
+ if isinstance(l4_port, str) and '..' in l4_port:
+ l4_port = [int(i) for i in l4_port.split('..')]
+ if l4_port[0] == L4_PORT_START:
+ spec_fmt['lt'] = l4_port[1]
+ elif l4_port[1] == L4_PORT_END:
+ spec_fmt['gt'] = l4_port[0]
+ else:
+ spec_fmt['range'] = {
+ 'begin': l4_port[0],
+ 'end': l4_port[1]
+ }
+ else:
+ spec_fmt['eq'] = int(l4_port)
+
+ return spec_fmt
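
For reference, the L4 port conversion above is the piece most likely to need a worked example. The sketch below is a standalone reproduction of the mapping performed by _convert_l4_port_to_spec_fmt (single ports and OpenConfig "begin..end" strings into the argspec's eq/lt/gt/range keys); the sample values are illustrative and not captured from a device.

# Standalone sketch of the port-conversion logic shown above: OpenConfig
# "transport" ports arrive either as a single value or as a "begin..end"
# string, and the argspec expects eq/lt/gt/range keys instead.
L4_PORT_START = 0
L4_PORT_END = 65535


def convert_l4_port(l4_port):
    """Map an OpenConfig L4 port value to the l3_acls argspec format."""
    spec_fmt = {}
    if isinstance(l4_port, str) and '..' in l4_port:
        begin, end = (int(i) for i in l4_port.split('..'))
        if begin == L4_PORT_START:
            spec_fmt['lt'] = end              # 0..N     -> "less than N"
        elif end == L4_PORT_END:
            spec_fmt['gt'] = begin            # N..65535 -> "greater than N"
        else:
            spec_fmt['range'] = {'begin': begin, 'end': end}
    else:
        spec_fmt['eq'] = int(l4_port)         # single port -> "equal to"
    return spec_fmt


if __name__ == '__main__':
    for value in ('0..1023', '1024..65535', '3000..4000', 22):
        print(value, '->', convert_l4_port(value))
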
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py
index 69a6dcd44..e91a2b033 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/l3_interfaces/l3_interfaces.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py
index 728196813..d83659d92 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lag_interfaces/lag_interfaces.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tests/system_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tests/system_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/lldp_global.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/lldp_global.py
new file mode 100644
index 000000000..75f3dad51
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/lldp_global/lldp_global.py
@@ -0,0 +1,114 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic lldp_global fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.lldp_global.lldp_global import Lldp_globalArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+GET = "get"
+
+
+class Lldp_globalFacts(object):
+ """ The sonic lldp_global fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Lldp_globalArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for lldp_global
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ obj = self.get_all_lldp_configs()
+
+ ansible_facts['ansible_network_resources'].pop('lldp_global', None)
+ facts = {}
+ if obj:
+ params = utils.validate_config(self.argument_spec, {'config': obj})
+ facts['lldp_global'] = utils.remove_empties(params['config'])
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def get_all_lldp_configs(self):
+ """Get all the lldp_global configured in the device"""
+ request = [{"path": "data/openconfig-lldp:lldp/config", "method": GET}]
+ lldp_global_data = {}
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+ lldp_global_data['tlv_select'] = {}
+ lldp_global_data['tlv_select']['management_address'] = True
+ lldp_global_data['tlv_select']['system_capabilities'] = True
+ lldp_global_data['enable'] = True
+ if 'openconfig-lldp:config' in response[0][1]:
+ raw_lldp_global_data = response[0][1]['openconfig-lldp:config']
+ if 'enabled' in raw_lldp_global_data:
+ lldp_global_data['enable'] = raw_lldp_global_data['enabled']
+ if 'hello-timer' in raw_lldp_global_data:
+ lldp_global_data['hello_time'] = raw_lldp_global_data['hello-timer']
+ if 'openconfig-lldp-ext:mode' in raw_lldp_global_data:
+ lldp_global_data['mode'] = raw_lldp_global_data['openconfig-lldp-ext:mode'].lower()
+ if 'system-description' in raw_lldp_global_data:
+ lldp_global_data['system_description'] = raw_lldp_global_data['system-description']
+ if 'system-name' in raw_lldp_global_data:
+ lldp_global_data['system_name'] = raw_lldp_global_data['system-name']
+ if 'openconfig-lldp-ext:multiplier' in raw_lldp_global_data:
+ lldp_global_data['multiplier'] = raw_lldp_global_data['openconfig-lldp-ext:multiplier']
+ if 'suppress-tlv-advertisement' in raw_lldp_global_data:
+ for tlv_select in raw_lldp_global_data['suppress-tlv-advertisement']:
+ tlv_select = tlv_select.replace('openconfig-lldp-types:', '').lower()
+ if tlv_select in ('management_address', 'system_capabilities'):
+ lldp_global_data['tlv_select'][tlv_select] = False
+ return lldp_global_data
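
A minimal standalone sketch of the TLV handling in get_all_lldp_configs() above: advertised TLVs default to True and are flipped to False only when they appear in the device's suppress-tlv-advertisement list. The sample payload below is illustrative.

def parse_tlv_select(raw_config):
    """Return the tlv_select facts for an openconfig-lldp:config payload."""
    tlv_select = {'management_address': True, 'system_capabilities': True}
    for tlv in raw_config.get('suppress-tlv-advertisement', []):
        tlv = tlv.replace('openconfig-lldp-types:', '').lower()
        if tlv in tlv_select:
            tlv_select[tlv] = False
    return tlv_select


sample = {'suppress-tlv-advertisement': ['openconfig-lldp-types:MANAGEMENT_ADDRESS']}
print(parse_tlv_select(sample))
# {'management_address': False, 'system_capabilities': True}
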
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/logging/logging.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/logging/logging.py
new file mode 100644
index 000000000..c3c05035e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/logging/logging.py
@@ -0,0 +1,128 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic logging fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.logging.logging import LoggingArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+GET = "get"
+
+
+class LoggingFacts(object):
+ """ The sonic logging fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = LoggingArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for logging
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ # typically data is populated from the current device configuration
+ # data = connection.get('show running-config | section ^interface')
+ # using mock data instead
+ data = self.get_logging_configuration()
+
+ obj = self.render_config(self.generated_spec, data)
+
+ ansible_facts['ansible_network_resources'].pop('logging', None)
+ facts = {}
+ if obj:
+ params = utils.validate_config(self.argument_spec, {'config': obj})
+ facts['logging'] = params['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def get_logging_configuration(self):
+ """Get all logging configuration"""
+
+ config_request = [{"path": "data/openconfig-system:system/logging", "method": GET}]
+ config_response = []
+ try:
+ config_response = edit_config(self._module, to_request(self._module, config_request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ logging_response = dict()
+ if 'openconfig-system:logging' in config_response[0][1]:
+ logging_response = config_response[0][1].get('openconfig-system:logging', {})
+
+ remote_servers = []
+ if 'remote-servers' in logging_response:
+ remote_servers = logging_response['remote-servers'].get('remote-server', [])
+
+ logging_config = dict()
+
+ logging_servers = []
+ for remote_server in remote_servers:
+ rs_config = remote_server.get('config', {})
+ logging_server = {}
+ logging_server['host'] = rs_config['host']
+ if 'openconfig-system-ext:message-type' in rs_config:
+ logging_server['message_type'] = rs_config['openconfig-system-ext:message-type']
+ if 'openconfig-system-ext:source-interface' in rs_config:
+ logging_server['source_interface'] = rs_config['openconfig-system-ext:source-interface']
+ if logging_server['source_interface'].startswith("Management") or \
+ logging_server['source_interface'].startswith("Mgmt"):
+ logging_server['source_interface'] = 'eth0'
+ if 'openconfig-system-ext:vrf-name' in rs_config:
+ logging_server['vrf'] = rs_config['openconfig-system-ext:vrf-name']
+ if 'remote-port' in rs_config:
+ logging_server['remote_port'] = rs_config['remote-port']
+
+ logging_servers.append(logging_server)
+
+ logging_config['remote_servers'] = logging_servers
+
+ return logging_config
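
The remote-server parsing above can be illustrated with a small standalone sketch. It mirrors the keys handled by get_logging_configuration(), including the rewrite of management interface names to 'eth0'; the sample payload is illustrative.

def parse_remote_server(rs_config):
    """Map one openconfig-system remote-server config block to a facts entry."""
    server = {'host': rs_config['host']}
    if 'openconfig-system-ext:message-type' in rs_config:
        server['message_type'] = rs_config['openconfig-system-ext:message-type']
    if 'openconfig-system-ext:source-interface' in rs_config:
        source = rs_config['openconfig-system-ext:source-interface']
        # The management port is reported by its front-panel name but exposed
        # in the facts under its hardware name.
        if source.startswith(('Management', 'Mgmt')):
            source = 'eth0'
        server['source_interface'] = source
    if 'openconfig-system-ext:vrf-name' in rs_config:
        server['vrf'] = rs_config['openconfig-system-ext:vrf-name']
    if 'remote-port' in rs_config:
        server['remote_port'] = rs_config['remote-port']
    return server


sample = {'host': '10.11.0.2', 'remote-port': 514,
          'openconfig-system-ext:source-interface': 'Management0'}
print(parse_remote_server(sample))
# {'host': '10.11.0.2', 'source_interface': 'eth0', 'remote_port': 514}
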
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/mac.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/mac.py
new file mode 100644
index 000000000..26f705040
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mac/mac.py
@@ -0,0 +1,151 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic mac_address fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ remove_empties_from_list
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.mac.mac import MacArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils import (
+ get_all_vrfs,
+)
+
+NETWORK_INSTANCE_PATH = '/data/openconfig-network-instance:network-instances/network-instance'
+
+
+class MacFacts(object):
+ """ The sonic mac fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = MacArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for mac_address
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+ if connection: # just for linting purposes, remove
+ pass
+
+ if not data:
+ data = self.update_mac(self._module)
+ # operate on a collection of resource x
+ for conf in data:
+ if conf:
+ obj = self.render_config(conf)
+ # split the config into instances of the resource
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('mac', None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': remove_empties_from_list(objs)})
+ facts['mac'] = params['config']
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def update_mac(self, module):
+ mac_address_cfg_list = []
+ vrfs = get_all_vrfs(module)
+ for vrf_name in vrfs:
+ aging_time = self.get_config(vrf_name, module, 'fdb/config/mac-aging-time', 'openconfig-network-instance:mac-aging-time')
+ dampening_cfg_dict = self.get_config(vrf_name, module, 'openconfig-mac-dampening:mac-dampening/config', 'openconfig-mac-dampening:config')
+ entries_dict = self.get_config(vrf_name, module, 'fdb/mac-table/entries', 'openconfig-network-instance:entries')
+ cfg_dict = {}
+ mac_dict = {}
+ mac_table_entries = []
+ dampening_interval = dampening_cfg_dict.get('interval', None)
+ dampening_threshold = dampening_cfg_dict.get('threshold', None)
+
+ if entries_dict:
+ entry_list = entries_dict.get('entry', [])
+ for entry in entry_list:
+ entry_dict = {}
+ mac_address = entry.get('mac-address', None)
+ vlan_id = entry.get('vlan', None)
+ interface = entry.get('interface', {}).get('interface-ref', {}).get('config', {}).get('interface', None)
+ if mac_address:
+ entry_dict['mac_address'] = mac_address
+ if vlan_id:
+ entry_dict['vlan_id'] = vlan_id
+ if interface:
+ entry_dict['interface'] = interface
+ if entry_dict:
+ mac_table_entries.append(entry_dict)
+
+ if aging_time:
+ mac_dict['aging_time'] = aging_time
+ if dampening_interval:
+ mac_dict['dampening_interval'] = dampening_interval
+ if dampening_threshold:
+ mac_dict['dampening_threshold'] = dampening_threshold
+ if mac_table_entries:
+ mac_dict['mac_table_entries'] = mac_table_entries
+ if mac_dict:
+ cfg_dict['mac'] = mac_dict
+ cfg_dict['vrf_name'] = vrf_name
+ mac_address_cfg_list.append(cfg_dict)
+
+ return mac_address_cfg_list
+
+ def get_config(self, vrf_name, module, path, name):
+ cfg_dict = {}
+ get_path = '%s=%s/%s' % (NETWORK_INSTANCE_PATH, vrf_name, path)
+ request = {'path': get_path, 'method': 'get'}
+
+ try:
+ response = edit_config(module, to_request(module, request))
+ if name in response[0][1]:
+ cfg_dict = response[0][1].get(name, None)
+ except Exception as exc:
+ pass
+
+ return cfg_dict
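
A short sketch of the per-VRF path construction used by update_mac() above: each query is anchored on the network-instance list entry for the VRF and extended with a resource-specific suffix. The VRF name used below is illustrative.

# Rebuilds the three GET paths issued per VRF by update_mac()/get_config().
NETWORK_INSTANCE_PATH = '/data/openconfig-network-instance:network-instances/network-instance'


def build_mac_paths(vrf_name):
    suffixes = (
        'fdb/config/mac-aging-time',
        'openconfig-mac-dampening:mac-dampening/config',
        'fdb/mac-table/entries',
    )
    return ['%s=%s/%s' % (NETWORK_INSTANCE_PATH, vrf_name, suffix) for suffix in suffixes]


for path in build_mac_paths('default'):
    print(path)
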
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py
index 69864cdf9..9c57f6cc0 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/mclag/mclag.py
@@ -23,6 +23,9 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_ranges_in_list
+)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.mclag.mclag import MclagArgs
from ansible.module_utils.connection import ConnectionError
@@ -76,7 +79,7 @@ class MclagFacts(object):
facts = {}
if objs:
params = utils.validate_config(self.argument_spec, {'config': objs})
- facts['mclag'] = params['config']
+ facts['mclag'] = utils.remove_empties(params['config'])
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
@@ -118,6 +121,8 @@ class MclagFacts(object):
config['peer_link'] = domain_config['peer-link']
if domain_config.get('mclag-system-mac', None):
config['system_mac'] = domain_config['mclag-system-mac']
+ if domain_config.get('delay-restore', None):
+ config['delay_restore'] = domain_config['delay-restore']
if conf.get('vlan-interfaces', None) and conf['vlan-interfaces'].get('vlan-interface', None):
vlans_list = []
@@ -125,7 +130,15 @@ class MclagFacts(object):
for vlan in vlan_data:
vlans_list.append({'vlan': vlan['name']})
if vlans_list:
- config['unique_ip'] = {'vlans': vlans_list}
+ config['unique_ip'] = {'vlans': self.get_vlan_range_list(vlans_list)}
+
+ if conf.get('vlan-ifs', None) and conf['vlan-ifs'].get('vlan-if', None):
+ vlans_list = []
+ vlan_data = conf['vlan-ifs']['vlan-if']
+ for vlan in vlan_data:
+ vlans_list.append({'vlan': vlan['name']})
+ if vlans_list:
+ config['peer_gateway'] = {'vlans': self.get_vlan_range_list(vlans_list)}
if conf.get('interfaces', None) and conf['interfaces'].get('interface', None):
portchannels_list = []
@@ -136,4 +149,27 @@ class MclagFacts(object):
if portchannels_list:
config['members'] = {'portchannels': portchannels_list}
+ if conf.get('mclag-gateway-macs', None) and conf['mclag-gateway-macs'].get('mclag-gateway-mac', None):
+ gw_mac_data = conf['mclag-gateway-macs']['mclag-gateway-mac']
+ if gw_mac_data[0].get('config', None) and gw_mac_data[0]['config'].get('gateway-mac', None):
+ config['gateway_mac'] = gw_mac_data[0]['config']['gateway-mac']
+
return config
+
+ @staticmethod
+ def get_vlan_range_list(vlans_list):
+ """Returns list of VLAN ranges for given list of VLANs"""
+ vlan_range_list = []
+ vlan_id_list = []
+
+ for vlan in vlans_list:
+ match = re.match(r'Vlan(\d+)', vlan['vlan'])
+ if match:
+ vlan_id_list.append(int(match.group(1)))
+
+ if vlan_id_list:
+ vlan_id_list.sort()
+ for vlan_range in get_ranges_in_list(vlan_id_list):
+ vlan_range_list.append({'vlan': 'Vlan{0}'.format('-'.join(map(str, (vlan_range[0], vlan_range[-1])[:len(vlan_range)])))})
+
+ return vlan_range_list
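
The VLAN range collapsing added to this module can be shown with a standalone sketch. The grouping helper below stands in for the collection's get_ranges_in_list() utility, which is assumed here to return runs of consecutive VLAN IDs; only the "VlanN" / "VlanN-M" formatting is taken from get_vlan_range_list() above.

import re


def group_consecutive(ids):
    """Yield runs of consecutive integers from a sorted, non-empty list."""
    run = [ids[0]]
    for value in ids[1:]:
        if value == run[-1] + 1:
            run.append(value)
        else:
            yield run
            run = [value]
    yield run


def vlan_range_list(vlans_list):
    """Collapse [{'vlan': 'VlanN'}, ...] entries into range-formatted names."""
    vlan_ids = sorted(int(m.group(1)) for m in
                      (re.match(r'Vlan(\d+)', v['vlan']) for v in vlans_list) if m)
    if not vlan_ids:
        return []
    ranges = []
    for run in group_consecutive(vlan_ids):
        name = 'Vlan{0}'.format(run[0] if len(run) == 1 else '%s-%s' % (run[0], run[-1]))
        ranges.append({'vlan': name})
    return ranges


print(vlan_range_list([{'vlan': 'Vlan2'}, {'vlan': 'Vlan3'}, {'vlan': 'Vlan4'}, {'vlan': 'Vlan10'}]))
# [{'vlan': 'Vlan2-4'}, {'vlan': 'Vlan10'}]
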
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py
index a47142b47..d52516705 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/ntp/ntp.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -113,16 +112,16 @@ class NtpFacts(object):
ntp_config = dict()
- if 'network-instance' in ntp_global_config:
+ if 'network-instance' in ntp_global_config and ntp_global_config['network-instance']:
ntp_config['vrf'] = ntp_global_config['network-instance']
if 'enable-ntp-auth' in ntp_global_config:
ntp_config['enable_ntp_auth'] = ntp_global_config['enable-ntp-auth']
- if 'source-interface' in ntp_global_config:
+ if 'source-interface' in ntp_global_config and ntp_global_config['source-interface']:
ntp_config['source_interfaces'] = ntp_global_config['source-interface']
- if 'trusted-key' in ntp_global_config:
+ if 'trusted-key' in ntp_global_config and ntp_global_config['trusted-key']:
ntp_config['trusted_keys'] = ntp_global_config['trusted-key']
servers = []
@@ -134,8 +133,10 @@ class NtpFacts(object):
server['key_id'] = ntp_server['config']['key-id']
server['minpoll'] = ntp_server['config'].get('minpoll', None)
server['maxpoll'] = ntp_server['config'].get('maxpoll', None)
+ server['prefer'] = ntp_server['config'].get('prefer', None)
servers.append(server)
- ntp_config['servers'] = servers
+ if servers:
+ ntp_config['servers'] = servers
keys = []
for ntp_key in ntp_keys:
@@ -148,6 +149,7 @@ class NtpFacts(object):
key['key_type'] = key_type
key['key_value'] = ntp_key['config'].get('key-value', None)
keys.append(key)
- ntp_config['ntp_keys'] = keys
+ if keys:
+ ntp_config['ntp_keys'] = keys
return ntp_config
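
The hunk above tightens the facts output so that keys are populated only when the payload value is present and non-empty. A minimal sketch of that guarding pattern, with an illustrative payload:

def parse_ntp_global(ntp_global_config):
    """Populate a facts key only when the payload value is truthy."""
    ntp_config = {}
    if ntp_global_config.get('network-instance'):
        ntp_config['vrf'] = ntp_global_config['network-instance']
    if ntp_global_config.get('source-interface'):
        ntp_config['source_interfaces'] = ntp_global_config['source-interface']
    if ntp_global_config.get('trusted-key'):
        ntp_config['trusted_keys'] = ntp_global_config['trusted-key']
    return ntp_config


print(parse_ntp_global({'network-instance': 'mgmt', 'source-interface': []}))
# {'vrf': 'mgmt'} - the empty source-interface list is dropped
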
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tests/users_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tests/users_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/pki.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/pki.py
new file mode 100644
index 000000000..240c50335
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/pki/pki.py
@@ -0,0 +1,144 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell EMC
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic pki fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.pki.pki import (
+ PkiArgs,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config,
+)
+from ansible.module_utils.connection import ConnectionError
+
+pki_path = "data/openconfig-pki:pki/"
+security_profiles_path = "data/openconfig-pki:pki/security-profiles"
+
+
+class PkiFacts(object):
+ """The sonic pki fact class"""
+
+ def __init__(self, module, subspec="config", options="options"):
+ self._module = module
+ self.argument_spec = PkiArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """Populate the facts for pki
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+ resources = {}
+ if not data:
+ result = self.get_pki()
+ if len(result) > 0 and result[0]:
+ code, resources = result[0]
+
+ objs = {}
+ if (
+ resources.get("openconfig-pki:pki")
+ and resources.get("openconfig-pki:pki").get("security-profiles")
+ and resources.get("openconfig-pki:pki")
+ .get("security-profiles")
+ .get("security-profile")
+ ):
+ sps = (
+ resources.get("openconfig-pki:pki")
+ .get("security-profiles")
+ .get("security-profile")
+ )
+ sps_conf = [r.get("config") for r in sps]
+ rep_conf = []
+ for c in sps_conf:
+ conf = {}
+ for k, v in c.items():
+ conf[k.replace("-", "_")] = v
+ rep_conf.append(conf)
+ objs["security_profiles"] = rep_conf
+ if (
+ resources.get("openconfig-pki:pki")
+ and resources.get("openconfig-pki:pki").get("trust-stores")
+ and resources.get("openconfig-pki:pki")
+ .get("trust-stores")
+ .get("trust-store")
+ ):
+ tsts = (
+ resources.get("openconfig-pki:pki")
+ .get("trust-stores")
+ .get("trust-store")
+ )
+ tsts_conf = [r.get("config") for r in tsts]
+ rep_conf = []
+ for c in tsts_conf:
+ conf = {}
+ for k, v in c.items():
+ conf[k.replace("-", "_")] = v
+ rep_conf.append(conf)
+
+ objs["trust_stores"] = rep_conf
+
+ ansible_facts["ansible_network_resources"].pop("pki", None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(
+ self.argument_spec, {"config": objs}
+ )
+ facts["pki"] = params["config"]
+
+ ansible_facts["ansible_network_resources"].update(facts)
+
+ return ansible_facts
+
+ def get_pki(self):
+ request = {"path": pki_path, "method": "get"}
+ try:
+ response = edit_config(
+ self._module, to_request(self._module, request)
+ )
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ return response
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ return utils.remove_empties(config)
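
A small sketch of the key normalization applied above to security-profile and trust-store records: hyphenated OpenConfig keys are rewritten to the underscore style expected by the argspec. The sample record is illustrative.

def normalize_config_keys(config_blocks):
    """Copy each REST 'config' dict with '-' in key names replaced by '_'."""
    normalized = []
    for block in config_blocks:
        normalized.append({key.replace('-', '_'): value for key, value in block.items()})
    return normalized


sample = [{'profile-name': 'rest', 'certificate-name': 'host-cert', 'ocsp-responder-list': []}]
print(normalize_config_keys(sample))
# [{'profile_name': 'rest', 'certificate_name': 'host-cert', 'ocsp_responder_list': []}]
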
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py
index 938bd6423..08b143dd5 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_breakout/port_breakout.py
@@ -11,8 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
-import json
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -29,7 +27,6 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
from ansible.module_utils.connection import ConnectionError
GET = "get"
-POST = "post"
class Port_breakoutFacts(object):
@@ -98,8 +95,8 @@ class Port_breakoutFacts(object):
return conf
def get_all_port_breakout(self):
- """Get all the port_breakout configured in the device"""
- request = [{"path": "operations/sonic-port-breakout:breakout_capabilities", "method": POST}]
+ """Get all the port_breakout configured on the device"""
+ request = [{"path": "data/sonic-port-breakout:sonic-port-breakout/BREAKOUT_CFG/BREAKOUT_CFG_LIST", "method": GET}]
port_breakout_list = []
try:
response = edit_config(self._module, to_request(self._module, request))
@@ -107,12 +104,12 @@ class Port_breakoutFacts(object):
self._module.fail_json(msg=str(exc), code=exc.code)
raw_port_breakout_list = []
- if "sonic-port-breakout:output" in response[0][1]:
- raw_port_breakout_list = response[0][1].get("sonic-port-breakout:output", {}).get('caps', [])
+ if "sonic-port-breakout:BREAKOUT_CFG_LIST" in response[0][1]:
+ raw_port_breakout_list = response[0][1].get("sonic-port-breakout:BREAKOUT_CFG_LIST", [])
for port_breakout in raw_port_breakout_list:
name = port_breakout.get('port', None)
- mode = port_breakout.get('defmode', None)
+ mode = port_breakout.get('brkout_mode', None)
if name and mode:
if '[' in mode:
mode = mode[:mode.index('[')]
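
The new GET-based lookup above reads BREAKOUT_CFG_LIST entries and keeps only the part of brkout_mode before any bracketed alternative speed. A standalone sketch of that normalization, with illustrative values:

def parse_breakout_mode(entry):
    """Return the port name and breakout mode for one BREAKOUT_CFG_LIST entry."""
    name = entry.get('port')
    mode = entry.get('brkout_mode')
    if not (name and mode):
        return None
    # Drop the bracketed alternative speed, e.g. "1x100G[40G]" -> "1x100G".
    if '[' in mode:
        mode = mode[:mode.index('[')]
    return {'name': name, 'mode': mode}


print(parse_breakout_mode({'port': '1/1', 'brkout_mode': '1x100G[40G]'}))
# {'name': '1/1', 'mode': '1x100G'}
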
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_group/port_group.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_group/port_group.py
new file mode 100644
index 000000000..c6e4816c4
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/port_group/port_group.py
@@ -0,0 +1,116 @@
+#
+# -*- coding: utf-8 -*-
+# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic port group fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.port_group.port_group import Port_groupArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+GET = "get"
+
+
+class Port_groupFacts(object):
+ """ The sonic port group fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Port_groupArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for port groups
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ # typically data is populated from the current device configuration
+ # data = connection.get('show running-config | section port-group')
+ # using mock data instead
+ data = self.get_port_groups()
+
+ objs = []
+ for conf in data:
+ if conf:
+ obj = self.render_config(self.generated_spec, conf)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('port_group', None)
+ facts = {}
+ if objs:
+ facts['port_group'] = []
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ if params:
+ facts['port_group'].extend(params['config'])
+ ansible_facts['ansible_network_resources'].update(facts)
+
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def get_port_groups(self):
+ """Get all the port group configurations"""
+
+ pgs_request = [{"path": "data/openconfig-port-group:port-groups/port-group", "method": GET}]
+ try:
+ pgs_response = edit_config(self._module, to_request(self._module, pgs_request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ pgs_config = []
+ if "openconfig-port-group:port-group" in pgs_response[0][1]:
+ pgs_config = pgs_response[0][1].get("openconfig-port-group:port-group", [])
+
+ pgs = []
+ for pg_config in pgs_config:
+ pg = dict()
+ if 'config' in pg_config:
+ pg['id'] = pg_config['id']
+ speed_str = pg_config['config'].get('speed', None)
+ if speed_str:
+ pg['speed'] = speed_str.split(":", 1)[-1]
+ pgs.append(pg)
+
+ return pgs
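
A minimal sketch of the speed handling in get_port_groups() above: speeds arrive as namespaced identities (for example "openconfig-if-ethernet:SPEED_10GB") and only the identity name after the first colon is kept. The sample value is illustrative.

def parse_port_group(pg_config):
    """Map one openconfig-port-group entry to an {'id', 'speed'} facts dict."""
    pg = {'id': pg_config['id']}
    speed_str = pg_config.get('config', {}).get('speed')
    if speed_str:
        pg['speed'] = speed_str.split(":", 1)[-1]
    return pg


print(parse_port_group({'id': '1', 'config': {'speed': 'openconfig-if-ethernet:SPEED_10GB'}}))
# {'id': '1', 'speed': 'SPEED_10GB'}
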
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py
index 72593b225..33ab55a72 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/radius_server/radius_server.py
@@ -11,8 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
-import json
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -103,7 +101,7 @@ class Radius_serverFacts(object):
if 'auth-type' in raw_radius_global_data:
radius_server_data['auth_type'] = raw_radius_global_data['auth-type']
- if 'secret-key' in raw_radius_global_data:
+ if 'secret-key' in raw_radius_global_data and raw_radius_global_data['secret-key']:
radius_server_data['key'] = raw_radius_global_data['secret-key']
if 'timeout' in raw_radius_global_data:
radius_server_data['timeout'] = raw_radius_global_data['timeout']
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/route_maps.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/route_maps.py
new file mode 100644
index 000000000..05e6d6188
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/route_maps/route_maps.py
@@ -0,0 +1,517 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic route_maps fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.route_maps.route_maps import Route_mapsArgs
+
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import remove_empties_from_list
+
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic \
+ import to_request, edit_config
+
+
+class Route_mapsFacts(object):
+ """ The sonic route_maps fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Route_mapsArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for route_maps
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if not data:
+ # Fetch data from the current device configuration
+ # (Skip if operating on previously fetched configuration.)
+ data = self.get_all_route_maps()
+
+ # split the unparsed route map configuration list into a list
+        # of parsed route map statement "instances" (dictionary "objects").
+ route_maps = []
+ for route_map_cfg in data:
+ route_map_stmts = self.route_map_cfg_parse(route_map_cfg)
+ if route_map_stmts:
+ route_maps.extend(route_map_stmts)
+
+ ansible_facts['ansible_network_resources'].pop('route_maps', None)
+ facts = {}
+ if route_maps:
+ params = utils.validate_config(self.argument_spec,
+ {'config': route_maps})
+ params_cleaned = {'config': remove_empties_from_list(params['config'])}
+ facts['route_maps'] = params_cleaned['config']
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def get_all_route_maps(self):
+ '''Execute a REST "GET" API to fetch all of the current route map configuration
+ from the target device.'''
+
+ route_map_fetch_spec = \
+ "openconfig-routing-policy:routing-policy/policy-definitions"
+ route_map_resp_key = "openconfig-routing-policy:policy-definitions"
+ route_map_key = "policy-definition"
+ url = "data/%s" % route_map_fetch_spec
+ method = "GET"
+ request = [{"path": url, "method": method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc))
+
+ route_maps_unparsed = []
+ resp_route_map_envelope = response[0][1].get(route_map_resp_key, None)
+ if resp_route_map_envelope:
+ route_maps_unparsed = resp_route_map_envelope.get(route_map_key, None)
+ return route_maps_unparsed
+
+ def route_map_cfg_parse(self, unparsed_route_map):
+ '''Parse the raw input configuration JSON representation for the route map specified
+ by the "unparsed_route_map" input parameter. Parse the information to
+ convert it to a dictionary matching the "argspec" for the "route_maps" resource
+ module.'''
+
+ parsed_route_map_stmts = []
+
+ if not unparsed_route_map.get("config"):
+ return parsed_route_map_stmts
+ route_map_name = unparsed_route_map.get('name')
+ if not route_map_name:
+ return parsed_route_map_stmts
+ route_map_statements = unparsed_route_map.get('statements')
+ if not route_map_statements:
+ return parsed_route_map_stmts
+ route_map_stmts_list = route_map_statements.get('statement')
+ if not route_map_stmts_list:
+ return parsed_route_map_stmts
+
+ for route_map_stmt in route_map_stmts_list:
+ parsed_route_map_stmt = {}
+ parsed_seq_num = route_map_stmt.get('name')
+ if not parsed_seq_num:
+ continue
+ parsed_route_map_stmt['map_name'] = route_map_name
+ parsed_route_map_stmt['sequence_num'] = parsed_seq_num
+ self.get_route_map_stmt_set_attr(route_map_stmt, parsed_route_map_stmt)
+ self.get_route_map_stmt_match_attr(route_map_stmt, parsed_route_map_stmt)
+ self.get_route_map_call_attr(route_map_stmt, parsed_route_map_stmt)
+ parsed_route_map_stmts.append(parsed_route_map_stmt)
+
+ return parsed_route_map_stmts
+
+ def get_route_map_stmt_set_attr(self, route_map_stmt, parsed_route_map_stmt):
+ '''Parse the "set" attribute portion of the raw input configuration JSON
+ representation for the route map "statement" specified
+        by the "route_map_stmt" input parameter. Parse the information to
+ convert it to a dictionary matching the "argspec" for the "route_maps" resource
+ module.'''
+
+ stmt_actions = route_map_stmt.get('actions')
+ if not stmt_actions:
+ return
+
+ # Fetch the permit/deny action for the route map statement
+ actions_config = stmt_actions.get('config')
+ if not actions_config:
+ return
+ permit_deny_config = actions_config.get('policy-result')
+ if not permit_deny_config:
+ return
+ if permit_deny_config == "ACCEPT_ROUTE":
+ parsed_route_map_stmt['action'] = "permit"
+ elif permit_deny_config == "REJECT_ROUTE":
+ parsed_route_map_stmt['action'] = "deny"
+ else:
+ return
+
+ # Create a dict object to hold "set" attributes.
+ parsed_route_map_stmt['set'] = {}
+ parsed_route_map_stmt_set = parsed_route_map_stmt['set']
+
+ # Fetch non-required top level set attributes
+ set_metric_action = stmt_actions.get('metric-action')
+ if set_metric_action:
+ set_metric_action_cfg = set_metric_action.get('config')
+ if set_metric_action_cfg:
+ metric_action = set_metric_action_cfg.get('action')
+ if metric_action:
+ parsed_route_map_stmt_set['metric'] = {}
+ if metric_action == 'openconfig-routing-policy:METRIC_SET_VALUE':
+ value = set_metric_action_cfg.get('metric')
+ if value:
+ parsed_route_map_stmt_set['metric']['value'] = value
+ elif metric_action == 'openconfig-routing-policy:METRIC_SET_RTT':
+ parsed_route_map_stmt_set['metric']['rtt_action'] = 'set'
+ elif metric_action == 'openconfig-routing-policy:METRIC_ADD_RTT':
+ parsed_route_map_stmt_set['metric']['rtt_action'] = 'add'
+ elif metric_action == 'openconfig-routing-policy:METRIC_SUBTRACT_RTT':
+ parsed_route_map_stmt_set['metric']['rtt_action'] = 'subtract'
+
+ # Possible anomalous state due to partial deletion of metric config via REST
+ if parsed_route_map_stmt_set['metric'] == {}:
+ parsed_route_map_stmt_set.pop('metric')
+
+ # Fetch BGP policy action attributes
+ set_bgp_policy = stmt_actions.get('openconfig-bgp-policy:bgp-actions')
+ if set_bgp_policy:
+ self.get_route_map_set_bgp_policy_attr(set_bgp_policy, parsed_route_map_stmt_set)
+
+ def get_route_map_set_bgp_policy_attr(self, set_bgp_policy, parsed_route_map_stmt_set):
+ '''Parse the BGP policy "set" attribute portion of the raw input
+ configuration JSON representation within the route map "statement"
+ that is currently being parsed. The configuration section to be parsed
+ is specified by the "set_bgp_policy" input parameter. Parse the
+ information to convert it to a dictionary matching the "argspec" for
+ the "route_maps" resource module.'''
+
+ # Fetch as_path_prepend config
+ set_as_path_top = set_bgp_policy.get('set-as-path-prepend')
+ if set_as_path_top and set_as_path_top.get('config'):
+ as_path_prepend = \
+ set_as_path_top['config'].get(
+ 'openconfig-routing-policy-ext:asn-list')
+ if as_path_prepend:
+ parsed_route_map_stmt_set['as_path_prepend'] = \
+ as_path_prepend
+
+ # Fetch community list "delete" config
+ set_comm_list_delete_top = set_bgp_policy.get('set-community-delete')
+ if set_comm_list_delete_top:
+ set_comm_list_delete_config = set_comm_list_delete_top.get('config')
+ if set_comm_list_delete_config:
+ comm_list_delete = \
+ set_comm_list_delete_config.get('community-set-delete')
+ if comm_list_delete:
+ parsed_route_map_stmt_set['comm_list_delete'] = \
+ comm_list_delete
+
+ # Fetch community attributes.
+ self.get_rmap_set_community(set_bgp_policy, parsed_route_map_stmt_set)
+
+ # Fetch extended community attributes.
+ self.get_rmap_set_extcommunity(set_bgp_policy, parsed_route_map_stmt_set)
+
+ # Fetch other BGP policy "set" attributes
+ set_bgp_policy_cfg = set_bgp_policy.get('config')
+ if set_bgp_policy_cfg:
+ ip_next_hop = set_bgp_policy_cfg.get('set-next-hop')
+ if ip_next_hop:
+ parsed_route_map_stmt_set['ip_next_hop'] = ip_next_hop
+
+ ipv6_next_hop_global_addr = set_bgp_policy_cfg.get('set-ipv6-next-hop-global')
+ ipv6_prefer_global = set_bgp_policy_cfg.get('set-ipv6-next-hop-prefer-global')
+ if ipv6_next_hop_global_addr or (ipv6_prefer_global is not None):
+ parsed_route_map_stmt_set['ipv6_next_hop'] = {}
+ set_ipv6_nexthop = parsed_route_map_stmt_set['ipv6_next_hop']
+ if ipv6_next_hop_global_addr:
+ set_ipv6_nexthop['global_addr'] = ipv6_next_hop_global_addr
+ if ipv6_prefer_global is not None:
+ set_ipv6_nexthop['prefer_global'] = ipv6_prefer_global
+
+ local_preference = set_bgp_policy_cfg.get('set-local-pref')
+ if local_preference:
+ parsed_route_map_stmt_set['local_preference'] = local_preference
+
+ set_origin = set_bgp_policy_cfg.get('set-route-origin')
+ if set_origin:
+ if set_origin == 'EGP':
+ parsed_route_map_stmt_set['origin'] = 'egp'
+ elif set_origin == 'IGP':
+ parsed_route_map_stmt_set['origin'] = 'igp'
+ elif set_origin == 'INCOMPLETE':
+ parsed_route_map_stmt_set['origin'] = 'incomplete'
+
+ weight = set_bgp_policy_cfg.get('set-weight')
+ if weight:
+ parsed_route_map_stmt_set['weight'] = weight
+
+ @staticmethod
+ def get_rmap_set_community(set_bgp_policy, parsed_route_map_stmt_set):
+ '''Parse the "community" sub-section of the BGP policy "set" attribute
+ portion of the raw input configuration JSON representation.
+ The BGP policy "set" configuration section to be parsed is specified
+ by the "set_bgp_policy" input parameter. Parse the information
+ to convert it to a dictionary matching the "argspec" for the "route_maps"
+ resource module.'''
+
+ set_community_top = set_bgp_policy.get('set-community')
+ if (set_community_top and set_community_top.get('inline') and
+ set_community_top['inline'].get('config') and
+ set_community_top['inline']['config'].get('communities')):
+
+ set_community_config_list = \
+ set_community_top['inline']['config']['communities']
+ parsed_route_map_stmt_set['community'] = {}
+ parsed_rmap_stmt_set_comm = parsed_route_map_stmt_set['community']
+ for set_community_config_item in set_community_config_list:
+ if (set_community_config_item.split(':')[0] in
+ ('openconfig-bgp-types', 'openconfig-routing-policy-ext')):
+ set_community_attr = set_community_config_item.split(':')[1]
+ if not parsed_rmap_stmt_set_comm.get('community_attributes'):
+ parsed_rmap_stmt_set_comm['community_attributes'] = []
+ parsed_comm_attr_list = \
+ parsed_rmap_stmt_set_comm['community_attributes']
+ comm_attr_rest_to_argspec = {
+ 'NO_EXPORT_SUBCONFED': 'local_as',
+ 'NO_ADVERTISE': 'no_advertise',
+ 'NO_EXPORT': 'no_export',
+ 'NOPEER': 'no_peer',
+ 'NONE': 'none',
+ 'ADDITIVE': 'additive'
+ }
+ if set_community_attr in comm_attr_rest_to_argspec:
+ parsed_comm_attr_list.append(
+ comm_attr_rest_to_argspec[set_community_attr])
+ else:
+ if not parsed_rmap_stmt_set_comm.get('community_number'):
+ parsed_rmap_stmt_set_comm['community_number'] = []
+ parsed_comm_num_list = \
+ parsed_rmap_stmt_set_comm['community_number']
+ set_community_num_val_match = \
+ re.match(r'\d+:\d+$', set_community_config_item)
+ if set_community_num_val_match:
+ parsed_comm_num_list.append(set_community_config_item)
+
+ @staticmethod
+ def get_rmap_set_extcommunity(set_bgp_policy, parsed_route_map_stmt_set):
+ '''Parse the "extcommunity" sub-section of the BGP policy "set"
+ attribute portion of the raw input configuration JSON representation.
+ The BGP policy "set" configuration section to be parsed is specified
+ by the "set_bgp_policy" input parameter. Parse the information
+ to convert it to a dictionary matching the "argspec" for the "route_maps"
+ resource module.'''
+ set_extcommunity_top = set_bgp_policy.get('set-ext-community')
+ if (set_extcommunity_top and set_extcommunity_top.get('inline') and
+ set_extcommunity_top['inline'].get('config') and
+ set_extcommunity_top['inline']['config'].get('communities')):
+ set_extcommunity_config_list = \
+ set_extcommunity_top['inline']['config']['communities']
+ if set_extcommunity_config_list:
+ parsed_route_map_stmt_set['extcommunity'] = {}
+ parsed_rmap_stmt_set_extcomm = parsed_route_map_stmt_set['extcommunity']
+ for set_extcommunity_config_item in set_extcommunity_config_list:
+ if 'route-target:' in set_extcommunity_config_item:
+ rt_val = set_extcommunity_config_item.replace('route-target:', '')
+ if parsed_rmap_stmt_set_extcomm.get('rt'):
+ parsed_rmap_stmt_set_extcomm['rt'].append(rt_val)
+ else:
+ parsed_rmap_stmt_set_extcomm['rt'] = [rt_val]
+ elif 'route-origin:' in set_extcommunity_config_item:
+ soo_val = set_extcommunity_config_item.replace('route-origin:', '')
+ if parsed_rmap_stmt_set_extcomm.get('soo'):
+ parsed_rmap_stmt_set_extcomm['soo'].append(soo_val)
+ else:
+ parsed_rmap_stmt_set_extcomm['soo'] = [soo_val]
+
+ @staticmethod
+ def get_route_map_call_attr(route_map_stmt, parsed_route_map_stmt):
+ '''Parse the "call" attribute portion of the raw input configuration JSON
+ representation for the route map "statement" specified
+        by the "route_map_stmt" input parameter. Parse the information to
+ convert it to a dictionary matching the "argspec" for the "route_maps" resource
+ module.'''
+
+ stmt_conditions = route_map_stmt.get('conditions')
+ if not stmt_conditions:
+ return
+
+ # Fetch the "call" policy configuration for the route map statement
+ conditions_config = stmt_conditions.get('config')
+ if not conditions_config:
+ return
+ call_str = conditions_config.get('call-policy')
+ if not call_str:
+ return
+ parsed_route_map_stmt['call'] = call_str
+
+ def get_route_map_stmt_match_attr(self, route_map_stmt, parsed_route_map_stmt):
+ '''Parse the "match" attributes in the raw input configuration JSON
+ representation for the route map "statement" specified
+        by the "route_map_stmt" input parameter. Parse the information to
+ convert it to a dictionary matching the "argspec" for the "route_maps" resource
+ module.'''
+
+ # Create a dict object to hold "match" attributes.
+ parsed_route_map_stmt['match'] = {}
+ parsed_rmap_match = parsed_route_map_stmt['match']
+
+ stmt_conditions = route_map_stmt.get('conditions')
+ if not stmt_conditions:
+ return
+
+ # Fetch match as-path configuration
+ if (stmt_conditions.get('match-as-path-set') and
+ stmt_conditions['match-as-path-set'].get('config')):
+ as_path = \
+ stmt_conditions['match-as-path-set']['config'].get('as-path-set')
+ if as_path:
+ parsed_rmap_match['as_path'] = as_path
+
+ # Fetch BGP policy match attributes.
+ rmap_bgp_policy_match = stmt_conditions.get('openconfig-bgp-policy:bgp-conditions')
+ if rmap_bgp_policy_match:
+ self.get_rmap_match_bgp_policy_attr(rmap_bgp_policy_match, parsed_rmap_match)
+
+ # Fetch other match attributes
+ if (stmt_conditions.get('match-interface') and
+ stmt_conditions['match-interface'].get('config')):
+ match_interface = stmt_conditions['match-interface']['config'].get('interface')
+ if match_interface:
+ parsed_rmap_match['interface'] = match_interface
+
+ if (stmt_conditions.get('match-prefix-set') and
+ stmt_conditions['match-prefix-set']['config']):
+ match_prefix_set = \
+ stmt_conditions['match-prefix-set']['config']
+ if match_prefix_set and match_prefix_set.get('prefix-set'):
+ if not parsed_rmap_match.get('ip'):
+ parsed_rmap_match['ip'] = {}
+ parsed_rmap_match['ip']['address'] = \
+ match_prefix_set['prefix-set']
+ if (match_prefix_set and
+ match_prefix_set.get('openconfig-routing-policy-ext:ipv6-prefix-set')):
+ parsed_rmap_match['ipv6'] = {}
+ parsed_rmap_match['ipv6']['address'] = \
+ match_prefix_set['openconfig-routing-policy-ext:ipv6-prefix-set']
+
+ if (stmt_conditions.get('match-neighbor-set') and
+ stmt_conditions['match-neighbor-set'].get('config') and
+ stmt_conditions['match-neighbor-set']['config'].get(
+ 'openconfig-routing-policy-ext:address')):
+ parsed_rmap_match_peer = stmt_conditions[
+ 'match-neighbor-set']['config']['openconfig-routing-policy-ext:address'][0]
+ parsed_rmap_match['peer'] = {}
+ if ':' in parsed_rmap_match_peer:
+ parsed_rmap_match['peer']['ipv6'] = parsed_rmap_match_peer
+ elif '.' in parsed_rmap_match_peer:
+ parsed_rmap_match['peer']['ip'] = parsed_rmap_match_peer
+ else:
+ parsed_rmap_match['peer']['interface'] = parsed_rmap_match_peer
+
+ if (stmt_conditions.get('config') and
+ stmt_conditions['config'].get('install-protocol-eq')):
+ parsed_rmap_match_source_protocol = \
+ stmt_conditions['config']['install-protocol-eq']
+ if parsed_rmap_match_source_protocol == "openconfig-policy-types:BGP":
+ parsed_rmap_match['source_protocol'] = "bgp"
+ elif parsed_rmap_match_source_protocol == "openconfig-policy-types:OSPF":
+ parsed_rmap_match['source_protocol'] = "ospf"
+ elif parsed_rmap_match_source_protocol == "openconfig-policy-types:STATIC":
+ parsed_rmap_match['source_protocol'] = "static"
+ elif parsed_rmap_match_source_protocol == \
+ "openconfig-policy-types:DIRECTLY_CONNECTED":
+ parsed_rmap_match['source_protocol'] = "connected"
+
+ if stmt_conditions.get(
+ 'openconfig-routing-policy-ext:match-src-network-instance'):
+ match_src_vrf = \
+ stmt_conditions[
+ 'openconfig-routing-policy-ext:match-src-network-instance'].get('config')
+ if match_src_vrf and match_src_vrf.get('name'):
+ parsed_rmap_match['source_vrf'] = match_src_vrf['name']
+
+ if (stmt_conditions.get('match-tag-set') and
+ stmt_conditions['match-tag-set'].get('config')):
+ match_tag = \
+ stmt_conditions['match-tag-set']['config'].get(
+ 'openconfig-routing-policy-ext:tag-value')
+ if match_tag:
+ parsed_rmap_match['tag'] = match_tag[0]
+
+ @staticmethod
+ def get_rmap_match_bgp_policy_attr(rmap_bgp_policy_match, parsed_rmap_match):
+ '''Parse the BGP policy "match" attribute portion of the raw input
+ configuration JSON representation within the route map "statement"
+ that is currently being parsed. The configuration section to be parsed
+ is specified by the "rmap_bgp_policy_match" input parameter. Parse the
+ information to convert it to a dictionary matching the "argspec" for
+ the "route_maps" resource module.'''
+
+ if (rmap_bgp_policy_match.get('match-as-path-set') and
+ rmap_bgp_policy_match['match-as-path-set'].get('config')):
+ as_path = rmap_bgp_policy_match['match-as-path-set']['config'].get('as-path-set')
+ if as_path:
+ parsed_rmap_match['as_path'] = as_path
+
+ # Fetch BGP policy match "config" attributes
+ rmap_bgp_match_cfg = rmap_bgp_policy_match.get('config')
+ if rmap_bgp_match_cfg:
+ match_metric = rmap_bgp_match_cfg.get('med-eq')
+ if match_metric:
+ parsed_rmap_match['metric'] = match_metric
+
+ match_origin = rmap_bgp_match_cfg.get('origin-eq')
+ if match_origin:
+ if match_origin == 'IGP':
+ parsed_rmap_match['origin'] = 'igp'
+ elif match_origin == 'EGP':
+ parsed_rmap_match['origin'] = 'egp'
+ elif match_origin == 'INCOMPLETE':
+ parsed_rmap_match['origin'] = 'incomplete'
+
+ if rmap_bgp_match_cfg.get('local-pref-eq'):
+ parsed_rmap_match['local_preference'] = rmap_bgp_match_cfg['local-pref-eq']
+
+ if rmap_bgp_match_cfg.get('community-set'):
+ parsed_rmap_match['community'] = rmap_bgp_match_cfg['community-set']
+
+ if rmap_bgp_match_cfg.get('ext-community-set'):
+ parsed_rmap_match['ext_comm'] = rmap_bgp_match_cfg['ext-community-set']
+
+ if rmap_bgp_match_cfg.get('openconfig-bgp-policy-ext:next-hop-set'):
+ parsed_rmap_match['ip'] = {}
+ parsed_rmap_match['ip']['next_hop'] = \
+ rmap_bgp_match_cfg['openconfig-bgp-policy-ext:next-hop-set']
+
+ # Fetch BGP policy match "evpn" attributes
+ if rmap_bgp_policy_match.get('openconfig-bgp-policy-ext:match-evpn-set'):
+ bgp_policy_match_evpn_cfg = \
+ rmap_bgp_policy_match['openconfig-bgp-policy-ext:match-evpn-set'].get('config')
+ if bgp_policy_match_evpn_cfg:
+ parsed_rmap_match['evpn'] = {}
+ if bgp_policy_match_evpn_cfg.get('vni-number'):
+ parsed_rmap_match['evpn']['vni'] = \
+ bgp_policy_match_evpn_cfg.get('vni-number')
+ if bgp_policy_match_evpn_cfg.get('default-type5-route'):
+ parsed_rmap_match['evpn']['default_route'] = True
+ evpn_route_type = bgp_policy_match_evpn_cfg.get('route-type')
+ if evpn_route_type:
+ if evpn_route_type == "openconfig-bgp-policy-ext:MACIP":
+ parsed_rmap_match['evpn']['route_type'] = "macip"
+ elif evpn_route_type == "openconfig-bgp-policy-ext:MULTICAST":
+ parsed_rmap_match['evpn']['route_type'] = "multicast"
+ elif evpn_route_type == "openconfig-bgp-policy-ext:PREFIX":
+ parsed_rmap_match['evpn']['route_type'] = "prefix"
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py
index f83566440..e0d404be7 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/static_routes/static_routes.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -139,7 +138,7 @@ class Static_routesFacts(object):
blackhole = config.get('blackhole', None)
track = config.get('track', None)
tag = config.get('tag', None)
- if blackhole:
+ if blackhole is not None:
index_dict['blackhole'] = blackhole
if interface:
index_dict['interface'] = interface
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/stp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/stp.py
new file mode 100644
index 000000000..da779c502
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/stp/stp.py
@@ -0,0 +1,364 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic stp fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ remove_empties
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.stp.stp import StpArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+stp_map = {
+ 'openconfig-spanning-tree-types:EDGE_ENABLE': True,
+ 'openconfig-spanning-tree-types:EDGE_DISABLE': False,
+ 'openconfig-spanning-tree-types:MSTP': 'mst',
+ 'openconfig-spanning-tree-ext:PVST': 'pvst',
+ 'openconfig-spanning-tree-types:RAPID_PVST': 'rapid_pvst',
+ 'P2P': 'point-to-point',
+ 'SHARED': 'shared',
+ 'LOOP': 'loop',
+ 'ROOT': 'root',
+ 'NONE': 'none'
+}
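+# Illustrative usage (hypothetical REST payload value): a returned
+# 'enabled-protocol' of ['openconfig-spanning-tree-types:RAPID_PVST'] is mapped
+# to the argspec value 'rapid_pvst' via stp_map in update_global() below.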
+
+
+class StpFacts(object):
+ """ The sonic stp fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = StpArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for stp
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ objs = []
+
+ if not data:
+ stp_cfg = self.get_stp_config(self._module)
+ data = self.update_stp(stp_cfg)
+ objs = self.render_config(self.generated_spec, data)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': remove_empties(objs)})
+ facts['stp'] = params['config']
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ return conf
+
+ def update_stp(self, data):
+ config_dict = {}
+ if data:
+ config_dict['global'] = self.update_global(data)
+ config_dict['interfaces'] = self.update_interfaces(data)
+ config_dict['mstp'] = self.update_mstp(data)
+ config_dict['pvst'] = self.update_pvst(data)
+ config_dict['rapid_pvst'] = self.update_rapid_pvst(data)
+
+ return config_dict
+
+ def update_global(self, data):
+ global_dict = {}
+ stp_global = data.get('global', None)
+
+ if stp_global:
+ config = stp_global.get('config', None)
+ if config:
+ enabled_protocol = config.get('enabled-protocol', None)
+ loop_guard = config.get('loop-guard', None)
+ bpdu_filter = config.get('bpdu-filter', None)
+ disabled_vlans = config.get('openconfig-spanning-tree-ext:disabled-vlans', None)
+ root_guard_timeout = config.get('openconfig-spanning-tree-ext:rootguard-timeout', None)
+ portfast = config.get('openconfig-spanning-tree-ext:portfast', None)
+ hello_time = config.get('openconfig-spanning-tree-ext:hello-time', None)
+ max_age = config.get('openconfig-spanning-tree-ext:max-age', None)
+ fwd_delay = config.get('openconfig-spanning-tree-ext:forwarding-delay', None)
+ bridge_priority = config.get('openconfig-spanning-tree-ext:bridge-priority', None)
+
+ if enabled_protocol:
+ global_dict['enabled_protocol'] = stp_map[enabled_protocol[0]]
+ if loop_guard is not None:
+ global_dict['loop_guard'] = loop_guard
+ if bpdu_filter is not None:
+ global_dict['bpdu_filter'] = bpdu_filter
+ if disabled_vlans:
+ global_dict['disabled_vlans'] = self.convert_vlans_list(disabled_vlans)
+ if root_guard_timeout:
+ global_dict['root_guard_timeout'] = root_guard_timeout
+ if portfast is not None:
+ global_dict['portfast'] = portfast
+ if hello_time:
+ global_dict['hello_time'] = hello_time
+ if max_age:
+ global_dict['max_age'] = max_age
+ if fwd_delay:
+ global_dict['fwd_delay'] = fwd_delay
+ if bridge_priority:
+ global_dict['bridge_priority'] = bridge_priority
+
+ return global_dict
+
+ def update_interfaces(self, data):
+ interfaces_list = []
+ interfaces = data.get('interfaces', None)
+
+ if interfaces:
+ intf_list = interfaces.get('interface', None)
+ if intf_list:
+ for intf in intf_list:
+ intf_dict = {}
+ config = intf.get('config', None)
+ intf_name = config.get('name', None)
+ edge_port = config.get('edge-port', None)
+ link_type = config.get('link-type', None)
+ guard = config.get('guard', None)
+ bpdu_guard = config.get('bpdu-guard', None)
+ bpdu_filter = config.get('bpdu-filter', None)
+ portfast = config.get('openconfig-spanning-tree-ext:portfast', None)
+ uplink_fast = config.get('openconfig-spanning-tree-ext:uplink-fast', None)
+ shutdown = config.get('openconfig-spanning-tree-ext:bpdu-guard-port-shutdown', None)
+ cost = config.get('openconfig-spanning-tree-ext:cost', None)
+ port_priority = config.get('openconfig-spanning-tree-ext:port-priority', None)
+ stp_enable = config.get('openconfig-spanning-tree-ext:spanning-tree-enable', None)
+
+ if intf_name:
+ intf_dict['intf_name'] = intf_name
+ if edge_port is not None:
+ intf_dict['edge_port'] = stp_map[edge_port]
+ if link_type:
+ intf_dict['link_type'] = stp_map[link_type]
+ if guard:
+ intf_dict['guard'] = stp_map[guard]
+ if bpdu_guard is not None:
+ intf_dict['bpdu_guard'] = bpdu_guard
+ if bpdu_filter is not None:
+ intf_dict['bpdu_filter'] = bpdu_filter
+ if portfast is not None:
+ intf_dict['portfast'] = portfast
+ if uplink_fast is not None:
+ intf_dict['uplink_fast'] = uplink_fast
+ if shutdown is not None:
+ intf_dict['shutdown'] = shutdown
+ if cost:
+ intf_dict['cost'] = cost
+ if port_priority:
+ intf_dict['port_priority'] = port_priority
+ if stp_enable is not None:
+ intf_dict['stp_enable'] = stp_enable
+ if intf_dict:
+ interfaces_list.append(intf_dict)
+
+ return interfaces_list
+
+ def update_mstp(self, data):
+ mstp_dict = {}
+ mstp = data.get('mstp', None)
+
+ if mstp:
+ config = mstp.get('config', None)
+ mst_instances = mstp.get('mst-instances', None)
+ interfaces = mstp.get('interfaces', None)
+ if config:
+ mst_name = config.get('name', None)
+ revision = config.get('revision', None)
+ max_hop = config.get('max-hop', None)
+ hello_time = config.get('hello-time', None)
+ max_age = config.get('max-age', None)
+ fwd_delay = config.get('forwarding-delay', None)
+
+ if mst_name:
+ mstp_dict['mst_name'] = mst_name
+ if revision:
+ mstp_dict['revision'] = revision
+ if max_hop:
+ mstp_dict['max_hop'] = max_hop
+ if hello_time:
+ mstp_dict['hello_time'] = hello_time
+ if max_age:
+ mstp_dict['max_age'] = max_age
+ if fwd_delay:
+ mstp_dict['fwd_delay'] = fwd_delay
+
+ if mst_instances:
+ mst_instance = mst_instances.get('mst-instance', None)
+ if mst_instance:
+ mst_instances_list = []
+ for inst in mst_instance:
+ inst_dict = {}
+ mst_id = inst.get('mst-id', None)
+ config = inst.get('config', None)
+ interfaces = inst.get('interfaces', None)
+ if mst_id:
+ inst_dict['mst_id'] = mst_id
+ if interfaces:
+ intf_list = self.get_interfaces_list(interfaces)
+ if intf_list:
+ inst_dict['interfaces'] = intf_list
+ if config:
+ vlans = config.get('vlan', None)
+ bridge_priority = config.get('bridge-priority', None)
+ if vlans:
+ inst_dict['vlans'] = self.convert_vlans_list(vlans)
+ if bridge_priority:
+ inst_dict['bridge_priority'] = bridge_priority
+ if inst_dict:
+ mst_instances_list.append(inst_dict)
+ if mst_instances_list:
+ mstp_dict['mst_instances'] = mst_instances_list
+
+ return mstp_dict
+
+ def update_pvst(self, data):
+ pvst_list = []
+ pvst = data.get('openconfig-spanning-tree-ext:pvst', None)
+
+ if pvst:
+ vlans = pvst.get('vlans', None)
+ if vlans:
+ vlans_list = self.get_vlans_list(vlans)
+ if vlans_list:
+ pvst_list = vlans_list
+
+ return pvst_list
+
+ def update_rapid_pvst(self, data):
+ rapid_pvst_list = []
+ rapid_pvst = data.get('rapid-pvst', None)
+
+ if rapid_pvst:
+ vlans = rapid_pvst.get('vlan', None)
+ if vlans:
+ vlans_list = self.get_vlans_list(vlans)
+ if vlans_list:
+ rapid_pvst_list = vlans_list
+
+ return rapid_pvst_list
+
+ def get_stp_config(self, module):
+ stp_cfg = None
+ get_stp_path = '/data/openconfig-spanning-tree:stp'
+ request = {'path': get_stp_path, 'method': 'get'}
+
+ try:
+ response = edit_config(module, to_request(module, request))
+ stp_cfg = response[0][1].get('openconfig-spanning-tree:stp', None)
+ except ConnectionError as exc:
+ module.fail_json(msg=str(exc), code=exc.code)
+
+ return stp_cfg
+
+ def get_interfaces_list(self, data):
+ intf_list = []
+ interface_list = data.get('interface', None)
+
+ if interface_list:
+ for intf in interface_list:
+ intf_dict = {}
+ config = intf.get('config', None)
+ if config:
+ intf_name = config.get('name', None)
+ cost = config.get('cost', None)
+ port_priority = config.get('port-priority', None)
+
+ if intf_name:
+ intf_dict['intf_name'] = intf_name
+ if cost:
+ intf_dict['cost'] = cost
+ if port_priority:
+ intf_dict['port_priority'] = port_priority
+ if intf_dict:
+ intf_list.append(intf_dict)
+
+ return intf_list
+
+ def get_vlans_list(self, data):
+ vlan_list = []
+
+ for vlan in data:
+ vlan_dict = {}
+ vlan_id = vlan.get('vlan-id')
+ config = vlan.get('config', None)
+ interfaces = vlan.get('interfaces', None)
+
+ if vlan_id:
+ vlan_dict['vlan_id'] = vlan_id
+ if interfaces:
+ intf_list = self.get_interfaces_list(interfaces)
+ if intf_list:
+ vlan_dict['interfaces'] = intf_list
+ if config:
+ hello_time = config.get('hello-time', None)
+ max_age = config.get('max-age', None)
+ fwd_delay = config.get('forwarding-delay', None)
+ bridge_priority = config.get('bridge-priority', None)
+
+ if hello_time:
+ vlan_dict['hello_time'] = hello_time
+ if max_age:
+ vlan_dict['max_age'] = max_age
+ if fwd_delay:
+ vlan_dict['fwd_delay'] = fwd_delay
+ if bridge_priority:
+ vlan_dict['bridge_priority'] = bridge_priority
+ if vlan_dict:
+ vlan_list.append(vlan_dict)
+
+ return vlan_list
+
+ def convert_vlans_list(self, vlans):
+ converted_vlans = []
+
+ for vlan in vlans:
+ if isinstance(vlan, int):
+ converted_vlans.append(str(vlan))
+
+ else:
+ converted_vlans.append(vlan.replace('..', '-'))
+
+ return converted_vlans
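+ # Illustrative example (hypothetical input): convert_vlans_list([10, '100..200'])
+ # returns ['10', '100-200']: integers are stringified and '..' ranges are
+ # rewritten with the '-' separator expected by the argspec.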
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py
index 1d7a82d83..65c4491d3 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/system/system.py
@@ -11,7 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py
index a1e79910f..b752b7a83 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/tacacs_server/tacacs_server.py
@@ -11,8 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
-import json
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py
index 038e97f83..59f08e63e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/users/users.py
@@ -11,7 +11,6 @@ based on the configuration.
"""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -22,6 +21,9 @@ from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.s
to_request,
edit_config
)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ remove_empties_from_list
+)
from ansible.module_utils.connection import ConnectionError
GET = "get"
@@ -74,8 +76,9 @@ class UsersFacts(object):
if objs:
facts['users'] = []
params = utils.validate_config(self.argument_spec, {'config': objs})
+
if params:
- facts['users'].extend(params['config'])
+ facts['users'].extend(remove_empties_from_list(params['config']))
ansible_facts['ansible_network_resources'].update(facts)
return ansible_facts
@@ -94,7 +97,7 @@ class UsersFacts(object):
def get_all_users(self):
"""Get all the users configured in the device"""
- request = [{"path": "data/sonic-system-aaa:sonic-system-aaa/USER", "method": GET}]
+ request = [{"path": "data/openconfig-system:system/aaa/authentication/users", "method": GET}]
users = []
try:
response = edit_config(self._module, to_request(self._module, request))
@@ -102,21 +105,16 @@ class UsersFacts(object):
self._module.fail_json(msg=str(exc), code=exc.code)
raw_users = []
- if "sonic-system-aaa:USER" in response[0][1]:
- raw_users = response[0][1].get("sonic-system-aaa:USER", {}).get('USER_LIST', [])
+ if "openconfig-system:users" in response[0][1]:
+ raw_users = response[0][1].get("openconfig-system:users", {}).get('user', [])
for raw_user in raw_users:
name = raw_user.get('username', None)
- role = raw_user.get('role', [])
- if role and len(role) > 0:
- role = role[0]
- password = raw_user.get('password', None)
+ role = raw_user.get('config', {}).get('role', None)
user = {}
if name and role:
user['name'] = name
user['role'] = role
- if password:
- user['password'] = password
if user:
users.append(user)
return users
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/vlan_mapping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/vlan_mapping.py
new file mode 100644
index 000000000..ac53415c7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlan_mapping/vlan_mapping.py
@@ -0,0 +1,225 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+"""
+The sonic vlan_mapping fact class
+It is in this file the configuration is collected from the device
+for a given resource, parsed, and the facts tree is populated
+based on the configuration.
+"""
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vlan_mapping.vlan_mapping import Vlan_mappingArgs
+
+from copy import deepcopy
+
+from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
+ utils,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+from ansible.module_utils.connection import ConnectionError
+
+
+class Vlan_mappingFacts(object):
+ """ The sonic vlan_mapping fact class
+ """
+
+ def __init__(self, module, subspec='config', options='options'):
+ self._module = module
+ self.argument_spec = Vlan_mappingArgs.argument_spec
+ spec = deepcopy(self.argument_spec)
+ if subspec:
+ if options:
+ facts_argument_spec = spec[subspec][options]
+ else:
+ facts_argument_spec = spec[subspec]
+ else:
+ facts_argument_spec = spec
+
+ self.generated_spec = utils.generate_dict(facts_argument_spec)
+
+ def populate_facts(self, connection, ansible_facts, data=None):
+ """ Populate the facts for vlan_mapping
+ :param connection: the device connection
+ :param ansible_facts: Facts dictionary
+ :param data: previously collected conf
+ :rtype: dictionary
+ :returns: facts
+ """
+ if connection: # just for linting purposes, remove
+ pass
+
+ all_vlan_mapping_configs = {}
+ if not data:
+ vlan_mapping_configs = self.get_vlan_mappings()
+ for interface, vlan_config in vlan_mapping_configs.items():
+ vlan_mapping_configs_dict = {}
+ vlan_mapping_configs_dict['name'] = interface
+ vlan_mapping_configs_dict['mapping'] = vlan_config
+ all_vlan_mapping_configs[interface] = vlan_mapping_configs_dict
+
+ objs = []
+ for vlan_mapping_config in all_vlan_mapping_configs.items():
+ obj = self.render_config(self.generated_spec, vlan_mapping_config)
+ if obj:
+ objs.append(obj)
+
+ ansible_facts['ansible_network_resources'].pop('vlan_mapping', None)
+ facts = {}
+ if objs:
+ params = utils.validate_config(self.argument_spec, {'config': objs})
+ facts['vlan_mapping'] = params['config']
+
+ ansible_facts['ansible_network_resources'].update(facts)
+ return ansible_facts
+
+ def render_config(self, spec, conf):
+ """
+ Render config as dictionary structure and delete keys
+ from spec for null values
+
+ :param spec: The facts tree, generated from the argspec
+ :param conf: The configuration
+ :rtype: dictionary
+ :returns: The generated config
+ """
+ config = deepcopy(spec)
+ config['name'] = conf[1]['name']
+ config['mapping'] = conf[1]['mapping']
+
+ return utils.remove_empties(config)
+
+ def get_vlan_mappings(self):
+ """Get all vlan mappings on device"""
+ interfaces = self.get_ports() + self.get_portchannels()
+
+ vlan_mapping_configs = {}
+ for interface in interfaces:
+ response = self.get_port_mappings(interface)
+ if "openconfig-interfaces-ext:mapped-vlans" in response:
+ vlan_list = response["openconfig-interfaces-ext:mapped-vlans"].get("mapped-vlan", {})
+ for vlan_mapping in vlan_list:
+ vlan_mapping_dict = {}
+ vlan_mapping_dict["vlan_ids"] = []
+
+ tmp_dot1q_tunnel = (vlan_mapping
+ .get("egress-mapping", {})
+ .get("config", {})
+ .get("vlan-stack-action", "SWAP"))
+ if tmp_dot1q_tunnel == "SWAP":
+ vlan_mapping_dict["dot1q_tunnel"] = False
+ vlan_mapping_dict["inner_vlan"] = (vlan_mapping
+ .get("match", {})
+ .get("double-tagged", {})
+ .get("config", {})
+ .get("inner-vlan-id", None))
+ if vlan_mapping_dict["inner_vlan"]:
+ (vlan_mapping_dict["vlan_ids"]
+ .append(vlan_mapping.get("match", {})
+ .get("double-tagged", {})
+ .get("config", {})
+ .get("outer-vlan-id", None)))
+ else:
+ (vlan_mapping_dict["vlan_ids"]
+ .append(vlan_mapping.get("match", {})
+ .get("single-tagged", {})
+ .get("config", {})
+ .get("vlan-ids", None)))
+ if vlan_mapping_dict["vlan_ids"]:
+ vlan_mapping_dict["vlan_ids"][0] = vlan_mapping_dict["vlan_ids"][0][0]
+ else:
+ vlan_mapping_dict["dot1q_tunnel"] = True
+ tmp_vlan_ids = (vlan_mapping
+ .get("match", {})
+ .get("single-tagged", {})
+ .get("config", {})
+ .get("vlan-ids", None))
+ if tmp_vlan_ids:
+ vlan_mapping_dict["vlan_ids"].extend(tmp_vlan_ids[0].replace('..', '-').split(','))
+
+ vlan_mapping_dict["service_vlan"] = vlan_mapping.get("vlan-id", None)
+ vlan_mapping_dict["priority"] = (vlan_mapping
+ .get("egress-mapping", {})
+ .get("config", {})
+ .get("mapped-vlan-priority", None))
+
+ if interface["ifname"] in vlan_mapping_configs:
+ vlan_mapping_configs[interface["ifname"]].append(vlan_mapping_dict)
+ else:
+ vlan_mapping_configs[interface["ifname"]] = []
+ vlan_mapping_configs[interface["ifname"]].append(vlan_mapping_dict)
+
+ return vlan_mapping_configs
+
+ def get_port_mappings(self, interface):
+ """Get a ports vlan mappings from device"""
+ ifname = interface["ifname"]
+ if '/' in ifname:
+ ifname = ifname.replace('/', '%2F')
+
+ port_mappings = "data/openconfig-interfaces:interfaces/interface=%s/openconfig-interfaces-ext:mapped-vlans" % ifname
+ method = "GET"
+ request = [{"path": port_mappings, "method": method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ return response[0][1]
+
+ def get_ports(self):
+ """Get all port names on device"""
+ all_ports_path = "data/sonic-port:sonic-port/PORT_TABLE"
+ method = "GET"
+ request = [{"path": all_ports_path, "method": method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ response = response[0][1]
+
+ port_list = []
+
+ if "sonic-port:PORT_TABLE" in response:
+ component = response["sonic-port:PORT_TABLE"]
+ if "PORT_TABLE_LIST" in component:
+ for port in component["PORT_TABLE_LIST"]:
+ if "Eth" in port["ifname"]:
+ port_list.append({"ifname": port["ifname"]})
+
+ return port_list
+
+ def get_portchannels(self):
+ """Get all portchannel names on device"""
+ all_portchannels_path = "data/sonic-portchannel:sonic-portchannel"
+ method = "GET"
+ request = [{"path": all_portchannels_path, "method": method}]
+
+ try:
+ response = edit_config(self._module, to_request(self._module, request))
+ except ConnectionError as exc:
+ self._module.fail_json(msg=str(exc), code=exc.code)
+
+ response = response[0][1]
+
+ portchannel_list = []
+
+ if "sonic-portchannel:sonic-portchannel" in response:
+ component = response["sonic-portchannel:sonic-portchannel"]
+ if "PORTCHANNEL" in component:
+ component = component["PORTCHANNEL"]
+ if "PORTCHANNEL_LIST" in component:
+ component = component["PORTCHANNEL_LIST"]
+ for portchannel in component:
+ portchannel_list.append({"ifname": portchannel["name"]})
+
+ return portchannel_list
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py
index 7c4af2ea8..3df200048 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vlans/vlans.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py
index 797612bc4..375c453d5 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vrfs/vrfs.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py
index 51aec6561..e521313b8 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/facts/vxlans/vxlans.py
@@ -13,7 +13,6 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
from copy import deepcopy
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
@@ -166,10 +165,9 @@ class VxlansFacts(object):
vxlan['source_ip'] = each_tunnel.get('src_ip', None)
vxlan['primary_ip'] = each_tunnel.get('primary_ip', None)
vxlan['evpn_nvo'] = None
- if vxlan['source_ip']:
- evpn_nvo = next((nvo_map['name'] for nvo_map in vxlans_evpn_nvo_list if nvo_map['source_vtep'] == vxlan['name']), None)
- if evpn_nvo:
- vxlan['evpn_nvo'] = evpn_nvo
+ evpn_nvo = next((nvo_map['name'] for nvo_map in vxlans_evpn_nvo_list if nvo_map['source_vtep'] == vxlan['name']), None)
+ if evpn_nvo:
+ vxlan['evpn_nvo'] = evpn_nvo
vxlans.append(vxlan)
def fill_vlan_map(self, vxlans, vxlan_vlan_map):
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py
index 77a63d425..30739ef82 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/sonic.py
@@ -33,7 +33,6 @@ import json
import re
from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
to_list,
ComplexList
@@ -132,7 +131,7 @@ def edit_config(module, commands, skip_code=None):
# Start: This is to convert interface name from Eth1/1 to Eth1%2f1
for request in commands:
# This check is to differenciate between requests and commands
- if type(request) is dict:
+ if isinstance(request, dict):
url = request.get("path", None)
if url:
request["path"] = update_url(url)
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py
index 7471bcb11..9c2d18a52 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/bgp_utils.py
@@ -13,16 +13,10 @@ based on the configuration.
from __future__ import absolute_import, division, print_function
__metaclass__ = type
-import re
-from copy import deepcopy
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common import (
- utils,
-)
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
normalize_interface_name,
)
-from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bgp.bgp import BgpArgs
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
to_request,
edit_config
@@ -195,11 +189,6 @@ def get_peergroups(module, vrf_name):
prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(pfx_lmt_conf)
if prefix_limit:
samp.update({'prefix_limit': prefix_limit})
- elif 'l2vpn-evpn' in each and 'prefix-limit' in each['l2vpn-evpn'] and 'config' in each['l2vpn-evpn']['prefix-limit']:
- pfx_lmt_conf = each['l2vpn-evpn']['prefix-limit']['config']
- prefix_limit = update_bgp_nbr_pg_prefix_limit_dict(pfx_lmt_conf)
- if prefix_limit:
- samp.update({'prefix_limit': prefix_limit})
if 'prefix-list' in each and 'config' in each['prefix-list']:
pfx_lst_conf = each['prefix-list']['config']
if 'import-policy' in pfx_lst_conf and pfx_lst_conf['import-policy']:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/formatted_diff_utils.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/formatted_diff_utils.py
new file mode 100644
index 000000000..f6385294f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/formatted_diff_utils.py
@@ -0,0 +1,588 @@
+#
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+from copy import (
+ deepcopy
+)
+from difflib import (
+ context_diff
+)
+
+
+def get_key_sets(dict_conf):
+ key_set = set(dict_conf.keys())
+ trival_key_set = set()
+ dict_list_key_set = set()
+ for key in key_set:
+ if dict_conf[key] not in [None, [], {}]:
+ if isinstance(dict_conf[key], (list, dict)):
+ dict_list_key_set.add(key)
+ else:
+ trival_key_set.add(key)
+ return trival_key_set, dict_list_key_set
+
+
+def get_test_key_set(key, test_keys):
+ tst_keys = deepcopy(test_keys)
+ t_key_set = set()
+ if not tst_keys or not key:
+ return t_key_set
+
+ t_keys = next((t_key_item[key] for t_key_item in tst_keys if key in t_key_item), None)
+ if t_keys:
+ t_keys.pop('__merge_op', None)
+ t_keys.pop('__delete_op', None)
+ t_keys.pop('__key_match_op', None)
+ t_key_set = set(t_keys.keys())
+
+ return t_key_set
+
+
+#
+# Pre-defined Key Match Operations
+#
+
+
+"""
+Default key match operation.
+"""
+
+
+def __KEY_MATCH_OP_DEFAULT(key_set, command, exist_conf):
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+ trival_exist_key_set, dict_list_exist_key_set = get_key_sets(exist_conf)
+
+ common_trival_key_set = trival_cmd_key_set.intersection(trival_exist_key_set)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+
+ key_matched_cnt = 0
+ for key in common_trival_key_set.union(common_dict_list_key_set):
+ if command[key] == exist_conf[key]:
+ if key in key_set:
+ key_matched_cnt += 1
+
+ key_matched = (key_matched_cnt == len(key_set))
+ return key_matched
+
+
+def get_key_match_op(key, test_keys):
+ k_match_op = __KEY_MATCH_OP_DEFAULT
+ t_key_set = set()
+ if not test_keys or not key:
+ return k_match_op
+
+ t_keys = next((t_key_item[key] for t_key_item in test_keys if key in t_key_item), None)
+ if t_keys:
+ k_match_op = t_keys.get('__key_match_op', __KEY_MATCH_OP_DEFAULT)
+
+ return k_match_op
+
+
+#
+# Pre-defined Merge Operations
+#
+
+
+"""
+Default merge operation: simply merge the command into the existing configuration.
+"""
+
+
+def __MERGE_OP_DEFAULT(key_set, command, exist_conf):
+ new_conf = exist_conf
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+ nu, dict_list_exist_key_set = get_key_sets(new_conf)
+
+ for key in trival_cmd_key_set:
+ new_conf[key] = command[key]
+
+ only_cmd_dict_list_key_set = dict_list_cmd_key_set.difference(dict_list_exist_key_set)
+ for key in only_cmd_dict_list_key_set:
+ new_conf[key] = command[key]
+
+ return False, new_conf
+
+
+def get_merge_op(key, test_keys):
+ mrg_op = __MERGE_OP_DEFAULT
+ if not test_keys:
+ return mrg_op
+ if not key:
+ key = '__default_ops'
+ t_keys = next((t_key_item[key] for t_key_item in test_keys if key in t_key_item), None)
+ if t_keys:
+ mrg_op = t_keys.get('__merge_op', __MERGE_OP_DEFAULT)
+
+ return mrg_op
+
+
+#
+# Pre-defined Delete Operations
+#
+
+
+"""
+Delete entire configuration.
+"""
+
+
+def __DELETE_CONFIG(key_set, command, exist_conf):
+ new_conf = []
+ return True, new_conf
+
+
+"""
+Delete entire configuration if there is no sub-configuration.
+"""
+
+
+def __DELETE_CONFIG_IF_NO_SUBCONFIG(key_set, command, exist_conf):
+ nu, dict_list_cmd_key_set = get_key_sets(command)
+ if len(dict_list_cmd_key_set) == 0:
+ new_conf = []
+ return True, new_conf
+ else:
+ new_conf = exist_conf
+ return False, new_conf
+
+
+"""
+Delete sub-configuration and leaf configuration, if any.
+"""
+
+
+def __DELETE_SUBCONFIG_AND_LEAFS(key_set, command, exist_conf):
+ new_conf = exist_conf
+
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+ trival_cmd_key_not_key_set = trival_cmd_key_set.difference(key_set)
+ for key in trival_cmd_key_not_key_set:
+ new_conf.pop(key, None)
+
+ nu, dict_list_exist_key_set = get_key_sets(exist_conf)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+ if len(common_dict_list_key_set) != 0:
+ for key in common_dict_list_key_set:
+ new_conf.pop(key, None)
+
+ return True, new_conf
+
+
+"""
+Delete sub-configuration only, if any.
+"""
+
+
+def __DELETE_SUBCONFIG_ONLY(key_set, command, exist_conf):
+ new_conf = exist_conf
+ nu, dict_list_cmd_key_set = get_key_sets(command)
+ nu, dict_list_exist_key_set = get_key_sets(exist_conf)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+ for key in common_dict_list_key_set:
+ new_conf.pop(key, None)
+ return True, new_conf
+
+
+"""
+Delete configuration if there is no non-key leaf, and
+delete non-key leaf configuration, if any.
+"""
+
+
+def __DELETE_LEAFS_OR_CONFIG_IF_NO_NON_KEY_LEAF(key_set, command, exist_conf):
+ new_conf = exist_conf
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+
+ if (len(key_set) == len(trival_cmd_key_set)) and \
+ (len(dict_list_cmd_key_set) == 0):
+ new_conf = []
+ return True, new_conf
+
+ trival_cmd_key_not_key_set = trival_cmd_key_set.difference(key_set)
+ for key in trival_cmd_key_not_key_set:
+ new_conf.pop(key, None)
+
+ return False, new_conf
+
+
+"""
+This is the default deletion operation.
+Delete the configuration if there is no non-key leaf, and delete the non-key
+leaf configuration, if any, when its values are equal between the command and
+the existing configuration.
+"""
+
+
+def __DELETE_OP_DEFAULT(key_set, command, exist_conf):
+ new_conf = exist_conf
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+
+ if (len(key_set) == len(trival_cmd_key_set)) and \
+ (len(dict_list_cmd_key_set) == 0):
+ new_conf = []
+ return True, new_conf
+
+ trival_cmd_key_not_key_set = trival_cmd_key_set.difference(key_set)
+ for key in trival_cmd_key_not_key_set:
+ command_val = command.get(key, None)
+ new_conf_val = new_conf.get(key, None)
+ if command_val == new_conf_val:
+ new_conf.pop(key, None)
+
+ return False, new_conf
+
+
+def get_delete_op(key, test_keys):
+ del_op = __DELETE_OP_DEFAULT
+ if not test_keys:
+ return del_op
+ if not key:
+ key = '__default_ops'
+ t_keys = next((t_key_item[key] for t_key_item in test_keys if key in t_key_item), None)
+ if t_keys:
+ del_op = t_keys.get('__delete_op', __DELETE_OP_DEFAULT)
+
+ return del_op
+
+
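+# Illustrative (hypothetical) "test_keys" shape consumed by the helpers above:
+#     test_keys = [
+#         {'__default_ops': {'__delete_op': __DELETE_OP_DEFAULT}},
+#         {'config': {'name': '', '__delete_op': __DELETE_CONFIG_IF_NO_SUBCONFIG}},
+#     ]
+# Each entry maps a section key to its match keys plus optional
+# '__key_match_op', '__merge_op' and '__delete_op' overrides.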
+def get_new_config(commands, exist_conf, test_keys=None):
+
+ if not commands:
+ return exist_conf
+
+ cmds = deepcopy(commands)
+
+ n_conf = list()
+ e_conf = exist_conf
+ for cmd in cmds:
+ state = cmd['state']
+ cmd.pop('state')
+
+ if state == 'merged':
+ n_conf = derive_config_from_merged_cmd(cmd, e_conf, test_keys)
+ elif state == 'deleted':
+ n_conf = derive_config_from_deleted_cmd(cmd, e_conf, test_keys)
+ elif state == 'replaced':
+ n_conf = derive_config_from_merged_cmd(cmd, e_conf, test_keys)
+ elif state == 'overridden':
+ n_conf = derive_config_from_merged_cmd(cmd, e_conf, test_keys)
+ # If the "cmd" is derived from playbook, that is "want", the below
+ # line should be good enough:
+ # n_conf = cmd
+
+ e_conf = n_conf
+
+ return n_conf
+
+
+def derive_config_from_merged_cmd(command, exist_conf, test_keys=None):
+
+ if not command:
+ return exist_conf
+
+ if isinstance(command, list) and isinstance(exist_conf, list):
+ nu, new_conf_dict = derive_config_from_merged_cmd_dict({"config": command},
+ {"config": exist_conf},
+ test_keys)
+ new_conf = new_conf_dict.get("config", [])
+ elif isinstance(command, dict) and isinstance(exist_conf, dict):
+ merge_op_dft = get_merge_op('__default_ops', test_keys)
+ nu, new_conf = derive_config_from_merged_cmd_dict(command, exist_conf,
+ test_keys, None,
+ None, merge_op_dft)
+ elif isinstance(command, dict) and isinstance(exist_conf, list):
+ nu, new_conf_dict = derive_config_from_merged_cmd_dict({"config": [command]},
+ {"config": exist_conf},
+ test_keys)
+ new_conf = new_conf_dict.get("config", [])
+ else:
+ new_conf = exist_conf
+
+ return new_conf
+
+
+def derive_config_from_merged_cmd_dict(command, exist_conf, test_keys=None, key_set=None,
+ key_match_op=None, merge_op=None):
+
+ if test_keys is None:
+ test_keys = []
+ if key_set is None:
+ key_set = set()
+ if key_match_op is None:
+ key_match_op = __KEY_MATCH_OP_DEFAULT
+ if merge_op is None:
+ merge_op = __MERGE_OP_DEFAULT
+
+ new_conf = deepcopy(exist_conf)
+ if not command:
+ return False, new_conf
+
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+ trival_exist_key_set, dict_list_exist_key_set = get_key_sets(new_conf)
+
+ common_trival_key_set = trival_cmd_key_set.intersection(trival_exist_key_set)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+
+ key_matched = key_match_op(key_set, command, new_conf)
+ if key_matched:
+ done, new_conf = merge_op(key_set, command, new_conf)
+ if done:
+ return key_matched, new_conf
+ else:
+ nu, dict_list_exist_key_set = get_key_sets(new_conf)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+ else:
+ return key_matched, new_conf
+
+ for key in key_set:
+ common_dict_list_key_set.discard(key)
+
+ for key in common_dict_list_key_set:
+
+ cmd_value = command[key]
+ exist_value = new_conf[key]
+
+ t_key_set = get_test_key_set(key, test_keys)
+ t_key_match_op = get_key_match_op(key, test_keys)
+ t_merge_op = get_merge_op(key, test_keys)
+
+ if (isinstance(cmd_value, list) and isinstance(exist_value, list)):
+ c_list = cmd_value
+ e_list = exist_value
+
+ new_conf_list = list()
+ not_dict_item = False
+ dict_no_key_item = False
+ for c_item in c_list:
+ matched_key_dict = False
+ for e_item in e_list:
+ if (isinstance(c_item, dict) and isinstance(e_item, dict)):
+ if t_key_set:
+ remaining_keys = [t_key_item for t_key_item in test_keys if key not in t_key_item]
+ k_mtchd, new_conf_dict = derive_config_from_merged_cmd_dict(c_item,
+ e_item,
+ remaining_keys,
+ t_key_set,
+ t_key_match_op,
+ t_merge_op)
+ if k_mtchd:
+ new_conf[key].remove(e_item)
+ if new_conf_dict:
+ new_conf_list.append(new_conf_dict)
+ matched_key_dict = True
+ break
+ else:
+ dict_no_key_item = True
+ break
+
+ else:
+ not_dict_item = True
+ break
+
+ if not matched_key_dict:
+ new_conf_list.append(c_item)
+
+ if not_dict_item or dict_no_key_item:
+ break
+
+ if dict_no_key_item:
+ new_conf_list = e_list + c_list
+
+ if not_dict_item:
+ c_set = set(c_list)
+ e_set = set(e_list)
+ merge_set = c_set.union(e_set)
+ if merge_set:
+ new_conf[key] = list(merge_set)
+ elif new_conf_list:
+ new_conf[key].extend(new_conf_list)
+
+ elif (isinstance(cmd_value, dict) and isinstance(exist_value, dict)):
+ k_mtchd, new_conf_dict = derive_config_from_merged_cmd_dict(cmd_value,
+ exist_value,
+ test_keys,
+ None,
+ t_key_match_op,
+ t_merge_op)
+ if k_mtchd and new_conf_dict:
+ new_conf[key] = new_conf_dict
+
+ elif (isinstance(cmd_value, (list, dict)) or isinstance(exist_value, (list, dict))):
+ new_conf[key] = exist_value
+ break
+
+ else:
+ continue
+
+ return key_matched, new_conf
+
+
+def derive_config_from_deleted_cmd(command, exist_conf, test_keys=None):
+
+ if not command or not exist_conf:
+ return exist_conf
+
+ if isinstance(command, list) and isinstance(exist_conf, list):
+ nu, new_conf_dict = derive_config_from_deleted_cmd_dict({"config": command},
+ {"config": exist_conf},
+ test_keys)
+ new_conf = new_conf_dict.get("config", [])
+ elif isinstance(command, dict) and isinstance(exist_conf, dict):
+ delete_op_dft = get_delete_op('__default_ops', test_keys)
+ nu, new_conf = derive_config_from_deleted_cmd_dict(command, exist_conf,
+ test_keys, None,
+ None, delete_op_dft)
+ elif isinstance(command, dict) and isinstance(exist_conf, list):
+ nu, new_conf_dict = derive_config_from_deleted_cmd_dict({"config": [command]},
+ {"config": exist_conf},
+ test_keys)
+ new_conf = new_conf_dict.get("config", [])
+ else:
+ new_conf = exist_conf
+
+ return new_conf
+
+
+def derive_config_from_deleted_cmd_dict(command, exist_conf, test_keys=None, key_set=None,
+ key_match_op=None, delete_op=None):
+
+ if test_keys is None:
+ test_keys = []
+ if key_set is None:
+ key_set = set()
+ if key_match_op is None:
+ key_match_op = __KEY_MATCH_OP_DEFAULT
+ if delete_op is None:
+ delete_op = __DELETE_OP_DEFAULT
+
+ new_conf = deepcopy(exist_conf)
+ if not command:
+ return True, []
+
+ trival_cmd_key_set, dict_list_cmd_key_set = get_key_sets(command)
+ trival_exist_key_set, dict_list_exist_key_set = get_key_sets(new_conf)
+
+ common_trival_key_set = trival_cmd_key_set.intersection(trival_exist_key_set)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+
+ key_matched = key_match_op(key_set, command, new_conf)
+ if key_matched:
+ done, new_conf = delete_op(key_set, command, new_conf)
+ if done:
+ return key_matched, new_conf
+ else:
+ nu, dict_list_exist_key_set = get_key_sets(new_conf)
+ common_dict_list_key_set = dict_list_cmd_key_set.intersection(dict_list_exist_key_set)
+ else:
+ return key_matched, new_conf
+
+ for key in key_set:
+ common_dict_list_key_set.discard(key)
+
+ for key in common_dict_list_key_set:
+
+ cmd_value = command[key]
+ exist_value = new_conf[key]
+
+ t_key_set = get_test_key_set(key, test_keys)
+ t_key_match_op = get_key_match_op(key, test_keys)
+ t_delete_op = get_delete_op(key, test_keys)
+
+ if (isinstance(cmd_value, list) and isinstance(exist_value, list)):
+ c_list = cmd_value
+ e_list = exist_value
+
+ new_conf_list = list()
+ not_dict_item = False
+ dict_no_key_item = False
+ for c_item in c_list:
+ for e_item in e_list:
+ if (isinstance(c_item, dict) and isinstance(e_item, dict)):
+ if t_key_set:
+ remaining_keys = [t_key_item for t_key_item in test_keys if key not in t_key_item]
+ k_mtchd, new_conf_dict = derive_config_from_deleted_cmd_dict(c_item, e_item,
+ remaining_keys,
+ t_key_set,
+ t_key_match_op,
+ t_delete_op)
+ if k_mtchd:
+ new_conf[key].remove(e_item)
+ if new_conf_dict:
+ new_conf_list.append(new_conf_dict)
+ break
+ else:
+ dict_no_key_item = True
+ break
+
+ else:
+ not_dict_item = True
+ break
+
+ if not_dict_item or dict_no_key_item:
+ break
+
+ if dict_no_key_item:
+ new_conf_list = e_list
+
+ if not_dict_item:
+ c_set = set(c_list)
+ e_set = set(e_list)
+ delete_set = e_set.difference(c_set)
+ if delete_set:
+ new_conf[key] = list(delete_set)
+ else:
+ new_conf[key] = []
+ elif new_conf_list:
+ new_conf[key].extend(new_conf_list)
+
+ elif (isinstance(cmd_value, dict) and isinstance(exist_value, dict)):
+ k_mtchd, new_conf_dict = derive_config_from_deleted_cmd_dict(cmd_value,
+ exist_value,
+ test_keys,
+ None,
+ t_key_match_op,
+ t_delete_op)
+ if k_mtchd:
+ new_conf.pop(key, None)
+ if new_conf_dict:
+ new_conf[key] = new_conf_dict
+
+ elif (isinstance(cmd_value, (list, dict)) or isinstance(exist_value, (list, dict))):
+ new_conf[key] = exist_value
+ break
+
+ else:
+ continue
+
+ return key_matched, new_conf
+
+
+def get_formatted_config_diff(exist_conf, new_conf, verbosity=0):
+
+ exist_conf = json.dumps(exist_conf, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
+ new_conf = json.dumps(new_conf, sort_keys=True, indent=4, separators=(u',', u': ')) + u'\n'
+
+ bfr = exist_conf.replace("\"", "\'")
+ aft = new_conf.replace("\"", "\'")
+
+ bfr_list = bfr.splitlines(True)
+ aft_list = aft.splitlines(True)
+ diffs = context_diff(bfr_list, aft_list, fromfile='before', tofile='after')
+
+ if verbosity >= 3:
+ formatted_diff = list()
+ for diff in diffs:
+ formatted_diff.append(diff.rstrip('\n'))
+
+ else:
+ formatted_diff = {'prepared': u''.join(diffs)}
+
+ return formatted_diff
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/interfaces_util.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/interfaces_util.py
index a7f6e9063..60df9251d 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/interfaces_util.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/interfaces_util.py
@@ -27,10 +27,16 @@ __metaclass__ = type
import traceback
import json
+import re
from ansible.module_utils._text import to_native
try:
+ from urllib import quote
+except ImportError:
+ from urllib.parse import quote
+
+try:
import jinja2
HAS_LIB = True
except Exception as e:
@@ -38,6 +44,29 @@ except Exception as e:
ERR_MSG = to_native(e)
LIB_IMP_ERR = traceback.format_exc()
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ to_request,
+ edit_config
+)
+
+intf_speed_map = {
+ 0: 'SPEED_DEFAULT',
+ 10: "SPEED_10MB",
+ 100: "SPEED_100MB",
+ 1000: "SPEED_1GB",
+ 2500: "SPEED_2500MB",
+ 5000: "SPEED_5GB",
+ 10000: "SPEED_10GB",
+ 20000: "SPEED_20GB",
+ 25000: "SPEED_25GB",
+ 40000: "SPEED_40GB",
+ 50000: "SPEED_50GB",
+ 100000: "SPEED_100GB",
+ 200000: "SPEED_200GB",
+ 400000: "SPEED_400GB",
+ 800000: "SPEED_800GB"
+}
+
# To create Loopback, VLAN interfaces
def build_interfaces_create_request(interface_name):
@@ -53,3 +82,60 @@ def build_interfaces_create_request(interface_name):
"method": method,
"data": ret_payload}
return request
+
+
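+# Illustrative sketch (hypothetical port-group state): if a port group reports
+# member-if-start 'Eth1/1' and member-if-end 'Eth1/4', the helper below returns
+# the member port numbers [1, 2, 3, 4].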
+def retrieve_port_group_interfaces(module):
+ port_group_interfaces = []
+ method = "get"
+ port_num_regex = re.compile(r'[\d]{1,4}$')
+ port_group_url = 'data/openconfig-port-group:port-groups'
+ request = {"path": port_group_url, "method": method}
+ try:
+ response = edit_config(module, to_request(module, request))
+ except ConnectionError as exc:
+ module.fail_json(msg=str(exc), code=exc.code)
+
+ if 'openconfig-port-group:port-groups' in response[0][1] and "port-group" in response[0][1]['openconfig-port-group:port-groups']:
+ port_groups = response[0][1]['openconfig-port-group:port-groups']['port-group']
+ for pg_config in port_groups:
+ if 'state' in pg_config:
+ member_start = pg_config['state'].get('member-if-start', '')
+ member_start = re.search(port_num_regex, member_start)
+ member_end = pg_config['state'].get('member-if-end', '')
+ member_end = re.search(port_num_regex, member_end)
+ if member_start and member_end:
+ member_start = int(member_start.group(0))
+ member_end = int(member_end.group(0))
+ port_group_interfaces.extend(range(member_start, member_end + 1))
+
+ return port_group_interfaces
+
+
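+# Illustrative example (hypothetical response): a 'sonic-port:valid_speeds' value
+# of "10000,25000" yields a maximum of 25000, which intf_speed_map translates to
+# the default speed 'SPEED_25GB'.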
+def retrieve_default_intf_speed(module, intf_name):
+
+ # Read the valid_speeds
+ dft_intf_speed = 'SPEED_DEFAULT'
+ method = "get"
+ sonic_port_url = 'data/sonic-port:sonic-port/PORT/PORT_LIST=%s'
+ sonic_port_vs_url = (sonic_port_url + '/valid_speeds') % quote(intf_name, safe='')
+ request = {"path": sonic_port_vs_url, "method": method}
+ try:
+ response = edit_config(module, to_request(module, request))
+ except ConnectionError as exc:
+ module.fail_json(msg=str(exc), code=exc.code)
+ if 'sonic-port:valid_speeds' in response[0][1]:
+ v_speeds = response[0][1].get('sonic-port:valid_speeds', '')
+ v_speeds_list = v_speeds.split(",")
+ v_speeds_int_list = []
+ for vs in v_speeds_list:
+ v_speeds_int_list.append(int(vs))
+
+ dft_speed_int = 0
+ if v_speeds_int_list:
+ dft_speed_int = max(v_speeds_int_list)
+ dft_intf_speed = intf_speed_map.get(dft_speed_int, 'SPEED_DEFAULT')
+
+ if dft_intf_speed == 'SPEED_DEFAULT':
+ module.fail_json(msg="Unable to retireve default port speed for the interface {0}".format(intf_name))
+
+ return dft_intf_speed
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/utils.py b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/utils.py
index 0d6e6d1a0..bc865790b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/utils.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/module_utils/network/sonic/utils/utils.py
@@ -13,12 +13,17 @@ __metaclass__ = type
import re
import json
import ast
+from copy import copy
+from itertools import (count, groupby)
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
+ remove_empties
+)
+from ansible.module_utils.common.network import (
is_masklen,
to_netmask,
- remove_empties
)
+from ansible.module_utils.common.validation import check_required_arguments
from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
to_request,
edit_config
@@ -31,6 +36,21 @@ GET = 'get'
intf_naming_mode = ""
+def remove_matching_defaults(root, default_entry):
+ if isinstance(root, list):
+ for list_item in root:
+ remove_matching_defaults(list_item, default_entry)
+ elif isinstance(root, dict):
+ nextobj = root.get(default_entry[0]['name'])
+ if nextobj is not None:
+ if len(default_entry) > 1:
+ remove_matching_defaults(nextobj, default_entry[1:])
+ else:
+ # Leaf
+ if nextobj == default_entry[0]['default']:
+ root.pop(default_entry[0]['name'])
+
+
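A brief usage sketch for remove_matching_defaults with a hypothetical default_entry path; each path element names a key, and the final element carries the default value to strip when it matches:

    config = [{'profiles': [{'name': 'p1', 'enabled': True},
                            {'name': 'p2', 'enabled': False}]}]
    default_entry = [{'name': 'profiles'}, {'name': 'enabled', 'default': True}]
    remove_matching_defaults(config, default_entry)
    # Only the matching default is dropped:
    # [{'profiles': [{'name': 'p1'}, {'name': 'p2', 'enabled': False}]}]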
def get_diff(base_data, compare_with_data, test_keys=None, is_skeleton=None):
diff = []
if is_skeleton is None:
@@ -319,10 +339,13 @@ def netmask_to_cidr(netmask):
def remove_empties_from_list(config_list):
ret_config = []
- if not config_list:
+ if not config_list or not isinstance(config_list, list):
return ret_config
for config in config_list:
- ret_config.append(remove_empties(config))
+ if isinstance(config, dict):
+ ret_config.append(remove_empties(config))
+ else:
+ ret_config.append(copy(config))
return ret_config
@@ -432,14 +455,7 @@ def get_normalize_interface_name(intf_name, module):
def get_speed_from_breakout_mode(breakout_mode):
- speed = None
- speed_breakout_mode_map = {
- "4x10G": "SPEED_10GB", "1x100G": "SPEED_100GB", "1x40G": "SPEED_40GB", "4x25G": "SPEED_25GB", "2x50G": "SPEED_50GB",
- "1x400G": "SPEED_400GB", "4x100G": "SPEED_100GB", "4x50G": "SPEED_50GB", "2x100G": "SPEED_100GB", "2x200G": "SPEED_200GB"
- }
- if breakout_mode in speed_breakout_mode_map:
- speed = speed_breakout_mode_map[breakout_mode]
- return speed
+ return 'SPEED_' + breakout_mode.split('x')[1].replace('G', 'GB')
def get_breakout_mode(module, name):
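The rewritten get_speed_from_breakout_mode derives the speed enum directly from the breakout-mode string; a standalone sketch of the same mapping:

    def speed_from_breakout(breakout_mode):
        # '4x10G' -> 'SPEED_10GB', '1x400G' -> 'SPEED_400GB'
        return 'SPEED_' + breakout_mode.split('x')[1].replace('G', 'GB')

    assert speed_from_breakout('4x10G') == 'SPEED_10GB'
    assert speed_from_breakout('2x200G') == 'SPEED_200GB'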
@@ -455,7 +471,7 @@ def get_breakout_mode(module, name):
except ConnectionError as exc:
try:
json_obj = json.loads(str(exc).replace("'", '"'))
- if json_obj and type(json_obj) is dict and 404 == json_obj['code']:
+ if json_obj and isinstance(json_obj, dict) and 404 == json_obj['code']:
response = None
else:
module.fail_json(msg=str(exc), code=exc.code)
@@ -509,3 +525,205 @@ def command_list_str_to_dict(module, warnings, cmd_list_in, exec_cmd=False):
cmd_list_out.append(cmd_out)
return cmd_list_out
+
+
+def send_requests(module, requests):
+
+ reply = dict()
+ response = []
+ if not module.check_mode and requests:
+ try:
+ response = edit_config(module, to_request(module, requests))
+ except ConnectionError as exc:
+ module.fail_json(msg=str(exc), code=exc.code)
+
+ reply = response[0][1]
+
+ return reply
+
+
+def get_replaced_config(new_conf, exist_conf, test_keys=None):
+
+ replace_conf = []
+ if not new_conf or not exist_conf:
+ return replace_conf
+
+ if isinstance(new_conf, list) and isinstance(exist_conf, list):
+
+ replace_conf_dict = get_replaced_config_dict({"config": new_conf},
+ {"config": exist_conf},
+ test_keys)
+ replaced_conf = replace_conf_dict.get("config", [])
+ else:
+ replaced_conf = get_replaced_config_dict(new_conf, exist_conf, test_keys)
+
+ return replaced_conf
+
+
+def get_replaced_config_dict(new_conf, exist_conf, test_keys=None, key_set=None):
+
+ replaced_conf = dict()
+
+ if test_keys is None:
+ test_keys = []
+ if key_set is None:
+ key_set = []
+
+ if not new_conf:
+ return replaced_conf
+
+ new_key_set = set(new_conf.keys())
+ exist_key_set = set(exist_conf.keys())
+
+ trival_new_key_set = set()
+ dict_list_new_key_set = set()
+ for key in new_key_set:
+ if new_conf[key] not in [None, [], {}]:
+ if isinstance(new_conf[key], (list, dict)):
+ dict_list_new_key_set.add(key)
+ else:
+ trival_new_key_set.add(key)
+
+ trival_exist_key_set = set()
+ dict_list_exist_key_set = set()
+ for key in exist_key_set:
+ if exist_conf[key] not in [None, [], {}]:
+ if isinstance(exist_conf[key], (list, dict)):
+ dict_list_exist_key_set.add(key)
+ else:
+ trival_exist_key_set.add(key)
+
+ common_trival_key_set = trival_new_key_set.intersection(trival_exist_key_set)
+ common_dict_list_key_set = dict_list_new_key_set.intersection(dict_list_exist_key_set)
+
+ key_matched_cnt = 0
+ common_trival_key_matched = True
+ for key in common_trival_key_set:
+ if new_conf[key] == exist_conf[key]:
+ if key in key_set:
+ key_matched_cnt += 1
+ else:
+ if key not in key_set:
+ common_trival_key_matched = False
+
+ for key in common_dict_list_key_set:
+ if new_conf[key] == exist_conf[key]:
+ if key in key_set:
+ key_matched_cnt += 1
+
+ key_matched = (key_matched_cnt == len(key_set))
+ if key_matched:
+ extra_trival_new_key_set = trival_new_key_set - common_trival_key_set
+ extra_trival_exist_key_set = trival_exist_key_set - common_trival_key_set
+ if extra_trival_new_key_set or extra_trival_exist_key_set or \
+ not common_trival_key_matched:
+ # Replace whole dict.
+ replaced_conf = exist_conf
+ return replaced_conf
+ else:
+ replaced_conf = []
+ return replaced_conf
+
+ for key in key_set:
+ common_dict_list_key_set.discard(key)
+
+ replace_whole_dict = False
+ replace_some_list = False
+ replace_some_dict = False
+ for key in common_dict_list_key_set:
+
+ new_value = new_conf[key]
+ exist_value = exist_conf[key]
+
+ if (isinstance(new_value, list) and isinstance(exist_value, list)):
+ n_list = new_value
+ e_list = exist_value
+ t_keys = next((t_key_item[key] for t_key_item in test_keys if key in t_key_item), None)
+ t_key_set = set()
+ if t_keys:
+ t_key_set = set(t_keys.keys())
+
+ replaced_list = list()
+ not_dict_item = False
+ dict_no_key_item = False
+ for n_item in n_list:
+ for e_item in e_list:
+ if (isinstance(n_item, dict) and isinstance(e_item, dict)):
+ if t_keys:
+ remaining_keys = [t_key_item for t_key_item in test_keys if key not in t_key_item]
+ replaced_dict = get_replaced_config_dict(n_item, e_item,
+ remaining_keys, t_key_set)
+ else:
+ dict_no_key_item = True
+ break
+
+ if replaced_dict:
+ replaced_list.append(replaced_dict)
+ break
+ else:
+ not_dict_item = True
+ break
+
+ if not_dict_item or dict_no_key_item:
+ break
+
+ if dict_no_key_item:
+ replaced_list = e_list
+
+ if not_dict_item:
+ n_set = set(n_list)
+ e_set = set(e_list)
+ diff_set = n_set.symmetric_difference(e_set)
+ if diff_set:
+ replaced_conf[key] = e_list
+ replace_some_list = True
+
+ elif replaced_list:
+ replaced_conf[key] = replaced_list
+ replace_some_list = True
+
+ elif (isinstance(new_value, dict) and isinstance(exist_value, dict)):
+ replaced_dict = get_replaced_config_dict(new_conf[key], exist_conf[key], test_keys)
+ if replaced_dict:
+ replaced_conf[key] = replaced_dict
+ replace_some_dict = True
+
+ elif (isinstance(new_value, (list, dict)) or isinstance(exist_value, (list, dict))):
+ # Replace whole dict.
+ replaced_conf = exist_conf
+ replace_whole_dict = True
+ break
+
+ else:
+ continue
+
+ if ((replace_some_dict or replace_some_list) and (not replace_whole_dict)):
+ for key in key_set:
+ replaced_conf[key] = exist_conf[key]
+
+ return replaced_conf
+
+
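A brief sketch of how get_replaced_config is typically driven, using hypothetical interface entries and the collection's test_keys convention (the key that identifies a list entry); an existing entry whose key matches but whose other attributes differ is returned so it can be deleted before the new configuration is applied:

    new_conf = [{'name': 'Eth1/1', 'mtu': 9100}]
    exist_conf = [{'name': 'Eth1/1', 'mtu': 1500}, {'name': 'Eth1/2', 'mtu': 1500}]
    test_keys = [{'config': {'name': ''}}]
    replaced = get_replaced_config(new_conf, exist_conf, test_keys)
    # Expected: [{'name': 'Eth1/1', 'mtu': 1500}]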
+def check_required(module, required_parameters, parameters, options_context=None):
+ '''This utility is a wrapper for the Ansible "check_required_arguments"
+ function. The "required_parameters" input specifies the list of
+ key names that are required in the dictionary specified by "parameters".
+ The optional "options_context" parameter specifies the context/path
+ from the top level parent dict to the dict being checked.'''
+ if required_parameters:
+ spec = {}
+ for parameter in required_parameters:
+ spec[parameter] = {'required': True}
+
+ try:
+ check_required_arguments(spec, parameters, options_context)
+ except TypeError as exc:
+ module.fail_json(msg=str(exc))
+
+
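A short usage sketch for check_required, assuming 'module' is the running AnsibleModule instance and the option names are hypothetical:

    parameters = {'name': 'Eth1/1', 'mtu': 9100}
    # Fails the module because 'vrf' is missing; the error message names the
    # missing argument and the context path (roughly "... found in config -> interfaces").
    check_required(module, ['name', 'vrf'], parameters, ['config', 'interfaces'])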
+def get_ranges_in_list(num_list):
+ """Returns a generator for list(s) of consecutive numbers
+ present in the given sorted list of numbers
+ """
+ for key, group in groupby(num_list, lambda num, i=count(): num - next(i)):
+ yield list(group)
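A usage sketch for get_ranges_in_list on a sorted list of hypothetical VLAN IDs; the count()-based key stays constant within each consecutive run, so groupby splits the list at every gap:

    vlan_ids = [1, 2, 3, 7, 8, 12]
    assert list(get_ranges_in_list(vlan_ids)) == [[1, 2, 3], [7, 8], [12]]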
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py
index ddc71331f..c17c0f711 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_aaa.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2021 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -79,8 +79,10 @@ options:
- Specifies the operation to be performed on the aaa parameters configured on the device.
- In case of merged, the input configuration will be merged with the existing aaa configuration on the device.
- In case of deleted the existing aaa configuration will be removed from the device.
+ - In case of replaced, the existing aaa configuration will be replaced with the provided configuration.
+ - In case of overridden, the existing aaa configuration will be overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'deleted', 'overridden', 'replaced']
type: str
"""
EXAMPLES = """
@@ -169,6 +171,65 @@ EXAMPLES = """
# login-method : local
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# do show aaa
+# AAA Authentication Information
+# ---------------------------------------------------------
+# failthrough : False
+# login-method : local, radius
+
+- name: Replace aaa configurations
+ dellemc.enterprise_sonic.sonic_aaa:
+ config:
+ authentication:
+ data:
+ group: ldap
+ fail_through: true
+ state: replaced
+
+# After state:
+# ------------
+#
+# do show aaa
+# AAA Authentication Information
+# ---------------------------------------------------------
+# failthrough : True
+# login-method : local, ldap
+
+
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# do show aaa
+# AAA Authentication Information
+# ---------------------------------------------------------
+# failthrough : False
+# login-method : local, radius
+
+- name: Override aaa configurations
+ dellemc.enterprise_sonic.sonic_aaa:
+ config:
+ authentication:
+ data:
+ group: tacacs+
+ fail_through: true
+ state: overridden
+
+# After state:
+# ------------
+#
+# do show aaa
+# AAA Authentication Information
+# ---------------------------------------------------------
+# failthrough : True
+# login-method : tacacs+
+
"""
RETURN = """
before:
@@ -185,6 +246,13 @@ after:
sample: >
The configuration returned will always be in the same format
of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_acl_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_acl_interfaces.py
new file mode 100644
index 000000000..883252bc8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_acl_interfaces.py
@@ -0,0 +1,385 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_acl_interfaces
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_acl_interfaces
+version_added: '2.1.0'
+notes:
+ - Supports C(check_mode).
+short_description: Manage access control list (ACL) to interface binding on SONiC
+description:
+ - This module provides configuration management of applying access control lists (ACL)
+ to interfaces in devices running SONiC.
+ - ACLs must be created on the device before they can be applied to an interface.
+author: 'Arun Saravanan Balachandran (@ArunSaravananBalachandran)'
+options:
+ config:
+ description:
+ - Specifies interface access-group configurations.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Full name of the interface, for example, Eth1/1.
+ type: str
+ required: true
+ access_groups:
+ description:
+ - Access-group configurations to be set for the interface.
+ type: list
+ elements: dict
+ suboptions:
+ type:
+ description:
+ - Type of the ACLs to be applied on the interface.
+ type: str
+ required: true
+ choices:
+ - mac
+ - ipv4
+ - ipv6
+ acls:
+ description:
+ - List of ACLs for the given type.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Name of the ACL to be applied on the interface.
+ type: str
+ required: true
+ direction:
+ description:
+ - Specifies the direction of the packets that the ACL will be applied on.
+ type: str
+ required: true
+ choices:
+ - in
+ - out
+ state:
+ description:
+ - The state of the configuration after module completion.
+ - I(merged) - Merges provided interface access-group configuration with on-device configuration.
+ - I(replaced) - Replaces on-device access-group configuration of the specified interfaces with provided configuration.
+ - I(overridden) - Overrides all on-device interface access-group configurations with the provided configuration.
+ - I(deleted) - Deletes on-device interface access-group configuration.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show mac access-group
+# sonic#
+# sonic# show ip access-group
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# sonic#
+
+ - name: Merge provided interface access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - name: 'mac-acl-2'
+ direction: 'out'
+ - type: 'ipv6'
+ acls:
+ - name: 'ipv6-acl-2'
+ direction: 'out'
+ - name: 'Eth1/2'
+ access_groups:
+ - type: 'ipv4'
+ acls:
+ - name: 'ip-acl-1'
+ direction: 'in'
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show mac access-group
+# Ingress MAC access-list mac-acl-1 on Eth1/1
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Ingress IP access-list ip-acl-1 on Eth1/2
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/1
+# sonic#
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show mac access-group
+# Ingress MAC access-list mac-acl-1 on Eth1/1
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Ingress IP access-list ip-acl-1 on Eth1/2
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/1
+# sonic#
+
+ - name: Replace device access-group configuration of specified interfaces with provided configuration
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config:
+ - name: 'Eth1/2'
+ access_groups:
+ - type: 'ipv6'
+ acls:
+ - name: 'ipv6-acl-2'
+ direction: 'out'
+ - name: 'Eth1/3'
+ access_groups:
+ - type: 'ipv4'
+ acls:
+ - name: 'ip-acl-2'
+ direction: 'out'
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show mac access-group
+# Ingress MAC access-list mac-acl-1 on Eth1/1
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Egress IP access-list ip-acl-2 on Eth1/3
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/2
+# sonic#
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show mac access-group
+# Ingress MAC access-list mac-acl-1 on Eth1/1
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Egress IP access-list ip-acl-2 on Eth1/3
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/2
+# sonic#
+
+ - name: Override all interfaces access-group device configuration with provided configuration
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'ipv4'
+ acls:
+ - name: 'ip-acl-2'
+ direction: 'out'
+ - name: 'Eth1/2'
+ access_groups:
+ - type: 'ipv4'
+ acls:
+ - name: 'ip-acl-2'
+ direction: 'out'
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show mac access-group
+# sonic#
+# sonic# show ip access-group
+# Egress IP access-list ip-acl-2 on Eth1/1
+# Egress IP access-list ip-acl-2 on Eth1/2
+# sonic#
+# sonic# show ipv6 access-group
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show mac access-group
+# Ingress MAC access-list mac-acl-1 on Eth1/1
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Egress IP access-list ip-acl-2 on Eth1/3
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/2
+# sonic#
+
+ - name: Delete specified interfaces access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - type: 'ipv6'
+ - name: 'Eth1/2'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show mac access-group
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Egress IP access-list ip-acl-2 on Eth1/3
+# sonic#
+# sonic# show ipv6 access-group
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show mac access-group
+# Ingress MAC access-list mac-acl-1 on Eth1/1
+# Egress MAC access-list mac-acl-2 on Eth1/1
+# sonic#
+# sonic# show ip access-group
+# Egress IP access-list ip-acl-2 on Eth1/3
+# sonic#
+# sonic# show ipv6 access-group
+# Ingress IPV6 access-list ipv6-acl-1 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/1
+# Egress IPV6 access-list ipv6-acl-2 on Eth1/2
+# sonic#
+
+ - name: Delete all interface access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config:
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show mac access-group
+# sonic#
+# sonic# show ip access-group
+# sonic#
+# sonic# show ipv6 access-group
+# sonic#
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.acl_interfaces.acl_interfaces import Acl_interfacesArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.acl_interfaces.acl_interfaces import Acl_interfaces
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Acl_interfacesArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Acl_interfaces(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bfd.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bfd.py
new file mode 100644
index 000000000..c969b1a69
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bfd.py
@@ -0,0 +1,684 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_bfd
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = """
+---
+module: sonic_bfd
+version_added: "2.1.0"
+short_description: Manage BFD configuration on SONiC
+description:
+ - This module provides configuration management of BFD for devices running SONiC.
+author: "Shade Talabi (@stalabi1)"
+options:
+ config:
+ description:
+ - Specifies BFD configurations
+ type: dict
+ suboptions:
+ profiles:
+ description:
+ - List of preconfiguration profiles
+ type: list
+ elements: dict
+ suboptions:
+ profile_name:
+ description:
+ - BFD profile name
+ type: str
+ required: True
+ enabled:
+ description:
+ - Enables BFD session when set to true
+ type: bool
+ default: True
+ transmit_interval:
+ description:
+ - Specifies peer transmit interval
+ type: int
+ default: 300
+ receive_interval:
+ description:
+ - Specifies peer receive interval
+ type: int
+ default: 300
+ detect_multiplier:
+ description:
+ - Number of missed packets to bring down a BFD session
+ type: int
+ default: 3
+ passive_mode:
+ description:
+ - Specifies BFD peer as passive when set to true
+ type: bool
+ default: False
+ min_ttl:
+ description:
+ - Minimum expected TTL on received packets
+ type: int
+ default: 254
+ echo_interval:
+ description:
+ - Specifies echo interval
+ type: int
+ default: 300
+ echo_mode:
+ description:
+ - Echo mode is enabled when set to true
+ type: bool
+ default: False
+ single_hops:
+ description:
+ - List of single-hop sessions
+ type: list
+ elements: dict
+ suboptions:
+ remote_address:
+ description:
+ - IP address used by the remote system for the BFD session
+ type: str
+ required: True
+ vrf:
+ description:
+ - Name of the configured VRF on the device
+ type: str
+ required: True
+ interface:
+ description:
+ - Interface to use to contact peer
+ type: str
+ required: True
+ local_address:
+ description:
+ - Source IP address to be used for BFD sessions over the interface
+ type: str
+ required: True
+ enabled:
+ description:
+ - Enables BFD session when set to true
+ type: bool
+ default: True
+ transmit_interval:
+ description:
+ - Specifies peer transmit interval
+ type: int
+ default: 300
+ receive_interval:
+ description:
+ - Specifies peer receive interval
+ type: int
+ default: 300
+ detect_multiplier:
+ description:
+ - Number of missed packets to bring down a BFD session
+ type: int
+ default: 3
+ passive_mode:
+ description:
+ - Specifies BFD peer as passive when set to true
+ type: bool
+ default: False
+ echo_interval:
+ description:
+ - Specifies echo interval
+ type: int
+ default: 300
+ echo_mode:
+ description:
+ - Echo mode is enabled when set to true
+ type: bool
+ default: False
+ profile_name:
+ description:
+ - BFD profile name
+ type: str
+ multi_hops:
+ description:
+ - List of multi-hop sessions
+ type: list
+ elements: dict
+ suboptions:
+ remote_address:
+ description:
+ - IP address used by the remote system for the BFD session
+ type: str
+ required: True
+ vrf:
+ description:
+ - Name of the configured VRF on the device
+ type: str
+ required: True
+ local_address:
+ description:
+ - Source IP address to be used for BFD sessions over the interface
+ type: str
+ required: True
+ enabled:
+ description:
+ - Enables BFD session when set to true
+ type: bool
+ default: True
+ transmit_interval:
+ description:
+ - Specifies peer transmit interval
+ type: int
+ default: 300
+ receive_interval:
+ description:
+ - Specifies peer receive interval
+ type: int
+ default: 300
+ detect_multiplier:
+ description:
+ - Number of missed packets to bring down a BFD session
+ type: int
+ default: 3
+ passive_mode:
+ description:
+ - Specifies BFD peer as passive when set to true
+ type: bool
+ default: False
+ min_ttl:
+ description:
+ - Minimum expected TTL on received packets
+ type: int
+ default: 254
+ profile_name:
+ description:
+ - BFD profile name
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
+ default: merged
+"""
+EXAMPLES = """
+# Using Merged
+#
+# Before state:
+# -------------
+#
+# sonic# show bfd profile
+# (No "bfd profile" configuration present)
+# sonic# show bfd peers
+# (No "bfd peers" configuration present)
+
+ - name: Merge BFD configuration
+ dellemc.enterprise_sonic.sonic_bfd:
+ config:
+ profiles:
+ - profile_name: 'p1'
+ enabled: True
+ transmit_interval: 120
+ receive_interval: 200
+ detect_multiplier: 2
+ passive_mode: True
+ min_ttl: 140
+ echo_interval: 150
+ echo_mode: True
+ single_hops:
+ - remote_address: '196.88.6.1'
+ vrf: 'default'
+ interface: 'Ethernet20'
+ local_address: '1.1.1.1'
+ enabled: True
+ transmit_interval: 50
+ receive_interval: 80
+ detect_multiplier: 4
+ passive_mode: True
+ echo_interval: 110
+ echo_mode: True
+ profile_name: 'p1'
+ multi_hops:
+ - remote_address: '192.40.1.3'
+ vrf: 'default'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 75
+ receive_interval: 100
+ detect_multiplier: 3
+ passive_mode: True
+ min_ttl: 125
+ profile_name: 'p1'
+ state: merged
+
+# After state:
+# ------------
+#
+# sonic# show bfd profile
+# BFD Profile:
+# Profile-name: p1
+# Enabled: True
+# Echo-mode: Enabled
+# Passive-mode: Enabled
+# Minimum-Ttl: 140
+# Detect-multiplier: 2
+# Receive interval: 200ms
+# Transmission interval: 120ms
+# Echo transmission interval: 150ms
+# sonic# show bfd peers
+# BFD Peers:
+#
+# peer 192.40.1.3 multihop local-address 3.3.3.3 vrf default
+# ID: 989720421
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p1
+# Minimum TTL: 125
+# Status: down
+# Downtime: 0 day(s), 0 hour(s), 1 min(s), 46 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 2
+# Receive interval: 100ms
+# Transmission interval: 75ms
+# Echo transmission interval: ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+#
+# peer 196.88.6.1 local-address 1.1.1.1 vrf default interface Ethernet20
+# ID: 1134635660
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p1
+# Status: down
+# Downtime: 0 day(s), 1 hour(s), 50 min(s), 48 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 4
+# Receive interval: 80ms
+# Transmission interval: 50ms
+# Echo transmission interval: 110ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+#
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# sonic# show bfd profile
+# BFD Profile:
+# Profile-name: p1
+# Enabled: True
+# Echo-mode: Enabled
+# Passive-mode: Enabled
+# Minimum-Ttl: 140
+# Detect-multiplier: 2
+# Receive interval: 200ms
+# Transmission interval: 120ms
+# Echo transmission interval: 150ms
+# Profile-name: p2
+# Enabled: True
+# Echo-mode: Disabled
+# Passive-mode: Disabled
+# Minimum-Ttl: 254
+# Detect-multiplier: 3
+# Receive interval: 300ms
+# Transmission interval: 300ms
+# Echo transmission interval: 300ms
+
+ - name: Replace BFD configuration
+ dellemc.enterprise_sonic.sonic_bfd:
+ config:
+ profiles:
+ - profile_name: 'p1'
+ transmit_interval: 144
+ - profile_name: 'p2'
+ enabled: False
+ transmit_interval: 110
+ receive_interval: 235
+ detect_multiplier: 5
+ passive_mode: True
+ min_ttl: 155
+ echo_interval: 163
+ echo_mode: True
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show bfd profile
+# BFD Profile:
+# Profile-name: p1
+# Enabled: True
+# Echo-mode: Enabled
+# Passive-mode: Enabled
+# Minimum-Ttl: 140
+# Detect-multiplier: 2
+# Receive interval: 200ms
+# Transmission interval: 144ms
+# Echo transmission interval: 150ms
+# Profile-name: p2
+# Enabled: False
+# Echo-mode: Enabled
+# Passive-mode: Enabled
+# Minimum-Ttl: 155
+# Detect-multiplier: 5
+# Receive interval: 235ms
+# Transmission interval: 110ms
+# Echo transmission interval: 163ms
+#
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# sonic# show bfd peers
+# BFD Peers:
+#
+# peer 192.40.1.3 multihop local-address 3.3.3.3 vrf default
+# ID: 989720421
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p1
+# Minimum TTL: 125
+# Status: down
+# Downtime: 0 day(s), 0 hour(s), 1 min(s), 46 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 2
+# Receive interval: 100ms
+# Transmission interval: 75ms
+# Echo transmission interval: ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+#
+# peer 196.88.6.1 local-address 1.1.1.1 vrf default interface Ethernet20
+# ID: 1134635660
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p1
+# Status: down
+# Downtime: 0 day(s), 1 hour(s), 50 min(s), 48 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 4
+# Receive interval: 80ms
+# Transmission interval: 50ms
+# Echo transmission interval: 110ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+
+ - name: Override BFD configuration
+ dellemc.enterprise_sonic.sonic_bfd:
+ config:
+ single_hops:
+ - remote_address: '172.68.2.1'
+ vrf: 'default'
+ interface: 'Ethernet16'
+ local_address: '2.2.2.2'
+ enabled: True
+ transmit_interval: 60
+ receive_interval: 88
+ detect_multiplier: 6
+ passive_mode: True
+ echo_interval: 112
+ echo_mode: True
+ profile_name: 'p3'
+ multi_hops:
+ - remote_address: '186.42.1.2'
+ vrf: 'default'
+ local_address: '1.1.1.1'
+ enabled: False
+ transmit_interval: 85
+ receive_interval: 122
+ detect_multiplier: 4
+ passive_mode: False
+ min_ttl: 120
+ profile_name: 'p3'
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show bfd peers
+# BFD Peers:
+#
+# peer 186.42.1.2 multihop local-address 1.1.1.1 vrf default
+# ID: 989720421
+# Remote ID: 0
+# Passive mode: Disabled
+# Profile: p3
+# Minimum TTL: 120
+# Status: down
+# Downtime: 0 day(s), 0 hour(s), 1 min(s), 46 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 4
+# Receive interval: 122ms
+# Transmission interval: 85ms
+# Echo transmission interval: ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+#
+# peer 172.68.2.1 local-address 2.2.2.2 vrf default interface Ethernet16
+# ID: 1134635660
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p3
+# Status: down
+# Downtime: 0 day(s), 1 hour(s), 50 min(s), 48 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 6
+# Receive interval: 88ms
+# Transmission interval: 60ms
+# Echo transmission interval: 112ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+#
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+# sonic# show bfd profile
+# BFD Profile:
+# Profile-name: p1
+# Enabled: True
+# Echo-mode: Enabled
+# Passive-mode: Enabled
+# Minimum-Ttl: 140
+# Detect-multiplier: 2
+# Receive interval: 200ms
+# Transmission interval: 120ms
+# Echo transmission interval: 150ms
+# sonic# show bfd peers
+# BFD Peers:
+#
+# peer 192.40.1.3 multihop local-address 3.3.3.3 vrf default
+# ID: 989720421
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p1
+# Minimum TTL: 125
+# Status: down
+# Downtime: 0 day(s), 0 hour(s), 1 min(s), 46 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 2
+# Receive interval: 100ms
+# Transmission interval: 75ms
+# Echo transmission interval: ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+#
+# peer 196.88.6.1 local-address 1.1.1.1 vrf default interface Ethernet20
+# ID: 1134635660
+# Remote ID: 0
+# Passive mode: Enabled
+# Profile: p1
+# Status: down
+# Downtime: 0 day(s), 1 hour(s), 50 min(s), 48 sec(s)
+# Diagnostics: ok
+# Remote diagnostics: ok
+# Peer Type: configured
+# Local timers:
+# Detect-multiplier: 4
+# Receive interval: 80ms
+# Transmission interval: 50ms
+# Echo transmission interval: 110ms
+# Remote timers:
+# Detect-multiplier: 3
+# Receive interval: 1000ms
+# Transmission interval: 1000ms
+# Echo transmission interval: 0ms
+
+ - name: Delete BFD configuration
+ dellemc.enterprise_sonic.sonic_bfd:
+ config:
+ profiles:
+ - profile_name: 'p1'
+ enabled: True
+ transmit_interval: 120
+ receive_interval: 200
+ detect_multiplier: 2
+ passive_mode: True
+ min_ttl: 140
+ echo_interval: 150
+ echo_mode: True
+ single_hops:
+ - remote_address: '196.88.6.1'
+ vrf: 'default'
+ interface: 'Ethernet20'
+ local_address: '1.1.1.1'
+ multi_hops:
+ - remote_address: '192.40.1.3'
+ vrf: 'default'
+ local_address: '3.3.3.3'
+ state: deleted
+
+# After state
+# -----------
+#
+# sonic# show bfd profile
+# BFD Profile:
+# Profile-name: p1
+# Enabled: True
+# Echo-mode: Disabled
+# Passive-mode: Disabled
+# Minimum-Ttl: 254
+# Detect-multiplier: 3
+# Receive interval: 300ms
+# Transmission interval: 300ms
+# Echo transmission interval: 300ms
+# sonic# show bfd peers
+# (No "bfd peers" configuration present)
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.bfd.bfd import BfdArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bfd.bfd import Bfd
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=BfdArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Bfd(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py
index bc53ca40c..aaf52a40c 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# © Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -143,13 +143,20 @@ options:
description:
- Allows comparing meds from different neighbors if set to true
type: bool
+ rt_delay:
+ description:
+ - Time in seconds to wait before processing route-map changes.
+ - Range is 0-600. A value of 0 disables the timer, and route-map changes are not processed.
+ type: int
state:
description:
- Specifies the operation to be performed on the BGP process that is configured on the device.
- In case of merged, the input configuration is merged with the existing BGP configuration on the device.
- In case of deleted, the existing BGP configuration is removed from the device.
+ - In case of replaced, the existing configuration of the specified BGP AS will be replaced with the provided configuration.
+ - In case of overridden, the existing BGP configuration will be overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
type: str
"""
EXAMPLES = """
@@ -158,30 +165,33 @@ EXAMPLES = """
# Before state:
# -------------
#
-#!
-#router bgp 10 vrf VrfCheck1
-# router-id 10.2.2.32
-# log-neighbor-changes
-#!
-#router bgp 11 vrf VrfCheck2
-# log-neighbor-changes
-# bestpath as-path ignore
-# bestpath med missing-as-worst confed
-# bestpath compare-routerid
-#!
-#router bgp 4
-# router-id 10.2.2.4
-# bestpath as-path ignore
-# bestpath as-path confed
-# bestpath med missing-as-worst confed
-# bestpath compare-routerid
-#!
+# !
+# router bgp 10 vrf VrfCheck1
+# router-id 10.2.2.32
+# route-map delay-timer 20
+# log-neighbor-changes
+# !
+# router bgp 11 vrf VrfCheck2
+# log-neighbor-changes
+# bestpath as-path ignore
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# !
+# router bgp 4
+# router-id 10.2.2.4
+# route-map delay-timer 10
+# bestpath as-path ignore
+# bestpath as-path confed
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# !
#
- name: Delete BGP Global attributes
dellemc.enterprise_sonic.sonic_bgp:
config:
- bgp_as: 4
router_id: 10.2.2.4
+ rt_delay: 10
log_neighbor_changes: False
bestpath:
as_path:
@@ -195,6 +205,7 @@ EXAMPLES = """
missing_as_worst: True
- bgp_as: 10
router_id: 10.2.2.32
+ rt_delay: 20
log_neighbor_changes: True
vrf_name: 'VrfCheck1'
- bgp_as: 11
@@ -215,18 +226,18 @@ EXAMPLES = """
# After state:
# ------------
#
-#!
-#router bgp 10 vrf VrfCheck1
-# log-neighbor-changes
-#!
-#router bgp 11 vrf VrfCheck2
-# log-neighbor-changes
-# bestpath compare-routerid
-#!
-#router bgp 4
-# log-neighbor-changes
-# bestpath compare-routerid
-#!
+# !
+# router bgp 10 vrf VrfCheck1
+# log-neighbor-changes
+# !
+# router bgp 11 vrf VrfCheck2
+# log-neighbor-changes
+# bestpath compare-routerid
+# !
+# router bgp 4
+# log-neighbor-changes
+# bestpath compare-routerid
+# !
# Using deleted
@@ -234,24 +245,26 @@ EXAMPLES = """
# Before state:
# -------------
#
-#!
-#router bgp 10 vrf VrfCheck1
-# router-id 10.2.2.32
-# log-neighbor-changes
-#!
-#router bgp 11 vrf VrfCheck2
-# log-neighbor-changes
-# bestpath as-path ignore
-# bestpath med missing-as-worst confed
-# bestpath compare-routerid
-#!
-#router bgp 4
-# router-id 10.2.2.4
-# bestpath as-path ignore
-# bestpath as-path confed
-# bestpath med missing-as-worst confed
-# bestpath compare-routerid
-#!
+# !
+# router bgp 10 vrf VrfCheck1
+# router-id 10.2.2.32
+# route-map delay-timer 20
+# log-neighbor-changes
+# !
+# router bgp 11 vrf VrfCheck2
+# log-neighbor-changes
+# bestpath as-path ignore
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# !
+# router bgp 4
+# router-id 10.2.2.4
+# route-map delay-timer 10
+# bestpath as-path ignore
+# bestpath as-path confed
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# !
- name: Deletes all the bgp global configurations
dellemc.enterprise_sonic.sonic_bgp:
@@ -261,8 +274,8 @@ EXAMPLES = """
# After state:
# ------------
#
-#!
-#!
+# !
+# !
# Using merged
@@ -270,16 +283,17 @@ EXAMPLES = """
# Before state:
# -------------
#
-#!
-#router bgp 4
-# router-id 10.1.1.4
-#!
+# !
+# router bgp 4
+# router-id 10.1.1.4
+# !
#
- name: Merges provided configuration with device configuration
dellemc.enterprise_sonic.sonic_bgp:
config:
- bgp_as: 4
router_id: 10.2.2.4
+ rt_delay: 10
log_neighbor_changes: False
timers:
holdtime: 20
@@ -301,6 +315,7 @@ EXAMPLES = """
med_val: 7878
- bgp_as: 10
router_id: 10.2.2.32
+ rt_delay: 20
log_neighbor_changes: True
vrf_name: 'VrfCheck1'
- bgp_as: 11
@@ -320,28 +335,172 @@ EXAMPLES = """
# After state:
# ------------
#
+# !
+# router bgp 10 vrf VrfCheck1
+# router-id 10.2.2.32
+# route-map delay-timer 20
+# log-neighbor-changes
+# !
+# router bgp 11 vrf VrfCheck2
+# log-neighbor-changes
+# bestpath as-path ignore
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# !
+# router bgp 4
+# router-id 10.2.2.4
+# route-map delay-timer 10
+# bestpath as-path ignore
+# bestpath as-path confed
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# always-compare-med
+# max-med on-startup 667 7878
+# timers 20 30
+#
+# !
+
+
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#!
+#router bgp 10 vrf VrfCheck1
+# router-id 10.2.2.32
+# log-neighbor-changes
+# timers 60 180
+#!
+#router bgp 4
+# router-id 10.2.2.4
+# max-med on-startup 667 7878
+# bestpath as-path ignore
+# bestpath as-path confed
+# bestpath med missing-as-worst confed
+# bestpath compare-routerid
+# timers 20 30
+#!
+#
+
+- name: Replace device configuration of specified BGP AS with provided
+ dellemc.enterprise_sonic.sonic_bgp:
+ config:
+ - bgp_as: 4
+ router_id: 10.2.2.44
+ log_neighbor_changes: True
+ bestpath:
+ as_path:
+ confed: True
+ compare_routerid: True
+ - bgp_as: 11
+ vrf_name: 'VrfCheck2'
+ router_id: 10.2.2.33
+ log_neighbor_changes: True
+ bestpath:
+ as_path:
+ confed: True
+ ignore: True
+ compare_routerid: True
+ med:
+ confed: True
+ missing_as_worst: True
+ state: replaced
+
+#
+# After state:
+# ------------
+#
#!
#router bgp 10 vrf VrfCheck1
# router-id 10.2.2.32
# log-neighbor-changes
+# timers 60 180
#!
#router bgp 11 vrf VrfCheck2
+# router-id 10.2.2.33
# log-neighbor-changes
# bestpath as-path ignore
+# bestpath as-path confed
# bestpath med missing-as-worst confed
# bestpath compare-routerid
+# timers 60 180
+#!
+#router bgp 4
+# router-id 10.2.2.44
+# log-neighbor-changes
+# bestpath as-path confed
+# bestpath compare-routerid
+# timers 60 180
+#!
+
+
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#!
+#router bgp 10 vrf VrfCheck1
+# router-id 10.2.2.32
+# log-neighbor-changes
+# timers 60 180
#!
#router bgp 4
# router-id 10.2.2.4
+# max-med on-startup 667 7878
# bestpath as-path ignore
# bestpath as-path confed
# bestpath med missing-as-worst confed
# bestpath compare-routerid
-# always-compare-med
-# max-med on-startup 667 7878
# timers 20 30
+#!
+#
+
+- name: Override device configuration of global BGP with provided configuration
+ dellemc.enterprise_sonic.sonic_bgp:
+ config:
+ - bgp_as: 4
+ router_id: 10.2.2.44
+ log_neighbor_changes: True
+ bestpath:
+ as_path:
+ confed: True
+ compare_routerid: True
+ - bgp_as: 11
+ vrf_name: 'VrfCheck2'
+ router_id: 10.2.2.33
+ log_neighbor_changes: True
+ bestpath:
+ as_path:
+ confed: True
+ ignore: True
+ compare_routerid: True
+ timers:
+ holdtime: 90
+ keepalive_interval: 30
+ state: overridden
+
+#
+# After state:
+# ------------
#
#!
+#router bgp 11 vrf VrfCheck2
+# router-id 10.2.2.33
+# log-neighbor-changes
+# bestpath as-path ignore
+# bestpath as-path confed
+# bestpath compare-routerid
+# timers 30 90
+#!
+#router bgp 4
+# router-id 10.2.2.44
+# log-neighbor-changes
+# bestpath as-path confed
+# bestpath compare-routerid
+# timers 60 180
+#!
"""
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py
index 6d55355c9..af00093c6 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_af.py
@@ -172,13 +172,62 @@ options:
description:
- Specifies the count of the ebgp multipaths count.
type: int
+ rd:
+ description:
+ - Specifies the route distinguisher to be used by the VRF instance.
+ type: str
+ rt_in:
+ description:
+ - Route-targets to be imported.
+ type: list
+ elements: str
+ rt_out:
+ description:
+ - Route-targets to be exported.
+ type: list
+ elements: str
+ vnis:
+ description:
+ - VNI configuration for the EVPN.
+ type: list
+ elements: dict
+ suboptions:
+ vni_number:
+ description:
+ - Specifies the VNI number.
+ type: int
+ required: True
+ advertise_default_gw:
+ description:
+ - Specifies the advertise default gateway flag.
+ type: bool
+ advertise_svi_ip:
+ description:
+ - Enables advertisement of SVI MACIP routes.
+ type: bool
+ rd:
+ description:
+ - Specifies the route distinguisher to be used by the VRF instance.
+ type: str
+ rt_in:
+ description:
+ - Route-targets to be imported.
+ type: list
+ elements: str
+ rt_out:
+ description:
+ - Route-targets to be exported.
+ type: list
+ elements: str
state:
description:
- Specifies the operation to be performed on the BGP_AF process configured on the device.
- In case of merged, the input configuration is merged with the existing BGP_AF configuration on the device.
- In case of deleted, the existing BGP_AF configuration is removed from the device.
+ - In case of replaced, the existing BGP_AF configuration of the specified BGP AS will be replaced with the provided configuration.
+ - In case of overridden, the existing BGP_AF configuration will be overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'deleted', 'overridden', 'replaced']
type: str
"""
EXAMPLES = """
@@ -208,8 +257,17 @@ EXAMPLES = """
# address-family l2vpn evpn
# advertise-svi-ip
# advertise ipv6 unicast route-map aa
+# rd 3.3.3.3:33
+# route-target import 22:22
+# route-target export 33:33
# advertise-pip ip 1.1.1.1 peer-ip 2.2.2.2
-#!
+# !
+# vni 1
+# advertise-default-gw
+# advertise-svi-ip
+# rd 5.5.5.5:55
+# route-target import 88:88
+# route-target export 77:77
#
- name: Delete BGP Address family configuration from the device
dellemc.enterprise_sonic.sonic_bgp_af:
@@ -228,6 +286,13 @@ EXAMPLES = """
route_advertise_list:
- advertise_afi: ipv6
route_map: aa
+ rd: "3.3.3.3:33"
+ rt_in:
+ - "22:22"
+ rt_out:
+ - "33:33"
+ vnis:
+ - vni_number: 1
- afi: ipv4
safi: unicast
- afi: ipv6
@@ -320,6 +385,20 @@ EXAMPLES = """
route_advertise_list:
- advertise_afi: ipv4
route_map: bb
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "12:12"
+ rt_out:
+ - "13:13"
+ vnis:
+ - vni_number: 1
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "5.5.5.5:55"
+ rt_in:
+ - "88:88"
+ rt_out:
+ - "77:77"
- afi: ipv4
safi: unicast
network:
@@ -366,8 +445,300 @@ EXAMPLES = """
# address-family l2vpn evpn
# advertise-svi-ip
# advertise ipv4 unicast route-map bb
+# rd 1.1.1.1:11
+# route-target import 12:12
+# route-target import 13:13
# advertise-pip ip 3.3.3.3 peer-ip 4.4.4.4
+# !
+# vni 1
+# advertise-default-gw
+# advertise-svi-ip
+# rd 5.5.5.5:55
+# route-target import 88:88
+# route-target export 77:77
+#
+
+
+# Using replaced
#
+# Before state:
+# -------------
+#
+#do show running-configuration bgp
+#!
+#router bgp 52 vrf VrfReg1
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 3.3.3.3/16
+# dampening
+#!
+#router bgp 51
+# router-id 111.2.2.41
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# redistribute connected route-map bb metric 21
+# redistribute ospf route-map bb metric 27
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 2.2.2.2/16
+# network 192.168.10.1/32
+# dampening
+# !
+# address-family ipv6 unicast
+# redistribute static route-map aa metric 26
+# maximum-paths 4
+# maximum-paths ibgp 5
+# !
+# address-family l2vpn evpn
+# advertise-all-vni
+# advertise-svi-ip
+# advertise ipv4 unicast route-map bb
+# rd 1.1.1.1:11
+# route-target import 12:12
+# route-target export 13:13
+# dup-addr-detection
+# advertise-pip ip 3.3.3.3 peer-ip 4.4.4.4
+# !
+# vni 1
+# advertise-default-gw
+# advertise-svi-ip
+# rd 5.5.5.5:55
+# route-target import 88:88
+# route-target export 77:77
+
+- name: Replace device configuration of address families of specified BGP AS with provided configuration.
+ dellemc.enterprise_sonic.sonic_bgp_af:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ advertise_pip: True
+ advertise_pip_ip: "3.3.3.3"
+ advertise_pip_peer_ip: "4.4.4.4"
+ advertise_svi_ip: True
+ advertise_all_vni: False
+ advertise_default_gw: False
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: bb
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "22:22"
+ rt_out:
+ - "13:13"
+ vnis:
+ - vni_number: 5
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "10.10.10.10:55"
+ rt_in:
+ - "88:88"
+ rt_out:
+ - "77:77"
+ - afi: ipv4
+ safi: unicast
+ network:
+ - 2.2.2.2/16
+ - 192.168.10.1/32
+ dampening: True
+ redistribute:
+ - protocol: connected
+ - protocol: ospf
+ metric: 30
+ state: replaced
+
+# After state:
+# ------------
+#
+#do show running-configuration bgp
+#!
+#router bgp 52 vrf VrfReg1
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 3.3.3.3/16
+# dampening
+#!
+#router bgp 51
+# router-id 111.2.2.41
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# redistribute connected
+# redistribute ospf metric 30
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 2.2.2.2/16
+# network 192.168.10.1/32
+# dampening
+# !
+# address-family l2vpn evpn
+# advertise-all-vni
+# advertise-svi-ip
+# advertise ipv4 unicast route-map bb
+# rd 1.1.1.1:11
+# route-target import 22:22
+# route-target export 13:13
+# dup-addr-detection
+# advertise-pip ip 3.3.3.3 peer-ip 4.4.4.4
+# !
+# vni 5
+# advertise-default-gw
+# advertise-svi-ip
+# rd 10.10.10.10:55
+# route-target import 88:88
+# route-target export 77:77
+
+
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#do show running-configuration bgp
+#!
+#router bgp 52 vrf VrfReg1
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 3.3.3.3/16
+# dampening
+#!
+#router bgp 51
+# router-id 111.2.2.41
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# redistribute connected route-map bb metric 21
+# redistribute ospf route-map bb metric 27
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 2.2.2.2/16
+# network 192.168.10.1/32
+# dampening
+# !
+# address-family ipv6 unicast
+# redistribute static route-map aa metric 26
+# maximum-paths 4
+# maximum-paths ibgp 5
+# !
+# address-family l2vpn evpn
+# advertise-all-vni
+# advertise-svi-ip
+# advertise ipv4 unicast route-map bb
+# rd 1.1.1.1:11
+# route-target import 12:12
+# route-target export 13:13
+# dup-addr-detection
+# advertise-pip ip 3.3.3.3 peer-ip 4.4.4.4
+# !
+# vni 1
+# advertise-default-gw
+# advertise-svi-ip
+# rd 5.5.5.5:55
+# route-target import 88:88
+# route-target export 77:77
+
+- name: Override device configuration of BGP address families with provided configuration.
+ dellemc.enterprise_sonic.sonic_bgp_af:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ advertise_pip: True
+ advertise_pip_ip: "3.3.3.3"
+ advertise_pip_peer_ip: "4.4.4.4"
+ advertise_svi_ip: True
+ advertise_all_vni: False
+ advertise_default_gw: False
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: bb
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "22:22"
+ rt_out:
+ - "13:13"
+ vnis:
+ - vni_number: 5
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "10.10.10.10:55"
+ rt_in:
+ - "88:88"
+ rt_out:
+ - "77:77"
+ - afi: ipv4
+ safi: unicast
+ network:
+ - 2.2.2.2/16
+ - 192.168.10.1/32
+ dampening: True
+ redistribute:
+ - protocol: connected
+ - protocol: ospf
+ metric: 30
+ state: overridden
+
+# After state:
+# ------------
+#
+#do show running-configuration bgp
+#!
+#router bgp 52 vrf VrfReg1
+# log-neighbor-changes
+# timers 60 180
+#!
+#router bgp 51
+# router-id 111.2.2.41
+# log-neighbor-changes
+# timers 60 180
+# !
+# address-family ipv4 unicast
+# redistribute connected
+# redistribute ospf metric 30
+# maximum-paths 1
+# maximum-paths ibgp 1
+# network 2.2.2.2/16
+# network 192.168.10.1/32
+# dampening
+# !
+# address-family l2vpn evpn
+# advertise-all-vni
+# advertise-svi-ip
+# advertise ipv4 unicast route-map bb
+# rd 1.1.1.1:11
+# route-target import 22:22
+# route-target export 13:13
+# dup-addr-detection
+# advertise-pip ip 3.3.3.3 peer-ip 4.4.4.4
+# !
+# vni 5
+# advertise-default-gw
+# advertise-svi-ip
+# rd 10.10.10.10:55
+# route-target import 88:88
+# route-target export 77:77
+
+
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py
index bd2ff74a1..9bc3f43f5 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_as_paths.py
@@ -62,7 +62,8 @@ options:
required: False
type: bool
description:
- - Permits or denies this as path.
+ - Permits or denies this as-path.
+ - Default value while adding a new as-path-list is C(False).
state:
description:
- The state of the configuration after module completion.
@@ -70,6 +71,8 @@ options:
choices:
- merged
- deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -83,21 +86,21 @@ EXAMPLES = """
# action: permit
# members: 808.*,909.*
-- name: Delete BGP as path list
- dellemc.enterprise_sonic.sonic_bgp_as_paths:
- config:
- - name: test
- members:
- - 909.*
- permit: true
- state: deleted
+ - name: Delete BGP as path list
+ dellemc.enterprise_sonic.sonic_bgp_as_paths:
+ config:
+ - name: test
+ members:
+ - 909.*
+ permit: true
+ state: deleted
# After state:
# ------------
#
# show bgp as-path-access-list
# AS path list test:
-# action:
+# action: permit
# members: 808.*
@@ -114,12 +117,12 @@ EXAMPLES = """
# action: deny
# members: 608.*,709.*
-- name: Deletes BGP as-path list
- dellemc.enterprise_sonic.sonic_bgp_as_paths:
- config:
- - name: test
- members:
- state: deleted
+ - name: Deletes BGP as-path list
+ dellemc.enterprise_sonic.sonic_bgp_as_paths:
+ config:
+ - name: test
+ members:
+ state: deleted
# After state:
# ------------
@@ -140,10 +143,10 @@ EXAMPLES = """
# action: permit
# members: 808.*,909.*
-- name: Deletes BGP as-path list
- dellemc.enterprise_sonic.sonic_bgp_as_paths:
- config:
- state: deleted
+ - name: Deletes BGP as-path list
+ dellemc.enterprise_sonic.sonic_bgp_as_paths:
+ config:
+ state: deleted
# After state:
# ------------
@@ -158,16 +161,16 @@ EXAMPLES = """
# -------------
#
# show bgp as-path-access-list
-# AS path list test:
+# (No bgp as-path-access-list configuration present)
-- name: Adds 909.* to test as-path list
- dellemc.enterprise_sonic.sonic_bgp_as_paths:
- config:
- - name: test
- members:
- - 909.*
- permit: true
- state: merged
+ - name: Create a BGP as-path list
+ dellemc.enterprise_sonic.sonic_bgp_as_paths:
+ config:
+ - name: test
+ members:
+ - 909.*
+ permit: true
+ state: merged
# After state:
# ------------
@@ -178,6 +181,78 @@ EXAMPLES = """
# members: 909.*
+# Using replaced
+
+# Before state:
+# -------------
+#
+# show bgp as-path-access-list
+# AS path list test:
+# action: permit
+# members: 800.*,808.*
+# AS path list test1:
+# action: deny
+# members: 500.*
+
+ - name: Replace device configuration of specified BGP as-path lists with provided configuration
+ dellemc.enterprise_sonic.sonic_bgp_as_paths:
+ config:
+ - name: test
+ members:
+ - 900.*
+ - 901.*
+ permit: true
+ - name: test1
+ - name: test2
+ members:
+ - 100.*
+ permit: true
+ state: replaced
+
+# After state:
+# ------------
+#
+# show bgp as-path-access-list
+# AS path list test:
+# action: permit
+# members: 900.*,901.*
+# AS path list test2:
+# action: permit
+# members: 100.*
+
+
+# Using overridden
+
+# Before state:
+# -------------
+#
+# show bgp as-path-access-list
+# AS path list test:
+# action: permit
+# members: 800.*,808.*
+# AS path list test1:
+# action: deny
+# members: 500.*
+
+ - name: Override device configuration of all BGP as-path lists with provided configuration
+ dellemc.enterprise_sonic.sonic_bgp_as_paths:
+ config:
+ - name: test
+ members:
+ - 900.*
+ - 901.*
+ permit: true
+ state: overridden
+
+# After state:
+# ------------
+#
+# show bgp as-path-access-list
+# AS path list test:
+# action: permit
+# members: 900.*,901.*
+
+
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py
index 08c8dcc7f..dd1c2b083 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_communities.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -52,7 +52,7 @@ options:
required: True
type: str
description:
- - Name of the BGP communitylist.
+ - Name of the BGP community-list.
type:
type: str
description:
@@ -67,6 +67,7 @@ options:
type: bool
description:
- Permits or denies this community.
+ - Default value while adding a new community-list is C(False).
aann:
required: False
type: str
@@ -120,6 +121,8 @@ options:
choices:
- merged
- deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -130,18 +133,21 @@ EXAMPLES = """
#
# show bgp community-list
# Standard community list test: match: ANY
-# 101
-# 201
-# Standard community list test1: match: ANY
-# 301
+# permit local-as
+# permit no-peer
+# Expanded community list test1: match: ANY
+# deny 101
+# deny 302
-- name: Deletes BGP community member
+- name: Delete a BGP community-list member
dellemc.enterprise_sonic.sonic_bgp_communities:
config:
- - name: test
+ - name: test1
+ type: expanded
+ permit: false
members:
regex:
- - 201
+ - 302
state: deleted
# After state:
@@ -149,9 +155,10 @@ EXAMPLES = """
#
# show bgp community-list
# Standard community list test: match: ANY
-# 101
-# Standard community list test1: match: ANY
-# 301
+# permit local-as
+# permit no-peer
+# Expanded community list test1: match: ANY
+# deny 101
# Using deleted
@@ -161,15 +168,17 @@ EXAMPLES = """
#
# show bgp community-list
# Standard community list test: match: ANY
-# 101
+# permit local-as
+# permit no-peer
# Expanded community list test1: match: ANY
-# 201
+# deny 101
+# deny 302
-- name: Deletes a single BGP community
+- name: Delete a single BGP community-list
dellemc.enterprise_sonic.sonic_bgp_communities:
config:
- name: test
- members:
+ type: standard
state: deleted
# After state:
@@ -177,7 +186,8 @@ EXAMPLES = """
#
# show bgp community-list
# Expanded community list test1: match: ANY
-# 201
+# deny 101
+# deny 302
# Using deleted
@@ -187,11 +197,13 @@ EXAMPLES = """
#
# show bgp community-list
# Standard community list test: match: ANY
-# 101
+# permit local-as
+# permit no-peer
# Expanded community list test1: match: ANY
-# 201
+# deny 101
+# deny 302
-- name: Delete All BGP communities
+- name: Delete All BGP community-lists
dellemc.enterprise_sonic.sonic_bgp_communities:
config:
state: deleted
@@ -210,14 +222,17 @@ EXAMPLES = """
#
# show bgp community-list
# Standard community list test: match: ANY
-# 101
+# permit local-as
+# permit no-peer
# Expanded community list test1: match: ANY
-# 201
+# deny 101
+# deny 302
-- name: Deletes all members in a single BGP community
+- name: Delete all members in a single BGP community-list
dellemc.enterprise_sonic.sonic_bgp_communities:
config:
- - name: test
+ - name: test1
+ type: expanded
members:
regex:
state: deleted
@@ -226,9 +241,9 @@ EXAMPLES = """
# ------------
#
# show bgp community-list
-# Expanded community list test: match: ANY
-# Expanded community list test1: match: ANY
-# 201
+# Standard community list test: match: ANY
+# permit local-as
+# permit no-peer
# Using merged
@@ -236,23 +251,105 @@ EXAMPLES = """
# Before state:
# -------------
#
-# show bgp as-path-access-list
-# AS path list test:
+# show bgp community-list
+# Expanded community list test1: match: ANY
+# permit 101
+# permit 302
-- name: Adds 909.* to test as-path list
- dellemc.enterprise_sonic.sonic_bgp_as_paths:
+- name: Add a new BGP community-list
+ dellemc.enterprise_sonic.sonic_bgp_communities:
config:
- - name: test
+ - name: test2
+ type: expanded
+ permit: true
members:
- - 909.*
+ regex:
+ - 909
state: merged
# After state:
# ------------
#
-# show bgp as-path-access-list
-# AS path list test:
-# members: 909.*
+# show bgp community-list
+# Expanded community list test1: match: ANY
+# permit 101
+# permit 302
+# Expanded community list test2: match: ANY
+# permit 909
+
+
+# Using replaced
+
+# Before state:
+# -------------
+#
+# show bgp community-list
+# Standard community list test: match: ANY
+# permit local-as
+# permit no-peer
+# Expanded community list test1: match: ANY
+# deny 101
+# deny 302
+
+- name: Replace a single BGP community-list
+ dellemc.enterprise_sonic.sonic_bgp_communities:
+ config:
+ - name: test
+ type: expanded
+ members:
+ regex:
+ - 301
+ - name: test3
+ type: standard
+ no_advertise: true
+ no_peer: true
+ permit: false
+ match: ALL
+ state: replaced
+
+# After state:
+# ------------
+#
+# show bgp community-list
+# Expanded community list test: match: ANY
+# deny 301
+# Expanded community list test1: match: ANY
+# deny 101
+# deny 302
+# Standard community list test3: match: ALL
+# deny no-advertise
+# deny no-peer
+
+
+# Using overridden
+
+# Before state:
+# -------------
+#
+# show bgp community-list
+# Standard community list test: match: ANY
+# permit local-as
+# permit no-peer
+# Expanded community list test1: match: ANY
+# deny 101
+# deny 302
+
+- name: Override all BGP community-lists
+ dellemc.enterprise_sonic.sonic_bgp_communities:
+ config:
+ - name: test3
+ type: expanded
+ members:
+ regex:
+ - 301
+ state: overridden
+
+# After state:
+# ------------
+#
+# show bgp community-list
+# Expanded community list test3: match: ANY
+# deny 301
"""
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py
index c2af0c488..49a30c9f9 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_ext_communities.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -66,6 +66,7 @@ options:
type: bool
description:
- Permits or denies this community.
+ - Default value while adding a new ext-community-list is C(False).
members:
required: False
type: dict
@@ -106,6 +107,8 @@ options:
choices:
- merged
- deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -116,15 +119,16 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# rt:101:101
-# rt:201:201
+# permit rt:101:101
+# permit rt:201:201
- name: Deletes a BGP ext community member
dellemc.enterprise_sonic.sonic_bgp_ext_communities:
config:
- name: test
+ type: standard
members:
- regex:
+ route_target:
- 201:201
state: deleted
@@ -133,7 +137,7 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# rt:101:101
+# permit rt:101:101
#
@@ -144,9 +148,10 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# 101
-# Expanded extended community list test1: match: ANY
-# 201
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
- name: Deletes a single BGP extended community
dellemc.enterprise_sonic.sonic_bgp_ext_communities:
@@ -160,7 +165,8 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# 101
+# permit rt:101:101
+# permit rt:201:201
#
@@ -171,9 +177,10 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# 101
-# Expanded extended community list test1: match: ANY
-# 201
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
- name: Deletes all BGP extended communities
dellemc.enterprise_sonic.sonic_bgp_ext_communities:
@@ -194,9 +201,10 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# 101
-# Expanded extended community list test1: match: ANY
-# 201
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
- name: Deletes all members in a single BGP extended community
dellemc.enterprise_sonic.sonic_bgp_ext_communities:
@@ -211,8 +219,8 @@ EXAMPLES = """
#
# show bgp ext-community-list
# Standard extended community list test: match: ANY
-# 101
-# Expanded extended community list test1: match: ANY
+# permit rt:101:101
+# permit rt:201:201
#
@@ -221,23 +229,108 @@ EXAMPLES = """
# Before state:
# -------------
#
-# show bgp as-path-access-list
-# AS path list test:
+# show bgp ext-community-list
+# Standard extended community list test: match: ANY
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
-- name: Adds 909.* to test as-path list
- dellemc.enterprise_sonic.sonic_bgp_as_paths:
+- name: Add a new BGP ext community-list
+ dellemc.enterprise_sonic.sonic_bgp_ext_communities:
config:
- - name: test
+ - name: test3
+ type: standard
+ match: any
+ permit: true
members:
- - 909.*
+ route_origin:
+ - "301:301"
+ - "401:401"
state: merged
# After state:
# ------------
#
-# show bgp as-path-access-list
-# AS path list test:
-# members: 909.*
+# show bgp ext-community-list
+# Standard extended community list test: match: ANY
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
+# Standard extended community list test3: match: ANY
+# permit soo:301:301
+# permit soo:401:401
+
+
+
+# Using replaced
+
+# Before state:
+# -------------
+#
+# show bgp ext-community-list
+# Standard extended community list test: match: ANY
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
+
+- name: Replace a single BGP extended community-list
+ dellemc.enterprise_sonic.sonic_bgp_ext_communities:
+ config:
+ - name: test
+ type: expanded
+ permit: true
+ match: all
+ members:
+ regex:
+ - 301:302
+ state: replaced
+
+# After state:
+# ------------
+#
+# show bgp ext-community-list
+# Expanded extended community list test: match: ALL
+# permit 301:302
+# Expanded extended community list test1: match: ALL
+# deny 101:102
+#
+
+
+# Using overridden
+
+# Before state:
+# -------------
+#
+# show bgp ext-community-list
+# Standard extended community list test: match: ANY
+# permit rt:101:101
+# permit rt:201:201
+# Expanded extended community list test1: match: ALL
+# deny 101:102
+
+
+- name: Override all BGP extended community-lists
+ dellemc.enterprise_sonic.sonic_bgp_ext_communities:
+ config:
+ - name: test3
+ type: expanded
+ permit: true
+ match: all
+ members:
+ regex:
+ - 301:302
+ state: overridden
+
+# After state:
+# ------------
+#
+# show bgp ext-community-list
+# Expanded extended community list test3: match: ALL
+# permit 301:302
+#
"""
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py
index 19aeb6fc9..47a414b0a 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors.py
@@ -296,7 +296,7 @@ options:
default: False
prefix_limit:
description:
- - Specifies prefix limit attributes.
+ - Specifies prefix limit attributes for ipv4-unicast and ipv6-unicast.
type: dict
suboptions:
max_prefixes:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py
index 10400cfe2..d3b23dfb2 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_bgp_neighbors_af.py
@@ -127,7 +127,7 @@ options:
default: False
prefix_limit:
description:
- - Specifies prefix limit attributes.
+ - Specifies prefix limit attributes for ipv4-unicast and ipv6-unicast.
type: dict
suboptions:
max_prefixes:
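#
# Editor's illustrative sketch (not part of this patch): a minimal task showing
# where the prefix_limit/max_prefixes options sit when configuring a neighbor's
# ipv4-unicast address family with sonic_bgp_neighbors_af. The AS number,
# neighbor address, and limit value are assumptions, and the option nesting is a
# sketch of the module's documented config layout rather than a verified playbook.
#
# - name: Limit the prefixes accepted from a neighbor on ipv4 unicast
#   dellemc.enterprise_sonic.sonic_bgp_neighbors_af:
#     config:
#       - bgp_as: 51
#         neighbors:
#           - neighbor: 192.168.1.2
#             address_family:
#               - afi: ipv4
#                 safi: unicast
#                 activate: true
#                 prefix_limit:
#                   max_prefixes: 100
#     state: merged
#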
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py
index dd054419f..96c0ee1ba 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_config.py
@@ -318,7 +318,7 @@ def main():
if module.params['save']:
result['changed'] = True
if not module.check_mode:
- cmd = {r'command': ' write memory'}
+ cmd = {r'command': 'write memory'}
run_commands(module, [cmd])
result['saved'] = True
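#
# Editor's illustrative sketch (not part of this patch): the change above only
# removes a stray leading space from the 'write memory' command issued when the
# module's save option is set. A minimal task exercising that code path might
# look like the following; the 'lines' content and hostname value are assumptions.
#
# - name: Apply a configuration line and persist it to startup-config
#   dellemc.enterprise_sonic.sonic_config:
#     lines:
#       - hostname leaf-01
#     save: yes
#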
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_copp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_copp.py
new file mode 100644
index 000000000..e4e7d358a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_copp.py
@@ -0,0 +1,295 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_copp
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = """
+---
+module: sonic_copp
+version_added: "2.1.0"
+short_description: Manage CoPP configuration on SONiC
+description:
+ - This module provides configuration management of CoPP for devices running SONiC
+author: "Shade Talabi (@stalabi1)"
+options:
+ config:
+ description:
+ - Specifies CoPP configurations
+ type: dict
+ suboptions:
+ copp_groups:
+ description:
+ - List of CoPP entries that comprise a CoPP group
+ type: list
+ elements: dict
+ suboptions:
+ copp_name:
+ description:
+ - Name of CoPP classifier
+ type: str
+ required: True
+ trap_priority:
+ description:
+ - CoPP trap priority
+ type: int
+ trap_action:
+ description:
+ - CoPP trap action
+ type: str
+ queue:
+ description:
+ - CoPP queue ID
+ type: int
+ cir:
+ description:
+ - Committed information rate in bps or pps (packets per second)
+ type: str
+ cbs:
+ description:
+ - Committed bucket size in packets or bytes
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion
+ type: str
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
+ default: merged
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before state:
+# -------------
+#
+# sonic# show copp actions
+# (No "copp actions" configuration present)
+
+ - name: Merge CoPP groups configuration
+ dellemc.enterprise_sonic.sonic_copp:
+ config:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 1
+ trap_action: 'DROP'
+ queue: 1
+ cir: '45'
+ cbs: '45'
+ - copp_name: 'copp-2'
+ trap_priority: 2
+ trap_action: 'FORWARD'
+ queue: 2
+ cir: '90'
+ cbs: '90'
+ state: merged
+
+# After state:
+# ------------
+#
+# sonic# show copp actions
+# CoPP action group copp-1
+# trap-action drop
+# trap-priority 1
+# trap-queue 1
+# police cir 45 cbs 45
+# CoPP action group copp-2
+# trap-action forward
+# trap-priority 2
+# trap-queue 2
+# police cir 90 cbs 90
+#
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# sonic# show copp actions
+# CoPP action group copp-1
+# trap-action drop
+# trap-priority 1
+# trap-queue 1
+# police cir 45 cbs 45
+
+ - name: Replace CoPP groups configuration
+ dellemc.enterprise_sonic.sonic_copp:
+ config:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 2
+ trap_action: 'FORWARD'
+ queue: 2
+ - copp_name: 'copp-3'
+ trap_priority: 3
+ trap_action: 'DROP'
+ queue: 3
+ cir: '1000'
+ cbs: '1000'
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show copp actions
+# CoPP action group copp-1
+# trap-action forward
+# trap-priority 2
+# trap-queue 2
+# CoPP action group copp-3
+# trap-action drop
+# trap-priority 3
+# trap-queue 3
+# police cir 1000 cbs 1000
+#
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# sonic# show copp actions
+# CoPP action group copp-1
+# trap-action forward
+# trap-priority 2
+# trap-queue 2
+# CoPP action group copp-3
+# trap-action drop
+# trap-priority 3
+# trap-queue 3
+# police cir 1000 cbs 1000
+
+ - name: Override CoPP groups configuration
+ dellemc.enterprise_sonic.sonic_copp:
+ config:
+ copp_groups:
+ - copp_name: 'copp-4'
+ trap_priority: 4
+ trap_action: 'FORWARD'
+ queue: 4
+ cir: 200
+ cbs: 200
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show copp actions
+# CoPP action group copp-4
+# trap-action forward
+# trap-priority 4
+# trap-queue 4
+# police cir 200 cbs 200
+#
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+# sonic# show copp actions
+# CoPP action group copp-1
+# trap-action drop
+# trap-priority 1
+# trap-queue 1
+# police cir 45 cbs 45
+# CoPP action group copp-2
+# trap-action forward
+# trap-priority 2
+# trap-queue 2
+# police cir 90 cbs 90
+
+ - name: Delete CoPP groups configuration
+ dellemc.enterprise_sonic.sonic_copp:
+ config:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_action: 'DROP'
+ cir: '45'
+ cbs: '45'
+ - copp_name: 'copp-2'
+ state: deleted
+
+# After state:
+# ------------
+#
+# sonic# show copp actions
+# CoPP action group copp-1
+# trap-action drop
+# police cir 45 cbs 45
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.copp.copp import CoppArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.copp.copp import Copp
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=CoppArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Copp(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_relay.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_relay.py
new file mode 100644
index 000000000..321a673eb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_relay.py
@@ -0,0 +1,781 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_dhcp_relay
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_dhcp_relay
+version_added: '2.1.0'
+short_description: Manage DHCP and DHCPv6 relay configurations on SONiC
+description:
+ - This module provides configuration management of DHCP and DHCPv6 relay
+ parameters on Layer 3 interfaces of devices running SONiC.
+ - The Layer 3 interface and the VRF must be created on the device before this module is used.
+author: 'Arun Saravanan Balachandran (@ArunSaravananBalachandran)'
+options:
+ config:
+ description:
+ - Specifies the DHCP and DHCPv6 relay configurations.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Full name of the Layer 3 interface, e.g. Eth1/1.
+ type: str
+ required: true
+ ipv4:
+ description:
+ - DHCP relay configurations to be set for the interface mentioned in name option.
+ type: dict
+ suboptions:
+ server_addresses:
+ description:
+ - List of DHCP server IPv4 addresses.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - IPv4 address of the DHCP server.
+ type: str
+ vrf_name:
+ description:
+ - Specifies name of the VRF in which the DHCP server resides.
+ - This option is not used with state I(deleted).
+ type: str
+ source_interface:
+ description:
+ - Specifies the DHCP relay source interface.
+ type: str
+ max_hop_count:
+ description:
+ - Specifies the maximum hop count for DHCP relay packets.
+ - The range is from 1 to 16.
+ type: int
+ link_select:
+ description:
+ - Enable link selection suboption.
+ type: bool
+ vrf_select:
+ description:
+ - Enable VRF selection suboption.
+ type: bool
+ circuit_id:
+ description:
+ - Specifies the DHCP relay circuit-id format.
+ - C(%h:%p) - Hostname followed by the interface name, e.g. sonic:Vlan100.
+ - C(%i) - Name of the physical interface, e.g. Eth1/2.
+ - C(%p) - Name of the interface, e.g. Vlan100.
+ type: str
+ choices:
+ - '%h:%p'
+ - '%i'
+ - '%p'
+ policy_action:
+ description:
+ - Specifies the policy for handling of DHCP relay options.
+ type: str
+ choices:
+ - append
+ - discard
+ - replace
+ ipv6:
+ description:
+ - DHCPv6 relay configurations to be set for the interface mentioned in name option.
+ type: dict
+ suboptions:
+ server_addresses:
+ description:
+ - List of DHCPv6 server IPv6 addresses.
+ type: list
+ elements: dict
+ suboptions:
+ address:
+ description:
+ - IPv6 address of the DHCPv6 server.
+ type: str
+ vrf_name:
+ description:
+ - Specifies name of the VRF in which the DHCPv6 server resides.
+ - This option is used only with state I(merged).
+ type: str
+ source_interface:
+ description:
+ - Specifies the DHCPv6 relay source interface.
+ type: str
+ max_hop_count:
+ description:
+ - Specifies the maximum hop count for DHCPv6 relay packets.
+ - The range is from 1 to 16.
+ type: int
+ vrf_select:
+ description:
+ - Enable VRF selection suboption.
+ type: bool
+ state:
+ description:
+ - The state of the configuration after module completion.
+ - C(merged) - Merges provided DHCP and DHCPv6 relay configuration with on-device configuration.
+ - C(deleted) - Deletes on-device DHCP and DHCPv6 relay configuration.
+ - C(replaced) - Replaces on-device DHCP and DHCPv6 relay configuration of the specified interfaces with provided configuration.
+ - C(overridden) - Overrides all on-device DHCP and DHCPv6 relay configurations with the provided configuration.
+ type: str
+ choices:
+ - merged
+ - deleted
+ - replaced
+ - overridden
+ default: merged
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+ - name: Delete DHCP and DHCPv6 relay configurations
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '92.1.1.1'
+ vrf_select: true
+ max_hop_count: 5
+ ipv6:
+ server_addresses:
+ - address: '91::1'
+ - address: '92::1'
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '71.1.1.1'
+ - address: '72.1.1.1'
+ source_interface: 'Vlan100'
+ link_select: true
+ circuit_id: '%h:%p'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 vrf VrfReg1
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 73.1.1.1
+# !
+
+
+# Using deleted
+#
+# NOTE: Support is provided in the dhcp_relay resource module for deletion of all attributes for a
+# given address family (IPv4 or IPv6) by using a "special" YAML sequence specifying a server address list
+# containing a single "blank" IP address under the target address family. The following example shows
+# a task using this syntax for deletion of all DHCP (IPv4) configurations for an interface, but the
+# equivalent syntax is supported for DHCPv6 (IPv6) as well.
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+ - name: Delete all IPv4 DHCP relay configurations for interface Eth1/1
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address:
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+ - name: Delete all DHCP and DHCPv6 relay configurations for interface Eth1/1
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ - name: 'Eth1/1'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ipv6 address 81::1/24
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+ - name: Delete all DHCP and DHCPv6 relay configurations
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ipv6 address 81::1/24
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# !
+
+
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ipv6 address 81::1/24
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1
+# !
+
+ - name: Add DHCP and DHCPv6 relay configurations
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '91.1.1.1'
+ - address: '92.1.1.1'
+ vrf_name: 'VrfReg1'
+ vrf_select: true
+ max_hop_count: 5
+ policy_action: 'append'
+ ipv6:
+ server_addresses:
+ - address: '91::1'
+ - address: '92::1'
+ max_hop_count: 5
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '73.1.1.1'
+ source_interface: 'Vlan100'
+ link_select: true
+ circuit_id: '%h:%p'
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# !
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 61::1/24
+# ipv6 dhcp-relay 71::1 72::1
+# !
+# interface Eth1/3
+# mtu 9100
+# speed 400000
+# fec RS
+# shutdown
+# ip address 41.1.1.1/24
+# ip dhcp-relay 51.1.1.1 52.1.1.1
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 41::1/24
+# ipv6 dhcp-relay 51::1 52::1
+# !
+
+ - name: Replace DHCP and DHCPv6 relay configurations of specified interfaces
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '91.1.1.1'
+ - address: '93.1.1.1'
+ - address: '95.1.1.1'
+ vrf_name: 'VrfReg1'
+ vrf_select: true
+ ipv6:
+ server_addresses:
+ - address: '93::1'
+ - address: '94::1'
+ source_interface: 'Vlan100'
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '73.1.1.1'
+ circuit_id: '%h:%p'
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 93.1.1.1 95.1.1.1 vrf VrfReg1
+# ip dhcp-relay vrf-select
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 93::1 94::1
+# ipv6 dhcp-relay source-interface Vlan100
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 73.1.1.1
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 61::1/24
+# !
+# interface Eth1/3
+# mtu 9100
+# speed 400000
+# fec RS
+# shutdown
+# ip address 41.1.1.1/24
+# ip dhcp-relay 51.1.1.1 52.1.1.1
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 41::1/24
+# ipv6 dhcp-relay 51::1 52::1
+# !
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 92.1.1.1 vrf VrfReg1
+# ip dhcp-relay max-hop-count 5
+# ip dhcp-relay vrf-select
+# ip dhcp-relay policy-action append
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 91::1 92::1
+# ipv6 dhcp-relay max-hop-count 5
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 71.1.1.1 72.1.1.1 73.1.1.1
+# ip dhcp-relay source-interface Vlan100
+# ip dhcp-relay link-select
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 61::1/24
+# ipv6 dhcp-relay 71::1 72::1
+# !
+# interface Eth1/3
+# mtu 9100
+# speed 400000
+# fec RS
+# shutdown
+# ip address 41.1.1.1/24
+# ip dhcp-relay 51.1.1.1 52.1.1.1
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 41::1/24
+# ipv6 dhcp-relay 51::1 52::1
+# !
+
+ - name: Override DHCP and DHCPv6 relay configurations
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '91.1.1.1'
+ - address: '93.1.1.1'
+ - address: '95.1.1.1'
+ vrf_name: 'VrfReg1'
+ vrf_select: true
+ ipv6:
+ server_addresses:
+ - address: '93::1'
+ - address: '94::1'
+ source_interface: 'Vlan100'
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '73.1.1.1'
+ circuit_id: '%h:%p'
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration interface
+# !
+# interface Eth1/1
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 81.1.1.1/24
+# ip dhcp-relay 91.1.1.1 93.1.1.1 95.1.1.1 vrf VrfReg1
+# ip dhcp-relay vrf-select
+# ipv6 address 81::1/24
+# ipv6 dhcp-relay 93::1 94::1
+# ipv6 dhcp-relay source-interface Vlan100
+# !
+# interface Eth1/2
+# mtu 9100
+# speed 400000
+# fec RS
+# no shutdown
+# ip address 61.1.1.1/24
+# ip dhcp-relay 73.1.1.1
+# ip dhcp-relay circuit-id %h:%p
+# ipv6 address 61::1/24
+# !
+# interface Eth1/3
+# mtu 9100
+# speed 400000
+# fec RS
+# shutdown
+# ip address 41.1.1.1/24
+# ipv6 address 41::1/24
+# !
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.dhcp_relay.dhcp_relay import Dhcp_relayArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.dhcp_relay.dhcp_relay import Dhcp_relay
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Dhcp_relayArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Dhcp_relay(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_snooping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_snooping.py
new file mode 100644
index 000000000..948ecb891
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_dhcp_snooping.py
@@ -0,0 +1,499 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_dhcp_snooping
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_dhcp_snooping
+version_added: 2.3.0
+notes:
+ - "Tested against Enterprise SONiC Distribution by Dell Technologies."
+short_description: "Manage DHCP Snooping on SONiC"
+description: "This module provides configuration management of DHCP snooping for devices running SONiC."
+author: Simon Nathans (@simon-nathans), Xiao Han (@Xiao_Han2)
+options:
+ config:
+ description: The DHCP snooping configuration.
+ type: dict
+ suboptions:
+ afis:
+ description:
+ - List of address families to configure.
+ - "There can be up to two items in this list: one where I(afi=ipv4) and one where I(afi=ipv6) to configure DHCPv4 and DHCPv6, respectively."
+ type: list
+ elements: dict
+ suboptions:
+ afi:
+ description:
+ - The address family to configure.
+ type: str
+ choices: ['ipv4', 'ipv6']
+ required: true
+ enabled:
+ description:
+ - Enable DHCP snooping for I(afi).
+ type: bool
+ vlans:
+ description:
+ - Enable DHCP snooping on a list of VLANs for I(afi).
+ - When I(state=deleted), passing an empty list will disable DHCP snooping in all VLANs.
+ type: list
+ elements: str
+ verify_mac:
+ description:
+ - Enable DHCP snooping MAC verification for I(afi).
+ type: bool
+ trusted:
+ description:
+ - Mark interfaces as trusted for DHCP snooping for I(afi).
+ - When I(state=deleted), passing an empty list will delete all trusted interfaces.
+ type: list
+ elements: dict
+ suboptions:
+ intf_name:
+ description:
+ - The interface name.
+ type: str
+ required: true
+ source_bindings:
+ description:
+ - Create a static entry in the DHCP snooping binding database for I(afi).
+ - When I(state=deleted), passing an empty list will delete all source bindings.
+ type: list
+ elements: dict
+ suboptions:
+ mac_addr:
+ description:
+ - The binding's MAC address.
+ type: str
+ required: true
+ ip_addr:
+ description:
+ - The binding's IP address.
+ type: str
+ intf_name:
+ description:
+ - The binding's interface name.
+ - Can be an Ethernet or a PortChannel interface.
+ type: str
+ vlan_id:
+ description:
+ - The binding's VLAN ID.
+ type: int
+ state:
+ description:
+ - The state of the configuration after module completion.
+ default: merged
+ choices: ['merged', 'deleted', 'overridden', 'replaced']
+ type: str
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show ip dhcp snooping
+# !
+# DHCP snooping is Disabled
+# DHCP snooping source MAC verification is Disabled
+# DHCP snooping is enabled on the following VLANs:
+# DHCP snooping trusted interfaces:
+# !
+
+- name: Configure DHCPv4 snooping global settings
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv4'
+ enabled: true
+ verify_mac: true
+ vlans: ['1', '2', '3', '5']
+ trusted:
+ - intf_name: 'Ethernet8'
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show ip dhcp snooping
+# !
+# DHCP snooping is Enabled
+# DHCP snooping source MAC verification is Enabled
+# DHCP snooping is enabled on the following VLANs: 1 2 3 5
+# DHCP snooping trusted interfaces: Ethernet8
+# !
+
+
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show ipv6 dhcp snooping
+# !
+# DHCPv6 snooping is Disabled
+# DHCPv6 snooping source MAC verification is Disabled
+# DHCPv6 snooping is enabled on the following VLANs:
+# DHCPv6 snooping trusted interfaces:
+# !
+
+- name: Configure DHCPv6 snooping global settings
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv6'
+ enabled: true
+ vlans:
+ - '4'
+ trusted:
+ - intf_name: 'Ethernet2'
+ - intf_name: PortChannel1
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show ipv6 dhcp snooping
+# !
+# DHCPv6 snooping is Enabled
+# DHCPv6 snooping source MAC verification is Disabled
+# DHCPv6 snooping is enabled on the following VLANs: 4
+# DHCPv6 snooping trusted interfaces: Ethernet2 PortChannel1
+# !
+
+
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# Total number of Dynamic bindings: 0
+# Total number of Static bindings: 0
+# Total number of Tentative bindings: 0
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# !
+
+- name: Add DHCPv4 snooping bindings
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '1'
+ - mac_addr: 'aa:f7:67:fc:f4:9a'
+ ip_addr: '156.33.90.167'
+ intf_name: 'PortChannel1'
+ vlan_id: '2'
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# Total number of Dynamic bindings: 0
+# Total number of Static bindings: 2
+# Total number of Tentative bindings: 0
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# 00:b0:d0:63:c2:26 192.0.2.146 1 Ethernet4 static NA
+# aa:f7:67:fc:f4:9a 156.33.90.167 2 PortChannel1 static NA
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show ip dhcp snooping
+# !
+# DHCP snooping is Enabled
+# DHCP snooping source MAC verification is Enabled
+# DHCP snooping is enabled on the following VLANs: 1 2 3 5
+# DHCP snooping trusted interfaces: Ethernet8
+# !
+
+- name: Disable DHCPv4 snooping on some VLANs
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv4'
+ vlans:
+ - '3'
+ - '5'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show ip dhcp snooping
+# !
+# DHCP snooping is Enabled
+# DHCP snooping source MAC verification is Enabled
+# DHCP snooping is enabled on the following VLANs: 1 2
+# DHCP snooping trusted interfaces: Ethernet8
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show ipv6 dhcp snooping
+# !
+# DHCPv6 snooping is Enabled
+# DHCPv6 snooping source MAC verification is Disabled
+# DHCPv6 snooping is enabled on the following VLANs: 4
+# DHCPv6 snooping trusted interfaces: PortChannel1 PortChannel2 PortChannel3 PortChannel4
+# !
+
+- name: Disable DHCPv6 snooping on all VLANs
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv6'
+ vlans: []
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show ipv6 dhcp snooping
+# !
+# DHCPv6 snooping is Enabled
+# DHCPv6 snooping source MAC verification is Disabled
+# DHCPv6 snooping is enabled on the following VLANs:
+# DHCPv6 snooping trusted interfaces: PortChannel1 PortChannel2 PortChannel3 PortChannel4
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show ipv6 dhcp snooping
+# !
+# DHCPv6 snooping is Enabled
+# DHCPv6 snooping source MAC verification is Disabled
+# DHCPv6 snooping is enabled on the following VLANs: 4
+# DHCPv6 snooping trusted interfaces: PortChannel1 PortChannel2 PortChannel3 PortChannel4
+# !
+
+- name: Delete all DHCPv6 configuration
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv6'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show ipv6 dhcp snooping
+# !
+# DHCPv6 snooping is Disabled
+# DHCPv6 snooping source MAC verification is Disabled
+# DHCPv6 snooping is enabled on the following VLANs:
+# DHCPv6 snooping trusted interfaces:
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# Total number of Dynamic bindings: 0
+# Total number of Static bindings: 2
+# Total number of Tentative bindings: 0
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# 00:b0:d0:63:c2:26 192.0.2.146 1 Ethernet4 static NA
+# aa:f7:67:fc:f4:9a 156.33.90.167 2 PortChannel1 static NA
+# !
+
+- name: Delete a DHCPv4 snooping binding
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '1'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# Total number of Dynamic bindings: 0
+# Total number of Static bindings: 2
+# Total number of Tentative bindings: 0
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# aa:f7:67:fc:f4:9a 156.33.90.167 2 PortChannel1 static NA
+# !
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# 00:b0:d0:63:c2:26 192.0.2.146 1 Ethernet4 static NA
+# 28:21:28:15:c1:1b 141.202.222.118 1 Ethernet2 static NA
+# aa:f7:67:fc:f4:9a 156.33.90.167 2 PortChannel1 static NA
+# !
+
+- name: Override DHCPv4 snooping bindings
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '3'
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# 00:b0:d0:63:c2:26 192.0.2.146 3 Ethernet4 static NA
+# !
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# 00:b0:d0:63:c2:26 192.0.2.146 1 Ethernet4 static NA
+# 28:21:28:15:c1:1b 141.202.222.118 1 Ethernet2 static NA
+# aa:f7:67:fc:f4:9a 156.33.90.167 2 PortChannel1 static NA
+# !
+
+- name: Replace DHCPv4 snooping bindings
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '3'
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show ip dhcp snooping binding
+# !
+# MAC Address IP Address VLAN Interface Type Lease (Secs)
+# ----------------- --------------- ---- ----------- ------- -----------
+# 00:b0:d0:63:c2:26 192.0.2.146 3 Ethernet4 static NA
+# 28:21:28:15:c1:1b 141.202.222.118 1 Ethernet2 static NA
+# aa:f7:67:fc:f4:9a 156.33.90.167 2 PortChannel1 static NA
+# !
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: dict
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: dict
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.dhcp_snooping.dhcp_snooping import Dhcp_snoopingArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.dhcp_snooping.dhcp_snooping import Dhcp_snooping
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Dhcp_snoopingArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Dhcp_snooping(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py
index f13e9defd..3fa261381 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_facts.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
"""
@@ -67,6 +67,7 @@ options:
- bgp_ext_communities
- mclag
- prefix_lists
+ - vlan_mapping
- vrfs
- vxlans
- users
@@ -77,6 +78,21 @@ options:
- radius_server
- static_routes
- ntp
+ - logging
+ - pki
+ - ip_neighbor
+ - port_group
+ - dhcp_relay
+ - acl_interfaces
+ - l2_acls
+ - l3_acls
+ - lldp_global
+ - mac
+ - bfd
+ - copp
+ - route_maps
+ - stp
+ - dhcp_snooping
"""
EXAMPLES = """
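# Editor's illustrative sketch (not part of this patch): gathering facts for some
# of the newly added resource subsets. The subsets chosen here are assumptions;
# any of the choices listed above can be requested in the same way.
#
# - name: Gather DHCP relay, CoPP, and STP facts
#   dellemc.enterprise_sonic.sonic_facts:
#     gather_network_resources:
#       - dhcp_relay
#       - copp
#       - stp
#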
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py
index 0cd6a1896..09a5d0e18 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_interfaces.py
@@ -63,12 +63,53 @@ options:
description:
- MTU of the interface.
type: int
+ speed:
+ description:
+ - Interface speed.
+ - Supported speeds are dependent on the type of switch.
+ type: str
+ choices:
+ - SPEED_10MB
+ - SPEED_100MB
+ - SPEED_1GB
+ - SPEED_2500MB
+ - SPEED_5GB
+ - SPEED_10GB
+ - SPEED_20GB
+ - SPEED_25GB
+ - SPEED_40GB
+ - SPEED_50GB
+ - SPEED_100GB
+ - SPEED_400GB
+ auto_negotiate:
+ description:
+ - Enable auto-negotiation of transmission parameters with the peer interface.
+ type: bool
+ advertised_speed:
+ description:
+ - Advertised speeds of the interface.
+ - Supported speeds are dependent on the type of switch.
+ - Speeds may be 10, 100, 1000, 2500, 5000, 10000, 20000, 25000, 40000, 50000, 100000 or 400000.
+ type: list
+ elements: str
+ fec:
+ description:
+ - Interface FEC (Forward Error Correction).
+ type: str
+ choices:
+ - FEC_RS
+ - FEC_FC
+ - FEC_DISABLED
+ - FEC_DEFAULT
+ - FEC_AUTO
state:
description:
- The state the configuration should be left in.
type: str
choices:
- merged
+ - replaced
+ - overridden
- deleted
default: merged
"""
@@ -80,18 +121,28 @@ EXAMPLES = """
#
# show interface status | no-more
#------------------------------------------------------------------------------------------
-#Name Description Admin Oper Speed MTU
+#Name Description Admin Oper AutoNeg Speed MTU
#------------------------------------------------------------------------------------------
-#Eth1/1 - up 100000 9100
-#Eth1/2 - up 100000 9100
-#Eth1/3 - down 100000 9100
-#Eth1/3 - down 1000 5000
-#Eth1/5 - down 100000 9100
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 Ethernet-8 down 100000 9100
+#Ethernet12 Ethernet-12 down on - 5000
+#Ethernet16 - down 40000 9100
#
-- name: Configures interfaces
- dellemc.enterprise_sonic.sonic_interfaces:
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 100000
+# fec AUTO
+# shutdown
+#
+- name: Configure interfaces
+ sonic_interfaces:
config:
- name: Eth1/3
+ - name: Ethernet8
+ - name: Ethernet12
+ - name: Ethernet16
state: deleted
#
# After state:
@@ -99,14 +150,20 @@ EXAMPLES = """
#
# show interface status | no-more
#------------------------------------------------------------------------------------------
-#Name Description Admin Oper Speed MTU
+#Name Description Admin Oper AutoNeg Speed MTU
#------------------------------------------------------------------------------------------
-#Eth1/1 - up 100000 9100
-#Eth1/2 - up 100000 9100
-#Eth1/3 - down 100000 9100
-#Eth1/3 - up 100000 9100
-#Eth1/5 - down 100000 9100
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - up 100000 9100
+#Ethernet12 - up 100000 9100
+#Ethernet16 - up 100000 9100
#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 100000
+# shutdown
#
# Using deleted
#
@@ -115,33 +172,33 @@ EXAMPLES = """
#
# show interface status | no-more
#------------------------------------------------------------------------------------------
-#Name Description Admin Oper Speed MTU
+#Name Description Admin Oper AutoNeg Speed MTU
#------------------------------------------------------------------------------------------
-#Eth1/1 - up 100000 9100
-#Eth1/2 - up 100000 9100
-#Eth1/3 - down 100000 9100
-#Eth1/3 - down 1000 9100
-#Eth1/5 - down 100000 9100
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - down 100000 9100
+#Ethernet12 - down 1000 9100
+#Ethernet16 - down 100000 9100
#
-
-- name: Configures interfaces
- dellemc.enterprise_sonic.sonic_interfaces:
+- name: Configure interfaces
+ sonic_interfaces:
config:
- state: deleted
+ state: deleted
#
# After state:
# -------------
#
# show interface status | no-more
#------------------------------------------------------------------------------------------
-#Name Description Admin Oper Speed MTU
+#Name Description Admin Oper AutoNeg Speed MTU
#------------------------------------------------------------------------------------------
-#Eth1/1 - up 100000 9100
-#Eth1/2 - up 100000 9100
-#Eth1/3 - up 100000 9100
-#Eth1/3 - up 100000 9100
-#Eth1/5 - up 100000 9100
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - up 100000 9100
+#Ethernet12 - up 100000 9100
+#Ethernet16 - up 100000 9100
+#
#
#
# Using merged
@@ -151,38 +208,177 @@ EXAMPLES = """
#
# show interface status | no-more
#------------------------------------------------------------------------------------------
-#Name Description Admin Oper Speed MTU
+#Name Description Admin Oper AutoNeg Speed MTU
#------------------------------------------------------------------------------------------
-#Eth1/1 - up 100000 9100
-#Eth1/2 - up 100000 9100
-#Eth1/3 - down 100000 9100
-#Eth1/3 - down 1000 9100
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - down 100000 9100
+#Ethernet12 - down 100000 9100
+#Ethernet16 - down 100000 9100
+#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 100000
+# shutdown
#
-- name: Configures interfaces
- dellemc.enterprise_sonic.sonic_interfaces:
+- name: Configure interfaces
+ sonic_interfaces:
config:
- - name: Eth1/3
- description: 'Ethernet Twelve'
- - name: Eth1/5
- description: 'Ethernet Sixteen'
- enable: True
- mtu: 3500
+ - name: Ethernet8
+ fec: FEC_AUTO
+ - name: Ethernet12
+ description: 'Ethernet Twelve'
+ auto_negotiate: True
+ - name: Ethernet16
+ description: 'Ethernet Sixteen'
+ enabled: True
+ mtu: 3500
+ speed: SPEED_40GB
state: merged
#
+# After state:
+# ------------
+#
+# show interface status | no-more
+#------------------------------------------------------------------------------------------
+#Name Description Admin Oper AutoNeg Speed MTU
+#------------------------------------------------------------------------------------------
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - down 100000 9100
+#Ethernet12 Ethernet Twelve down on 100000 9100
+#Ethernet16 Ethernet Sixteen up 40000 3500
+#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 100000
+# fec AUTO
+# shutdown
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# show interface status | no-more
+#------------------------------------------------------------------------------------------
+#Name Description Admin Oper AutoNeg Speed MTU
+#------------------------------------------------------------------------------------------
+#Ethernet0 E0 up 100000 9100
+#Ethernet4 E4 up 100000 9100
+#Ethernet8 E8 down 100000 9100
+#Ethernet12 - down 1000 9100
+#Ethernet16 - down 100000 9100
+#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 100000
+# shutdown
+#
+- name: Configure interfaces
+ sonic_interfaces:
+ config:
+ - name: Ethernet8
+ fec: FEC_AUTO
+ - name: Ethernet12
+ description: 'Ethernet Twelve'
+ mtu: 3500
+ enabled: True
+ auto_negotiate: True
+ - name: Ethernet16
+ description: 'Ethernet Sixteen'
+ mtu: 3000
+ enabled: False
+ speed: SPEED_40GB
+ state: overridden
+#
+# After state:
+# ------------
+#
+# show interface status | no-more
+#------------------------------------------------------------------------------------------
+#Name Description Admin Oper AutoNeg Speed MTU
+#------------------------------------------------------------------------------------------
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - up 100000 9100
+#Ethernet12 Ethernet Twelve up on 100000 3500
+#Ethernet16 Ethernet Sixteen down 40000 3000
+#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 100000
+# fec AUTO
+# no shutdown
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# show interface status | no-more
+#------------------------------------------------------------------------------------------
+#Name Description Admin Oper AutoNeg Speed MTU
+#------------------------------------------------------------------------------------------
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - down on 100000 9100
+#Ethernet12 - down 1000 9100
+#Ethernet16 - down 100000 9100
+#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed auto 40000
+# shutdown
+#
+- name: Configure interfaces
+ sonic_interfaces:
+ config:
+ - name: Ethernet8
+ advertised_speed:
+ - "100000"
+ - name: Ethernet12
+ description: 'Ethernet Twelve'
+ mtu: 3500
+ enabled: True
+ auto_negotiate: True
+ - name: Ethernet16
+ description: 'Ethernet Sixteen'
+ mtu: 3000
+ enabled: False
+ speed: SPEED_40GB
+ state: replaced
#
# After state:
# ------------
#
# show interface status | no-more
#------------------------------------------------------------------------------------------
-#Name Description Admin Oper Speed MTU
+#Name Description Admin Oper AutoNeg Speed MTU
#------------------------------------------------------------------------------------------
-#Eth1/1 - up 100000 9100
-#Eth1/2 - up 100000 9100
-#Eth1/3 - down 100000 9100
-#Eth1/4 - down 1000 9100
-#Eth1/5 - down 100000 3500
+#Ethernet0 - up 100000 9100
+#Ethernet4 - up 100000 9100
+#Ethernet8 - down on 100000 9100
+#Ethernet12 Ethernet Twelve up on 100000 3500
+#Ethernet16 Ethernet Sixteen down 40000 3000
#
+# show running-configuration interface Ethernet 8
+#!
+#interface Ethernet8
+# mtu 9100
+# speed auto 100000
+# fec AUTO
+# shutdown
#
"""
RETURN = """
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ip_neighbor.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ip_neighbor.py
new file mode 100644
index 000000000..f1e4acc82
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ip_neighbor.py
@@ -0,0 +1,300 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_ip_neighbor
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: sonic_ip_neighbor
+version_added: 2.1.0
+notes:
+ - Supports C(check_mode).
+short_description: Manage IP neighbor global configuration on SONiC
+description:
+ - This module provides configuration management of global IP neighbor settings for devices running SONiC.
+author: "M. Zhang (@mingjunzhang2019)"
+options:
+ config:
+ description:
+ - Specifies IP neighbor global configurations.
+ type: dict
+ suboptions:
+ ipv4_arp_timeout:
+ type: int
+ description:
+ - IPv4 ARP timeout.
+ - The range is from 60 to 14400.
+ ipv6_nd_cache_expiry:
+ type: int
+ description:
+ - IPv6 ND cache expiry.
+ - The range is from 60 to 14400.
+ num_local_neigh:
+ type: int
+ description:
+ - The number of reserved local neighbors.
+ - The range is from 0 to 32000.
+ ipv4_drop_neighbor_aging_time:
+ type: int
+ description:
+ - IPv4 drop neighbor aging time.
+ - The range is from 60 to 14400.
+ ipv6_drop_neighbor_aging_time:
+ type: int
+ description:
+ - IPv6 drop neighbor aging time.
+ - The range is from 60 to 14400.
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+#
+# Using merged
+#
+# Before state:
+# -------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 180
+#ip drop-neighbor aging-time 300
+#ipv6 drop-neighbor aging-time 300
+#ip reserve local-neigh 0
+#ipv6 nd cache expire 180
+#!
+- name: Configure IP neighbor global
+ sonic_ip_neighbor:
+ config:
+ ipv4_arp_timeout: 1200
+ ipv4_drop_neighbor_aging_time: 600
+ ipv6_drop_neighbor_aging_time: 600
+ ipv6_nd_cache_expiry: 1200
+ num_local_neigh: 1000
+ state: merged
+
+# After state:
+# ------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 1200
+#ip drop-neighbor aging-time 600
+#ipv6 drop-neighbor aging-time 600
+#ip reserve local-neigh 1000
+#ipv6 nd cache expire 1200
+#!
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 1200
+#ip drop-neighbor aging-time 600
+#ipv6 drop-neighbor aging-time 600
+#ip reserve local-neigh 1000
+#ipv6 nd cache expire 1200
+#!
+- name: Delete (reset to default) specific IP neighbor settings
+ sonic_ip_neighbor:
+ config:
+ ipv4_arp_timeout: 0
+ ipv4_drop_neighbor_aging_time: 0
+ state: deleted
+
+# After state:
+# ------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 180
+#ip drop-neighbor aging-time 300
+#ipv6 drop-neighbor aging-time 600
+#ip reserve local-neigh 1000
+#ipv6 nd cache expire 1200
+#!
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 1200
+#ip drop-neighbor aging-time 600
+#ipv6 drop-neighbor aging-time 600
+#ip reserve local-neigh 1000
+#ipv6 nd cache expire 1200
+#!
+- name: Delete all IP neighbor configuration
+ sonic_ip_neighbor:
+ config: {}
+ state: deleted
+
+# After state:
+# ------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 180
+#ip drop-neighbor aging-time 300
+#ipv6 drop-neighbor aging-time 300
+#ip reserve local-neigh 0
+#ipv6 nd cache expire 180
+#!
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 1200
+#ip drop-neighbor aging-time 600
+#ipv6 drop-neighbor aging-time 300
+#ip reserve local-neigh 0
+#ipv6 nd cache expire 180
+#!
+- name: Change some IP neighbor configuration
+ sonic_ip_neighbor:
+ config:
+ ipv6_drop_neighbor_aging_time: 600
+ ipv6_nd_cache_expiry: 1200
+ num_local_neigh: 1000
+ state: replaced
+
+# After state:
+# ------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 1200
+#ip drop-neighbor aging-time 600
+#ipv6 drop-neighbor aging-time 600
+#ip reserve local-neigh 1000
+#ipv6 nd cache expire 1200
+#!
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 1200
+#ip drop-neighbor aging-time 600
+#ipv6 drop-neighbor aging-time 300
+#ip reserve local-neigh 0
+#ipv6 nd cache expire 180
+#!
+- name: Reset all IP neighbor settings to defaults, then apply the specified configuration
+ sonic_ip_neighbor:
+ config:
+ ipv6_drop_neighbor_aging_time: 600
+ ipv6_nd_cache_expiry: 1200
+ num_local_neigh: 1000
+ state: overridden
+
+# After state:
+# ------------
+#
+#sonic# show running-configuration
+#!
+#ip arp timeout 180
+#ip drop-neighbor aging-time 300
+#ipv6 drop-neighbor aging-time 600
+#ip reserve local-neigh 1000
+#ipv6 nd cache expire 1200
+#!
+#
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after(generated):
+ description: The generated configuration from module invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.ip_neighbor.ip_neighbor import Ip_neighborArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.ip_neighbor.ip_neighbor import Ip_neighbor
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Ip_neighborArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Ip_neighbor(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
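Because sonic_ip_neighbor declares C(check_mode) support, a dry run can preview the commands it would push before anything is applied. A minimal sketch (the values are illustrative):

  - name: Preview IP neighbor changes without applying them (illustrative)
    dellemc.enterprise_sonic.sonic_ip_neighbor:
      config:
        ipv4_arp_timeout: 1200
        num_local_neigh: 1000
      state: merged
    check_mode: true
    register: preview

  - name: Show the commands that would have been pushed
    ansible.builtin.debug:
      var: preview.commands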
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_acls.py
new file mode 100644
index 000000000..cda50242b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_acls.py
@@ -0,0 +1,582 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_l2_acls
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_l2_acls
+version_added: '2.1.0'
+notes:
+ - Supports C(check_mode).
+short_description: Manage Layer 2 access control list (ACL) configurations on SONiC
+description:
+ - This module provides configuration management of Layer 2 access control lists (ACLs)
+ on devices running SONiC.
+author: 'Arun Saravanan Balachandran (@ArunSaravananBalachandran)'
+options:
+ config:
+ description:
+ - Specifies Layer 2 ACL configurations.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Specifies the ACL name.
+ type: str
+ required: true
+ remark:
+ description:
+ - Specifies remark for the ACL.
+ type: str
+ rules:
+ description:
+ - List of rules in the ACL.
+ - I(sequence_num), I(action), I(source) and I(destination) are required for adding a new rule.
+ - If I(state=deleted), options other than I(sequence_num) are not considered.
+ - I(ethertype) and I(vlan_tag_format) are mutually exclusive.
+ type: list
+ elements: dict
+ suboptions:
+ sequence_num:
+ description:
+ - Specifies the sequence number of the rule.
+ - The range is from 1 to 65535.
+ type: int
+ required: true
+ action:
+ description:
+ - Specifies the action taken on the matched Ethernet frame.
+ type: str
+ choices:
+ - deny
+ - discard
+ - do-not-nat
+ - permit
+ - transit
+ source:
+ description:
+ - Specifies the source of the Ethernet frame.
+ - I(address) and I(address_mask) are required together.
+ - I(any), I(host) and I(address) are mutually exclusive.
+ type: dict
+ suboptions:
+ any:
+ description:
+ - Match any source MAC address.
+ type: bool
+ host:
+ description:
+ - MAC address of a single source host.
+ type: str
+ address:
+ description:
+ - Source MAC address.
+ type: str
+ address_mask:
+ description:
+ - Source MAC address mask.
+ type: str
+ destination:
+ description:
+ - Specifies the destination of the Ethernet frame.
+ - I(address) and I(address_mask) are required together.
+ - I(any), I(host) and I(address) are mutually exclusive.
+ type: dict
+ suboptions:
+ any:
+ description:
+ - Match any destination MAC address.
+ type: bool
+ host:
+ description:
+ - MAC address of a single destination host.
+ type: str
+ address:
+ description:
+ - Destination MAC address.
+ type: str
+ address_mask:
+ description:
+ - Destination MAC address mask.
+ type: str
+ ethertype:
+ description:
+ - Specifies the EtherType of the Ethernet frame.
+ - Only one suboption can be specified for ethertype in a rule.
+ type: dict
+ suboptions:
+ value:
+ description:
+ - Specifies the EtherType value to match as a hexadecimal string.
+ - The range is from 0x600 to 0xffff.
+ type: str
+ arp:
+ description:
+ - Match Ethernet frame with ARP EtherType (0x806).
+ type: bool
+ ipv4:
+ description:
+ - Match Ethernet frame with IPv4 EtherType (0x800).
+ type: bool
+ ipv6:
+ description:
+ - Match Ethernet frame with IPv6 EtherType (0x86DD).
+ type: bool
+ vlan_id:
+ description:
+ - Match Ethernet frame with the given VLAN ID.
+ type: int
+ vlan_tag_format:
+ description:
+ - Match Ethernet frame with the given VLAN tag format.
+ type: dict
+ suboptions:
+ multi_tagged:
+ description:
+ - Match Ethernet frames with three or more VLAN tags.
+ type: bool
+ dei:
+ description:
+ - Match Ethernet frame with the given Drop Eligible Indicator (DEI) value.
+ type: int
+ choices:
+ - 0
+ - 1
+ pcp:
+ description:
+ - Match Ethernet frames using Priority Code Point (PCP) value.
+ - I(mask) is valid only when I(value) is specified.
+ - I(value) and I(traffic_type) are mutually exclusive.
+ type: dict
+ suboptions:
+ value:
+ description:
+ - Match Ethernet frame with the given PCP value.
+ - The range is from 0 to 7.
+ type: int
+ mask:
+ description:
+ - Match Ethernet frame with given PCP value and mask.
+ - The range is from 0 to 7.
+ type: int
+ traffic_type:
+ description:
+ - Match Ethernet frame with PCP value for the given traffic type.
+ - C(be) - Match Ethernet frame with Best effort PCP (0).
+ - C(bk) - Match Ethernet frame with Background PCP (1).
+ - C(ee) - Match Ethernet frame with Excellent effort PCP (2).
+ - C(ca) - Match Ethernet frame with Critical applications PCP (3).
+ - C(vi) - Match Ethernet frame with Video, < 100 ms latency and jitter PCP (4).
+ - C(vo) - Match Ethernet frame with Voice, < 10 ms latency and jitter PCP (5).
+ - C(ic) - Match Ethernet frame with Internetwork control PCP (6).
+ - C(nc) - Match Ethernet frame with Network control PCP (7).
+ type: str
+ choices:
+ - be
+ - bk
+ - ee
+ - ca
+ - vi
+ - vo
+ - ic
+ - nc
+ remark:
+ description:
+ - Specifies remark for the ACL rule.
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion.
+ - C(merged) - Merges provided L2 ACL configuration with on-device configuration.
+ - C(replaced) - Replaces on-device configuration of the specified L2 ACLs with provided configuration.
+ - C(overridden) - Overrides all on-device L2 ACL configurations with the provided configuration.
+ - C(deleted) - Deletes on-device L2 ACL configuration.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# sonic#
+
+ - name: Merge provided Layer 2 ACL configurations
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config:
+ - name: 'test'
+ rules:
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ value: '0x88cc'
+ remark: 'LLDP'
+ - sequence_num: 3
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ address: '00:00:10:00:00:00'
+ address_mask: '00:00:ff:ff:00:00'
+ pcp:
+ value: 4
+ mask: 6
+ - sequence_num: 4
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ vlan_tag_format:
+ multi_tagged: true
+ - name: 'test1'
+ remark: 'test_mac_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '11:11:11:11:11:11'
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ arp: true
+ vlan_id: 100
+ - sequence_num: 3
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ dei: 0
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# seq 3 permit any 00:00:10:00:00:00 00:00:ff:ff:00:00 pcp vi pcp-mask 6
+# seq 4 deny any any vlan-tag-format multi-tagged
+# !
+# mac access-list test1
+# remark test_mac_acl
+# seq 1 permit host 11:11:11:11:11:11 any
+# seq 2 permit any any arp vlan 100
+# seq 3 deny any any dei 0
+# sonic#
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# seq 3 permit any 00:00:10:00:00:00 00:00:ff:ff:00:00 pcp vi pcp-mask 6
+# !
+# mac access-list test1
+# remark test_mac_acl
+# seq 1 permit host 11:11:11:11:11:11 any
+# seq 2 permit any any arp vlan 100
+# seq 3 deny any any dei 0
+# sonic#
+
+ - name: Replace device configuration of specified Layer 2 ACLs with provided configuration
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config:
+ - name: 'test1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ arp: true
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+ - name: 'test2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '33:33:33:33:33:33'
+ destination:
+ host: '44:44:44:44:44:44'
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# seq 3 permit any 00:00:10:00:00:00 00:00:ff:ff:00:00 pcp vi pcp-mask 6
+# !
+# mac access-list test1
+# seq 1 permit any any arp vlan 200
+# seq 2 discard any any
+# !
+# mac access-list test2
+# seq 1 permit host 33:33:33:33:33:33 host 44:44:44:44:44:44
+# sonic#
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# seq 3 permit any 00:00:10:00:00:00 00:00:ff:ff:00:00 pcp vi pcp-mask 6
+# !
+# mac access-list test1
+# seq 1 permit any any arp vlan 200
+# seq 2 discard any any
+# !
+# mac access-list test2
+# seq 1 permit host 33:33:33:33:33:33 host 44:44:44:44:44:44
+# sonic#
+
+ - name: Override device configuration of all Layer 2 ACLs with provided configuration
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config:
+ - name: 'test1'
+ remark: 'test_mac_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '11:11:11:11:11:11'
+ destination:
+ any: true
+ vlan_id: 100
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ pcp:
+ traffic_type: 'ca'
+ - sequence_num: 3
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv4: true
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test1
+# remark test_mac_acl
+# seq 1 permit host 11:11:11:11:11:11 any vlan 100
+# seq 2 permit any any pcp ca
+# seq 3 deny any any ip
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# seq 3 permit any 00:00:10:00:00:00 00:00:ff:ff:00:00 pcp vi pcp-mask 6
+# !
+# mac access-list test1
+# remark test_mac_acl
+# seq 1 permit host 11:11:11:11:11:11 any vlan 100
+# seq 2 deny any any ip
+# !
+# mac access-list test2
+# seq 1 permit host 33:33:33:33:33:33 host 44:44:44:44:44:44
+# sonic#
+
+ - name: Delete specified Layer 2 ACLs, ACL remark and ACL rule entries
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config:
+ - name: 'test'
+ rules:
+ - sequence_num: 3
+ - name: 'test1'
+ remark: 'test_mac_acl'
+ - name: 'test2'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# !
+# mac access-list test1
+# seq 1 permit host 11:11:11:11:11:11 any vlan 100
+# seq 2 deny any any ip
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration mac access-list
+# !
+# mac access-list test
+# seq 1 permit host 22:22:22:22:22:22 any vlan 20
+# seq 2 permit any any 0x88cc remark LLDP
+# seq 3 permit any 00:00:10:00:00:00 00:00:ff:ff:00:00 pcp vi pcp-mask 6
+# !
+# mac access-list test1
+# remark test_mac_acl
+# seq 1 permit host 11:11:11:11:11:11 any vlan 100
+# seq 2 deny any any ip
+# !
+# mac access-list test2
+# seq 1 permit host 33:33:33:33:33:33 host 44:44:44:44:44:44
+# sonic#
+
+ - name: Delete all Layer 2 ACL configurations
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config:
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration mac access-list
+# sonic#
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after(generated):
+ description: The generated configuration from module invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l2_acls.l2_acls import L2_aclsArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l2_acls.l2_acls import L2_acls
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=L2_aclsArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = L2_acls(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
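Per the rule documentation above, I(sequence_num), I(action), I(source), and I(destination) are the minimum required to add a rule; everything else is optional. A minimal sketch (the ACL name and values are illustrative):

  - name: Add the smallest valid L2 ACL rule (illustrative)
    dellemc.enterprise_sonic.sonic_l2_acls:
      config:
        - name: 'example_acl'
          rules:
            - sequence_num: 1
              action: 'permit'
              source:
                any: true
              destination:
                any: true
      state: merged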
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py
index 34a8ff720..8d70f8a3f 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l2_interfaces.py
@@ -54,13 +54,13 @@ options:
description: Configures trunking parameters on an interface.
suboptions:
allowed_vlans:
- description: Specifies list of allowed VLANs of trunk mode on the interface.
+ description: Specifies a list of allowed trunk mode VLANs and VLAN ranges for the interface.
type: list
elements: dict
suboptions:
vlan:
- type: int
- description: Configures the specified VLAN in trunk mode.
+ type: str
+ description: Configures the specified trunk mode VLAN or VLAN range.
access:
type: dict
description: Configures access mode characteristics of the interface.
@@ -74,6 +74,8 @@ options:
choices:
- merged
- deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -145,6 +147,47 @@ EXAMPLES = """
#15 Inactive
#
#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#do show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#11 Inactive T Ethernet12
+#12 Inactive A Ethernet12
+#13 Inactive T Ethernet12
+#14 Inactive T Ethernet12
+#15 Inactive T Ethernet12
+#16 Inactive T Ethernet12
+
+- name: Delete the access vlan and a range of trunk vlans for an interface
+ sonic_l2_interfaces:
+ config:
+ - name: Ethernet12
+ access:
+ vlan: 12
+ trunk:
+ allowed_vlans:
+ - vlan: 13-16
+ state: deleted
+
+# After state:
+# ------------
+#
+#do show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#11 Inactive T Ethernet12
+#12 Inactive
+#13 Inactive
+#14 Inactive
+#15 Inactive
+#16 Inactive
+#
+#
+#
# Using merged
#
# Before state:
@@ -153,10 +196,11 @@ EXAMPLES = """
#do show Vlan
#Q: A - Access (Untagged), T - Tagged
#NUM Status Q Ports
+#10 Inactive
#11 Inactive T Eth1/7
#12 Inactive T Eth1/7
#
-- name: Configures switch port of interfaces
+- name: Configure an access vlan for an interface
dellemc.enterprise_sonic.sonic_l2_interfaces:
config:
- name: Eth1/3
@@ -184,15 +228,23 @@ EXAMPLES = """
#Q: A - Access (Untagged), T - Tagged
#NUM Status Q Ports
#10 Inactive A Eth1/3
+#12 Inactive
+#13 Inactive
+#14 Inactive
+#15 Inactive
+#16 Inactive
+#18 Inactive
#
-- name: Configures switch port of interfaces
+- name: Modify the access vlan, add a range of trunk vlans and a single trunk vlan for an interface
dellemc.enterprise_sonic.sonic_l2_interfaces:
config:
- name: Eth1/3
+ access:
+ vlan: 12
trunk:
allowed_vlans:
- - vlan: 11
- - vlan: 12
+ - vlan: 13-16
+ - vlan: 18
state: merged
#
# After state:
@@ -201,9 +253,13 @@ EXAMPLES = """
#do show Vlan
#Q: A - Access (Untagged), T - Tagged
#NUM Status Q Ports
-#10 Inactive A Eth1/3
-#11 Inactive T Eth1/7
-#12 Inactive T Eth1/7
+#10 Inactive
+#12 Inactive A Eth1/3
+#13 Inactive T Eth1/3
+#14 Inactive T Eth1/3
+#15 Inactive T Eth1/3
+#16 Inactive T Eth1/3
+#18 Inactive T Eth1/3
#
#
# Using merged
@@ -250,6 +306,89 @@ EXAMPLES = """
#15 Inactive T Eth1/5
#
#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#do show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive A Ethernet12
+# A Ethernet13
+#11 Inactive T Ethernet12
+# T Ethernet13
+
+- name: Replace access vlan and trunk vlans for specified interfaces
+ sonic_l2_interfaces:
+ config:
+ - name: Ethernet12
+ access:
+ vlan: 12
+ trunk:
+ allowed_vlans:
+ - vlan: 13-14
+ - name: Ethernet14
+ access:
+ vlan: 10
+ trunk:
+ allowed_vlans:
+ - vlan: 11
+ - vlan: 13-14
+ state: replaced
+
+# After state:
+# ------------
+#
+#do show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive A Ethernet13
+# A Ethernet14
+#11 Inactive T Ethernet13
+# T Ethernet14
+#12 Inactive A Ethernet12
+#13 Inactive T Ethernet12
+# T Ethernet14
+#14 Inactive T Ethernet12
+# T Ethernet14
+#
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#do show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive A Ethernet11
+#11 Inactive T Ethernet11
+#12 Inactive A Ethernet12
+#13 Inactive T Ethernet12
+
+- name: Override L2 interface configuration on the device with the provided configuration
+ sonic_l2_interfaces:
+ config:
+ - name: Ethernet13
+ access:
+ vlan: 12
+ trunk:
+ allowed_vlans:
+ - vlan: 13-14
+ state: overridden
+
+# After state:
+# ------------
+#
+#do show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#12 Inactive A Ethernet13
+#13 Inactive T Ethernet13
+#14 Inactive T Ethernet13
+#
+#
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_acls.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_acls.py
new file mode 100644
index 000000000..ad34025df
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_acls.py
@@ -0,0 +1,1058 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_l3_acls
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_l3_acls
+version_added: '2.1.0'
+notes:
+ - Supports C(check_mode).
+short_description: Manage Layer 3 access control list (ACL) configurations on SONiC
+description:
+ - This module provides configuration management of Layer 3 access control lists (ACLs)
+ on devices running SONiC.
+author: 'Arun Saravanan Balachandran (@ArunSaravananBalachandran)'
+options:
+ config:
+ description:
+ - Specifies Layer 3 ACL configurations.
+ type: list
+ elements: dict
+ suboptions:
+ address_family:
+ description:
+ - Specifies the address family of the ACLs.
+ type: str
+ required: true
+ choices:
+ - ipv4
+ - ipv6
+ acls:
+ description:
+ - List of ACL configuration for the given address family.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - Specifies the ACL name.
+ type: str
+ required: true
+ remark:
+ description:
+ - Specifies remark for the ACL.
+ type: str
+ rules:
+ description:
+ - List of rules in the ACL.
+ - I(sequence_num), I(action), I(protocol), I(source) and I(destination) are required for adding a new rule.
+ - If I(state=deleted), options other than I(sequence_num) are not considered.
+ type: list
+ elements: dict
+ suboptions:
+ sequence_num:
+ description:
+ - Specifies the sequence number of the rule.
+ - The range is from 1 to 65535.
+ type: int
+ required: true
+ action:
+ description:
+ - Specifies the action taken on the matched packet.
+ type: str
+ choices:
+ - deny
+ - discard
+ - do-not-nat
+ - permit
+ - transit
+ protocol:
+ description:
+ - Specifies the protocol to match.
+ - Only one suboption can be specified for protocol in a rule.
+ type: dict
+ suboptions:
+ name:
+ description:
+ - Match packets with the given protocol.
+ - C(ip) - Match any IPv4 packets.
+ - C(ipv6) - Match any IPv6 packets.
+ - C(icmp) - Match ICMP packets.
+ - C(icmpv6) - Match ICMPv6 packets.
+ - C(tcp) - Match TCP packets.
+ - C(udp) - Match UDP packets.
+ - C(ip) and C(icmp) are valid only for IPv4 ACLs.
+ - C(ipv6) and C(icmpv6) are valid only for IPv6 ACLs.
+ type: str
+ choices:
+ - ip
+ - ipv6
+ - icmp
+ - icmpv6
+ - tcp
+ - udp
+ number:
+ description:
+ - Match packets with given protocol number.
+ - The range is from 0 to 255.
+ type: int
+ source:
+ description:
+ - Specifies the source of the packet.
+ - I(any), I(host) and I(prefix) are mutually exclusive.
+ type: dict
+ suboptions:
+ any:
+ description:
+ - Match any source network address.
+ type: bool
+ host:
+ description:
+ - Network address of a single source host.
+ type: str
+ prefix:
+ description:
+ - Source network prefix in the format A.B.C.D/mask (ipv4) or A::B/mask (ipv6).
+ type: str
+ port_number:
+ description:
+ - Specifies the source port (valid only for TCP or UDP).
+ - Only one suboption can be specified for port_number in a rule.
+ type: dict
+ suboptions:
+ eq:
+ description:
+ - Match packets with source port equal to the given port number.
+ - The range is from 0 to 65535.
+ type: int
+ gt:
+ description:
+ - Match packets with source port greater than the given port number.
+ - The range is from 0 to 65534.
+ type: int
+ lt:
+ description:
+ - Match packets with source port less than the given port number.
+ - The range is from 1 to 65535.
+ type: int
+ range:
+ description:
+ - Match packets with source port in the given range.
+ - I(begin) and I(end) are required together.
+ type: dict
+ suboptions:
+ begin:
+ description:
+ - Specifies the beginning of the port range.
+ - The range is from 0 to 65534.
+ type: int
+ end:
+ description:
+ - Specifies the end of the port range.
+ - The range is from 1 to 65535.
+ type: int
+ destination:
+ description:
+ - Specifies the destination of the packet.
+ - I(any), I(host) and I(prefix) are mutually exclusive.
+ type: dict
+ suboptions:
+ any:
+ description:
+ - Match any destination network address.
+ type: bool
+ host:
+ description:
+ - Network address of a single destination host.
+ type: str
+ prefix:
+ description:
+ - Destination network prefix in the format A.B.C.D/mask (ipv4) or A::B/mask (ipv6).
+ type: str
+ port_number:
+ description:
+ - Specifies the destination port (valid only for TCP or UDP).
+ - Only one suboption can be specified for port_number in a rule.
+ type: dict
+ suboptions:
+ eq:
+ description:
+ - Match packets with destination port equal to the given port number.
+ - The range is from 0 to 65535.
+ type: int
+ gt:
+ description:
+ - Match packets with destination port greater than the given port number.
+ - The range is from 0 to 65534.
+ type: int
+ lt:
+ description:
+ - Match packets with destination port less than the given port number.
+ - The range is from 1 to 65535.
+ type: int
+ range:
+ description:
+ - Match packets with destination port in the given range.
+ - I(begin) and I(end) are required together.
+ type: dict
+ suboptions:
+ begin:
+ description:
+ - Specifies the beginning of the port range.
+ - The range is from 0 to 65534.
+ type: int
+ end:
+ description:
+ - Specifies the end of the port range.
+ - The range is from 1 to 65535.
+ type: int
+ protocol_options:
+ description:
+ - Specifies the additional packet match options for the chosen protocol.
+ - I(icmp), I(icmpv6) and I(tcp) are mutually exclusive.
+ type: dict
+ suboptions:
+ icmp:
+ description:
+ - Packet match options for ICMP.
+ type: dict
+ suboptions:
+ code:
+ description:
+ - Match packets with given ICMP code.
+ - The range is from 0 to 255.
+ type: int
+ type:
+ description:
+ - Match packets with given ICMP type.
+ - The range is from 0 to 255.
+ type: int
+ icmpv6:
+ description:
+ - Packet match options for ICMPv6.
+ type: dict
+ suboptions:
+ code:
+ description:
+ - Match packets with given ICMPv6 code.
+ - The range is from 0 to 255.
+ type: int
+ type:
+ description:
+ - Match packets with given ICMPv6 type.
+ - The range is from 0 to 255.
+ type: int
+ tcp:
+ description:
+ - Packet match options for TCP.
+ - I(established) and other TCP flag options are mutually exclusive.
+ type: dict
+ suboptions:
+ established:
+ description:
+ - Match packets that are part of an established TCP session.
+ type: bool
+ ack:
+ description:
+ - Match packets with ACK flag set.
+ type: bool
+ not_ack:
+ description:
+ - Match packets with ACK flag cleared.
+ type: bool
+ fin:
+ description:
+ - Match packets with FIN flag set.
+ type: bool
+ not_fin:
+ description:
+ - Match packets with FIN flag cleared.
+ type: bool
+ psh:
+ description:
+ - Match packets with PSH flag set.
+ type: bool
+ not_psh:
+ description:
+ - Match packets with PSH flag cleared.
+ type: bool
+ rst:
+ description:
+ - Match packets with RST flag set.
+ type: bool
+ not_rst:
+ description:
+ - Match packets with RST flag cleared.
+ type: bool
+ syn:
+ description:
+ - Match packets with SYN flag set.
+ type: bool
+ not_syn:
+ description:
+ - Match packets with SYN flag cleared.
+ type: bool
+ urg:
+ description:
+ - Match packets with URG flag set.
+ type: bool
+ not_urg:
+ description:
+ - Match packets with URG flag cleared.
+ type: bool
+ vlan_id:
+ description:
+ - Match packets with the given VLAN ID value.
+ type: int
+ dscp:
+ description:
+ - Match packets using DSCP value.
+ - Only one suboption can be specified for dscp in a rule.
+ type: dict
+ suboptions:
+ value:
+ description:
+ - Match packets with given DSCP value.
+ - The range is from 0 to 63.
+ type: int
+ af11:
+ description:
+ - Match packets with AF11 DSCP (001010 - Decimal value 10).
+ type: bool
+ af12:
+ description:
+ - Match packets with AF12 DSCP (001100 - Decimal value 12).
+ type: bool
+ af13:
+ description:
+ - Match packets with AF13 DSCP (001110 - Decimal value 14).
+ type: bool
+ af21:
+ description:
+ - Match packets with AF21 DSCP (010010 - Decimal value 18).
+ type: bool
+ af22:
+ description:
+ - Match packets with AF22 DSCP (010100 - Decimal value 20).
+ type: bool
+ af23:
+ description:
+ - Match packets with AF23 DSCP (010110 - Decimal value 22).
+ type: bool
+ af31:
+ description:
+ - Match packets with AF31 DSCP (011010 - Decimal value 26).
+ type: bool
+ af32:
+ description:
+ - Match packets with AF32 DSCP (011100 - Decimal value 28).
+ type: bool
+ af33:
+ description:
+ - Match packets with AF33 DSCP (011110 - Decimal value 30).
+ type: bool
+ af41:
+ description:
+ - Match packets with AF41 DSCP (100010 - Decimal value 34).
+ type: bool
+ af42:
+ description:
+ - Match packets with AF42 DSCP (100100 - Decimal value 36).
+ type: bool
+ af43:
+ description:
+ - Match packets with AF43 DSCP (100110 - Decimal value 38).
+ type: bool
+ cs1:
+ description:
+ - Match packets with CS1 DSCP (001000 - Decimal value 8).
+ type: bool
+ cs2:
+ description:
+ - Match packets with CS2 DSCP (010000 - Decimal value 16).
+ type: bool
+ cs3:
+ description:
+ - Match packets with CS3 DSCP (011000 - Decimal value 24).
+ type: bool
+ cs4:
+ description:
+ - Match packets with CS4 DSCP (100000 - Decimal value 32).
+ type: bool
+ cs5:
+ description:
+ - Match packets with CS5 DSCP (101000 - Decimal value 40).
+ type: bool
+ cs6:
+ description:
+ - Match packets with CS6 DSCP (110000 - Decimal value 48).
+ type: bool
+ cs7:
+ description:
+ - Match packets with CS7 DSCP (111000 - Decimal value 56).
+ type: bool
+ default:
+ description:
+ - Match packets with CS0 DSCP (000000 - Decimal value 0).
+ type: bool
+ ef:
+ description:
+ - Match packets with EF DSCP (101110 - Decimal value 46).
+ type: bool
+ voice_admit:
+ description:
+ - Match packets with VOICE-ADMIT DSCP (101100 - Decimal value 44).
+ type: bool
+ remark:
+ description:
+ - Specifies remark for the ACL rule.
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion.
+ - C(merged) - Merges provided L3 ACL configuration with on-device configuration.
+ - C(replaced) - Replaces on-device configuration of the specified L3 ACLs with provided configuration.
+ - C(overridden) - Overrides all on-device L3 ACL configurations with the provided configuration.
+ - C(deleted) - Deletes on-device L3 ACL configuration.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit ipv6 host 192:168:1::2 any
+# sonic#
+
+ - name: Merge provided Layer 3 ACL configurations
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'test'
+ rules:
+ - sequence_num: 2
+ action: 'permit'
+ protocol:
+ name: 'icmp'
+ source:
+ any: true
+ destination:
+ host: '192.168.1.2'
+ protocol_options:
+ icmp:
+ type: 8
+ - sequence_num: 3
+ action: 'deny'
+ protocol:
+ number: 2
+ source:
+ any: true
+ destination:
+ any: true
+ - sequence_num: 4
+ action: 'deny'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+ vlan_id: 10
+ remark: 'Vlan10'
+ - name: 'test1'
+ remark: 'test_ip_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ prefix: '10.0.0.0/8'
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ destination:
+ prefix: '20.1.0.0/16'
+ port_number:
+ gt: 1024
+ - sequence_num: 3
+ action: 'deny'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+ dscp:
+ value: 63
+ - address_family: 'ipv6'
+ acls:
+ - name: 'testv6'
+ rules:
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'icmpv6'
+ source:
+ any: true
+ destination:
+ any: true
+ - name: 'testv6-1'
+ remark: 'test_ipv6_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ipv6'
+ source:
+ prefix: '1000::/16'
+ destination:
+ any: true
+ dscp:
+ af22: true
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ prefix: '2000::1000:0/112'
+ port_number:
+ range:
+ begin: 100
+ end: 1000
+ - sequence_num: 3
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ established: true
+ - sequence_num: 4
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ port_number:
+ eq: 3000
+ destination:
+ any: true
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.2 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit ipv6 host 192:168:1::2 any
+# seq 2 deny icmpv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.2 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp host 3000::1 any established
+# seq 2 permit udp any any
+# seq 3 deny icmpv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+ - name: Replace device configuration of specified Layer 3 ACLs with provided configuration
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'test2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ prefix: '192.168.1.0/24'
+ destination:
+ any: true
+ - address_family: 'ipv6'
+ acls:
+ - name: 'testv6'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ host: '3000::1'
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ ack: true
+ syn: true
+ fin: true
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ipv6'
+ source:
+ any: true
+ destination:
+ any: true
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.3 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# !
+# ip access-list test2
+# seq 1 permit tcp 192.168.1.0/24 any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp host 3000::1 any fin syn ack
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.3 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# !
+# ip access-list test2
+# seq 1 permit tcp 192.168.1.0/24 any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp 3000::/16 any
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+ - name: Override device configuration of all Layer 3 ACLs with provided configuration
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'test_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ prefix: '100.1.1.0/24'
+ destination:
+ prefix: '100.1.2.0/24'
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ destination:
+ any: true
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test_acl
+# seq 1 permit ip 100.1.1.0/24 100.1.2.0/24
+# seq 2 deny udp any any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.3 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# !
+# ip access-list test2
+# seq 1 permit tcp 192.168.1.0/24 any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp 3000::/16 any
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+ - name: Delete specified Layer 3 ACLs, ACL remark and ACL rule entries
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'test'
+ rules:
+ - sequence_num: 2
+ - name: 'test2'
+ - address_family: 'ipv6'
+ acls:
+ - name: 'testv6-1'
+ remark: 'test_ipv6_acl'
+ rules:
+ - sequence_num: 3
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp 3000::/16 any
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.3 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# !
+# ip access-list test2
+# seq 1 permit tcp 192.168.1.0/24 any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp 3000::/16 any
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+ - name: Delete all Layer 3 ACLs for an address-family
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config:
+ - address_family: 'ipv4'
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration ip access-list
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp 3000::/16 any
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration ip access-list
+# !
+# ip access-list test
+# seq 1 permit ip host 192.168.1.2 any
+# seq 2 permit icmp any host 192.168.1.3 type 8
+# seq 3 deny 2 any any
+# seq 4 deny ip any any vlan 10 remark Vlan10
+# !
+# ip access-list test1
+# remark test_ip_acl
+# seq 1 permit tcp 10.0.0.0/8 any
+# seq 2 deny udp any 20.1.0.0/16 gt 1024
+# seq 3 deny ip any any dscp 63
+# !
+# ip access-list test2
+# seq 1 permit tcp 192.168.1.0/24 any
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# !
+# ipv6 access-list testv6
+# seq 1 permit tcp 3000::/16 any
+# seq 2 deny ipv6 any any
+# !
+# ipv6 access-list testv6-1
+# remark test_ipv6_acl
+# seq 1 permit ipv6 1000::/16 any dscp af22
+# seq 2 deny tcp any 2000::1000:0/112 range 100 1000
+# seq 3 permit tcp any any established
+# seq 4 deny udp any eq 3000 any
+# sonic#
+
+ - name: Delete all Layer 3 ACL configurations
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config:
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration ip access-list
+# sonic#
+# sonic# show running-configuration ipv6 access-list
+# sonic#
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after(generated):
+ description: The generated configuration from module invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.l3_acls.l3_acls import L3_aclsArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l3_acls.l3_acls import L3_acls
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=L3_aclsArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = L3_acls(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py
index e796897a5..1ebc11994 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_l3_interfaces.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -29,6 +29,13 @@ The module file for sonic_l3_interfaces
from __future__ import absolute_import, division, print_function
__metaclass__ = type
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community',
+ 'license': 'Apache 2.0'
+}
+
DOCUMENTATION = """
---
module: sonic_l3_interfaces
@@ -44,7 +51,8 @@ description:
author: Kumaraguru Narayanan (@nkumaraguru)
options:
config:
- description: A list of l3_interfaces configurations.
+ description:
+ - A list of l3_interfaces configurations.
type: list
elements: dict
suboptions:
@@ -101,11 +109,13 @@ options:
type: bool
state:
description:
- - The state that the configuration should be left in.
+ - The state of the configuration after module completion.
type: str
choices:
- - merged
- - deleted
+ - merged
+ - deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -328,7 +338,181 @@ EXAMPLES = """
# ip anycast-address 11.12.13.14/12
#!
#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#rno-dctor-1ar01c01sw02# show running-configuration interface
+#!
+#interface Ethernet20
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 83.1.1.1/16
+# ip address 84.1.1.1/16 secondary
+# ipv6 address 83::1/16
+# ipv6 address 84::1/16
+# ipv6 enable
+#!
+#interface Ethernet24
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 91.1.1.1/16
+# ipv6 address 90::1/16
+# ipv6 address 91::1/16
+# ipv6 address 92::1/16
+# ipv6 address 93::1/16
+#!
+#
+- name: Replace l3 interface
+ dellemc.enterprise_sonic.sonic_l3_interfaces:
+ config:
+ - name: Ethernet20
+ ipv4:
+ - address: 81.1.1.1/16
+ state: replaced
+
+# After state:
+# ------------
#
+#rno-dctor-1ar01c01sw02# show running-configuration interface
+#!
+#interface Ethernet20
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 81.1.1.1/16
+#!
+#interface Ethernet24
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 91.1.1.1/16
+# ipv6 address 90::1/16
+# ipv6 address 91::1/16
+# ipv6 address 92::1/16
+# ipv6 address 93::1/16
+#!
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#rno-dctor-1ar01c01sw02# show running-configuration interface
+#!
+#interface Ethernet20
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 83.1.1.1/16
+# ip address 84.1.1.1/16 secondary
+# ipv6 address 83::1/16
+# ipv6 address 84::1/16
+# ipv6 enable
+#!
+#interface Ethernet24
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 91.1.1.1/16
+# ipv6 address 90::1/16
+# ipv6 address 91::1/16
+# ipv6 address 92::1/16
+# ipv6 address 93::1/16
+#!
+- name: Replace l3 interface
+ dellemc.enterprise_sonic.sonic_l3_interfaces:
+ config:
+ - name: Ethernet20
+ state: replaced
+
+# After state:
+# ------------
+#
+#rno-dctor-1ar01c01sw02# show running-configuration interface
+#!
+#interface Ethernet20
+# mtu 9100
+# speed 100000
+# shutdown
+#!
+#interface Ethernet24
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 91.1.1.1/16
+# ipv6 address 90::1/16
+# ipv6 address 91::1/16
+# ipv6 address 92::1/16
+# ipv6 address 93::1/16
+#!
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#rno-dctor-1ar01c01sw02# show running-configuration interface
+#!
+#interface Ethernet20
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 83.1.1.1/16
+# ip address 84.1.1.1/16 secondary
+# ipv6 address 83::1/16
+# ipv6 address 84::1/16
+# ipv6 enable
+#!
+#interface Ethernet24
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 91.1.1.1/16
+# ipv6 address 90::1/16
+# ipv6 address 91::1/16
+# ipv6 address 92::1/16
+# ipv6 address 93::1/16
+#!
+#
+- name: Override l3 interface
+ dellemc.enterprise_sonic.sonic_l3_interfaces:
+ config:
+ - name: Ethernet24
+ ipv4:
+ - address: 81.1.1.1/16
+ - name: Vlan100
+ ipv4:
+ anycast_addresses:
+ - 83.1.1.1/24
+ - 85.1.1.12/24
+ state: overridden
+
+# After state:
+# ------------
+#
+#rno-dctor-1ar01c01sw02# show running-configuration interface
+#!
+#interface Ethernet20
+# mtu 9100
+# speed 100000
+# shutdown
+#!
+#interface Ethernet24
+# mtu 9100
+# speed 100000
+# shutdown
+# ip address 81.1.1.1/16
+#!
+#interface Vlan100
+# ip anycast-address 83.1.1.1/24
+# ip anycast-address 85.1.1.12/24
+#!
+
+
"""
RETURN = """
before:
@@ -336,14 +520,14 @@ before:
returned: always
type: list
sample: >
- The configuration returned is always in the same format
+ The configuration returned will always be in the same format
of the parameters above.
after:
description: The resulting configuration model invocation.
returned: when changed
type: list
sample: >
- The configuration returned is always in the same format
+ The configuration returned will always be in the same format
of the parameters above.
commands:
description: The set of commands pushed to the remote device.
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py
index 630db7985..0fd0d8b7e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lag_interfaces.py
@@ -80,6 +80,8 @@ options:
type: str
choices:
- merged
+ - replaced
+ - overridden
- deleted
default: merged
"""
@@ -124,6 +126,111 @@ EXAMPLES = """
# speed 100000
# no shutdown
#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# interface Eth1/5
+# channel-group 10
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/6
+# channel-group 20
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/7
+# no channel-group
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+- name: Replace device configuration of specified LAG attributes
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config:
+ - name: PortChannel10
+ members:
+ interfaces:
+ - member: Eth1/7
+ state: replaced
+#
+# After state:
+# ------------
+#
+# interface Eth1/5
+# no channel-group
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/6
+# channel-group 20
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/7
+# channel-group 10
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# interface Eth1/5
+# channel-group 10
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/6
+# no channel-group
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/7
+# channel-group 2
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+- name: Override device configuration of all LAG attributes
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config:
+ - name: PortChannel20
+ members:
+ interfaces:
+ - member: Eth1/6
+ state: overridden
+#
+# After state:
+# ------------
+# interface Eth1/5
+# no channel-group
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/6
+# channel-group 20
+# mtu 9100
+# speed 100000
+# no shutdown
+#
+# interface Eth1/7
+# no channel-group
+# mtu 9100
+# speed 100000
+# no shutdown
+#
# Using deleted
#
# Before state:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lldp_global.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lldp_global.py
new file mode 100644
index 000000000..6577d21d0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_lldp_global.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_lldp_global
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: sonic_lldp_global
+version_added: '2.1.0'
+short_description: Manage Global LLDP configurations on SONiC
+description:
+ - This module provides configuration management of global LLDP parameters
+ for use on LLDP enabled Layer 2 interfaces of devices running SONiC.
+ - It is intended for use in conjunction with LLDP Layer 2 interface
+ configuration applied on participating interfaces.
+author: 'Divya Balasubramanian (@divya-balasubramania)'
+options:
+ config:
+ description: The set of link layer discovery protocol global attribute configurations
+ type: dict
+ suboptions:
+ enable:
+ description:
+ - This argument is a boolean value to enable or disable LLDP.
+ type: bool
+ multiplier:
+ description:
+        - The multiplier value is used to determine the timeout interval (that is, hello-time x multiplier value).
+        - The range is from 1 to 10.
+ type: int
+ system_description:
+ description:
+ - Description of this system to be sent in LLDP advertisements.
+ - When configured, this value is used in the advertisements
+ instead of the default system description.
+ type: str
+ system_name:
+ description:
+        - Specifying a descriptive system name with this option makes it easier to distinguish the device in LLDP output.
+ - By default, the host name is used.
+ type: str
+ mode:
+ description:
+        - By default, both transmit and receive of LLDP frames are enabled.
+        - This option can be used to configure receive-only or transmit-only mode.
+ type: str
+ choices:
+ - receive
+ - transmit
+ hello_time:
+ description:
+ - Frequency at which LLDP advertisements are sent (in seconds).
+ - The range is from 5 to 254 sec
+ type: int
+ tlv_select:
+ description:
+ - By default, management address and system capabilities TLV are advertised in LLDP frames.
+ - This configuration option can be used to selectively suppress sending of these TLVs
+          to the peer.
+ type: dict
+ suboptions:
+ management_address:
+ description:
+ - Enable or disable management address TLV.
+ type: bool
+ system_capabilities:
+ description:
+ - Enable or disable system capabilities TLV.
+ type: bool
+ state:
+ description:
+ - The state specifies the type of configuration update to be performed on the device.
+ - If the state is "merged", merge specified attributes with existing configured attributes.
+ - For "deleted", delete the specified attributes from existing configuration.
+ type: str
+ choices:
+ - merged
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration
+# !
+# lldp receive
+# lldp timer 200
+# lldp multiplier 1
+# lldp system-name 8999_System
+# lldp system-description sonic_system
+# !
+
+ - name: Delete LLDP configurations
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ hello_time: 200
+ system_description : sonic_system
+ mode: receive
+ multiplier: 1
+ state: deleted
+
+# After State:
+# ------------
+# sonic# show running-configuration | grep lldp
+# !
+# lldp system-name 8999_System
+# !
+# sonic#
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration | grep lldp
+# sonic#
+
+ - name: Delete default LLDP configurations
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ tlv_select:
+ system_capabilities: true
+ state: deleted
+
+# After State:
+# ------------
+# sonic# show running-configuration
+# !
+# no lldp tlv-select system-capabilities
+# !
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration | grep lldp
+# !
+# lldp receive
+# lldp timer 200
+# lldp multiplier 1
+# lldp system-name 8999_System
+# lldp system-description sonic_system
+# !
+
+ - name: Delete all LLDP configuration
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ state: deleted
+
+# After State: (No LLDP global configuration present.)
+# ------------
+# sonic# show running-configuration | grep lldp
+# sonic#
+
+
+# Using Merged
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration | grep lldp
+# sonic#
+
+ - name: Modify LLDP configurations
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ enable: false
+ multiplier: 9
+ system_name : CR_sonic
+ hello_time: 18
+ mode: receive
+ system_description: Sonic_System
+ tlv_select:
+ management_address: true
+ system_capabilities: false
+ state: merged
+
+# After State:
+# ------------
+# sonic# show running-configuration | grep lldp
+# !
+# no lldp enable
+# no lldp tlv-select system-capabilities
+# lldp receive
+# lldp timer 18
+# lldp multiplier 9
+# lldp system-name CR_sonic
+# lldp system-description Sonic_System
+# !
+
+
+# Using Merged
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration | grep lldp
+# !
+# lldp receive
+# lldp timer 200
+# lldp multiplier 1
+# lldp system-name 8999_System
+# lldp system-description sonic_system
+# !
+
+ - name: Modify LLDP configurations
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ multiplier: 9
+ system_name : CR_sonic
+ state: merged
+
+# After State:
+# ------------
+# sonic# show running-configuration | grep lldp
+# !
+# lldp receive
+# lldp timer 200
+# lldp multiplier 9
+# lldp system-name CR_sonic
+# lldp system-description sonic_system
+# !
+
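+
+# The following task is an additional illustrative sketch (not taken from the
+# original examples above). It shows the other "mode" choice, transmit, and
+# suppression of the management address TLV, both of which are described under
+# the module options but not demonstrated in the examples above.
+
+  - name: Configure transmit-only LLDP operation
+    dellemc.enterprise_sonic.sonic_lldp_global:
+      config:
+        mode: transmit
+        tlv_select:
+          management_address: false
+      state: merged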
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+ type: list
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+ type: list
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.lldp_global.lldp_global import Lldp_globalArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.lldp_global.lldp_global import Lldp_global
+
+
+def main():
+ """
+ Main entry point for module execution
+
+    :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Lldp_globalArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Lldp_global(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_logging.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_logging.py
new file mode 100644
index 000000000..d8c592874
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_logging.py
@@ -0,0 +1,274 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_logging
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: sonic_logging
+version_added: 2.1.0
+notes:
+ - Supports C(check_mode).
+short_description: Manage logging configuration on SONiC.
+description:
+ - This module provides configuration management of logging for devices running SONiC.
+author: "M. Zhang (@mingjunzhang2019)"
+options:
+ config:
+ description:
+ - Specifies logging related configurations.
+ type: dict
+ suboptions:
+ remote_servers:
+ type: list
+ elements: dict
+ description:
+        - Remote logging server configuration.
+ suboptions:
+ host:
+ type: str
+ description:
+ - IPv4/IPv6 address or host name of the remote logging server.
+ required: true
+ remote_port:
+ type: int
+ description:
+ - Destination port number for logging messages sent to the server.
+ - remote_port can not be deleted.
+ source_interface:
+ type: str
+ description:
+ - Source interface used as source ip for sending logging packets.
+ - source_interface can not be deleted.
+ message_type:
+ type: str
+ description:
+ - Type of messages that remote server receives.
+ - message_type can not be deleted.
+ choices:
+ - log
+ - event
+ vrf:
+ type: str
+ description:
+ - VRF name used by remote logging server.
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.0.2 5 Ethernet24 - event
+#10.11.1.1 616 Ethernet8 - log
+#log1.dell.com 6 Ethernet28 - log
+#
+- name: Delete logging server configuration
+ sonic_logging:
+ config:
+ remote_servers:
+ - host: 10.11.0.2
+ - host: log1.dell.com
+ state: deleted
+
+# After state:
+# ------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.1.1 616 Ethernet8 - log
+#
+#
+# Using merged
+#
+# Before state:
+# -------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.1.1 616 Ethernet8 - log
+#
+- name: Merge logging server configuration
+ sonic_logging:
+ config:
+ remote_servers:
+ - host: 10.11.0.2
+ remote_port: 5
+ source_interface: Ethernet24
+ message_type: event
+ - host: log1.dell.com
+ remote_port: 6
+ source_interface: Ethernet28
+ state: merged
+
+# After state:
+# ------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.0.2 5 Ethernet24 - event
+#10.11.1.1 616 Ethernet8 - log
+#log1.dell.com 6 Ethernet28 - log
+#
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.1.1 616 Ethernet8 - log
+#10.11.1.2 626 Ethernet16 - event
+#
+- name: Override logging server configuration
+ sonic_logging:
+ config:
+ remote_servers:
+ - host: 10.11.1.2
+ remote_port: 622
+ source_interface: Ethernet24
+ message_type: event
+ state: overridden
+#
+# After state:
+# ------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.1.2 622 Ethernet24 - event
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.1.1 616 Ethernet8 - log
+#10.11.1.2 626 Ethernet16 - event
+#
+- name: Replace logging server configuration
+ sonic_logging:
+ config:
+ remote_servers:
+ - host: 10.11.1.2
+ remote_port: 622
+ state: replaced
+#
+# After state:
+# ------------
+#
+# "MESSAGE-TYPE" has default value of "log"
+#
+#sonic# show logging servers
+#--------------------------------------------------------------------------------
+#HOST PORT SOURCE-INTERFACE VRF MESSGE-TYPE
+#--------------------------------------------------------------------------------
+#10.11.1.1 616 Ethernet8 - log
+#10.11.1.2 622 - - log
+#
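+# Using merged
+#
+# The following task is an additional illustrative sketch (not taken from the
+# original examples above). It shows the "vrf" option described under the
+# module options; the VRF name "mgmt", the server address, and the port are
+# placeholder values, and the VRF must already exist on the device.
+#
+- name: Merge logging server configuration that uses a VRF
+  sonic_logging:
+    config:
+      remote_servers:
+      - host: 10.11.0.3
+        remote_port: 514
+        vrf: mgmt
+    state: merged
+#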
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ type: list
+ returned: always
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.logging.logging import LoggingArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.logging.logging import Logging
+
+
+def main():
+ """
+ Main entry point for module execution
+
+    :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=LoggingArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Logging(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mac.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mac.py
new file mode 100644
index 000000000..42acfd4fb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mac.py
@@ -0,0 +1,319 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_mac
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community'
+}
+
+DOCUMENTATION = """
+---
+module: sonic_mac
+version_added: "2.1.0"
+short_description: Manage MAC configuration on SONiC
+description:
+ - This module provides configuration management of MAC for devices running SONiC
+author: "Shade Talabi (@stalabi1)"
+options:
+ config:
+ description:
+ - A list of MAC configurations.
+ type: list
+ elements: dict
+ suboptions:
+ vrf_name:
+ description:
+ - Specifies the VRF name.
+ type: str
+ default: 'default'
+ mac:
+ description:
+ - Configuration attributes for MAC.
+ type: dict
+ suboptions:
+ aging_time:
+ description:
+ - Time in seconds of inactivity before the MAC entry is timed out.
+ type: int
+ default: 600
+ dampening_interval:
+ description:
+            - Interval for which MAC movements are observed before disabling MAC learning on a port.
+ type: int
+ default: 5
+ dampening_threshold:
+ description:
+ - Number of MAC movements allowed per second before disabling MAC learning on a port.
+ type: int
+ default: 5
+ mac_table_entries:
+ description:
+ - Configuration attributes for MAC table entries.
+ type: list
+ elements: dict
+ suboptions:
+ mac_address:
+ description:
+ - MAC address for the dynamic or static MAC table entry.
+ type: str
+ required: True
+ vlan_id:
+ description:
+ - ID number of VLAN on which the MAC address is present.
+ type: int
+ required: True
+ interface:
+ description:
+ - Specifies the interface for the MAC table entry.
+ type: str
+ state:
+ description:
+ - The state of the configuration after module completion
+ type: str
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
+ default: merged
+"""
+EXAMPLES = """
+# Using merged
+#
+# Before state:
+# -------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 5
+# MAC Move Dampening Interval : 5
+# sonic# show running-configuration | grep mac
+# (No MAC configuration present)
+
+ - name: Merge MAC configurations
+ dellemc.enterprise_sonic.sonic_mac:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 50
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:33:33:33:33:33'
+ vlan_id: 2
+ interface: 'Ethernet24'
+ - mac_address: '00:00:4e:00:24:af'
+ vlan_id: 3
+ interface: 'Ethernet28'
+ state: merged
+
+# After state:
+# ------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 30
+# MAC Move Dampening Interval : 20
+# sonic# show running-configuration | grep mac
+# mac address-table 00:00:5e:00:53:af Vlan1 Ethernet20
+# mac address-table 00:33:33:33:33:33 Vlan2 Ethernet24
+# mac address-table 00:00:4e:00:24:af Vlan3 Ethernet28
+# mac address-table aging-time 50
+#
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 30
+# MAC Move Dampening Interval : 20
+# sonic# show running-configuration | grep mac
+# mac address-table 00:00:5e:00:53:af Vlan1 Ethernet20
+# mac address-table 00:33:33:33:33:33 Vlan2 Ethernet24
+# mac address-table 00:00:4e:00:24:af Vlan3 Ethernet28
+# mac address-table aging-time 50
+
+ - name: Replace MAC configurations
+ dellemc.enterprise_sonic.sonic_mac:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 45
+ dampening_interval: 30
+ dampening_threshold: 60
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 3
+ interface: 'Ethernet24'
+ - mac_address: '00:44:44:44:44:44'
+ vlan_id: 2
+ interface: 'Ethernet20'
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 60
+# MAC Move Dampening Interval : 30
+# sonic# show running-configuration | grep mac
+# mac address-table 00:00:5e:00:53:af Vlan3 Ethernet24
+# mac address-table 00:33:33:33:33:33 Vlan2 Ethernet24
+# mac address-table 00:00:4e:00:24:af Vlan3 Ethernet28
+# mac address-table 00:44:44:44:44:44 Vlan2 Ethernet20
+# mac address-table aging-time 45
+#
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 60
+# MAC Move Dampening Interval : 30
+# sonic# show running-configuration | grep mac
+# mac address-table 00:00:5e:00:53:af Vlan3 Ethernet24
+# mac address-table 00:33:33:33:33:33 Vlan2 Ethernet24
+# mac address-table 00:00:4e:00:24:af Vlan3 Ethernet28
+# mac address-table 00:44:44:44:44:44 Vlan2 Ethernet20
+# mac address-table aging-time 45
+
+  - name: Override MAC configurations
+ dellemc.enterprise_sonic.sonic_mac:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 10
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:11:11:11:11:11'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:22:22:22:22:22'
+ vlan_id: 2
+ interface: 'Ethernet24'
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 30
+# MAC Move Dampening Interval : 20
+# sonic# show running-configuration | grep mac
+# mac address-table 00:11:11:11:11:11 Vlan1 Ethernet20
+# mac address-table 00:22:22:22:22:22 Vlan2 Ethernet24
+# mac address-table aging-time 10
+#
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 30
+# MAC Move Dampening Interval : 20
+# sonic# show running-configuration | grep mac
+# mac address-table 00:11:11:11:11:11 Vlan1 Ethernet20
+# mac address-table 00:22:22:22:22:22 Vlan2 Ethernet24
+# mac address-table aging-time 10
+
+  - name: Delete MAC configurations
+ dellemc.enterprise_sonic.sonic_mac:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 10
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:11:11:11:11:11'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:22:22:22:22:22'
+ vlan_id: 2
+ interface: 'Ethernet24'
+ state: deleted
+
+# After state:
+# ------------
+#
+# sonic# show mac dampening
+# MAC Move Dampening Threshold : 5
+# MAC Move Dampening Interval : 5
+# sonic# show running-configuration | grep mac
+# (No MAC configuration present)
+
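+
+# The following task is an additional illustrative sketch (not taken from the
+# original examples above). Because the module declares check mode support, a
+# configuration merge can be previewed without applying any change to the
+# device; the values shown here are placeholders.
+
+  - name: Preview a MAC configuration merge without applying it
+    dellemc.enterprise_sonic.sonic_mac:
+      config:
+        - vrf_name: 'default'
+          mac:
+            aging_time: 100
+      state: merged
+    check_mode: true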
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.mac.mac import MacArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.mac.mac import Mac
+
+
+def main():
+ """
+ Main entry point for module execution
+
+    :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=MacArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Mac(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py
index 28d3dbb5b..e17fe080c 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_mclag.py
@@ -34,11 +34,11 @@ DOCUMENTATION = """
module: sonic_mclag
version_added: 1.0.0
notes:
-- Tested against Enterprise SONiC Distribution by Dell Technologies.
-- Supports C(check_mode).
+ - Tested against Enterprise SONiC Distribution by Dell Technologies.
+ - Supports C(check_mode).
short_description: Manage multi chassis link aggregation groups domain (MCLAG) and its parameters
description:
- - Manage multi chassis link aggregation groups domain (MCLAG) and its parameters
+ - Manage multi chassis link aggregation groups domain (MCLAG) and its parameters.
author: Abirami N (@abirami-n)
options:
@@ -65,7 +65,7 @@ options:
type: str
system_mac:
description:
- - Mac address of MCLAG.
+ - MAC address of MCLAG.
type: str
keepalive:
description:
@@ -75,17 +75,45 @@ options:
description:
- MCLAG session timeout value in secs.
type: int
+ delay_restore:
+ description:
+ - MCLAG delay restore time in secs.
+ type: int
+ gateway_mac:
+ description:
+ - Gateway MAC address for router ports over MCLAG.
+ - Configured gateway MAC address can be modified only when I(state=replaced) or I(state=overridden).
+ type: str
unique_ip:
- description: Holds Vlan dictionary for mclag unique ip.
+ description: Holds Vlan dictionary for MCLAG unique IP.
+ suboptions:
+ vlans:
+ description:
+ - Holds a list of VLANs and VLAN ranges for which a separate IP address is enabled for Layer 3 protocol support over MCLAG.
+ type: list
+ elements: dict
+ suboptions:
+ vlan:
+ description:
+ - Holds a VLAN name or VLAN range.
+ - Specify a single VLAN eg. Vlan10.
+ - Specify a range of VLANs eg. Vlan10-20.
+ type: str
+ type: dict
+ peer_gateway:
+ description: Holds Vlan dictionary for MCLAG peer gateway.
suboptions:
vlans:
description:
- - Holds list of VLANs for which a separate IP addresses is enabled for Layer 3 protocol support over MCLAG.
+ - Holds a list of VLANs and VLAN ranges for which MCLAG peer gateway functionality is enabled.
type: list
elements: dict
suboptions:
vlan:
- description: Holds a VLAN ID.
+ description:
+ - Holds a VLAN name or VLAN range.
+ - Specify a single VLAN eg. Vlan10.
+ - Specify a range of VLANs eg. Vlan10-20.
type: str
type: dict
members:
@@ -106,8 +134,10 @@ options:
- The state that the configuration should be left in.
type: str
choices:
- - merged
- - deleted
+ - merged
+ - deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -118,7 +148,7 @@ EXAMPLES = """
#
# sonic# show mclag brief
# MCLAG Not Configured
-#
+
- name: Merge provided configuration with device configuration
dellemc.enterprise_sonic.sonic_mclag:
config:
@@ -128,14 +158,22 @@ EXAMPLES = """
peer_link: 'Portchannel1'
keepalive: 1
session_timeout: 3
+ delay_restore: 240
+ system_mac: '00:00:00:11:11:11'
+ gateway_mac: '00:00:00:12:12:12'
unique_ip:
- vlans:
- - vlan: Vlan4
+ vlans:
+ - vlan: Vlan4
+ - vlan: Vlan21-25
+ peer_gateway:
+ vlans:
+ - vlan: Vlan4
+ - vlan: Vlan21-25
members:
- portchannles:
- - lag: PortChannel10
+ portchannels:
+ - lag: PortChannel10
state: merged
-#
+
# After state:
# ------------
#
@@ -150,7 +188,10 @@ EXAMPLES = """
# Peer Link : PortChannel1
# Keepalive Interval : 1 secs
# Session Timeout : 3 secs
+# Delay Restore : 240 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:1
@@ -159,18 +200,34 @@ EXAMPLES = """
#-----------------------------------------------------------
# PortChannel10 down/down
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
-#
-#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+
+
# Using merged
#
# Before state:
@@ -187,7 +244,10 @@ EXAMPLES = """
# Peer Link : PortChannel1
# Keepalive Interval : 1 secs
# Session Timeout : 3 secs
+# Delay Restore : 240 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:1
@@ -196,18 +256,33 @@ EXAMPLES = """
#-----------------------------------------------------------
# PortChannel10 down/down
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
-#
-#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+
- name: Merge device configuration with the provided configuration
dellemc.enterprise_sonic.sonic_mclag:
config:
@@ -215,14 +290,20 @@ EXAMPLES = """
source_address: 3.3.3.3
keepalive: 10
session_timeout: 30
+ delay_restore: 360
unique_ip:
vlans:
- vlan: Vlan5
+ - vlan: Vlan26-28
+ peer_gateway:
+ vlans:
+ - vlan: Vlan5
+ - vlan: Vlan26-28
members:
portchannels:
- lag: PortChannel12
state: merged
-#
+
# After state:
# ------------
#
@@ -237,7 +318,10 @@ EXAMPLES = """
# Peer Link : PortChannel1
# Keepalive Interval : 10 secs
# Session Timeout : 30 secs
+# Delay Restore : 360 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:2
@@ -247,21 +331,41 @@ EXAMPLES = """
# PortChannel10 down/down
# PortChannel12 down/down
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# },
-# "Vlan5": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
-#
-#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan5
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# Vlan26
+# Vlan27
+# Vlan28
+# ==============
+# Total count : 10
+# ==============
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan5
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# Vlan26
+# Vlan27
+# Vlan28
+# ==============
+# Total count : 10
+# ==============
+# sonic#
+
+
# Using deleted
#
# Before state:
@@ -278,7 +382,10 @@ EXAMPLES = """
# Peer Link : PortChannel1
# Keepalive Interval : 10 secs
# Session Timeout : 30 secs
+# Delay Restore : 360 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:1
@@ -287,28 +394,52 @@ EXAMPLES = """
#-----------------------------------------------------------
# PortChannel10 down/down
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
-#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+
- name: Delete device configuration based on the provided configuration
dellemc.enterprise_sonic.sonic_mclag:
- config:
- domain_id: 1
- source_address: 3.3.3.3
- keepalive: 10
- members:
- portchannels:
- - lag: PortChannel10
- state: deleted
-#
+ config:
+ domain_id: 1
+ source_address: 3.3.3.3
+ keepalive: 10
+ unique_ip:
+ vlans:
+ - vlan: Vlan22
+ - vlan: Vlan24-25
+ peer_gateway:
+ vlans:
+ - vlan: Vlan22
+ - vlan: Vlan24-25
+ members:
+ portchannels:
+ - lag: PortChannel10
+ state: deleted
+
# After state:
# ------------
#
@@ -322,25 +453,37 @@ EXAMPLES = """
# Peer Address : 1.1.1.1
# Peer Link : PortChannel1
# Keepalive Interval : 1 secs
-# Session Timeout : 15 secs
+# Session Timeout : 30 secs
+# Delay Restore : 360 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:0
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
-#
-#
-#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan23
+# ==============
+# Total count : 3
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan23
+# ==============
+# Total count : 3
+# ==============
+# sonic#
+
+
# Using deleted
#
# Before state:
@@ -357,7 +500,10 @@ EXAMPLES = """
# Peer Link : PortChannel1
# Keepalive Interval : 10 secs
# Session Timeout : 30 secs
+# Delay Restore : 360 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:1
@@ -366,32 +512,40 @@ EXAMPLES = """
#-----------------------------------------------------------
# PortChannel10 down/down
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
-#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# ==============
+# Total count : 1
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# ==============
+# Total count : 1
+# ==============
+# sonic#
+
- name: Delete all device configuration
dellemc.enterprise_sonic.sonic_mclag:
config:
state: deleted
-#
+
# After state:
# ------------
#
# sonic# show mclag brief
# MCLAG Not Configured
-#
-# admin@sonic:~$ show runningconfiguration all | grep MCLAG_UNIQUE_IP
-# admin@sonic:~$
-#
-#
+# sonic# show mclag separate-ip-interfaces
+# MCLAG separate IP interface not configured
+# sonic# show mclag peer-gateway-interfaces
+# MCLAG Peer Gateway interface not configured
+# sonic#
+
+
# Using deleted
#
# Before state:
@@ -408,7 +562,10 @@ EXAMPLES = """
# Peer Link : PortChannel1
# Keepalive Interval : 10 secs
# Session Timeout : 30 secs
+# Delay Restore : 360 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:2
@@ -416,29 +573,37 @@ EXAMPLES = """
# MLAG Interface Local/Remote Status
#-----------------------------------------------------------
# PortChannel10 down/down
-# PortChannel12 down/sown
-#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
+# PortChannel12 down/down
+#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# ==============
+# Total count : 1
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# ==============
+# Total count : 1
+# ==============
+# sonic#
+
- name: Delete device configuration based on the provided configuration
dellemc.enterprise_sonic.sonic_mclag:
config:
domain_id: 1
source_address: 3.3.3.3
keepalive: 10
+ peer_gateway:
+ vlans:
members:
portchannels:
- - lag: PortChannel10
state: deleted
-#
+
# After state:
# ------------
#
@@ -452,24 +617,283 @@ EXAMPLES = """
# Peer Address : 1.1.1.1
# Peer Link : PortChannel1
# Keepalive Interval : 1 secs
-# Session Timeout : 15 secs
+# Session Timeout : 30 secs
+# Delay Restore : 360 secs
# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
#
#
# Number of MLAG Interfaces:0
#
-# admin@sonic:~$ show runningconfiguration all
-# {
-# ...
-# "MCLAG_UNIQUE_IP": {
-# "Vlan4": {
-# "unique_ip": "enable"
-# }
-# },
-# ...
-# }
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# ==============
+# Total count : 1
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# MCLAG Peer Gateway interface not configured
+# sonic#
+
+
+# Using replaced
+#
+# Before state:
+# ------------
+#
+# sonic# show mclag brief
+#
+# Domain ID : 1
+# Role : standby
+# Session Status : down
+# Peer Link Status : down
+# Source Address : 2.2.2.2
+# Peer Address : 1.1.1.1
+# Peer Link : PortChannel1
+# Keepalive Interval : 1 secs
+# Session Timeout : 3 secs
+# Delay Restore : 240 secs
+# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
+#
+#
+# Number of MLAG Interfaces:2
+#-----------------------------------------------------------
+# MLAG Interface Local/Remote Status
+#-----------------------------------------------------------
+# PortChannel10 down/down
+# PortChannel11 down/down
+#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+
+- name: Replace device configuration with the provided configuration
+ dellemc.enterprise_sonic.sonic_mclag:
+ config:
+ domain_id: 1
+ unique_ip:
+ vlans:
+ - vlan: Vlan5
+ - vlan: Vlan24-28
+ peer_gateway:
+ vlans:
+ - vlan: Vlan5
+ - vlan: Vlan24-28
+ members:
+ portchannels:
+ - lag: PortChannel10
+ - lag: PortChannel12
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show mclag brief
+#
+# Domain ID : 1
+# Role : standby
+# Session Status : down
+# Peer Link Status : down
+# Source Address : 2.2.2.2
+# Peer Address : 1.1.1.1
+# Peer Link : PortChannel1
+# Keepalive Interval : 1 secs
+# Session Timeout : 3 secs
+# Delay Restore : 240 secs
+# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
+#
+#
+# Number of MLAG Interfaces:2
+#-----------------------------------------------------------
+# MLAG Interface Local/Remote Status
+#-----------------------------------------------------------
+# PortChannel10 down/down
+# PortChannel12 down/down
+#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan5
+# Vlan24
+# Vlan25
+# Vlan26
+# Vlan27
+# Vlan28
+# ==============
+# Total count : 6
+# ==============
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan5
+# Vlan24
+# Vlan25
+# Vlan26
+# Vlan27
+# Vlan28
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+
+
+# Using overridden
+#
+# Before state:
+# ------------
+#
+# sonic# show mclag brief
+#
+# Domain ID : 1
+# Role : standby
+# Session Status : down
+# Peer Link Status : down
+# Source Address : 2.2.2.2
+# Peer Address : 1.1.1.1
+# Peer Link : PortChannel1
+# Keepalive Interval : 1 secs
+# Session Timeout : 3 secs
+# Delay Restore : 240 secs
+# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
+#
+#
+# Number of MLAG Interfaces:2
+#-----------------------------------------------------------
+# MLAG Interface Local/Remote Status
+#-----------------------------------------------------------
+# PortChannel10 down/down
+# PortChannel11 down/down
+#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan4
+# Vlan21
+# Vlan22
+# Vlan23
+# Vlan24
+# Vlan25
+# ==============
+# Total count : 6
+# ==============
+# sonic#
+
+- name: Override device configuration with the provided configuration
+ dellemc.enterprise_sonic.sonic_mclag:
+ config:
+ domain_id: 1
+ peer_address: 1.1.1.1
+ source_address: 3.3.3.3
+ peer_link: 'Portchannel1'
+ system_mac: '00:00:00:11:11:11'
+ gateway_mac: '00:00:00:12:12:12'
+ unique_ip:
+ vlans:
+ - vlan: Vlan24-28
+ peer_gateway:
+ vlans:
+ - vlan: Vlan24-28
+ members:
+ portchannels:
+ - lag: PortChannel10
+ - lag: PortChannel12
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show mclag brief
#
+# Domain ID : 1
+# Role : standby
+# Session Status : down
+# Peer Link Status : down
+# Source Address : 3.3.3.3
+# Peer Address : 1.1.1.1
+# Peer Link : PortChannel1
+# Keepalive Interval : 1 secs
+# Session Timeout : 30 secs
+# Delay Restore : 300 secs
+# System Mac : 20:04:0f:37:bd:c9
+# Mclag System Mac : 00:00:00:11:11:11
+# Gateway Mac : 00:00:00:12:12:12
+#
+#
+# Number of MLAG Interfaces:2
+#-----------------------------------------------------------
+# MLAG Interface Local/Remote Status
+#-----------------------------------------------------------
+# PortChannel10 down/down
+# PortChannel12 down/down
#
+# sonic# show mclag separate-ip-interfaces
+# Interface Name
+# ==============
+# Vlan24
+# Vlan25
+# Vlan26
+# Vlan27
+# Vlan28
+# ==============
+# Total count : 5
+# ==============
+# sonic# show mclag peer-gateway-interfaces
+# Interface Name
+# ==============
+# Vlan24
+# Vlan25
+# Vlan26
+# Vlan27
+# Vlan28
+# ==============
+# Total count : 5
+# ==============
+# sonic#
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py
index 87db8bb06..c04e437c1 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_ntp.py
@@ -34,6 +34,8 @@ DOCUMENTATION = """
---
module: sonic_ntp
version_added: 2.0.0
+notes:
+ - Supports C(check_mode).
short_description: Manage NTP configuration on SONiC.
description:
- This module provides configuration management of NTP for devices running SONiC.
@@ -67,6 +69,7 @@ options:
elements: dict
description:
- List of NTP servers.
+ - minpoll and maxpoll are required to be configured together.
suboptions:
address:
type: str
@@ -88,6 +91,11 @@ options:
description:
- Maximum poll interval to poll NTP server.
- maxpoll can not be deleted.
+ prefer:
+ type: bool
+ description:
+ - Indicates whether this server should be preferred.
+ - prefer can not be deleted.
ntp_keys:
type: list
elements: dict
@@ -127,8 +135,10 @@ options:
- The state of the configuration after module completion.
type: str
choices:
- - merged
- - deleted
+ - merged
+ - replaced
+ - overridden
+ - deleted
default: merged
"""
EXAMPLES = """
@@ -138,16 +148,16 @@ EXAMPLES = """
# -------------
#
#sonic# show ntp server
-#----------------------------------------------------------------------
-#NTP Servers minpoll maxpoll Authentication key ID
-#----------------------------------------------------------------------
-#10.11.0.1 6 10
-#10.11.0.2 5 9
-#dell.com 6 9
-#dell.org 7 10
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1 6 10 False
+#10.11.0.2 5 9 False
+#dell.com 6 9 False
+#dell.org 7 10 True
#
- name: Delete NTP server configuration
- ntp:
+ sonic_ntp:
config:
servers:
- address: 10.11.0.2
@@ -158,11 +168,11 @@ EXAMPLES = """
# ------------
#
#sonic# show ntp server
-#----------------------------------------------------------------------
-#NTP Servers minpoll maxpoll Authentication key ID
-#----------------------------------------------------------------------
-#10.11.0.1 6 10
-#dell.com 6 9
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1 6 10 False
+#dell.com 6 9 False
#
#
# Using deleted
@@ -177,7 +187,7 @@ EXAMPLES = """
#NTP source-interfaces: Ethernet0, Ethernet4, Ethernet8, Ethernet16
#
- name: Delete NTP source-interface configuration
- ntp:
+ sonic_ntp:
config:
source_interfaces:
- Ethernet8
@@ -205,7 +215,7 @@ EXAMPLES = """
#ntp authentication-key 20 sha2-256 U2FsdGVkX1/eAzKj1teKhYWD7tnzOsYOijGeFAT0rKM= encrypted
#
- name: Delete NTP key configuration
- ntp:
+ sonic_ntp:
config:
ntp_keys:
- key_id: 10
@@ -225,14 +235,14 @@ EXAMPLES = """
# -------------
#
#sonic# show ntp server
-#----------------------------------------------------------------------
-#NTP Servers minpoll maxpoll Authentication key ID
-#----------------------------------------------------------------------
-#10.11.0.1 6 10
-#dell.com 6 9
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1 6 10 False
+#dell.com 6 9 False
#
- name: Merge NTP server configuration
- ntp:
+ sonic_ntp:
config:
servers:
- address: 10.11.0.2
@@ -240,19 +250,20 @@ EXAMPLES = """
- address: dell.org
minpoll: 7
maxpoll: 10
+ prefer: true
state: merged
# After state:
# ------------
#
#sonic# show ntp server
-#----------------------------------------------------------------------
-#NTP Servers minpoll maxpoll Authentication key ID
-#----------------------------------------------------------------------
-#10.11.0.1 6 10
-#10.11.0.2 5 9
-#dell.com 6 9
-#dell.org 7 10
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1      6       10      False
+#10.11.0.2      5       10      False
+#dell.com       6       9       False
+#dell.org 7 10 True
#
#
# Using merged
@@ -267,7 +278,7 @@ EXAMPLES = """
#NTP source-interfaces: Ethernet0, Ethernet4
#
- name: Merge NTP source-interface configuration
- ntp:
+ sonic_ntp:
config:
source_interfaces:
- Ethernet8
@@ -293,7 +304,7 @@ EXAMPLES = """
#ntp authentication-key 8 sha1 U2FsdGVkX1/NpJrdOeyMeUHEkSohY6azY9VwbAqXRTY= encrypted
#
- name: Merge NTP key configuration
- ntp:
+ sonic_ntp:
config:
ntp_keys:
- key_id: 10
@@ -314,6 +325,87 @@ EXAMPLES = """
#ntp authentication-key 10 md5 U2FsdGVkX1/Gxds/5pscCvIKbVngGaKka4SQineS51Y= encrypted
#ntp authentication-key 20 sha2-256 U2FsdGVkX1/eAzKj1teKhYWD7tnzOsYOijGeFAT0rKM= encrypted
#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#sonic# show ntp server
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1 6 10 False
+#dell.com 6 9 False
+#
+- name: Replace NTP server configuration
+ sonic_ntp:
+ config:
+ servers:
+ - address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 9
+ - address: dell.com
+ minpoll: 7
+ maxpoll: 10
+ prefer: true
+ state: replaced
+#
+# After state:
+# ------------
+#
+#sonic# show ntp server
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1 6 10 False
+#10.11.0.2 5 9 False
+#dell.com 7 10 True
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#sonic# show ntp server
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.1 6 10 False
+#dell.com 6 9 False
+#
+#sonic# show ntp global
+#----------------------------------------------
+#NTP Global Configuration
+#----------------------------------------------
+#NTP source-interfaces: Ethernet0, Ethernet4
+#
+- name: Override NTP configuration
+ sonic_ntp:
+ config:
+ servers:
+ - address: 10.11.0.2
+ minpoll: 5
+ - address: dell.com
+ minpoll: 7
+ maxpoll: 10
+ prefer: true
+ state: overridden
+#
+# After state:
+# ------------
+#
+#sonic# show ntp server
+#----------------------------------------------------------------------------
+#NTP Servers minpoll maxpoll Prefer Authentication key ID
+#----------------------------------------------------------------------------
+#10.11.0.2 5 10 False
+#dell.com 7 10 True
+#
+#sonic# show ntp global
+#
"""
RETURN = """
before:
@@ -330,6 +422,13 @@ after:
sample: >
The configuration returned will always be in the same format
of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_pki.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_pki.py
new file mode 100644
index 000000000..559935fef
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_pki.py
@@ -0,0 +1,301 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2022 Dell EMC
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_pki
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: sonic_pki
+version_added: 2.3.0
+short_description: 'Manages PKI attributes of Enterprise SONiC'
+description: 'Manages PKI attributes of Enterprise SONiC'
+author: Eric Seifert (@seiferteric)
+notes:
+ - 'Tested against Dell Enterprise SONiC 4.1.0'
+options:
+ config:
+ description: The provided configuration
+ type: dict
+ suboptions:
+ trust_stores:
+ description: Store of CA Certificates
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ type: str
+ required: True
+ description: The name of the Trust Store
+ ca_name:
+ type: list
+ elements: str
+ description: List of CA certificates in the trust store.
+ security_profiles:
+ description: Application Security Profiles
+ type: list
+ elements: dict
+ suboptions:
+ profile_name:
+ type: str
+ required: True
+ description: Profile Name
+ certificate_name:
+ type: str
+ description: Host Certificate Name
+ trust_store:
+ type: str
+ description: Name of associated trust_store
+ revocation_check:
+ description: Require certificate revocation check succeeds
+ type: bool
+ peer_name_check:
+ description: Require peer name is verified
+ type: bool
+ key_usage_check:
+ description: Require key usage is enforced
+ type: bool
+ cdp_list:
+        description: Global list of CDPs (CRL distribution points)
+ type: list
+ elements: str
+ ocsp_responder_list:
+ description: Global list of OCSP responders
+ type: list
+ elements: str
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
+ default: merged
+"""
+EXAMPLES = """
+# Using "merged" state for initial config
+#
+# Before state:
+# -------------
+#
+# sonic# show running-configuration | grep crypto
+# sonic#
+#
+- name: PKI Config Test
+ hosts: datacenter
+ gather_facts: false
+ connection: httpapi
+ collections:
+ - dellemc.enterprise_sonic
+ tasks:
+ - name: "Initial Config"
+ sonic_pki:
+ config:
+ security_profiles:
+ - profile_name: rest
+ ocsp_responder_list:
+ - http://example.com/ocspa
+ - http://example.com/ocspb
+ certificate_name: host
+ trust_store: default-ts
+ trust_stores:
+ - name: default-ts
+ ca_name:
+ - CA2
+ state: merged
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store default-ts ca-cert CA2
+# crypto security-profile rest
+# crypto security-profile trust_store rest default-ts
+# crypto security-profile certificate rest host
+# crypto security-profile ocsp-list rest http://example.com/ocspa,http://example.com/ocspb
+
+# Using "deleted" state to remove configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store default-ts ca-cert CA2
+# crypto security-profile rest
+# crypto security-profile trust_store rest default-ts
+# crypto security-profile certificate rest host
+# crypto security-profile ocsp-list rest http://example.com/ocsp
+#
+- name: PKI Delete Test
+ hosts: datacenter
+ gather_facts: true
+ connection: httpapi
+ collections:
+ - dellemc.enterprise_sonic
+ tasks:
+ - name: Remove trust_store from security-profile
+ sonic_pki:
+ config:
+ security_profiles:
+ - profile_name: rest
+ trust_store: default-ts
+ state: deleted
+# After state:
+# ------------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store default-ts ca-cert CA2
+# crypto security-profile rest
+# crypto security-profile certificate rest host
+# crypto security-profile ocsp-list rest http://example.com/ocsp
+
+# Using "overridden" state
+
+# Before state:
+# ------------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store default-ts ca-cert CA2
+# crypto security-profile rest
+# crypto security-profile trust_store rest default-ts
+# crypto security-profile certificate rest host
+# crypto security-profile ocsp-list rest http://example.com/ocspa,http://example.com/ocspb
+#
+- name: PKI Overridden Test
+ hosts: datacenter
+ gather_facts: false
+ connection: httpapi
+ collections:
+ - dellemc.enterprise_sonic
+ tasks:
+ - name: "Overridden Config"
+ sonic_pki:
+ config:
+ security_profiles:
+ - profile_name: telemetry
+ ocsp_responder_list:
+ - http://example.com/ocspb
+ revocation_check: true
+ trust_store: telemetry-ts
+ certificate_name: host
+ trust_stores:
+ - name: telemetry-ts
+ ca_name: CA
+ state: overridden
+# After state:
+# -----------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store telemetry-ts ca-cert CA
+# crypto security-profile telemetry revocation_check true
+# crypto security-profile trust_store telemetry telemetry-ts
+# crypto security-profile certificate telemetry host
+# crypto security-profile ocsp-list telemetry http://example.com/ocspb
+
+# Using "replaced" state to update config
+
+# Before state:
+# ------------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store default-ts ca-cert CA2
+# crypto security-profile rest
+# crypto security-profile trust_store rest default-ts
+# crypto security-profile certificate rest host
+# crypto security-profile ocsp-list rest http://example.com/ocspa,http://example.com/ocspb
+#
+- name: PKI Replace Test
+ hosts: datacenter
+ gather_facts: false
+ connection: httpapi
+ collections:
+ - dellemc.enterprise_sonic
+ tasks:
+ - name: "Replace Config"
+ sonic_pki:
+ config:
+ security_profiles:
+ - profile_name: rest
+ ocsp_responder_list:
+ - http://example.com/ocsp
+ revocation_check: false
+ trust_store: default-ts
+ certificate_name: host
+ state: replaced
+# After state:
+# -----------
+#
+# sonic# show running-configuration | grep crypto
+# crypto trust_store default-ts ca-cert CA2
+# crypto security-profile rest
+# crypto security-profile trust_store rest default-ts
+# crypto security-profile certificate rest host
+# crypto security-profile ocsp-list rest http://example.com/ocsp
+
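+# Using check mode to preview a change
+#
+# A minimal sketch (reusing the host group and profile names from the
+# examples above): the module supports check mode, so a task can report the
+# requests it would push without modifying the device.
+#
+- name: PKI Check Mode Preview
+  hosts: datacenter
+  gather_facts: false
+  connection: httpapi
+  collections:
+    - dellemc.enterprise_sonic
+  tasks:
+  - name: "Preview Config"
+    sonic_pki:
+      config:
+        security_profiles:
+          - profile_name: rest
+            trust_store: default-ts
+      state: merged
+    check_mode: true
+    register: pki_preview
+
+# The registered result then contains the "commands" described in the RETURN
+# section below, and no change is applied to the switch.
+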
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: dict
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: dict
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.pki.pki import PkiArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.pki.pki import Pki
+
+
+def main():
+ """
+ Main entry point for module execution
+
+    :returns: the result from module invocation
+ """
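+    # Build the Ansible module from the generated PKI argument spec; check
+    # mode is supported so changes can be previewed before being applied.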
+ module = AnsibleModule(argument_spec=PkiArgs.argument_spec,
+ supports_check_mode=True)
+
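+    # Delegate configuration handling (current-state collection, diff
+    # computation and request generation) to the Pki config class.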
+ result = Pki(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py
index 66ea00476..3de7dfb17 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_breakout.py
@@ -57,23 +57,35 @@ options:
- Specifies the mode of the port breakout.
type: str
choices:
+ - 1x10G
+ - 1x25G
+ - 1x40G
+ - 1x50G
- 1x100G
+ - 1x200G
- 1x400G
- - 1x40G
+ - 2x10G
+ - 2x25G
+ - 2x40G
+ - 2x50G
- 2x100G
- 2x200G
- - 2x50G
- - 4x100G
- 4x10G
- 4x25G
- 4x50G
+ - 4x100G
+ - 8x10G
+ - 8x25G
+ - 8x50G
state:
description:
- Specifies the operation to be performed on the port breakout configured on the device.
- In case of merged, the input mode configuration will be merged with the existing port breakout configuration on the device.
- - In case of deleted the existing port breakout mode configuration will be removed from the device.
+ - In case of deleted, the existing port breakout mode configuration will be removed from the device.
+    - In case of replaced, the on-device port breakout configuration of the specified interfaces is replaced with the provided configuration.
+ - In case of overridden, all on-device port breakout configurations are overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
type: str
"""
EXAMPLES = """
@@ -82,18 +94,18 @@ EXAMPLES = """
# Before state:
# -------------
#
-#do show interface breakout
-#-----------------------------------------------
-#Port Breakout Mode Status Interfaces
-#-----------------------------------------------
-#1/1 4x10G Completed Eth1/1/1
-# Eth1/1/2
-# Eth1/1/3
-# Eth1/1/4
-#1/11 1x100G Completed Eth1/11
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/1 4x10G Completed Eth1/1/1
+# Eth1/1/2
+# Eth1/1/3
+# Eth1/1/4
+# 1/11 1x100G Completed Eth1/11/1
#
-- name: Merge users configurations
+- name: Delete interface port breakout configuration
dellemc.enterprise_sonic.sonic_port_breakout:
config:
- name: 1/11
@@ -103,15 +115,16 @@ EXAMPLES = """
# After state:
# ------------
#
-#do show interface breakout
-#-----------------------------------------------
-#Port Breakout Mode Status Interfaces
-#-----------------------------------------------
-#1/1 4x10G Completed Eth1/1/1
-# Eth1/1/2
-# Eth1/1/3
-# Eth1/1/4
-#1/11 Default Completed Ethernet40
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/1 4x10G Completed Eth1/1/1
+# Eth1/1/2
+# Eth1/1/3
+# Eth1/1/4
+# 1/11 Default Completed Eth1/11
+#
# Using deleted
@@ -119,31 +132,31 @@ EXAMPLES = """
# Before state:
# -------------
#
-#do show interface breakout
-#-----------------------------------------------
-#Port Breakout Mode Status Interfaces
-#-----------------------------------------------
-#1/1 4x10G Completed Eth1/1/1
-# Eth1/1/2
-# Eth1/1/3
-# Eth1/1/4
-#1/11 1x100G Completed Eth1/11
-#
-- name: Merge users configurations
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/1 4x10G Completed Eth1/1/1
+# Eth1/1/2
+# Eth1/1/3
+# Eth1/1/4
+# 1/11 1x100G Completed Eth1/11/1
+#
+
+- name: Delete all port breakout configurations
dellemc.enterprise_sonic.sonic_port_breakout:
config:
state: deleted
-
# After state:
# ------------
#
-#do show interface breakout
-#-----------------------------------------------
-#Port Breakout Mode Status Interfaces
-#-----------------------------------------------
-#1/1 Default Completed Ethernet0
-#1/11 Default Completed Ethernet40
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/1 Default Completed Eth1/1
+# 1/11 Default Completed Eth1/11
# Using merged
@@ -151,35 +164,111 @@ EXAMPLES = """
# Before state:
# -------------
#
-#do show interface breakout
-#-----------------------------------------------
-#Port Breakout Mode Status Interfaces
-#-----------------------------------------------
-#1/1 4x10G Completed Eth1/1/1
-# Eth1/1/2
-# Eth1/1/3
-# Eth1/1/4
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/1 4x10G Completed Eth1/1/1
+# Eth1/1/2
+# Eth1/1/3
+# Eth1/1/4
#
-- name: Merge users configurations
+
+- name: Merge port breakout configurations
dellemc.enterprise_sonic.sonic_port_breakout:
config:
- name: 1/11
mode: 1x100G
state: merged
+# After state:
+# ------------
+#
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/1 4x10G Completed Eth1/1/1
+# Eth1/1/2
+# Eth1/1/3
+# Eth1/1/4
+# 1/11 1x100G Completed Eth1/11/1
+
+
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/49 4x25G Completed Eth1/49/1
+# Eth1/49/2
+# Eth1/49/3
+# Eth1/49/4
+#
+
+- name: Replace port breakout configurations
+ dellemc.enterprise_sonic.sonic_port_breakout:
+ config:
+ - name: 1/49
+ mode: 4x10G
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/49 4x10G Completed Eth1/49/1
+# Eth1/49/2
+# Eth1/49/3
+# Eth1/49/4
+
+
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# sonic# show interface breakout
+# ----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/49 4x10G Completed Eth1/49/1
+# Eth1/49/2
+# Eth1/49/3
+# Eth1/49/4
+# 1/50 2x50G Completed Eth1/50/1
+# Eth1/50/2
+# 1/51 1x100G Completed Eth1/51/1
+#
+
+- name: Override port breakout configurations
+ dellemc.enterprise_sonic.sonic_port_breakout:
+ config:
+ - name: 1/52
+ mode: 4x10G
+ state: overridden
# After state:
# ------------
#
-#do show interface breakout
-#-----------------------------------------------
-#Port Breakout Mode Status Interfaces
-#-----------------------------------------------
-#1/1 4x10G Completed Eth1/1/1
-# Eth1/1/2
-# Eth1/1/3
-# Eth1/1/4
-#1/11 1x100G Completed Eth1/11
+# sonic# show interface breakout
+# -----------------------------------------------
+# Port Breakout Mode Status Interfaces
+# -----------------------------------------------
+# 1/49 Default Completed Eth1/49
+# 1/50 Default Completed Eth1/50
+# 1/51 Default Completed Eth1/51
+# 1/52 4x10G Completed Eth1/52/1
+# Eth1/52/2
+# Eth1/52/3
+# Eth1/52/4
"""
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_group.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_group.py
new file mode 100644
index 000000000..d31c19cd3
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_port_group.py
@@ -0,0 +1,370 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# © Copyright 2022 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_port_group
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: sonic_port_group
+version_added: 2.1.0
+notes:
+ - Supports C(check_mode).
+short_description: Manages port group configuration on SONiC.
+description:
+ - This module provides configuration management of port group for devices running SONiC.
+author: 'M. Zhang (@mingjunzhang2019)'
+options:
+ config:
+ description:
+ - A list of port group configurations.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ type: str
+ description:
+ - The index of the port group.
+ required: true
+ speed:
+ description:
+ - Speed for the port group.
+        - This configures the speed for all the member ports of the port group.
+ - Supported speeds are dependent on the type of switch.
+ type: str
+ choices:
+ - SPEED_10MB
+ - SPEED_100MB
+ - SPEED_1GB
+ - SPEED_2500MB
+ - SPEED_5GB
+ - SPEED_10GB
+ - SPEED_20GB
+ - SPEED_25GB
+ - SPEED_40GB
+ - SPEED_50GB
+ - SPEED_100GB
+ - SPEED_400GB
+ state:
+ description:
+ - The state of the configuration after module completion.
+ type: str
+ choices:
+ - merged
+ - replaced
+ - overridden
+ - deleted
+ default: merged
+"""
+EXAMPLES = """
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 10G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+- name: Delete port group speed configuration for selected port groups
+ sonic_port_group:
+ config:
+ - id: 1
+ - id: 10
+ state: deleted
+#
+#
+# After state:
+# ------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 25G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+# Using deleted
+#
+# Before state:
+# -------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 10G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+- name: Delete all port group speed configuration
+ sonic_port_group:
+ config:
+ - id:
+ state: deleted
+#
+#
+# After state:
+# ------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 25G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 25G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+# Using merged
+#
+# Before state:
+# -------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 25G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 25G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+- name: Configure port group speed
+ sonic_port_group:
+ config:
+ - id: 1
+ speed: SPEED_10GB
+ - id: 9
+ speed: SPEED_10GB
+ state: merged
+#
+#
+# After state:
+# ------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 10G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 25G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 10G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 25G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+- name: Replace port group speed
+ sonic_port_group:
+ config:
+ - id: 1
+ speed: SPEED_10GB
+ - id: 9
+ speed: SPEED_10GB
+ state: replaced
+#
+# After state:
+# ------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 10G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 10G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 25G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 10G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 10G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 10G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 10G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 10G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 10G
+#
+- name: Override port group speed
+ sonic_port_group:
+ config:
+ - id: 1
+ speed: SPEED_10GB
+ - id: 9
+ speed: SPEED_10GB
+ state: overridden
+#
+# After state:
+# ------------
+#
+#sonic# show port-group
+#-------------------------------------------------------------------------------------
+#Port-group Interface range Valid speeds Default Speed Current Speed
+#-------------------------------------------------------------------------------------
+#1 Ethernet0 - Ethernet3 10G, 25G 25G 10G
+#2 Ethernet4 - Ethernet7 10G, 25G 25G 25G
+#3 Ethernet8 - Ethernet11 10G, 25G 25G 25G
+#4 Ethernet12 - Ethernet15 10G, 25G 25G 25G
+#5 Ethernet16 - Ethernet19 10G, 25G 25G 25G
+#6 Ethernet20 - Ethernet23 10G, 25G 25G 25G
+#7 Ethernet24 - Ethernet27 10G, 25G 25G 25G
+#8 Ethernet28 - Ethernet31 10G, 25G 25G 25G
+#9 Ethernet32 - Ethernet35 10G, 25G 25G 10G
+#10 Ethernet36 - Ethernet39 10G, 25G 25G 25G
+#
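+# The module result can be registered to inspect the requests pushed to the
+# device. A minimal sketch (port group IDs reused from the examples above):
+#
+- name: Configure port group speed and record the pushed requests
+  sonic_port_group:
+    config:
+      - id: 1
+        speed: SPEED_10GB
+    state: merged
+  register: pg_result
+
+- name: Display the requests that were sent to the device
+  debug:
+    var: pg_result.commands
+#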
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.port_group.port_group import Port_groupArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.port_group.port_group import Port_group
+
+
+def main():
+ """
+ Main entry point for module execution
+
+    :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Port_groupArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Port_group(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py
index 5a734e8b2..b3389b6ad 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_prefix_lists.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -87,11 +87,15 @@ options:
description:
- Specifies the type of configuration update to be performed on the device.
- For "merged", merge specified attributes with existing configured attributes.
- - For "deleted", delete the specified attributes from exiting configuration.
+ - For "deleted", delete the specified attributes from existing configuration.
+ - For "replaced", replace the specified existing configuration with the provided configuration.
+ - For "overridden", override the existing configuration with the provided configuration.
type: str
choices:
- merged
- deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -227,6 +231,95 @@ EXAMPLES = """
# sonic#
# (no IPv6 prefix-list configuration present)
#
+# ***************************************************************
+# Using "overridden" state to override configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration ip prefix-list
+# !
+# ip prefix-list pfx1 seq 10 permit 1.2.3.4/24 ge 26 le 30
+# ip prefix-list pfx3 seq 20 deny 1.2.3.12/26
+# ip prefix-list pfx4 seq 30 permit 7.8.9.0/24
+#
+# sonic# show running-configuration ipv6 prefix-list
+# !
+# ipv6 prefix-list pfx6 seq 25 permit 40::300/124
+#
+# ------------
+#
+- name: Override prefix-list configuration
+ dellemc.enterprise_sonic.sonic_prefix_lists:
+ config:
+ - name: pfx2
+ afi: "ipv4"
+ prefixes:
+ - sequence: 10
+ prefix: "10.20.30.128/24"
+ action: "deny"
+ ge: 25
+ le: 30
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration ip prefix-list
+# !
+# ip prefix-list pfx2 seq 10 deny 10.20.30.128/24 ge 25 le 30
+#
+# sonic# show running-configuration ipv6 prefix-list
+# sonic#
+# (no IPv6 prefix-list configuration present)
+#
+# ***************************************************************
+# Using "replaced" state to replace configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration ip prefix-list
+# !
+# ip prefix-list pfx2 seq 10 deny 10.20.30.128/24 ge 25 le 30
+#
+# sonic# show running-configuration ipv6 prefix-list
+# sonic#
+# (no IPv6 prefix-list configuration present)
+#
+# ------------
+#
+- name: Replace prefix-list configuration
+ dellemc.enterprise_sonic.sonic_prefix_lists:
+ config:
+ - name: pfx2
+ afi: "ipv4"
+ prefixes:
+ - sequence: 10
+ prefix: "10.20.30.128/24"
+ action: "permit"
+ ge: 25
+ le: 30
+ - name: pfx3
+ afi: "ipv6"
+ prefixes:
+ - sequence: 20
+ action: "deny"
+ prefix: "60::70/124"
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration ip prefix-list
+# !
+# ip prefix-list pfx2 seq 10 permit 10.20.30.128/24 ge 25 le 30
+#
+# sonic# show running-configuration ipv6 prefix-list
+# !
+# ipv6 prefix-list pfx3 seq 20 deny 60::70/124
+#
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py
index 1df4aff61..bc1f81d39 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_radius_server.py
@@ -71,6 +71,7 @@ options:
description:
- Specifies the timeout of the radius server.
type: int
+ default: 5
retransmit:
description:
- Specifies the re-transmit value of the radius server.
@@ -110,6 +111,7 @@ options:
description:
- Specifies the port of the radius server host.
type: int
+ default: 1812
timeout:
description:
- Specifies the timeout of the radius server host.
@@ -131,8 +133,10 @@ options:
- Specifies the operation to be performed on the radius server configured on the device.
- In case of merged, the input mode configuration will be merged with the existing radius server configuration on the device.
- In case of deleted the existing radius server mode configuration will be removed from the device.
+ - In case of replaced, the existing radius server configuration will be replaced with provided configuration.
+ - In case of overridden, the existing radius server configuration will be overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'replaced', 'overridden', 'deleted']
type: str
"""
EXAMPLES = """
@@ -280,8 +284,106 @@ EXAMPLES = """
#---------------------------------------------------------
#RADIUS Statistics
#---------------------------------------------------------
-
-
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#sonic(config)# do show radius-server
+#---------------------------------------------------------
+#RADIUS Global Configuration
+#---------------------------------------------------------
+#timeout : 10
+#auth-type : pap
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI
+#--------------------------------------------------------------------------------------
+#1.2.3.4 pap No 49 1 5 - - Ethernet0
+#
+- name: Replace radius configurations
+ sonic_radius_server:
+ config:
+ auth_type: mschapv2
+ timeout: 20
+ servers:
+ - host:
+ name: 1.2.3.4
+ auth_type: mschapv2
+ key: mschapv2
+ source_interface: Ethernet12
+ state: replaced
+#
+# After state:
+# ------------
+#
+#sonic(config)# do show radius-server
+#---------------------------------------------------------
+#RADIUS Global Configuration
+#---------------------------------------------------------
+#timeout : 20
+#auth-type : mschapv2
+#key configured : No
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI
+#--------------------------------------------------------------------------------------
+#1.2.3.4 mschapv2 Yes 1812 - - - - Ethernet12
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#sonic(config)# do show radius-server
+#---------------------------------------------------------
+#RADIUS Global Configuration
+#---------------------------------------------------------
+#timeout : 10
+#auth-type : pap
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI
+#--------------------------------------------------------------------------------------
+#1.2.3.4 pap No 49 1 5 - - Ethernet0
+#11.12.13.14 chap Yes 49 10 5 3 - -
+#
+- name: Override radius configurations
+ sonic_radius_server:
+ config:
+ auth_type: mschapv2
+ key: mschapv2
+ timeout: 20
+ servers:
+ - host:
+ name: 1.2.3.4
+ auth_type: mschapv2
+ key: mschapv2
+ source_interface: Ethernet12
+ - host:
+ name: 10.10.11.12
+ auth_type: chap
+ timeout: 30
+ priority: 2
+ port: 49
+ state: overridden
+#
+# After state:
+# ------------
+#
+#sonic(config)# do show radius-server
+#---------------------------------------------------------
+#RADIUS Global Configuration
+#---------------------------------------------------------
+#timeout : 20
+#auth-type : mschapv2
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG AUTH-PORT PRIORITY TIMEOUT RTSMT VRF SI
+#--------------------------------------------------------------------------------------
+#1.2.3.4 mschapv2 Yes 1812 - - - - Ethernet12
+#10.10.11.12 chap No 49 2 30 - - -
+#
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_route_maps.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_route_maps.py
new file mode 100644
index 000000000..01327572e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_route_maps.py
@@ -0,0 +1,1606 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_route_maps
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: sonic_route_maps
+version_added: "2.1.0"
+author: "Kerry Meyer (@kerry-meyer)"
+short_description: route map configuration handling for SONiC
+description:
+ - This module provides configuration management for route map parameters on devices running SONiC.
+options:
+ config:
+ description:
+ - Specifies a list of route map configuration dictionaries
+ type: list
+ elements: dict
+ suboptions:
+ map_name:
+ description:
+ - Name of a route map
+ type: str
+ required: true
+ action:
+ description:
+ - action type for the route map (permit or deny)
+ - This value is required for creation and modification of a route
+ - map or route map attributes as well as for deletion of route map
+ - attributes. It can be omitted only when requesting deletion of a
+ - route map statement or all route map statements for a given route
+ - map map_name.
+ type: str
+ choices:
+ - permit
+ - deny
+ sequence_num:
+ description:
+        - unique number in the range 1-65535 to specify priority of the map
+ - This value is required for creation and modification of a route
+ - map or route map attributes as well as for deletion of route map
+ - attributes. It can be omitted only when requesting deletion of all
+ - route map "statements" for a given route map "map_name".
+ type: int
+ match:
+ description: Criteria for matching the route map to a route
+ type: dict
+ suboptions:
+ as_path:
+ description:
+ - Name of a configured BGP AS path list to be checked for
+ - a match with the target route
+ type: str
+ community:
+ description:
+ - Name of a configured BGP "community" to be checked for
+ - a match with the target route
+ type: str
+ evpn:
+ description:
+ - BGP Ethernet Virtual Private Network to be checked for
+ - a match with the target route
+ type: dict
+ suboptions:
+ default_route:
+ description:
+ - Default EVPN type-5 route
+ type: bool
+ route_type:
+ description:
+ - "Non-default route type: One of the following:"
+ - mac-ip route, EVPN Type 3 Inclusive Multicast Ethernet
+ - Tag (IMET) route, or prefix route
+ type: str
+ choices:
+ - macip
+ - multicast
+ - prefix
+ vni:
+ description:
+ - VNI ID to be checked for a match; specified by a value in the
+ - range 1-16777215
+ type: int
+ ext_comm:
+ description:
+ - Name of a configured BGP 'extended community' to be checked for
+ - a match with the target route
+ type: str
+ interface:
+ description:
+ - Next hop interface name (type and number) to be checked for a
+ - match with the target route. The interface type can be any
+ - of the following; 'Ethernet/Eth' interface or sub-interface,
+ - "'Loopback' interface, 'PortChannel' interface or"
+ - "sub-interface, 'Vlan' interface."
+ type: str
+ ip:
+ description:
+ - IP addresses or IP next hops to be checked for a match with the
+ - target route
+ type: dict
+ suboptions:
+ address:
+ description:
+ - name of an IPv4 prefix list containing a list of address
+ - prefixes to be checked for a match with the target route
+ type: str
+ next_hop:
+ description:
+ - name of a prefix list containing a list of next-hop
+ - prefixes to be checked for a match with the target route
+ type: str
+ ipv6:
+ description:
+ - IPv6 addresses to be checked for a match with the
+ - target route
+ type: dict
+ suboptions:
+ address:
+ description:
+ - name of an IPv6 prefix list containing a list of address
+ - prefixes to be checked for a match with the target route
+ type: str
+ required: true
+ local_preference:
+ description:
+ - local-preference value to be checked for a match with the
+ - target route. This is a value in the range 0-4294967295.
+ type: int
+ metric:
+ description:
+ - metric value to be checked for a match with the target route.
+ - This is a value in the range 0-4294967295.
+ type: int
+ origin:
+ description:
+ - BGP origin to be checked for a match with the target route
+ type: str
+ choices:
+ - egp
+ - igp
+ - incomplete
+ peer:
+ description:
+ - BGP routing peer/neighbor required for a matching route.
+ - I(ip), I(ipv6), and I(interface) are mutually exclusive.
+ type: dict
+ suboptions:
+ ip:
+ description: IPv4 address of a BGP peer
+ type: str
+ ipv6:
+ description: IPv6 address of a BGP peer
+ type: str
+ interface:
+ description:
+ - Name (type and number) of a BGP peer interface.
+ - Allowed interface types are Ethernet or Eth (depending
+ - on the configured interface-naming mode),
+ - Vlan, and Portchannel
+ type: str
+ source_protocol:
+ description: Source protocol required for a matching route
+ type: str
+ choices:
+ - bgp
+ - connected
+ - ospf
+ - static
+ source_vrf:
+ description: Name of the source VRF required for a matching route
+ type: str
+ tag:
+ description:
+ - Tag value required for a matching route
+ - The value must be in the range 1-4294967295
+ type: int
+ set:
+ description: Information to set into a matching route for re-distribution
+ type: dict
+ suboptions:
+ as_path_prepend:
+ description:
+ - String specifying a comma-separated list of AS-path numbers
+ - "to prepend to the BGP AS-path attribute in a matched route."
+ - AS-path values in the list must be in the range
+ - "1-4294967295; for example, 2000,3000"
+ type: str
+ comm_list_delete:
+ description:
+ - String specifying the name of a BGP community list containing
+ - BGP Community values to be deleted from matching routes.
+ type: str
+ community:
+ description:
+ - BGP community attributes to add to or replace the BGP
+ - community attributes in a matching route. Specifying the
+ - "'additive' attribute is allowed only if one of"
+ - the other attributes (other than 'none') is specified.
+ - It causes the specified 'set community' attributes
+ - to be added to the already existing community
+ - "attributes in the matching route. If the 'additive' attribute"
+ - is not specified, the previously existing community attributes
+ - in the matching route are replaced by the configured
+ - "'set community' attributes. Specifying a 'set community' attribute"
+ - of 'none' is mutually exclusive with setting of other community
+ - attributes and causes any community attributes in the matching
+ - route to be removed.
+ type: dict
+ suboptions:
+ community_number:
+ description:
+ - A list of one or more BGP community numbers in the
+ - "form AA:NN where AA and NN are integers in the range"
+ - "0-65535."
+ - "Note: Each community number in the list must be enclosed"
+ - in double quotes to avoid YAML parsing errors due to the
+ - "list values containing an embedded ':' character."
+ type: list
+ elements: str
+ community_attributes:
+ description:
+ - A list of one or more BGP community attributes. The allowed
+ - "values are the following:"
+ - local_as
+ - Do not send outside local AS (well-known community)
+ - no_advertise
+ - Do not advertise to any peer (well-known community)
+ - no_export
+ - Do not export to next AS (well-known community)
+ - no_peer
+ - "The route does not need to be advertised to peers."
+ - (Advertisement of the route can be suppressed based
+ - on other criteria.)
+ - additive
+ - Add the configured 'set community' attributes to
+ - "the matching route (if set to 'true'); Previously existing"
+ - attributes in the matching route are, instead, replaced
+ - by the configured attributes if this attribute is
+ - not specified or if it is set to 'false'.
+ - none
+ - Do not send any community attribute. This attribute
+ - is mutually exclusive with all other 'set community'
+ - attributes. It causes all attributes to be removed
+ - from the matching route.
+ - "I(none) is mutually exclusive with all of the other attributes:"
+                - I(local_as), I(no_advertise), I(no_export), I(no_peer),
+                - and I(additive).
+ type: list
+ elements: str
+ choices:
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ - additive
+ - none
+ extcommunity:
+ description:
+ - BGP extended community attributes to set into a matching route.
+ type: dict
+ suboptions:
+ rt:
+ description:
+ - Route Target VPN extended communities in the format
+ - "ASN:NN or IP-ADDRESS:NN"
+ - "Note: Each rt value in the list must be enclosed"
+ - in double quotes to avoid YAML parsing errors due to the
+ - "list values containing an embedded ':' character."
+ type: list
+ elements: str
+ soo:
+ description:
+ - "Site-of-Origin VPN extended communities in the format"
+ - "ASN:NN or IP-ADDRESS:NN"
+                - "Note: Each soo value in the list must be enclosed"
+ - in double quotes to avoid YAML parsing errors due to the
+ - "list values containing an embedded ':' character."
+ type: list
+ elements: str
+ ip_next_hop:
+ description:
+ - IPv4 next hop address to set into a matching route in the
+ - dotted decimal format A.B.C.D
+ type: str
+ ipv6_next_hop:
+ description:
+ - IPv6 next hop address attributes to set into a matching route
+ type: dict
+ suboptions:
+ global_addr:
+ description:
+ - IPv6 global next hop address to set into a matching
+ - "route in the format A::B"
+ type: str
+ prefer_global:
+ description:
+ - Set the corresponding attribute into a matching route
+ - if the value of this Ansible attribute is 'true'.
+ - The attribute indicates that the routing algorithm must
+ - prefer the global next-hop address over the link-local
+ - address if both exist.
+ type: bool
+ local_preference:
+ description:
+ - "BGP local preference path attribute; integer value in"
+ - the range 0-4294967295
+ type: int
+ metric:
+ description:
+ - route metric value actions
+ - I(value) and I(rtt_action) are mutually exclusive.
+ type: dict
+ suboptions:
+ value:
+ description:
+ - "metric value to be set into a matching route;"
+ - value in the range 0-4294967295
+ type: int
+ rtt_action:
+ description:
+ - Action to take for modifying the metric for a matched
+ - "route using the Round Trip Time (rtt);"
+ - C(set) causes the route metric to be set to the
+ - rtt value.
+ - C(add) causes the rtt value to be added
+ - to the route metric.
+ - C(subtract) causes the rtt value to be
+ - subtracted from route metric.
+ type: str
+ choices:
+ - set
+ - add
+ - subtract
+ origin:
+ description:
+ - "BGP route origin; One of the following must be selected."
+ - "egp (External; remote EGP)"
+ - "igp (Internal; local IGP)"
+ - incomplete (Unknown origin)
+ type: str
+ choices:
+ - egp
+ - igp
+ - incomplete
+ weight:
+ description:
+ - BGP weight for the routing table. The weight must be an
+ - integer in the range 0-4294967295
+ type: int
+ call:
+ description:
+ - Name of a route map to jump to after executing 'match' and 'set'
+ - statements for the current route map.
+ type: str
+
+ state:
+ description:
+ - Specifies the type of configuration update to be performed on the device.
+ - For C(merged), merge specified attributes with existing configured attributes.
+ - For C(deleted), delete the specified attributes from existing configuration.
+ - For C(replaced), replace each modified list or dictionary with the
+ - specified items.
+ - For C(overridden), replace all current configuration for this resource
+ - module with the specified configuration.
+ type: str
+ choices:
+ - merged
+ - deleted
+ - replaced
+ - overridden
+ default: merged
+"""
+EXAMPLES = """
+# Using "merged" state to create initial configuration
+#
+# Before state:
+# -------------
+#
+# sonic# show running-configuration route-map
+# sonic#
+# (No configuration present)
+#
+# -------------
+#
+- name: Merge initial route_maps configuration
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as1
+ community: bgp_comm_list1
+ evpn:
+ default_route: true
+ vni: 735
+ ext_comm: bgp_ext_comm1
+ interface: Ethernet4
+ ip:
+ address: ip_pfx_list1
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ ip: 10.20.30.40
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+ set:
+ as_path_prepend: 200,315,7135
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ community_attributes:
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ - additive
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: true
+ local_preference: 635
+ metric:
+          value: 870
+ origin: egp
+ weight: 93471
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ evpn:
+ route_type: multicast
+ origin: incomplete
+ peer:
+ interface: Ethernet6
+ source_protocol: ospf
+ set:
+ metric:
+ rtt_action: add
+ origin: incomplete
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ evpn:
+ route_type: prefix
+ source_protocol: static
+ set:
+ metric:
+ rtt_action: subtract
+ state: merged
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as1
+# match evpn default-route
+# match evpn vni 735
+# match ip address prefix-list ip_pfx_list1
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Ethernet4
+# match community bgp_comm_list1
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match peer 10.20.30.40
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 200,315,7135
+# set community 35:58 79:150 308:650 local-AS no-advertise no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric 870
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set ipv6 next-hop prefer-global
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match evpn route-type multicast
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin incomplete
+# set metric +rtt
+# set origin incomplete
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol connected
+# match origin igp
+# set community none
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+# ------------
+
+
+# Using "merged" state to update and add configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as1
+# match evpn default-route
+# match evpn vni 735
+# match ip address prefix-list ip_pfx_list1
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Ethernet4
+# match community bgp_comm_list1
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match peer 10.20.30.40
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 200,315,7135
+# set community 35:58 79:150 308:650 local-AS no-advertise no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric 870
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set ipv6 next-hop prefer-global
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match evpn route-type multicast
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin incomplete
+# set metric +rtt
+# set origin incomplete
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol connected
+# match origin igp
+# set community none
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+# ------------
+#
+- name: Merge additional and modified route map configuration
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ route_type: prefix
+ vni: 850
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ peer:
+ interface: Portchannel14
+ set:
+ as_path_prepend: 188,257
+ community:
+ community_number:
+ - "45:736"
+ ipv6_next_hop:
+ prefer_global: false
+ metric:
+ rtt_action: add
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ set:
+ metric:
+ rtt_action: subtract
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric: 8000
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ local_preference: 14783
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - no_advertise
+ state: merged
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn default-route
+# match evpn route-type prefix
+# match evpn vni 850
+# match ip address prefix-list ip_pfx_list2
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community 35:58 79:150 308:650 45:736 local-AS no-advertise no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match as-path bgp_as3
+# match evpn route-type multicast
+# match ext-community bgp_ext_comm2
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin igp
+# set metric -rtt
+# set origin incomplete
+# !
+# route-map rm2 permit 100
+# match interface Ethernet16
+# set as-path prepend 200,300,400
+# set ipv6 next-hop global 37::58
+# set ipv6 next-hop prefer-global
+# set metric 8000
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+
+
+# Using "replaced" state to replace the contents of a list
+#
+# Before state:
+# ------------
+#
+# sonic(config-route-map)# do show running-configuration route-map rm1 80
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn default-route
+# match evpn route-type prefix
+# match evpn vni 850
+# match ip address prefix-list ip_pfx_list2
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community 35:58 79:150 308:650 45:736 local-AS no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# ------------
+- name: Replace a list
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ community:
+ community_number:
+ - "15:30"
+ - "26:54"
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map rm1 80
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn default-route
+# match evpn route-type prefix
+# match evpn vni 850
+# match ip address prefix-list ip_pfx_list2
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community 15:30 26:54 local-AS no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+
+
+# Using "replaced" state to replace the contents of dictionaries
+#
+# Before state:
+# ------------
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn default-route
+# match evpn route-type prefix
+# match evpn vni 850
+# match ip address prefix-list ip_pfx_list2
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community 15:30 26:54 local-AS no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match as-path bgp_as3
+# match evpn route-type multicast
+# match ext-community bgp_ext_comm2
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin igp
+# set metric -rtt
+# set origin incomplete
+# !
+# route-map rm2 permit 100
+# match interface Ethernet16
+# set as-path prepend 200,300,400
+# set ipv6 next-hop global 37::58
+# set ipv6 next-hop prefer-global
+# set metric 8000
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+# ------------
+- name: Replace dictionaries
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ evpn:
+ route_type: multicast
+ ip:
+ address: ip_pfx_list1
+ set:
+ community:
+ community_attributes:
+ - no_advertise
+ extcommunity:
+ rt:
+ - "20:20"
+
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ set:
+ ipv6_next_hop:
+ global_addr: 45::90
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn route-type multicast
+# match ip address prefix-list ip_pfx_list1
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community no-advertise
+# set extcommunity rt 20:20
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match as-path bgp_as3
+# match evpn route-type multicast
+# match ext-community bgp_ext_comm2
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin igp
+# set metric -rtt
+# set origin incomplete
+# !
+# route-map rm2 permit 100
+# match interface Ethernet16
+# set as-path prepend 200,300,400
+# set metric 8000
+# set ipv6 next-hop global 45::90
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+
+
+# Using "overridden" state to override all existing configuration with new
+# configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn route-type multicast
+# match ip address prefix-list ip_pfx_list1
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community no-advertise
+# set extcommunity rt 30:40
+# set extcommunity rt 20:20
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match as-path bgp_as3
+# match evpn route-type multicast
+# match ext-community bgp_ext_comm2
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin igp
+# set metric -rtt
+# set origin incomplete
+# !
+# route-map rm2 permit 100
+# match interface Ethernet16
+# set as-path prepend 200,300,400
+# set metric 8000
+# set ipv6 next-hop global 45::90
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+# ------------
+- name: Override all route map configuration with new configuration
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm5
+ action: permit
+ sequence_num: 250
+ match:
+ interface: Ethernet28
+ set:
+ as_path_prepend: 150,275
+ metric: 7249
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm5 permit 250
+# match interface Ethernet28
+# set as-path prepend 150,275
+# set metric 7249
+
+
+# Using "overridden" state to override all existing configuration with new
+# configuration. (Restore previous configuration.)
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm5 permit 250
+# match interface Ethernet28
+# set as-path prepend 150,275
+# set metric 7249
+# ------------
+- name: Override (restore) all route map configuration with older configuration
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ default_route: true
+ route_type: prefix
+ vni: 850
+ ext_comm: bgp_ext_comm1
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ interface: Portchannel14
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+ set:
+ as_path_prepend: 188,257
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ - "45:736"
+ community_attributes:
+ - local_as
+ - no_export
+ - no_peer
+ - additive
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ local_preference: 635
+ metric:
+ rtt_action: add
+ origin: egp
+ weight: 93471
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet6
+ source_protocol: ospf
+ set:
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric: 8000
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ local_preference: 14783
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - no_advertise
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ evpn:
+ route_type: prefix
+ source_protocol: static
+ set:
+ metric:
+ rtt_action: subtract
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn default-route
+# match evpn route-type prefix
+# match evpn vni 850
+# match ip address prefix-list ip_pfx_list2
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community 35:58 79:150 308:650 45:736 local-AS no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match as-path bgp_as3
+# match evpn route-type multicast
+# match ext-community bgp_ext_comm2
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin igp
+# set metric -rtt
+# set origin incomplete
+# !
+# route-map rm2 permit 100
+# match interface Ethernet16
+# set as-path prepend 200,300,400
+# set ipv6 next-hop global 37::58
+# set ipv6 next-hop prefer-global
+# set metric 8000
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+
+
+# Using "deleted" state to remove configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration route-map rm1 80
+# !
+# route-map rm1 permit 80
+# match as-path bgp_as2
+# match evpn default-route
+# match evpn route-type prefix
+# match evpn vni 850
+# match ip address prefix-list ip_pfx_list2
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match community bgp_comm_list3
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set as-path prepend 188,257
+# set community 35:58 79:150 308:650 45:736 local-AS no-export no-peer additive
+# set extcommunity rt 30:40
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# ------------
+- name: Delete selected route map configuration
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ vni: 850
+ ip:
+ address: ip_pfx_list2
+ set:
+ as_path_prepend: 188,257
+ community:
+ community_number:
+ - "35:58"
+ community_attributes:
+ - local_as
+ extcommunity:
+ rt:
+ - "30:40"
+ state: deleted
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map rm1 80
+# !
+# route-map rm1 permit 80
+# match evpn default-route
+# match evpn route-type prefix
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set community 79:150 308:650 45:736 no-export no-peer additive
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+
+
+# Using "deleted" state to remove a route map or route map subset
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match evpn default-route
+# match evpn route-type prefix
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set community 79:150 308:650 45:736 no-export no-peer additive
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm1 deny 3047
+# match as-path bgp_as3
+# match evpn route-type multicast
+# match ext-community bgp_ext_comm2
+# match peer Ethernet6
+# match source-protocol ospf
+# match origin igp
+# set metric -rtt
+# set origin incomplete
+# !
+# route-map rm2 permit 100
+# match interface Ethernet16
+# set as-path prepend 200,300,400
+# set metric 8000
+# set ipv6 next-hop prefer-global
+# set ipv6 next-hop global 37::58
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+# ------------
+- name: Delete a route map or route map subset
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config:
+ - map_name: rm1
+ sequence_num: 3047
+ - map_name: rm2
+ sequence_num: 100
+ state: deleted
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match evpn default-route
+# match evpn route-type prefix
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set community 79:150 308:650 45:736 no-export no-peer additive
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+
+
+# Using "deleted" state to remove all route map configuration
+#
+# Before state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# !
+# route-map rm1 permit 80
+# match evpn default-route
+# match evpn route-type prefix
+# match ipv6 address prefix-list ipv6_pfx_list1
+# match interface Vlan7
+# match ext-community bgp_ext_comm1
+# match tag 7284
+# match local-preference 8000
+# match source-vrf Vrf1
+# match ip next-hop prefix-list ip_pfx_list3
+# match peer PortChannel 14
+# match source-protocol bgp
+# match metric 400
+# match origin egp
+# set community 79:150 308:650 45:736 no-export no-peer additive
+# set extcommunity soo 10.73.14.9:78
+# set comm-list bgp_comm_list2 delete
+# set metric +rtt
+# set ip next-hop 10.48.16.18
+# set ipv6 next-hop global 30::30
+# set local-preference 635
+# set origin egp
+# set weight 93471
+# !
+# route-map rm3 deny 285
+# match evpn route-type macip
+# match local-preference 14783
+# call rm1
+# match peer 87:95:15::53
+# match source-protocol bgp
+# match origin igp
+# set community no-advertise
+# set metric rtt
+# set origin igp
+# !
+# route-map rm4 permit 480
+# match evpn route-type prefix
+# match source-protocol static
+# set metric -rtt
+# ------------
+- name: Delete all route map configuration
+ dellemc.enterprise_sonic.sonic_route_maps:
+ config: []
+ state: deleted
+
+# After state:
+# ------------
+#
+# sonic# show running-configuration route-map
+# sonic#
+# (no route map configuration present)
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ as the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.route_maps.route_maps import Route_mapsArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.route_maps.route_maps import Route_maps
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=Route_mapsArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Route_maps(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py
index 7a528cdf0..b6f8be3b7 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_static_routes.py
@@ -33,6 +33,8 @@ DOCUMENTATION = """
---
module: sonic_static_routes
version_added: 2.0.0
+notes:
+ - Supports C(check_mode).
short_description: Manage static routes configuration on SONiC
description:
- This module provides configuration management of static routes for devices running SONiC
@@ -108,6 +110,8 @@ options:
choices:
- merged
- deleted
+ - overridden
+ - replaced
default: merged
"""
EXAMPLES = """
@@ -137,13 +141,13 @@ EXAMPLES = """
metric: 2
tag: 4
track: 8
- - vrf_name: '{{vrf_1}}'
+ - vrf_name: 'VrfReg1'
static_list:
- prefix: '3.0.0.0/8'
next_hops:
- index:
interface: 'eth0'
- nexthop_vrf: '{{vrf_2}}'
+ nexthop_vrf: 'VrfReg2'
next_hop: '4.0.0.0'
metric: 4
tag: 5
@@ -162,7 +166,7 @@ EXAMPLES = """
# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 8 2
# ip route 2.0.0.0/8 interface Ethernet4 tag 2 track 3 1
# ip route vrf VrfReg1 3.0.0.0/8 4.0.0.0 interface Management 0 nexthop-vrf VrfReg2 tag 5 track 6 4
-# ip route vrf VrfREg1 3.0.0.0/8 blackhole tag 20 track 30 10
+# ip route vrf VrfReg1 3.0.0.0/8 blackhole tag 20 track 30 10
#
#
# Modifying previous merge
@@ -170,7 +174,7 @@ EXAMPLES = """
- name: Modify static routes configurations
dellemc.enterprise_sonic.sonic_static_routes:
config:
- - vrf_name: '{{vrf_1}}'
+ - vrf_name: 'VrfReg1'
static_list:
- prefix: '3.0.0.0/8'
next_hops:
@@ -188,7 +192,65 @@ EXAMPLES = """
# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 8 2
# ip route 2.0.0.0/8 interface Ethernet4 tag 2 track 3 1
# ip route vrf VrfReg1 3.0.0.0/8 4.0.0.0 interface Management 0 nexthop-vrf VrfReg2 tag 5 track 6 4
-# ip route vrf VrfREg1 3.0.0.0/8 blackhole tag 22 track 33 11
+# ip route vrf VrfReg1 3.0.0.0/8 blackhole tag 22 track 33 11
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration | grep "ip route"
+# ip route 4.0.0.0/8 2.0.0.0 tag 4 track 8 2
+
+ - name: Override static routes configurations
+ dellemc.enterprise_sonic.sonic_static_routes:
+ config:
+ - vrf_name: 'VrfReg2'
+ static_list:
+ - prefix: '3.0.0.0/8'
+ next_hops:
+ - index:
+ blackhole: True
+ metric: 10
+ tag: 20
+ track: 30
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration | grep "ip route"
+# ip route vrf VrfReg2 3.0.0.0/8 blackhole tag 20 track 30 10
+
+
+# Using Replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration | grep "ip route"
+# ip route 4.0.0.0/8 2.0.0.0 tag 4 track 8 2
+
+ - name: Replace static routes configurations
+ dellemc.enterprise_sonic.sonic_static_routes:
+ config:
+ - vrf_name: 'default'
+ static_list:
+ - prefix: '4.0.0.0/8'
+ next_hops:
+ - index:
+ blackhole: True
+ metric: 5
+ tag: 10
+ track: 15
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration | grep "ip route"
+# ip route 4.0.0.0/8 blackhole tag 10 track 15 5
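+
+
+# Using merged with check mode (an illustrative sketch; the prefix and next-hop
+# values below are hypothetical and not taken from the device states above)
+#
+# Because this module supports check_mode, running the task with check mode
+# enabled previews the change without pushing it to the device; the previewed
+# configuration is reported in the "after(generated)" return value.
+
+- name: Preview a static route change without applying it
+  dellemc.enterprise_sonic.sonic_static_routes:
+    config:
+      - vrf_name: 'default'
+        static_list:
+          - prefix: '5.0.0.0/8'
+            next_hops:
+              - index:
+                  next_hop: '2.0.0.0'
+                metric: 1
+    state: merged
+  check_mode: true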
# Using deleted
@@ -200,7 +262,7 @@ EXAMPLES = """
# ip route 2.0.0.0/8 3.0.0.0 tag 4 track 8 2
# ip route 2.0.0.0/8 interface Ethernet4 tag 2 track 3 1
# ip route vrf VrfReg1 3.0.0.0/8 4.0.0.0 interface Management 0 nexthop-vrf VrfReg2 tag 5 track 6 4
-# ip route vrf VrfREg1 3.0.0.0/8 blackhole tag 22 track 33 11
+# ip route vrf VrfReg1 3.0.0.0/8 blackhole tag 22 track 33 11
- name: Delete static routes configurations
dellemc.enterprise_sonic.sonic_static_routes:
@@ -211,7 +273,7 @@ EXAMPLES = """
next_hops:
- index:
interface: 'Ethernet4'
- - vrf_name: '{{vrf_1}}'
+ - vrf_name: 'VrfReg1'
state: deleted
# After State:
@@ -237,6 +299,13 @@ after:
sample: >
The configuration returned will always be in the same format
of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_stp.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_stp.py
new file mode 100644
index 000000000..a25252547
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_stp.py
@@ -0,0 +1,677 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_stp
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+module: sonic_stp
+version_added: "2.3.0"
+short_description: Manage STP configuration on SONiC
+description:
+ - This module provides configuration management of STP for devices running SONiC
+author: "Shade Talabi (@stalabi1)"
+options:
+ config:
+ description:
+ - Specifies STP configurations
+ - I(mstp), I(pvst) and I(rapid_pvst) are mutually exclusive.
+ type: dict
+ suboptions:
+ global:
+ description:
+ - Global STP configuration
+ type: dict
+ suboptions:
+ enabled_protocol:
+ description:
+ - Specifies the type of STP enabled on the device
+ type: str
+ choices: ['mst', 'pvst', 'rapid_pvst']
+ loop_guard:
+ description:
+ - The loop guard default setting for the bridge
+ type: bool
+ default: False
+ bpdu_filter:
+ description:
+ - Enables edge port BPDU filter
+ type: bool
+ default: False
+ disabled_vlans:
+ description:
+ - List of disabled STP VLANs. The value of a list item can be a single VLAN ID or a range of VLAN IDs
+ - separated by '-' or '..'; for example 70-100 or 70..100.
+ type: list
+ elements: str
+ root_guard_timeout:
+ description:
+ - Specifies root guard recovery timeout in seconds before the port is moved back to forwarding state
+ - Range 5-600
+ type: int
+ portfast:
+ description:
+ - Enables PortFast globally on all access ports
+ - Configurable for pvst protocol
+ type: bool
+ default: False
+ hello_time:
+ description:
+ - Interval in seconds between periodic transmissions of configuration messages by designated ports
+ - Range 1-10
+ type: int
+ default: 2
+ max_age:
+ description:
+ - Maximum age in seconds of the information transmitted by the bridge when it is the root bridge
+ - Range 6-40
+ type: int
+ default: 20
+ fwd_delay:
+ description:
+ - Delay in seconds used by STP bridges to transition root and designated ports to forwarding
+ - Range 4-30
+ type: int
+ default: 15
+ bridge_priority:
+ description:
+ - The manageable component of the bridge identifier
+ - Value must be a multiple of 4096 in the range of 0-61440
+ type: int
+ default: 32768
+ interfaces:
+ description:
+ - Interfaces STP configuration
+ type: list
+ elements: dict
+ suboptions:
+ intf_name:
+ description:
+ - Name of interface
+ type: str
+ required: True
+ edge_port:
+ description:
+ - Configure interface as an STP edge port
+ type: bool
+ default: False
+ link_type:
+ description:
+ - Specifies the interface's link type
+ type: str
+ choices: ['point-to-point', 'shared']
+ guard:
+ description:
+ - Enables root guard or loop guard
+ type: str
+ choices: ['loop', 'root', 'none']
+ bpdu_guard:
+ description:
+ - Enable edge port BPDU guard
+ type: bool
+ default: False
+ bpdu_filter:
+ description:
+ - Enables edge port BPDU filter
+ type: bool
+ default: False
+ portfast:
+ description:
+ - Enable/Disable portfast on specified interface
+ - Configurable for pvst protocol
+ type: bool
+ default: False
+ uplink_fast:
+ description:
+ - Enables uplink fast
+ type: bool
+ default: False
+ shutdown:
+ description:
+ - Port to be shutdown when it receives a BPDU
+ type: bool
+ default: False
+ cost:
+ description:
+ - The port's contribution, when it is the root port, to the root path cost for the bridge
+ type: int
+ port_priority:
+ description:
+ - The manageable component of the port identifier
+ - Range 0-240
+ type: int
+ stp_enable:
+ description:
+ - Enables STP on the interface
+ type: bool
+ default: True
+ mstp:
+ description:
+ - Multi STP configuration
+ type: dict
+ suboptions:
+ mst_name:
+ description:
+ - Name of the MST configuration identifier
+ type: str
+ revision:
+ description:
+ - Revision level of the MST configuration identifier
+ type: int
+ max_hop:
+ description:
+ - Number of bridges in an MST region that a BPDU can traverse before it is discarded
+ type: int
+ hello_time:
+ description:
+ - Interval in seconds between periodic transmissions of configuration messages by designated ports
+ - Range 1-10
+ type: int
+ max_age:
+ description:
+ - Maximum age in seconds of the information transmitted by the bridge when it is the root bridge
+ - Range 6-40
+ type: int
+ fwd_delay:
+ description:
+ - Delay in seconds used by STP bridges to transition root and designated ports to forwarding
+ - Range 4-30
+ type: int
+ mst_instances:
+ description:
+ - Configuration for MST instances
+ type: list
+ elements: dict
+ suboptions:
+ mst_id:
+ description:
+ - Value used to identify MST instance
+ type: int
+ required: True
+ bridge_priority:
+ description:
+ - The manageable component of the bridge identifier
+ - Value must be a multiple of 4096
+ type: int
+ vlans:
+ description:
+ - List of VLANs mapped to the MST instance. The value of a list item can be a single VLAN ID or a range of VLAN IDs
+ - separated by '-' or '..'; for example 70-100 or 70..100.
+ type: list
+ elements: str
+ interfaces:
+ description:
+ - List of STP enabled interfaces
+ type: list
+ elements: dict
+ suboptions:
+ intf_name:
+ description:
+ - Reference to the STP interface
+ type: str
+ required: True
+ cost:
+ description:
+ - The port's contribution, when it is the root port, to the root path cost for the bridge
+ type: int
+ port_priority:
+ description:
+ - The manageable component of the port identifier
+ type: int
+ pvst:
+ description:
+ - Per VLAN STP configuration
+ type: list
+ elements: dict
+ suboptions:
+ vlan_id:
+ description:
+ - VLAN identifier
+ type: int
+ required: True
+ hello_time:
+ description:
+ - Interval in seconds between periodic transmissions of configuration messages by designated ports
+ - Range 1-10
+ type: int
+ max_age:
+ description:
+ - Maximum age in seconds of the information transmitted by the bridge when it is the root bridge
+ - Range 6-40
+ type: int
+ fwd_delay:
+ description:
+ - Delay in seconds used by STP bridges to transition root and designated ports to forwarding
+ - Range 4-30
+ type: int
+ bridge_priority:
+ description:
+ - The manageable component of the bridge identifier
+ - Value must be a multiple of 4096
+ type: int
+ interfaces:
+ description:
+ - List of STP enabled interfaces
+ type: list
+ elements: dict
+ suboptions:
+ intf_name:
+ description:
+ - Reference to the STP interface
+ type: str
+ required: True
+ cost:
+ description:
+ - The port's contribution, when it is the root port, to the root path cost for the bridge
+ type: int
+ port_priority:
+ description:
+ - The manageable component of the port identifier
+ type: int
+ rapid_pvst:
+ description:
+ - Rapid per VLAN STP configuration
+ type: list
+ elements: dict
+ suboptions:
+ vlan_id:
+ description:
+ - VLAN identifier
+ type: int
+ required: True
+ hello_time:
+ description:
+ - Interval in seconds between periodic transmissions of configuration messages by designated ports
+ - Range 1-10
+ type: int
+ max_age:
+ description:
+ - Maximum age in seconds of the information transmitted by the bridge when it is the root bridge
+ - Range 6-40
+ type: int
+ fwd_delay:
+ description:
+ - Delay in seconds used by STP bridges to transition root and designated ports to forwarding
+ - Range 4-30
+ type: int
+ bridge_priority:
+ description:
+ - The manageable component of the bridge identifier
+ - Value must be a multiple of 4096
+ type: int
+ interfaces:
+ description:
+ - List of STP enabled interfaces
+ type: list
+ elements: dict
+ suboptions:
+ intf_name:
+ description:
+ - Reference to the STP interface
+ type: str
+ required: True
+ cost:
+ description:
+ - The port's contribution, when it is the root port, to the root path cost for the bridge
+ type: int
+ port_priority:
+ description:
+ - The manageable component of the port identifier
+ type: int
+ state:
+ description:
+ - The state of the configuration after module completion
+ type: str
+ choices: ['merged', 'deleted', 'replaced', 'overridden']
+ default: merged
+"""
+EXAMPLES = """
+
+# Using merged
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration spanning-tree
+# (No spanning-tree configuration present)
+
+- name: Merge STP configurations
+ dellemc.enterprise_sonic.sonic_stp:
+ config:
+ global:
+ enabled_protocol: mst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: Ethernet20
+ edge_port: true
+ link_type: shared
+ guard: loop
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 20
+ port_priority: 30
+ stp_enable: true
+ mstp:
+ mst_name: mst1
+ revision: 1
+ max_hop: 3
+ hello_time: 6
+ max_age: 9
+ fwd_delay: 12
+ mst_instances:
+ - mst_id: 1
+ bridge_priority: 2048
+ vlans:
+ - 1
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 60
+ port_priority: 65
+ state: merged
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration spanning-tree
+# no spanning-tree vlan 4-6
+# spanning-tree mode mst
+# spanning-tree edge-port bpdufilter default
+# spanning-tree forward-time 20
+# spanning-tree hello-time 5
+# spanning-tree max-age 10
+# spanning-tree loopguard default
+# spanning-tree mst hello-time 6
+# spanning-tree mst forward-time 12
+# spanning-tree mst max-age 9
+# spanning-tree mst max-hops 3
+# spanning-tree mst 1 priority 2048
+# !
+# spanning-tree mst configuration
+# name mst1
+# revision 1
+# instance 1 vlan 1
+# activate
+# !
+# interface Ethernet20
+# spanning-tree bpdufilter enable
+# spanning-tree guard loop
+# spanning-tree bpduguard port-shutdown
+# spanning-tree cost 20
+# spanning-tree link-type shared
+# spanning-tree port-priority 30
+# spanning-tree port type edge
+# spanning-tree uplinkfast
+# spanning-tree mst 1 cost 60
+# spanning-tree mst 1 port-priority 65
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration spanning-tree
+# no spanning-tree vlan 4-6
+# spanning-tree mode mst
+# spanning-tree edge-port bpdufilter default
+# spanning-tree loopguard default
+# spanning-tree mst hello-time 6
+# spanning-tree mst forward-time 12
+# spanning-tree mst max-age 9
+# spanning-tree mst max-hops 3
+# spanning-tree mst 1 priority 2048
+# !
+# spanning-tree mst configuration
+# name mst1
+# revision 1
+# instance 1 vlan 1
+# activate
+# !
+# interface Ethernet20
+# spanning-tree bpdufilter enable
+# spanning-tree guard loop
+# spanning-tree bpduguard port-shutdown
+# spanning-tree cost 20
+# spanning-tree link-type shared
+# spanning-tree port-priority 30
+# spanning-tree port type edge
+# spanning-tree uplinkfast
+# spanning-tree mst 1 cost 60
+# spanning-tree mst 1 port-priority 65
+
+- name: Replace STP configurations
+ dellemc.enterprise_sonic.sonic_stp:
+ config:
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 25
+ port_priority: 35
+ mstp:
+ mst_name: mst2
+ revision: 2
+ max_hop: 4
+ hello_time: 7
+ max_age: 10
+ fwd_delay: 13
+ state: replaced
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration spanning-tree
+# no spanning-tree vlan 4-6
+# spanning-tree mode mst
+# spanning-tree edge-port bpdufilter default
+# spanning-tree loopguard default
+# spanning-tree mst hello-time 7
+# spanning-tree mst forward-time 13
+# spanning-tree mst max-age 10
+# spanning-tree mst max-hops 4
+# !
+# spanning-tree mst configuration
+# name mst2
+# revision 2
+# activate
+# !
+# interface Ethernet20
+# spanning-tree cost 25
+# spanning-tree port-priority 35
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration spanning-tree
+# no spanning-tree vlan 4-6
+# spanning-tree mode mst
+# spanning-tree edge-port bpdufilter default
+# spanning-tree loopguard default
+# spanning-tree mst hello-time 7
+# spanning-tree mst forward-time 13
+# spanning-tree mst max-age 10
+# spanning-tree mst max-hops 4
+# !
+# spanning-tree mst configuration
+# name mst2
+# revision 2
+# activate
+# !
+# interface Ethernet20
+# spanning-tree cost 25
+# spanning-tree port-priority 35
+
+- name: Override STP configurations
+ dellemc.enterprise_sonic.sonic_stp:
+ config:
+ global:
+ enabled_protocol: pvst
+ bpdu_filter: true
+ root_guard_timeout: 25
+ portfast: true
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ pvst:
+ - vlan_id: 1
+ hello_time: 4
+ max_age: 6
+ fwd_delay: 8
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 10
+ port_priority: 50
+ state: overridden
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration spanning-tree
+# spanning-tree mode pvst
+# spanning-tree edge-port bpdufilter default
+# spanning-tree forward-time 20
+# spanning-tree guard root timeout 25
+# spanning-tree hello-time 5
+# spanning-tree max-age 10
+# spanning-tree priority 4096
+# spanning-tree portfast default
+# spanning-tree vlan 1 hello-time 4
+# spanning-tree vlan 1 forward-time 8
+# spanning-tree vlan 1 max-age 6
+# sonic# show running-configuration interface Ethernet 20 | grep spanning-tree
+# spanning-tree vlan 1 cost 10
+# spanning-tree vlan 1 port-priority 50
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+# sonic# show running-configuration spanning-tree
+# spanning-tree mode pvst
+# spanning-tree edge-port bpdufilter default
+# spanning-tree forward-time 20
+# spanning-tree guard root timeout 25
+# spanning-tree hello-time 5
+# spanning-tree max-age 10
+# spanning-tree priority 4096
+# spanning-tree portfast default
+# spanning-tree vlan 1 hello-time 4
+# spanning-tree vlan 1 forward-time 8
+# spanning-tree vlan 1 max-age 6
+# sonic# show running-configuration interface Ethernet 20 | grep spanning-tree
+# spanning-tree vlan 1 cost 10
+# spanning-tree vlan 1 port-priority 50
+
+- name: Delete STP configurations
+ dellemc.enterprise_sonic.sonic_stp:
+ config:
+ global:
+ bpdu_filter: true
+ root_guard_timeout: 25
+ pvst:
+ - vlan_id: 1
+ interfaces:
+ - intf_name: Ethernet20
+ state: deleted
+
+# After State:
+# ------------
+#
+# sonic# show running-configuration spanning-tree
+# spanning-tree mode pvst
+# spanning-tree forward-time 20
+# spanning-tree hello-time 5
+# spanning-tree max-age 10
+# spanning-tree priority 4096
+# spanning-tree portfast default
+# spanning-tree vlan 1 hello-time 4
+# spanning-tree vlan 1 forward-time 8
+# spanning-tree vlan 1 max-age 6
+# sonic# show running-configuration interface Ethernet 20 | grep spanning-tree
+# (No spanning-tree configuration present)
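+
+
+# Using merged with rapid PVST (an illustrative sketch; the VLAN, timer, and
+# interface values below are hypothetical). The rapid_pvst option takes the
+# same suboptions as pvst; mstp, pvst and rapid_pvst remain mutually exclusive,
+# so only rapid_pvst is supplied here. Device output is omitted because it is
+# not taken from an actual switch.
+
+- name: Merge rapid PVST configurations
+  dellemc.enterprise_sonic.sonic_stp:
+    config:
+      global:
+        enabled_protocol: rapid_pvst
+      rapid_pvst:
+        - vlan_id: 10
+          hello_time: 3
+          max_age: 15
+          fwd_delay: 10
+          bridge_priority: 8192
+          interfaces:
+            - intf_name: Ethernet20
+              cost: 30
+              port_priority: 40
+    state: merged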
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the module invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration after module invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.stp.stp import StpArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.stp.stp import Stp
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result from module invocation
+ """
+ module = AnsibleModule(argument_spec=StpArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Stp(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py
index efb285a11..8b4d29ae1 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_system.py
@@ -80,7 +80,7 @@ options:
- In case of merged, the input configuration will be merged with the existing system configuration on the device.
- In case of deleted the existing system configuration will be removed from the device.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'replaced', 'overridden', 'deleted']
type: str
"""
EXAMPLES = """
@@ -167,6 +167,89 @@ EXAMPLES = """
#ipv6 anycast-address enable
#interface-naming standard
+# Using replaced
+#
+# Before state:
+# -------------
+#!
+#sonic(config)#do show running-configuration
+#!
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#ip anycast-address enable
+#ipv6 anycast-address enable
+
+- name: Replace system configuration.
+ sonic_system:
+ config:
+ hostname: sonic
+ interface_naming: standard
+ state: replaced
+
+# After state:
+# ------------
+#!
+#SONIC(config)#do show running-configuration
+#!
+#interface-naming standard
+
+# Using replaced
+#
+# Before state:
+# -------------
+#!
+#sonic(config)#do show running-configuration
+#!
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#interface-naming standard
+
+- name: Replace system device configuration.
+ sonic_system:
+ config:
+ hostname: sonic
+ interface_naming: standard
+ anycast_address:
+ ipv6: true
+ ipv4: true
+ state: replaced
+
+# After state:
+# ------------
+#!
+#SONIC(config)#do show running-configuration
+#!
+#ip anycast-address enable
+#ipv6 anycast-address enable
+#interface-naming standard
+
+# Using overridden
+#
+# Before state:
+# -------------
+#!
+#sonic(config)#do show running-configuration
+#!
+#ip anycast-mac-address aa:bb:cc:dd:ee:ff
+#ip anycast-address enable
+#ipv6 anycast-address enable
+
+- name: Override system configuration.
+ sonic_system:
+ config:
+ hostname: sonic
+ interface_naming: standard
+ anycast_address:
+ ipv4: true
+ mac_address: bb:aa:cc:dd:ee:ff
+ state: overridden
+
+# After state:
+# ------------
+#!
+#SONIC(config)#do show running-configuration
+#!
+#ip anycast-mac-address bb:aa:cc:dd:ee:ff
+#ip anycast-address enable
+#interface-naming standard
"""
RETURN = """
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py
index 3295e11ba..3361345f5 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_tacacs_server.py
@@ -64,6 +64,7 @@ options:
description:
- Specifies the timeout of the tacacs server.
type: int
+ default: 5
source_interface:
description:
- Specifies the source interface of the tacacs server.
@@ -122,8 +123,10 @@ options:
- Specifies the operation to be performed on the tacacs server configured on the device.
- In case of merged, the input mode configuration will be merged with the existing tacacs server configuration on the device.
- In case of deleted the existing tacacs server mode configuration will be removed from the device.
+ - In case of replaced, the existing tacacs server configuration will be replaced with the provided configuration.
+ - In case of overridden, the existing tacacs server configuration will be overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'replaced', 'overridden', 'deleted']
type: str
"""
EXAMPLES = """
@@ -249,8 +252,110 @@ EXAMPLES = """
#HOST AUTH-TYPE KEY PORT PRIORITY TIMEOUT VRF
#------------------------------------------------------------------------------------------------
#1.2.3.4 pap 1234 49 1 5 default
-
-
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#sonic(config)# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 10
+#auth-type : pap
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG PORT PRIORITY TIMEOUT VRF
+#--------------------------------------------------------------------------------------
+#1.2.3.4 pap No 49 1 5 default
+#
+- name: Replace tacacs configurations
+ sonic_tacacs_server:
+ config:
+ auth_type: pap
+ key: pap
+ source_interface: Ethernet12
+ timeout: 10
+ servers:
+ - host:
+ name: 1.2.3.4
+ auth_type: mschap
+ key: 1234
+ state: replaced
+#
+# After state:
+# ------------
+#
+#sonic(config)# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 10
+#auth-type : pap
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG PORT PRIORITY TIMEOUT VRF
+#--------------------------------------------------------------------------------------
+#1.2.3.4 mschap Yes 49 1 5 default
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#sonic(config)# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 10
+#auth-type : pap
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG PORT PRIORITY TIMEOUT VRF
+#--------------------------------------------------------------------------------------
+#1.2.3.4 pap No 49 1 5 default
+#11.12.13.14 chap Yes 49 10 5 default
+#
+- name: Override tacacs configurations
+ sonic_tacacs_server:
+ config:
+ auth_type: mschap
+ key: mschap
+ source_interface: Ethernet12
+ timeout: 20
+ servers:
+ - host:
+ name: 1.2.3.4
+ auth_type: mschap
+ key: mschap
+ - host:
+ name: 10.10.11.12
+ auth_type: chap
+ timeout: 30
+ priority: 2
+ state: overridden
+#
+# After state:
+# ------------
+#
+#sonic(config)# do show tacacs-server
+#---------------------------------------------------------
+#TACACS Global Configuration
+#---------------------------------------------------------
+#source-interface : Ethernet12
+#timeout : 20
+#auth-type : mschap
+#key configured : Yes
+#--------------------------------------------------------------------------------------
+#HOST AUTH-TYPE KEY-CONFIG PORT PRIORITY TIMEOUT VRF
+#--------------------------------------------------------------------------------------
+#1.2.3.4 mschap Yes 49 1 5 default
+#10.10.11.12 chap No 49 2 30 default
+#
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py
index 7f0855a94..ac528e88d 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_users.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# Copyright 2019 Red Hat
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,6 +59,8 @@ options:
choices:
- admin
- operator
+ - netadmin
+ - secadmin
password:
description:
- Specifies the password of the user.
@@ -78,8 +80,10 @@ options:
- Specifies the operation to be performed on the users configured on the device.
- In case of merged, the input configuration will be merged with the existing users configuration on the device.
- In case of deleted the existing users configuration will be removed from the device.
+ - In case of replaced, the existing configuration of the specified users will be replaced with the provided configuration.
+ - In case of overridden, the existing users configuration will be overridden with the provided configuration.
default: merged
- choices: ['merged', 'deleted']
+ choices: ['merged', 'deleted', 'overridden', 'replaced']
type: str
"""
EXAMPLES = """
@@ -88,38 +92,44 @@ EXAMPLES = """
# Before state:
# -------------
#
-#do show running-configuration
-#!
-#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
-#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
-#username sysoperator password $6$s1eTVjcX4Udi69gY$zlYgqwoKRGC6hGL5iKDImN/4BL7LXKNsx9e5PoSsBLs6C80ShYj2LoJAUZ58ia2WNjcHXhTD1p8eU9wyRTCiE0 role operator
-#
-- name: Merge users configurations
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# sysadmin admin
+# sysoperator operator
+
+- name: Delete user
dellemc.enterprise_sonic.sonic_users:
config:
- name: sysoperator
state: deleted
+
# After state:
# ------------
#
-#do show running-configuration
-#!
-#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
-#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
-
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# sysadmin admin
# Using deleted
#
# Before state:
# -------------
#
-#do show running-configuration
-#!
-#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
-#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
-#username sysoperator password $6$s1eTVjcX4Udi69gY$zlYgqwoKRGC6hGL5iKDImN/4BL7LXKNsx9e5PoSsBLs6C80ShYj2LoJAUZ58ia2WNjcHXhTD1p8eU9wyRTCiE0 role operator
-#
-- name: Merge users configurations
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# sysadmin admin
+# sysoperator operator
+
+- name: Delete all users configurations except admin
dellemc.enterprise_sonic.sonic_users:
config:
state: deleted
@@ -127,20 +137,23 @@ EXAMPLES = """
# After state:
# ------------
#
-#do show running-configuration
-#!
-#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
-
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
# Using merged
#
# Before state:
# -------------
#
-#do show running-configuration
-#!
-#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
-#
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+
- name: Merge users configurations
dellemc.enterprise_sonic.sonic_users:
config:
@@ -156,14 +169,83 @@ EXAMPLES = """
# After state:
# ------------
-#!
-#do show running-configuration
-#!
-#username admin password $6$sdZt2C7F$3oPSRkkJyLZtsKlFNGWdwssblQWBj5dXM6qAJAQl7dgOfqLSpZJ/n6xf8zPRcqPUFCu5ZKpEtynJ9sZ/S8Mgj. role admin
-#username sysadmin password $6$3QNqJzpFAPL9JqHA$417xFKw6SRn.CiqMFJkDfQJXKJGjeYwi2A8BIyfuWjGimvunOOjTRunVluudey/W9l8jhzN1oewBW5iLxmq2Q1 role admin
-#username sysoperator password $6$s1eTVjcX4Udi69gY$zlYgqwoKRGC6hGL5iKDImN/4BL7LXKNsx9e5PoSsBLs6C80ShYj2LoJAUZ58ia2WNjcHXhTD1p8eU9wyRTCiE0 role operator
+#
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# sysadmin admin
+# sysoperator operator
+
+# Using Overridden
+#
+# Before state:
+# -------------
+#
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# sysadmin admin
+# sysoperator operator
+
+- name: Override users configurations
+ dellemc.enterprise_sonic.sonic_users:
+ config:
+ - name: user1
+ role: secadmin
+ password: 123abc
+ update_password: always
+ state: overridden
+
+# After state:
+# ------------
+#
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# user1 secadmin
+# Using Replaced
+#
+# Before state:
+# -------------
+#
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# user1 secadmin
+# user2 operator
+- name: Replace users configurations
+ dellemc.enterprise_sonic.sonic_users:
+ config:
+ - name: user1
+ role: operator
+ password: 123abc
+ update_password: always
+ - name: user2
+ role: netadmin
+ password: 123abc
+ update_password: always
+ state: replaced
+
+# After state:
+# ------------
+#
+# sonic# show users configured
+# ----------------------------------------------------------------------
+# User Role(s)
+# ----------------------------------------------------------------------
+# admin admin
+# user1 operator
+# user2 netadmin
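+
+
+# Using merged with check mode (an illustrative sketch; "user3" and its
+# password are hypothetical values). When the module is run in check mode,
+# the previewed configuration is reported in the "after(generated)" return
+# value described below instead of being applied to the device.
+
+- name: Preview a user addition without applying it
+  dellemc.enterprise_sonic.sonic_users:
+    config:
+      - name: user3
+        role: operator
+        password: 123abc
+        update_password: always
+    state: merged
+  check_mode: true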
"""
RETURN = """
before:
@@ -180,6 +262,13 @@ after:
sample: >
The configuration returned will always be in the same format
of the parameters above.
+after(generated):
+ description: The generated configuration model invocation.
+ returned: when C(check_mode)
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
commands:
description: The set of commands pushed to the remote device.
returned: always
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlan_mapping.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlan_mapping.py
new file mode 100644
index 000000000..985e0523b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlan_mapping.py
@@ -0,0 +1,543 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
+# GNU General Public License v3.0+
+# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+#############################################
+# WARNING #
+#############################################
+#
+# This file is auto generated by the resource
+# module builder playbook.
+#
+# Do not edit this file manually.
+#
+# Changes to this file will be over written
+# by the resource module builder.
+#
+# Changes should be made in the model used to
+# generate this file or in the resource module
+# builder template.
+#
+#############################################
+
+"""
+The module file for sonic_vlan_mapping
+"""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+ANSIBLE_METADATA = {
+ 'metadata_version': '1.1',
+ 'status': ['preview'],
+ 'supported_by': 'community',
+ 'license': 'Apache 2.0'
+}
+
+DOCUMENTATION = """
+---
+module: sonic_vlan_mapping
+author: "Cypher Miller (@Cypher-Miller)"
+version_added: "2.1.0"
+short_description: Configure vlan mappings on SONiC.
+description:
+ - This module provides configuration management for vlan mappings on devices running SONiC.
+  - Vlan mappings are only available on TD3 and TD4 devices.
+  - For TD4 devices, vlan mapping must be enabled first (it can be enabled in config-switch-resource).
+options:
+ config:
+ description:
+ - Specifies the vlan mapping related configurations.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+          - Full name of the interface, e.g. Ethernet8, PortChannel2, Eth1/2.
+ required: true
+ type: str
+ mapping:
+ description:
+ - Defining a single vlan mapping.
+ type: list
+ elements: dict
+ suboptions:
+ service_vlan:
+ description:
+ - Configure service provider VLAN ID.
+ - VLAN ID range is 1-4094.
+ required: true
+ type: int
+ vlan_ids:
+ description:
+ - Configure customer VLAN IDs.
+ - If mode is double tagged translation then this VLAN ID represents the outer VLAN ID.
+              - If mode is set to stacking, ranges and/or multiple list entries can be passed.
+ - Individual VLAN ID or (-) separated range of VLAN IDs.
+ type: list
+ elements: str
+ dot1q_tunnel:
+ description:
+              - Specify whether the mapping is vlan stacking or translation (false means translation; true means stacking).
+ type: bool
+ default: false
+ inner_vlan:
+ description:
+ - Configure inner customer VLAN ID.
+              - VLAN ID range is 1-4094.
+ - Only available for double tagged translations.
+ type: int
+ priority:
+ description:
+ - Set priority level of the vlan mapping.
+ - Priority range is 0-7.
+ type: int
+ state:
+ description:
+ - Specifies the operation to be performed on the vlan mappings configured on the device.
+ - In case of merged, the input configuration will be merged with the existing vlan mappings on the device.
+ - In case of deleted, the existing vlan mapping configuration will be removed from the device.
+      - In case of overridden, all existing vlan mappings will be deleted and the specified input configuration will be added.
+ - In case of replaced, the existing vlan mappings on the device will be replaced by the configuration for each vlan mapping.
+ type: str
+ default: merged
+ choices:
+ - merged
+ - deleted
+ - replaced
+ - overridden
+"""
+EXAMPLES = """
+# Using deleted
+#
+# Before State:
+# -------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+# switchport vlan-mapping 392 inner 590 2755
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-432 dot1q-tunnel 2436 priority 3
+# switchport vlan-mapping 300 dot1q-tunnel 2567 priority 3
+#!
+
+
+ - name: Delete vlan mapping configurations
+ sonic_vlan_mapping:
+ config:
+ - name: Ethernet8
+ mapping:
+ - service_vlan: 2755
+ - name: Ethernet16
+ mapping:
+ - service_vlan: 2567
+ priority: 3
+ - service_vlan: 2436
+ vlan_ids:
+ - 404
+ - 401
+ - 412
+ - 430-431
+ priority: 3
+ state: deleted
+
+# After State:
+# ------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400,402,406,408,410,420,422,432 dot1q-tunnel 2436
+# switchport vlan-mapping 300 dot1q-tunnel 2567
+#!
+
+
+# Using deleted
+#
+# Before State:
+# -------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+# switchport vlan-mapping 392 inner 590 2755
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-431 dot1q-tunnel 2436
+# switchport vlan-mapping 300 dot1q-tunnel 2567 priority 3
+#!
+
+
+ - name: Delete vlan mapping configurations
+ sonic_vlan_mapping:
+ config:
+ - name: Ethernet8
+ - name: Ethernet16
+ mapping:
+ - service_vlan: 2567
+ state: deleted
+
+# After State:
+# ------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-431 dot1q-tunnel 2436
+#!
+
+
+# Using merged
+#
+# Before State:
+# -------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+#!
+#interface PortChannel 2
+# switchport vlan-mapping 345 2999 priority 0
+# switchport vlan-mapping 500,540 dot1q-tunnel 3000
+# no shutdown
+#!
+
+ - name: Add vlan mapping configurations
+ sonic_vlan_mapping:
+ config:
+ - name: Ethernet8
+ mapping:
+ - service_vlan: 2755
+ vlan_ids:
+ - 392
+ dot1q_tunnel: false
+ inner_vlan: 590
+ - name: Ethernet16
+ mapping:
+ - service_vlan: 2567
+ vlan_ids:
+ - 300
+ dot1q_tunnel: true
+ priority: 3
+ - service_vlan: 2436
+ vlan_ids:
+ - 400-402
+ - 404
+ - 406
+ - 408
+ - 410
+ - 412
+ - 420
+ - 422
+ - 430-431
+ dot1q_tunnel: true
+ - name: Portchannel 2
+ mapping:
+ - service_vlan: 2999
+ priority: 4
+ - service_vlan: 3000
+ vlan_ids:
+ - 506-512
+ - 561
+ priority: 5
+ state: merged
+
+# After State:
+# ------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+# switchport vlan-mapping 392 inner 590 2755
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-431 dot1q-tunnel 2436
+# switchport vlan-mapping 300 dot1q-tunnel 2567 priority 3
+#!
+#interface PortChannel 2
+# switchport vlan-mapping 345 2999 priority 4
+# switchport vlan-mapping 500,506-512,540,561 dot1q-tunnel 3000 priority 5
+# no shutdown
+#!
+
+
+# Using replaced
+#
+# Before State:
+# -------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+# switchport vlan-mapping 392 inner 590 2755
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-431 dot1q-tunnel 2436
+# switchport vlan-mapping 300 dot1q-tunnel 2567 priority 3
+#!
+#interface PortChannel 2
+# switchport vlan-mapping 345 2999 priority 0
+# no shutdown
+#!
+
+ - name: Replace vlan mapping configurations
+ sonic_vlan_mapping:
+ config:
+ - name: Ethernet8
+ mapping:
+ - service_vlan: 2755
+ vlan_ids:
+ - 390
+ dot1q_tunnel: false
+ inner_vlan: 593
+ - name: Ethernet16
+ mapping:
+ - service_vlan: 2567
+ vlan_ids:
+ - 310
+ - 330-340
+ priority: 5
+ - name: Portchannel 2
+ mapping:
+ - service_vlan: 2999
+ vlan_ids:
+ - 345
+ dot1q_tunnel: true
+ priority: 1
+ state: replaced
+
+
+# After State:
+# ------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+# switchport vlan-mapping 390 inner 593 2755
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-431 dot1q-tunnel 2436
+# switchport vlan-mapping 310,330-340 dot1q-tunnel 2567 priority 5
+#!
+#interface PortChannel 2
+# switchport vlan-mapping 345 dot1q-tunnel 2999 priority 1
+# no shutdown
+#!
+
+
+# Using overridden
+#
+# Before State:
+# -------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 623 2411
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 400-402,404,406,408,410,412,420,422,430-431 dot1q-tunnel 2436
+#!
+
+ - name: Override the vlan mapping configurations
+ sonic_vlan_mapping:
+ config:
+ - name: Ethernet8
+ mapping:
+ - service_vlan: 2755
+ vlan_ids:
+ - 392
+ dot1q_tunnel: false
+ inner_vlan: 590
+ - name: Ethernet16
+ mapping:
+ - service_vlan: 2567
+ vlan_ids:
+ - 300
+ dot1q_tunnel: true
+ priority: 3
+ - name: Portchannel 2
+ mapping:
+ - service_vlan: 2999
+ vlan_ids:
+ - 345
+ priority: 0
+ state: overridden
+
+# After State:
+# ------------
+#
+#sonic# show running-configuration interface
+#!
+#interface Ethernet8
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 392 inner 590 2755
+#!
+#interface Ethernet16
+# mtu 9100
+# speed 400000
+# fec RS
+# unreliable-los auto
+# shutdown
+# switchport vlan-mapping 300 dot1q-tunnel 2567 priority 3
+#!
+#interface PortChannel 2
+# switchport vlan-mapping 345 2999 priority 0
+# no shutdown
+#!
+
+
+"""
+RETURN = """
+before:
+ description: The configuration prior to the model invocation.
+ returned: always
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+after:
+ description: The resulting configuration model invocation.
+ returned: when changed
+ type: list
+ sample: >
+ The configuration returned will always be in the same format
+ of the parameters above.
+commands:
+ description: The set of commands pushed to the remote device.
+ returned: always
+ type: list
+ sample: ['command 1', 'command 2', 'command 3']
+"""
+
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.argspec.vlan_mapping.vlan_mapping import Vlan_mappingArgs
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vlan_mapping.vlan_mapping import Vlan_mapping
+
+
+def main():
+ """
+ Main entry point for module execution
+
+ :returns: the result form module invocation
+ """
+ module = AnsibleModule(argument_spec=Vlan_mappingArgs.argument_spec,
+ supports_check_mode=True)
+
+ result = Vlan_mapping(module).execute_module()
+ module.exit_json(**result)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py
index cfd536c79..cd3d7729d 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vlans.py
@@ -63,6 +63,8 @@ options:
type: str
choices:
- merged
+ - replaced
+ - overridden
- deleted
default: merged
"""
@@ -109,6 +111,64 @@ EXAMPLES = """
#sonic#
#
+# Using replaced
+
+# Before state:
+# -------------
+#
+#sonic# show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive
+#30 Inactive
+#
+#sonic#
+
+- name: Replace all attributes of specified VLANs with provided configuration
+ dellemc.enterprise_sonic.sonic_vlans:
+ config:
+ - vlan_id: 10
+ state: replaced
+
+# After state:
+# ------------
+#
+#sonic# show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive
+#30 Inactive
+#
+#sonic#
+
+# Using overridden
+
+# Before state:
+# -------------
+#
+#sonic# show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive
+#30 Inactive
+#
+#sonic#
+
+- name: Override device configuration of all VLANs with provided configuration
+ dellemc.enterprise_sonic.sonic_vlans:
+ config:
+ - vlan_id: 10
+ state: overridden
+
+# After state:
+# ------------
+#
+#sonic# show Vlan
+#Q: A - Access (Untagged), T - Tagged
+#NUM Status Q Ports
+#10 Inactive
+#
+#sonic#
# Using deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py
index 4c881aee6..84233145a 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vrfs.py
@@ -66,6 +66,8 @@ options:
type: str
choices:
- merged
+ - replaced
+ - overridden
- deleted
default: merged
"""
@@ -158,6 +160,88 @@ EXAMPLES = """
#Vrfcheck4 Eth1/5
# Eth1/6
#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+#show ip vrf
+#VRF-NAME INTERFACES
+#----------------------------------------------------------------
+#Vrfcheck1
+#Vrfcheck2
+#Vrfcheck3 Eth1/7
+# Eth1/8
+#
+- name: Overridden VRF configuration
+ dellemc.enterprise_sonic.sonic_vrfs:
+    config:
+      - name: Vrfcheck1
+        members:
+          interfaces:
+            - name: Eth1/3
+            - name: Eth1/14
+      - name: Vrfcheck3
+        members:
+          interfaces:
+            - name: Eth1/5
+            - name: Eth1/6
+    state: overridden
+#
+# After state:
+# ------------
+#
+#show ip vrf
+#VRF-NAME INTERFACES
+#----------------------------------------------------------------
+#Vrfcheck1 Eth1/3
+# Eth1/14
+#Vrfcheck2
+#Vrfcheck3 Eth1/5
+# Eth1/6
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+#show ip vrf
+#VRF-NAME INTERFACES
+#----------------------------------------------------------------
+#Vrfcheck1 Eth1/3
+#Vrfcheck2
+#Vrfcheck3 Eth1/5
+# Eth1/6
+#
+- name: Replace VRF configuration
+ dellemc.enterprise_sonic.sonic_vrfs:
+    config:
+      - name: Vrfcheck1
+        members:
+          interfaces:
+            - name: Eth1/3
+            - name: Eth1/14
+      - name: Vrfcheck3
+        members:
+          interfaces:
+            - name: Eth1/5
+            - name: Eth1/6
+    state: replaced
+#
+# After state:
+# ------------
+#
+#show ip vrf
+#VRF-NAME INTERFACES
+#----------------------------------------------------------------
+#Vrfcheck1 Eth1/3
+# Eth1/14
+#Vrfcheck2
+#Vrfcheck3 Eth1/5
+# Eth1/6
+#
"""
RETURN = """
before:
diff --git a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py
index e6613ba24..0500db79e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py
+++ b/ansible_collections/dellemc/enterprise_sonic/plugins/modules/sonic_vxlans.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
-# © Copyright 2020 Dell Inc. or its subsidiaries. All Rights Reserved
+# © Copyright 2023 Dell Inc. or its subsidiaries. All Rights Reserved
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -43,7 +43,6 @@ options:
config:
description:
- A list of VxLAN configurations.
- - source_ip and evpn_nvo are required together.
type: list
elements: dict
suboptions:
@@ -90,6 +89,8 @@ options:
choices:
- merged
- deleted
+ - replaced
+ - overridden
default: merged
"""
EXAMPLES = """
@@ -173,7 +174,7 @@ EXAMPLES = """
- name: vteptest1
source_ip: 1.1.1.1
primary_ip: 2.2.2.2
- evpn_nvo_name: nvo1
+ evpn_nvo: nvo1
vlan_map:
- vni: 101
vlan: 11
@@ -199,7 +200,87 @@ EXAMPLES = """
# map vni 101 vrf Vrfcheck1
# map vni 102 vrf Vrfcheck2
#!
+#
+# Using overridden
+#
+# Before state:
+# -------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest1
+# source-ip 1.1.1.1
+# primary-ip 2.2.2.2
+# map vni 101 vlan 11
+# map vni 102 vlan 12
+# map vni 101 vrf Vrfcheck1
+# map vni 102 vrf Vrfcheck2
+#!
+#
+- name: "Test vxlans overridden state 01"
+ dellemc.enterprise_sonic.sonic_vxlans:
+ config:
+ - name: vteptest2
+ source_ip: 3.3.3.3
+ primary_ip: 4.4.4.4
+ evpn_nvo: nvo2
+ vlan_map:
+ - vni: 101
+ vlan: 11
+ vrf_map:
+ - vni: 101
+ vrf: Vrfcheck1
+ state: overridden
+#
+# After state:
+# ------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest2
+# source-ip 3.3.3.3
+# primary-ip 4.4.4.4
+# map vni 101 vlan 11
+# map vni 101 vrf Vrfcheck1
+#!
+#
+# Using replaced
+#
+# Before state:
+# -------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest2
+# source-ip 3.3.3.3
+# primary-ip 4.4.4.4
+# map vni 101 vlan 11
+# map vni 101 vrf Vrfcheck1
+#!
+#
+- name: "Test vxlans replaced state 01"
+ dellemc.enterprise_sonic.sonic_vxlans:
+ config:
+ - name: vteptest2
+ source_ip: 5.5.5.5
+ vlan_map:
+ - vni: 101
+ vlan: 12
+ state: replaced
+#
+# After state:
+# ------------
+#
+# do show running-configuration
+#
+#interface vxlan vteptest2
+# source-ip 5.5.5.5
+# primary-ip 4.4.4.4
+# map vni 101 vlan 12
+# map vni 101 vrf Vrfcheck1
+#!
# """
+
RETURN = """
before:
description: The configuration prior to the model invocation.
diff --git a/ansible_collections/dellemc/enterprise_sonic/test-requirements.txt b/ansible_collections/dellemc/enterprise_sonic/test-requirements.txt
new file mode 100644
index 000000000..6d8843b12
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/test-requirements.txt
@@ -0,0 +1,4 @@
+pytest-xdist
+coverage==4.5.4
+pytest-forked
+git+https://github.com/ansible-community/pytest-ansible-units.git
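+# A minimal usage sketch (assumed workflow, not part of the upstream change): install
+# these test requirements and run the collection unit tests with pytest, e.g.
+#   pip install -r test-requirements.txt
+#   pytest tests/unit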
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts b/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts
index b8ec3e04b..f2fd8da12 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/hosts
@@ -8,6 +8,6 @@ sonic2
[datacenter:vars]
ansible_network_os=dellemc.enterprise_sonic.sonic
-ansible_python_interpreter=/usr/bin/python3
+ansible_python_interpreter=/usr/bin/python3.9
ansible_httpapi_use_ssl=true
ansible_httpapi_validate_certs=false
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml
index 93dd85447..4d08bb767 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/common/defaults/main.yml
@@ -50,6 +50,12 @@ native_eth3: Ethernet28
native_eth4: Ethernet32
native_eth5: Ethernet36
native_eth6: Ethernet40
+native_eth7: Ethernet96
+native_eth8: Ethernet100
+native_eth9: Ethernet104
+native_eth10: Ethernet108
+native_eth11: Ethernet112
+native_eth12: Ethernet116
std_eth1: Eth1/5
std_eth2: Eth1/6
@@ -57,6 +63,12 @@ std_eth3: Eth1/7
std_eth4: Eth1/8
std_eth5: Eth1/9
std_eth6: Eth1/10
+std_eth7: Eth1/97
+std_eth8: Eth1/98
+std_eth9: Eth1/99
+std_eth10: Eth1/100
+std_eth11: Eth1/101
+std_eth12: Eth1/102
interface1: "{{ std_eth1 if std_name in interface_mode else native_eth1 }}"
interface2: "{{ std_eth2 if std_name in interface_mode else native_eth2 }}"
@@ -64,3 +76,9 @@ interface3: "{{ std_eth3 if std_name in interface_mode else native_eth3 }}"
interface4: "{{ std_eth4 if std_name in interface_mode else native_eth4 }}"
interface5: "{{ std_eth5 if std_name in interface_mode else native_eth5 }}"
interface6: "{{ std_eth6 if std_name in interface_mode else native_eth6 }}"
+interface7: "{{ std_eth7 if std_name in interface_mode else native_eth7 }}"
+interface8: "{{ std_eth8 if std_name in interface_mode else native_eth8 }}"
+interface9: "{{ std_eth9 if std_name in interface_mode else native_eth9 }}"
+interface10: "{{ std_eth10 if std_name in interface_mode else native_eth10 }}"
+interface11: "{{ std_eth11 if std_name in interface_mode else native_eth11 }}"
+interface12: "{{ std_eth12 if std_name in interface_mode else native_eth12 }}"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml
index 291f615ee..e51a026cd 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_aaa/defaults/main.yml
@@ -28,9 +28,27 @@ tests:
data:
fail_through: true
group: radius
- local: true
- name: test_case_04
+ description: Replace aaa properties
+ state: replaced
+ input:
+ authentication:
+ data:
+ fail_through: false
+ group: ldap
+
+ - name: test_case_05
+ description: Override aaa properties
+ state: overridden
+ input:
+ authentication:
+ data:
+ fail_through: true
+ group: radius
+ local: true
+
+ - name: test_case_06
description: Delete aaa properties
state: deleted
input:
@@ -38,7 +56,7 @@ tests:
data:
group: radius
- - name: test_case_05
+ - name: test_case_07
description: aaa properties
state: merged
input:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/defaults/main.yml
new file mode 100644
index 000000000..43c3a1491
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/defaults/main.yml
@@ -0,0 +1,188 @@
+---
+ansible_connection: httpapi
+module_name: acl_interfaces
+
+po1: "Portchannel 100"
+
+vlan1: "Vlan 100"
+
+macacl1: "mac-acl-1"
+macacl2: "mac-acl-2"
+
+ipv4acl1: "ipv4-acl-1"
+ipv4acl2: "ipv4-acl-2"
+
+ipv6acl1: "ipv6-acl-1"
+ipv6acl2: "ipv6-acl-2"
+
+preparations_tests:
+ lag_interfaces:
+ - name: '{{ po1 }}'
+ vlans:
+ - vlan_id: 100
+ l2_acls:
+ - name: '{{ macacl1 }}'
+ - name: '{{ macacl2 }}'
+ l3_acls:
+ - address_family: 'ipv4'
+ acls:
+ - name: '{{ ipv4acl1 }}'
+ - name: '{{ ipv4acl2 }}'
+ - address_family: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl1 }}'
+ - name: '{{ ipv6acl2 }}'
+
+tests:
+ - name: test_case_01
+ description: Add interface access-group configurations
+ state: merged
+ input:
+ - name: '{{ interface1 }}'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl1 }}'
+ direction: 'in'
+ - type: 'ipv4'
+ acls:
+ - name: '{{ ipv4acl1 }}'
+ direction: 'in'
+ - name: '{{ ipv4acl2 }}'
+ direction: 'out'
+
+ - name: test_case_02
+ description: Update interface access-group configurations
+ state: merged
+ input:
+ - name: '{{ interface1 }}'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl2 }}'
+ direction: 'out'
+ - name: '{{ interface2 }}'
+ access_groups:
+ - type: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl2 }}'
+ direction: 'out'
+
+ - name: test_case_03
+ description: Update interface access-group configuration
+ state: merged
+ input:
+ - name: '{{ vlan1 }}'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl2 }}'
+ direction: 'out'
+ - name: '{{ po1 }}'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl2 }}'
+ direction: 'out'
+ - type: 'ipv4'
+ acls:
+ - name: '{{ ipv4acl2 }}'
+ direction: 'out'
+ - type: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl2 }}'
+ direction: 'out'
+
+ - name: test_case_04
+ description: Replace interface access-group configurations
+ state: replaced
+ input:
+ - name: '{{ interface1 }}'
+ access_groups:
+ - type: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl1 }}'
+ direction: 'in'
+ - name: '{{ interface2 }}'
+
+ - name: test_case_05
+ description: Override interface access-group configurations
+ state: overridden
+ input:
+ - name: '{{ interface1 }}'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl2 }}'
+ direction: 'out'
+ - type: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl1 }}'
+ direction: 'in'
+ - name: '{{ ipv6acl2 }}'
+ direction: 'out'
+ - name: '{{ vlan1 }}'
+ access_groups:
+ - type: 'ipv4'
+ acls:
+ - name: '{{ ipv4acl1 }}'
+ direction: 'in'
+ - name: '{{ ipv4acl2 }}'
+ direction: 'out'
+
+ - name: test_case_06
+ description: Update interface access-group configurations
+ state: merged
+ input:
+ - name: '{{ interface2 }}'
+ access_groups:
+ - type: 'ipv4'
+ acls:
+ - name: '{{ ipv4acl1 }}'
+ direction: 'in'
+ - name: '{{ ipv4acl2 }}'
+ direction: 'out'
+ - type: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl1 }}'
+ direction: 'in'
+ - name: '{{ ipv6acl2 }}'
+ direction: 'out'
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl1 }}'
+ direction: 'in'
+ - name: '{{ macacl2 }}'
+ direction: 'out'
+ - name: '{{ po1 }}'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: '{{ macacl1 }}'
+ direction: 'in'
+ - name: '{{ macacl2 }}'
+ direction: 'out'
+
+ - name: test_case_07
+ description: Delete interface ACL bindings based on interface name, access-group type and ACLs
+ state: deleted
+ input:
+ - name: '{{ interface1 }}'
+ access_groups:
+ - type: 'ipv6'
+ acls:
+ - name: '{{ ipv6acl1 }}'
+ direction: 'in'
+ - name: '{{ interface2 }}'
+ access_groups:
+ - type: 'ipv4'
+ - type: 'ipv6'
+ - name: '{{ vlan1 }}'
+ - name: '{{ po1 }}'
+ access_groups:
+ - type: 'mac'
+
+ - name: test_case_08
+ description: Delete all interface access-group configurations
+ state: deleted
+ input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..319f9f449
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/cleanup_tests.yaml
@@ -0,0 +1,30 @@
+---
+- name: Delete interface access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test L2 ACLs
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: "{{ preparations_tests.l2_acls }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test L3 ACLs
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: "{{ preparations_tests.l3_acls }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test VLANs
+ dellemc.enterprise_sonic.sonic_vlans:
+ config: "{{ preparations_tests.vlans }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test lag interfaces
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config: "{{ preparations_tests.lag_interfaces }}"
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/main.yml
new file mode 100644
index 000000000..b037b17e8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_acl_interfaces Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..7b6a39964
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/preparation_tests.yaml
@@ -0,0 +1,50 @@
+---
+- name: Delete old interface access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete old L2 ACLs
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete old L3 ACLs
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Initialize default interfaces
+ vars:
+ ansible_connection: network_cli
+ dellemc.enterprise_sonic.sonic_config:
+ commands: "{{ default_interface_cli }}"
+ register: output
+ ignore_errors: yes
+
+- name: Create lag interfaces
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config: "{{ preparations_tests.lag_interfaces }}"
+ state: merged
+ ignore_errors: yes
+
+- name: Create VLANs
+ dellemc.enterprise_sonic.sonic_vlans:
+ config: "{{ preparations_tests.vlans }}"
+ state: merged
+ ignore_errors: yes
+
+- name: Configure L2 ACLs
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: "{{ preparations_tests.l2_acls }}"
+ state: merged
+ ignore_errors: yes
+
+- name: Configure L3 ACLs
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: "{{ preparations_tests.l3_acls }}"
+ state: merged
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/tasks_template.yaml
new file mode 100644
index 000000000..ebfb8d92d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_acl_interfaces/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/defaults/main.yml
new file mode 100644
index 000000000..0703a2443
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/defaults/main.yml
@@ -0,0 +1,236 @@
+---
+ansible_connection: httpapi
+module_name: bfd
+
+tests:
+ - name: test_case_01
+ description: Merge BFD configuration
+ state: merged
+ input:
+ profiles:
+ - profile_name: 'p1'
+ enabled: True
+ transmit_interval: 120
+ receive_interval: 200
+ detect_multiplier: 2
+ passive_mode: True
+ min_ttl: 140
+ echo_interval: 150
+ echo_mode: True
+ single_hops:
+ - remote_address: '196.88.6.1'
+ vrf: 'default'
+ interface: '{{interface1}}'
+ local_address: '1.1.1.1'
+ enabled: True
+ transmit_interval: 50
+ receive_interval: 80
+ detect_multiplier: 4
+ passive_mode: True
+ echo_interval: 110
+ echo_mode: True
+ profile_name: 'p1'
+ multi_hops:
+ - remote_address: '192.40.1.3'
+ vrf: 'default'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 75
+ receive_interval: 100
+ detect_multiplier: 3
+ passive_mode: True
+ min_ttl: 125
+ profile_name: 'p1'
+
+ - name: test_case_02
+ description: Update BFD configuration
+ state: merged
+ input:
+ profiles:
+ - profile_name: 'p1'
+ enabled: False
+ transmit_interval: 130
+ receive_interval: 220
+ detect_multiplier: 5
+ passive_mode: False
+ min_ttl: 245
+ echo_interval: 255
+ echo_mode: False
+ - profile_name: 'p2'
+ enabled: True
+ transmit_interval: 135
+ receive_interval: 225
+ detect_multiplier: 10
+ passive_mode: True
+ min_ttl: 250
+ echo_interval: 250
+ echo_mode: True
+ single_hops:
+ - remote_address: '196.88.6.1'
+ vrf: 'default'
+ interface: '{{interface1}}'
+ local_address: '1.1.1.1'
+ enabled: False
+ transmit_interval: 60
+ receive_interval: 90
+ detect_multiplier: 2
+ passive_mode: False
+ echo_interval: 140
+ echo_mode: False
+ profile_name: 'p1'
+ - remote_address: '194.56.2.1'
+ vrf: 'default'
+ interface: '{{interface2}}'
+ local_address: '2.2.2.2'
+ enabled: False
+ transmit_interval: 65
+ receive_interval: 95
+ detect_multiplier: 7
+ passive_mode: False
+ echo_interval: 145
+ echo_mode: False
+ profile_name: 'p2'
+ multi_hops:
+ - remote_address: '192.40.1.3'
+ vrf: 'default'
+ local_address: '3.3.3.3'
+ enabled: False
+ transmit_interval: 65
+ receive_interval: 280
+ detect_multiplier: 3
+ passive_mode: False
+ min_ttl: 150
+ profile_name: 'p2'
+ - remote_address: '198.72.1.4'
+ vrf: 'default'
+ local_address: '4.4.4.4'
+ enabled: False
+ transmit_interval: 70
+ receive_interval: 285
+ detect_multiplier: 8
+ passive_mode: False
+ min_ttl: 155
+ profile_name: 'p2'
+
+ - name: test_case_03
+ description: Replace BFD configuration
+ state: replaced
+ input:
+ profiles:
+ - profile_name: 'p2'
+ enabled: False
+ single_hops:
+ - remote_address: '194.56.2.1'
+ vrf: 'default'
+ interface: '{{interface2}}'
+ local_address: '2.2.2.2'
+ echo_interval: 125
+ echo_mode: True
+ profile_name: 'p1'
+ multi_hops:
+ - remote_address: '198.72.1.4'
+ vrf: 'default'
+ local_address: '4.4.4.4'
+ enabled: True
+ transmit_interval: 71
+ receive_interval: 286
+ detect_multiplier: 9
+
+ - name: test_case_04
+ description: Override BFD configuration
+ state: overridden
+ input:
+ profiles:
+ - profile_name: 'p3'
+ enabled: True
+ transmit_interval: 110
+ receive_interval: 230
+ detect_multiplier: 10
+ passive_mode: True
+ min_ttl: 170
+ echo_interval: 140
+ echo_mode: True
+ - profile_name: 'p4'
+ single_hops:
+ - remote_address: '182.98.4.1'
+ vrf: 'default'
+ interface: '{{interface3}}'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 42
+ receive_interval: 84
+ detect_multiplier: 8
+ passive_mode: True
+ echo_interval: 115
+ echo_mode: True
+ profile_name: 'p3'
+ - remote_address: '183.98.3.2'
+ vrf: 'default'
+ interface: '{{interface2}}'
+ local_address: '1.2.3.4'
+ multi_hops:
+ - remote_address: '182.44.1.2'
+ vrf: 'default'
+ local_address: '2.2.2.2'
+ enabled: True
+ transmit_interval: 74
+ receive_interval: 101
+ detect_multiplier: 6
+ passive_mode: True
+ min_ttl: 127
+ profile_name: 'p3'
+ - remote_address: '162.45.5.1'
+ vrf: 'default'
+ local_address: '2.1.1.1'
+
+ - name: test_case_05
+ description: Delete BFD configuration
+ state: deleted
+ input:
+ profiles:
+ - profile_name: 'p3'
+ enabled: True
+ transmit_interval: 110
+ receive_interval: 230
+ detect_multiplier: 10
+ passive_mode: True
+ min_ttl: 170
+ echo_interval: 140
+ echo_mode: True
+ - profile_name: 'p4'
+ single_hops:
+ - remote_address: '182.98.4.1'
+ vrf: 'default'
+ interface: '{{interface3}}'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 42
+ receive_interval: 84
+ detect_multiplier: 8
+ passive_mode: True
+ echo_interval: 115
+ echo_mode: True
+ profile_name: 'p3'
+ - remote_address: '183.98.3.2'
+ vrf: 'default'
+ interface: '{{interface2}}'
+ local_address: '1.2.3.4'
+ multi_hops:
+ - remote_address: '182.44.1.2'
+ vrf: 'default'
+ local_address: '2.2.2.2'
+ enabled: True
+ transmit_interval: 74
+ receive_interval: 101
+ detect_multiplier: 6
+ passive_mode: True
+ min_ttl: 127
+ profile_name: 'p3'
+ - remote_address: '162.45.5.1'
+ vrf: 'default'
+ local_address: '2.1.1.1'
+
+ - name: test_case_06
+ description: Delete all BFD configuration
+ state: deleted
+ input: {}
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/meta/main.yaml
new file mode 100644
index 000000000..0b356217e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/main.yml
new file mode 100644
index 000000000..28d60a497
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/main.yml
@@ -0,0 +1,11 @@
+- debug: msg="sonic_bfd Test started ..."
+
+- set_fact:
+ base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
+
+- name: Preparations test
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..db13675de
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/preparation_tests.yaml
@@ -0,0 +1,5 @@
+- name: Delete old BFD configuration
+ sonic_bfd:
+ config: {}
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template.yaml
new file mode 100644
index 000000000..596617309
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_bfd:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_bfd:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template_del.yaml
new file mode 100644
index 000000000..1044f56c6
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bfd/tasks/tasks_template_del.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_bfd:
+ state: "{{ item.state }}"
+ config:
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_bfd:
+ state: "{{ item.state }}"
+ config:
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml
index 0eb7a6cb0..04d0a515b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/defaults/main.yml
@@ -4,15 +4,18 @@ module_name: bgp
vrf_1: VrfReg1
vrf_2: VrfReg2
+vrf_3: VrfReg3
bgp_as_1: 51
bgp_as_2: 52
bgp_as_3: 53
+bgp_as_4: 54
preparations_tests:
init_vrf:
- "ip vrf {{vrf_1}}"
- "ip vrf {{vrf_2}}"
+ - "ip vrf {{vrf_3}}"
tests_cli:
- name: cli_test_case_01
@@ -21,6 +24,7 @@ tests_cli:
input:
- bgp_as: "{{ bgp_as_1 }}"
router_id: 110.2.2.4
+ rt_delay: 10
bestpath:
as_path:
confed: True
@@ -34,9 +38,10 @@ tests_cli:
max_med:
on_startup:
timer: 667
- med_val: 7878
+ med_val: 7878
- bgp_as: "{{ bgp_as_2 }}"
router_id: 110.2.2.5
+ rt_delay: 20
vrf_name: "{{vrf_1}}"
bestpath:
as_path:
@@ -51,7 +56,7 @@ tests_cli:
max_med:
on_startup:
timer: 889
- med_val: 8854
+ med_val: 8854
tests:
- name: test_case_01
@@ -60,8 +65,10 @@ tests:
input:
- bgp_as: "{{ bgp_as_1 }}"
router_id: 110.2.2.4
+ rt_delay: 10
- bgp_as: "{{ bgp_as_2 }}"
router_id: 110.2.2.5
+ rt_delay: 20
vrf_name: "{{vrf_1}}"
- name: test_case_02
description: Updates BGP properties
@@ -69,9 +76,11 @@ tests:
input:
- bgp_as: "{{ bgp_as_1 }}"
router_id: 110.2.2.30
+ rt_delay: 12
log_neighbor_changes: True
- bgp_as: "{{ bgp_as_2 }}"
router_id: 110.2.2.31
+ rt_delay: 22
vrf_name: "{{vrf_1}}"
log_neighbor_changes: True
- name: test_case_03
@@ -80,9 +89,11 @@ tests:
input:
- bgp_as: "{{ bgp_as_1 }}"
router_id: 110.2.2.30
+ rt_delay: 12
log_neighbor_changes: True
- bgp_as: "{{ bgp_as_2 }}"
router_id: 110.2.2.31
+ rt_delay: 22
vrf_name: "{{vrf_1}}"
log_neighbor_changes: True
- name: test_case_04
@@ -91,6 +102,7 @@ tests:
input:
- bgp_as: "{{ bgp_as_1 }}"
router_id: 110.2.2.4
+ rt_delay: 10
bestpath:
as_path:
confed: True
@@ -104,9 +116,10 @@ tests:
max_med:
on_startup:
timer: 889
- med_val: 8854
+ med_val: 8854
- bgp_as: "{{ bgp_as_2 }}"
router_id: 110.2.2.5
+ rt_delay: 20
vrf_name: "{{vrf_1}}"
bestpath:
as_path:
@@ -117,17 +130,18 @@ tests:
compare_routerid: True
med:
confed: True
- missing_as_worst: True
+ missing_as_worst: True
max_med:
on_startup:
timer: 556
- med_val: 5567
+ med_val: 5567
- name: test_case_05
description: Update bestpath BGP properties
state: merged
input:
- bgp_as: "{{ bgp_as_1 }}"
router_id: 110.2.2.51
+ rt_delay: 50
bestpath:
as_path:
confed: False
@@ -138,9 +152,10 @@ tests:
max_med:
on_startup:
timer: 776
- med_val: 7768
+ med_val: 7768
- bgp_as: "{{ bgp_as_2 }}"
router_id: 110.2.2.52
+ rt_delay: 100
vrf_name: "{{vrf_1}}"
bestpath:
as_path:
@@ -152,7 +167,7 @@ tests:
max_med:
on_startup:
timer: 445
- med_val: 4458
+ med_val: 4458
- name: test_case_06
description: Update1 bestpath BGP properties
state: merged
@@ -191,7 +206,7 @@ tests:
max_med:
on_startup:
timer: 889
- med_val: 8854
+ med_val: 8854
- bgp_as: "{{ bgp_as_2 }}"
vrf_name: "{{vrf_1}}"
bestpath:
@@ -204,7 +219,7 @@ tests:
max_med:
on_startup:
timer: 889
- med_val: 8854
+ med_val: 8854
- name: test_case_08
description: Update1 bestpath BGP properties
state: merged
@@ -237,7 +252,7 @@ tests:
multipath_relax_as_set: True
compare_routerid: True
med:
- missing_as_worst: True
+ missing_as_worst: True
- name: test_case_09
description: Deletes BGP properties
state: deleted
@@ -245,6 +260,59 @@ tests:
- bgp_as: "{{ bgp_as_2 }}"
vrf_name: "{{vrf_1}}"
- name: test_case_10
+ description: Replaces BGP properties
+ state: replaced
+ input:
+ - bgp_as: "{{ bgp_as_1 }}"
+ router_id: 110.2.2.51
+ rt_delay: 15
+ bestpath:
+ as_path:
+ confed: True
+ compare_routerid: False
+ med:
+ always_compare_med: True
+ max_med:
+ on_startup:
+ timer: 890
+ med_val: 8854
+ - bgp_as: "{{ bgp_as_2 }}"
+ vrf_name: "{{vrf_1}}"
+ router_id: 110.2.2.52
+ bestpath:
+ as_path:
+ confed: True
+ med:
+ confed: True
+ timers:
+ holdtime: 90
+ keepalive_interval: 30
+ - name: test_case_11
+ description: Overrides BGP properties
+ state: overridden
+ input:
+ - bgp_as: "{{ bgp_as_1 }}"
+ router_id: 110.2.2.51
+ bestpath:
+ as_path:
+ confed: True
+ compare_routerid: True
+ med:
+ confed: True
+ timers:
+ holdtime: 90
+ keepalive_interval: 30
+ - bgp_as: "{{ bgp_as_3 }}"
+ vrf_name: "{{vrf_3}}"
+ router_id: 110.2.2.52
+ rt_delay: 15
+ bestpath:
+ as_path:
+ multipath_relax: True
+ multipath_relax_as_set: True
+ med:
+ missing_as_worst: True
+ - name: test_case_12
description: Deletes all BGP properties
state: deleted
- input: []
+ input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml
index 29d5392d6..b488340e0 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/tasks/main.yml
@@ -1,7 +1,7 @@
- debug: msg="sonic_interfaces Test started ..."
- set_fact:
- base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
+ base_cfg_path: "{{ role_path + '/' + 'templates/' }}"
- name: Preparations test
include_tasks: preparation_tests.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg
index 720006565..42857467e 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp/templates/cli_test_case_01.cfg
@@ -1,6 +1,8 @@
router bgp 52 vrf VrfReg1
router-id 110.2.2.5
log-neighbor-changes
+ max-med on-startup 889 8854
+ route-map delay-timer 20
bestpath as-path multipath-relax as-set
bestpath as-path ignore
bestpath as-path confed
@@ -10,6 +12,8 @@ router bgp 52 vrf VrfReg1
router bgp 51
router-id 110.2.2.4
log-neighbor-changes
+ max-med on-startup 667 7878
+ route-map delay-timer 10
bestpath as-path multipath-relax as-set
bestpath as-path ignore
bestpath as-path confed
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml
index ba23b3f57..d920b3880 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_af/defaults/main.yml
@@ -5,6 +5,7 @@ module_name: bgp_af
bgp_as_1: 51
bgp_as_2: 52
+bgp_as_3: 53
vrf_1: VrfReg1
vrf_2: VrfReg2
@@ -25,6 +26,11 @@ preparations_tests:
router_id: 111.2.2.42
log_neighbor_changes: True
vrf_name: VrfReg1
+ - bgp_as: "{{bgp_as_3}}"
+ router_id: 111.2.2.43
+ log_neighbor_changes: True
+ vrf_name: VrfReg2
+
tests:
- name: test_case_01
description: BGP AF properties
@@ -39,6 +45,11 @@ tests:
safi: unicast
- afi: l2vpn
safi: evpn
+ advertise_all_vni: True
+ route_advertise_list:
+ - advertise_afi: ipv4
+ vnis:
+ - vni_number: 1
- bgp_as: "{{ bgp_as_2 }}"
vrf_name: "{{vrf_1}}"
address_family:
@@ -92,10 +103,20 @@ tests:
advertise_pip_ip: "1.1.1.1"
advertise_pip_peer_ip: "2.2.2.2"
advertise_svi_ip: True
- advertise_all_vni: True
- route_advertise_list:
- - advertise_afi: ipv4
- route_map: rmap_reg1
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "22:22"
+ rt_out:
+ - "33:33"
+ vnis:
+ - vni_number: 1
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "2.2.2.2:22"
+ rt_in:
+ - "44:44"
+ rt_out:
+ - "44:44"
- bgp_as: "{{bgp_as_2}}"
vrf_name: "{{vrf_1}}"
address_family:
@@ -175,14 +196,35 @@ tests:
route_map: rmap_reg2
- afi: l2vpn
safi: evpn
- advertise_pip: False
advertise_pip_ip: "3.3.3.3"
advertise_pip_peer_ip: "4.4.4.4"
advertise_svi_ip: False
- advertise_all_vni: False
route_advertise_list:
- advertise_afi: ipv6
route_map: rmap_reg2
+ rd: "3.3.3.3:33"
+ rt_in:
+ - "12:12"
+ rt_out:
+ - "14:14"
+ vnis:
+ - vni_number: 1
+ advertise_default_gw: False
+ advertise_svi_ip: False
+ rd: "5.5.5.5:55"
+ rt_in:
+ - "88:88"
+ rt_out:
+ - "77:77"
+ - vni_number: 2
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ - vni_number: 3
+ rd: "9.9.9.9:99"
+ rt_in:
+ - "60:60"
+ rt_out:
+ - "80:80"
- bgp_as: "{{bgp_as_2}}"
vrf_name: "{{vrf_1}}"
address_family:
@@ -233,7 +275,7 @@ tests:
- afi: ipv4
safi: unicast
max_path:
- ebgp: 4
+ ebgp: 2
ibgp: 3
redistribute:
- metric: "30"
@@ -253,13 +295,31 @@ tests:
route_map: rmap_reg2
- afi: l2vpn
safi: evpn
- advertise_pip: False
+ advertise_pip: True
advertise_pip_ip: "3.3.3.3"
advertise_pip_peer_ip: "4.4.4.4"
advertise_svi_ip: False
route_advertise_list:
- advertise_afi: ipv4
route_map: rmap_reg1
+ rd: "3.3.3.3:33"
+ rt_in:
+ - "22:22"
+ rt_out:
+ - "33:33"
+ - "14:14"
+ vnis:
+ - vni_number: 1
+ advertise_default_gw: False
+ advertise_svi_ip: False
+ rd: "5.5.5.5:55"
+ rt_in:
+ - "44:44"
+ - "88:88"
+ rt_out:
+ - "77:77"
+ - vni_number: 2
+ - vni_number: 3
- bgp_as: "{{bgp_as_2}}"
vrf_name: "{{vrf_1}}"
address_family:
@@ -268,7 +328,7 @@ tests:
safi: unicast
max_path:
ebgp: 4
- ibgp: 3
+ ibgp: 5
redistribute:
- metric: "41"
protocol: ospf
@@ -279,8 +339,8 @@ tests:
- afi: ipv6
safi: unicast
max_path:
- ebgp: 4
- ibgp: 6
+ ebgp: 9
+ ibgp: 8
redistribute:
- metric: "43"
protocol: connected
@@ -309,7 +369,6 @@ tests:
afis:
- afi: l2vpn
safi: evpn
- route_advertise_list:
- name: test_case_06
description: Delete2 BGP AF properties
state: deleted
@@ -319,6 +378,162 @@ tests:
address_family:
afis:
- name: test_case_07
+ description: Create BGP AF configuration
+ state: merged
+ input:
+ - bgp_as: "{{ bgp_as_1 }}"
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ - name: test_case_08
+ description: Add BGP AF configuration for replace
+ state: merged
+ input:
+ - bgp_as: "{{bgp_as_1}}"
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ advertise_all_vni: True
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: rmap_reg1
+ - advertise_afi: ipv6
+ route_map: rmap_reg2
+ rd: "5.5.5.5:55"
+ rt_in:
+ - "11:11"
+ - "22:22"
+ - "33:33"
+ rt_out:
+ - "77:77"
+ vnis:
+ - vni_number: 4
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "88:88"
+ - "99:99"
+ rt_out:
+ - "88:88"
+ - "99:99"
+ - vni_number: 5
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "1.1.1.2:11"
+ - bgp_as: "{{bgp_as_2}}"
+ vrf_name: "{{vrf_1}}"
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ max_path:
+ ebgp: 5
+ network:
+ - '10.1.1.0/24'
+ - '10.1.2.0/24'
+ - name: test_case_09
+ description: Replace BGP AF configuration
+ state: replaced
+ input:
+ - bgp_as: "{{bgp_as_1}}"
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ advertise_all_vni: True
+ route_advertise_list:
+ - advertise_afi: ipv6
+ route_map: rmap_reg2
+ rd: "5.5.5.5:55"
+ rt_in:
+ - "11:11"
+ rt_out:
+ - "77:77"
+ vnis:
+ - vni_number: 4
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "87:87"
+ rt_out:
+ - "88:88"
+ - bgp_as: "{{bgp_as_2}}"
+ vrf_name: "{{vrf_1}}"
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ dampening: True
+ redistribute:
+ - protocol: connected
+ network:
+ - '10.1.1.1/24'
+ - '11.1.1.1/24'
+ - afi: ipv6
+ safi: unicast
+ redistribute:
+ - protocol: ospf
+ metric: 40
+ - protocol: static
+ route_map: rmap_reg1
+ network:
+ - '1::1/64'
+ - '2::1/64'
+ - bgp_as: "{{bgp_as_3}}"
+ vrf_name: "{{vrf_2}}"
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ dampening: True
+ - name: test_case_10
+ description: Override BGP AF configuration
+ state: overridden
+ input:
+ - bgp_as: "{{bgp_as_1}}"
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ advertise_all_vni: True
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: rmap_reg1
+ - advertise_afi: ipv6
+ rd: "5.5.5.5:55"
+ rt_in:
+ - "22:22"
+ - "33:33"
+ rt_out:
+ - "66:66"
+ - "77:77"
+ vnis:
+ - vni_number: 10
+ advertise_default_gw: True
+ advertise_svi_ip: True
+ rd: "1.1.1.1:11"
+ rt_in:
+ - "87:87"
+ rt_out:
+ - "88:88"
+ - bgp_as: "{{bgp_as_2}}"
+ vrf_name: "{{vrf_1}}"
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ redistribute:
+ - protocol: ospf
+ - protocol: connected
+ metric: 40
+ network:
+ - '11.1.1.1/24'
+ - '12.1.1.1/24'
+ - name: test_case_11
description: Delete3 BGP AF properties
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml
index f2e31e4a2..ebf984bb8 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_as_paths/defaults/main.yml
@@ -7,10 +7,10 @@ bgp_as_2: 52
vrf_1: VrfReg1
vrf_2: VrfReg2
-
+
tests:
- name: test_case_01
- description: BGP properties
+ description: Add BGP as-path lists
state: merged
input:
- name: test
@@ -22,57 +22,124 @@ tests:
- "101.101"
permit: False
- name: test_case_02
- description: Update created BGP properties
+ description: Update BGP as-path lists
state: merged
input:
- - name: test
- members:
- - "11"
- - "22"
- - "33"
- - 44
- permit: True
- - name: test_1
- members:
- - "101.101"
- - "201.201"
- - "301.301"
- permit: False
- - name: test_2
- members:
- - '111\\:'
- - '11\\d+'
- - '113\\*'
- - '114\\'
- permit: True
+ - name: test
+ members:
+ - "11"
+ - "22"
+ - "33"
+ - 44
+ permit: True
+ - name: test_1
+ members:
+ - "101.101"
+ - "201.201"
+ - "301.301"
+ permit: False
+ - name: test_2
+ members:
+ - "110"
+ - "111*"
+ - "112*"
+ - "^113"
+ - "45$"
+ permit: True
- name: test_case_03
- description: Delete BGP properties
+ description: Delete BGP as-path lists' members
state: deleted
input:
- - name: test
- members:
- - "33"
- - name: test_1
- members:
- - "101.101"
- - "201.201"
- - "301.301"
- permit: False
- - name: test_2
- members:
- - '111\\:'
- - '11\\d+'
- - '113\\*'
- - '114\\'
- permit: True
+ - name: test
+ members:
+ - "33"
+ - name: test_1
+ members:
+ - "101.101"
+ - "201.201"
+ - "301.301"
+ permit: False
+ - name: test_2
+ members:
+ - "111*"
+ - "112*"
+ - "^113"
+ - "45$"
+ permit: True
- name: test_case_04
- description: Delete BGP properties
- state: deleted
+ description: Add BGP as-path lists
+ state: merged
input:
- - name: test
- members:
- permit:
+ - name: test_1
+ members:
+ - "100.*"
+ - "200.*"
+ permit: False
+ - name: test_2
+ members:
+ - "110"
+ - "120"
+ - "^800"
+ - "25$"
+ permit: True
+ - name: test_3
+ members:
+ - "900.*"
+ - "910.*"
+ permit: False
- name: test_case_05
- description: Delete BGP properties
+ description: Replace BGP as-path lists
+ state: replaced
+ input:
+ - name: test
+ - name: test_1
+ members:
+ - "301.301"
+ permit: False
+ - name: test_2
+ members:
+ - "111*"
+ - "112*"
+ - "^800"
+ - "25$"
+ permit: True
+ - name: test_3
+ members:
+ - "900.*"
+ - "910.*"
+ permit: True
+ - name: test_4
+ members:
+ - "800.*"
+ permit: True
+ - name: test_case_06
+ description: Override BGP as-path lists
+ state: overridden
+ input:
+ - name: test
+ members:
+ - "33.*"
+ - "44.*"
+ permit: True
+ - name: test_1
+ members:
+ - "201.201"
+ - "301.301"
+ permit: False
+ - name: test_2
+ members:
+ - "111*"
+ - "^800"
+ - "25$"
+ permit: True
+ - name: test_case_07
+ description: Delete BGP as-path list
+ state: deleted
+ input:
+ - name: test
+ members:
+ permit:
+ - name: test_case_08
+ description: Delete all BGP as-path list
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml
index eb32d2759..cf2f82d43 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_communities/defaults/main.yml
@@ -9,7 +9,7 @@ tests:
input:
- name: test
type: expanded
- permit: false
+ permit: true
match: ANY
members:
regex:
@@ -17,19 +17,16 @@ tests:
- "12"
- name: test2
type: standard
- permit: true
+ permit: false
+ no_export: true
match: ALL
- members:
- regex:
- - "21"
- - "22"
- name: test_case_02
description: Update created BGP properties
state: merged
input:
- name: test
type: expanded
- permit: false
+ permit: true
match: ANY
members:
regex:
@@ -38,20 +35,16 @@ tests:
- 14
- name: test2
type: standard
- permit: true
+ permit: false
+ no_peer: true
match: ALL
- members:
- regex:
- - "23"
- - "24"
- - 25
- name: test_case_03
description: Update1 created BGP properties
state: merged
input:
- name: test
type: expanded
- permit: true
+ permit: false
match: ANY
members:
regex:
@@ -59,12 +52,8 @@ tests:
- "12"
- name: test2
type: standard
- permit: false
+ permit: true
match: ALL
- members:
- regex:
- - "21"
- - "22"
- name: test_case_04
description: Delete BGP properties
state: deleted
@@ -77,11 +66,10 @@ tests:
- "13"
- name: test2
type: standard
+ permit: false
match: ALL
- members:
- regex:
- - "23"
- - "24"
+ no_export: true
+ no_peer: true
- name: test_case_05
description: Delete1 BGP properties
state: deleted
@@ -91,11 +79,124 @@ tests:
members:
regex:
- name: test_case_06
- description: Delete2 BGP properties
- state: deleted
+ description: Update2 BGP properties
+ state: merged
input:
- name: test
+ type: expanded
+ match: ANY
+ permit: true
+ members:
+ regex:
+ - 201
+ - name: test3
+ type: expanded
+ match: ALL
+ permit: true
+ members:
+ regex:
+ - "110"
+ - 111
- name: test_case_07
+ description: Replace BGP properties
+ state: replaced
+ input:
+ - name: test
+ type: standard
+ local_as: true
+ permit: true
+ - name: test2
+ type: expanded
+ match: ALL
+ permit: false
+ members:
+ regex:
+ - "220"
+ - 222
+ - "123"
+ - name: test_case_08
+ description: Replace2 BGP properties
+ state: replaced
+ input:
+ - name: test4
+ type: standard
+ permit: true
+ no_peer: true
+ - name: test5
+ type: expanded
+ members:
+ regex:
+ - 113
+ permit: true
+ - name: test_case_09
+ description: Override BGP properties
+ state: overridden
+ input:
+ - name: test3
+ type: standard
+ local_as: True
+ permit: false
+ - name: test2
+ type: standard
+ permit: true
+ no_export: true
+ - name: test_case_10
+ description: Override2 BGP properties
+ state: overridden
+ input:
+ - name: test3
+ type: standard
+ permit: false
+ no_export: true
+ - name: test4
+ type: expanded
+ permit: false
+ members:
+ regex:
+ - 113
+ - name: test2
+ type: standard
+ permit: true
+ no_export: true
+ - name: test_case_11
+ description: Override3 BGP properties
+ state: overridden
+ input:
+ - name: test4
+ type: expanded
+ permit: false
+ members:
+ regex:
+ - 113
+ - name: test2
+ type: standard
+ local_as: true
+ no_peer: true
+ no_advertise: true
+ permit: true
+ no_export: true
+ - name: test_case_12
+ description: Override4 BGP properties
+ state: overridden
+ input:
+ - name: test4
+ type: expanded
+ permit: false
+ members:
+ regex:
+ - 113
+ - name: test2
+ type: standard
+ local_as: true
+ no_advertise: true
+ permit: true
+ no_export: true
+ - name: test_case_13
+ description: Delete3 BGP properties
+ state: deleted
+ input:
+ - name: test4
+ - name: test_case_14
description: Delete2 BGP properties
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml
index be6e96a86..6e9daf9a1 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_ext_communities/defaults/main.yml
@@ -2,7 +2,7 @@
ansible_connection: httpapi
module_name: sonic_bgp_ext_communities
-tests: "{{ merged_tests + deleted_tests }}"
+tests: "{{ merged_tests + deleted_tests + replaced_tests + overridden_tests + deleted_all }}"
merged_tests:
- name: test_case_01
@@ -95,19 +95,25 @@ merged_tests:
match: all
members:
route_origin:
- - "4403.301"
- - "5503.401"
+ - "4403:301"
+ - "5503:401"
- name: test_case_05
- description: Create empty Communities properties
+ description: Create new Communities properties
state: merged
input:
- name: test_ext1
type: expanded
permit: true
+ members:
+ regex:
+ - "20"
match: any
- name: test_std1
type: standard
permit: false
+ members:
+ route_target:
+ - "1.1.1.1:33"
match: any
- name: test_case_06
description: test BGP Communities properties
@@ -127,8 +133,8 @@ merged_tests:
match: any
members:
route_origin:
- - "4403.301"
- - "5503.401"
+ - "4403:301"
+ - "5503:401"
deleted_tests:
# Ethernet testcases started...
@@ -154,101 +160,65 @@ deleted_tests:
match: all
members:
route_target:
- - "101.101"
- - "201.201"
- - "102.101"
- - "202.201"
- - "1.1.1.1.101"
- - "1.1.1.2.201"
+ - "101:101"
+ - "201:201"
+ - "102:101"
+ - "202:201"
+ - "1.1.1.1:101"
+ - "1.1.1.2:201"
route_origin:
- - "301.301"
- - "401.401"
- - "302.301"
- - "402.401"
- - "303.301"
- - "403.401"
+ - "301:301"
+ - "401:401"
+ - "302:301"
+ - "402:401"
+ - "303:301"
+ - "403:401"
- name: test_std11
type: standard
permit: true
match: all
members:
route_target:
- - "101.101"
- - "201.201"
- - "102.101"
- - "202.201"
- - "103.101"
- - "203.201"
- - "1.1.1.1.101"
- - "1.1.1.2.201"
- - "1.1.1.1.102"
- - "1.1.1.2.203"
- route_origin:
- - "301.301"
- - "401.401"
- - "302.301"
- - "402.401"
- - "303.301"
- - "403.401"
- - name: test_std12
- type: standard
- permit: true
- match: all
- members:
- route_target:
- - "101.101"
- - "201.201"
- - "102.101"
- - "202.201"
- - "103.101"
- - "203.201"
- - "1.1.1.1.101"
- - "1.1.1.2.201"
- - "1.1.1.1.102"
- - "1.1.1.2.203"
- route_origin:
- - "301.301"
- - "401.401"
- - "302.301"
- - "402.401"
- - "303.301"
- - "403.401"
- - name: test_std12
- type: standard
- permit: true
- match: all
- members:
- route_target:
- - "101.101"
- - "201.201"
- - "102.101"
- - "202.201"
- - "103.101"
- - "203.201"
- - "1.1.1.1.101"
- - "1.1.1.2.201"
- - "1.1.1.1.102"
- - "1.1.1.2.203"
+ - "101:101"
+ - "201:201"
+ - "102:101"
+ - "202:201"
+ - "103:101"
+ - "203:201"
+ - "1.1.1.1:101"
+ - "1.1.1.2:201"
+ - "1.1.1.1:102"
+ - "1.1.1.2:203"
route_origin:
- - "301.301"
- - "401.401"
- - "302.301"
- - "402.401"
- - "303.301"
- - "403.401"
+ - "301:301"
+ - "401:401"
+ - "302:301"
+ - "402:401"
+ - "303:301"
+ - "403:401"
- name: test_std12
type: standard
permit: true
match: all
members:
route_target:
- - "103.101"
- - "203.201"
+ - "101:101"
+ - "201:201"
+ - "102:101"
+ - "202:201"
+ - "103:101"
+ - "203:201"
+ - "1.1.1.1:101"
+ - "1.1.1.2:201"
- "1.1.1.1:102"
- - "1.1.1.2.203"
+ - "1.1.1.2:203"
route_origin:
- - "301.301"
- - "401.401"
+ - "301:301"
+ - "401:401"
+ - "302:301"
+ - "402:401"
+ - "303:301"
+ - "403:401"
- name: del_test_case_01
description: BGP Communities properties
state: deleted
@@ -277,7 +247,7 @@ deleted_tests:
route_origin:
- "301:301"
- "401:401"
- - name: test_case_02
+ - name: del_test_case_02
description: BGP Communities properties
state: deleted
input:
@@ -289,14 +259,15 @@ deleted_tests:
type: standard
members:
route_target:
- - "1.1.1.1.101"
+ - "1.1.1.1:101"
- name: test_std11
type: standard
members:
route_origin:
- - "301.301"
- - "401.401"
- - "1.1.1.1.101"
+ - "301:301"
+ - "401:401"
+ route_target:
+ - "1.1.1.1:101"
- name: del_test_case_03
description: Update created BGP properties
state: deleted
@@ -315,6 +286,150 @@ deleted_tests:
members:
route_origin:
route_target:
+
+replaced_tests:
+ - name: replace_test_case_01
+ description: Replace created BGP properties
+ state: replaced
+ input:
+ - name: test_std
+ type: standard
+ permit: false
+ match: any
+ members:
+ route_origin:
+ - "4403:301"
+ - "5503:401"
+ - name: test_comm112
+ type: standard
+ permit: true
+ match: any
+ members:
+ route_target:
+ - "2.2.2.2:201"
+ route_origin:
+ - "500:500"
+ - "600:600"
+ - name: replace_test_case_02
+ description: Replace2 created BGP properties
+ state: replaced
+ input:
+ - name: test_comm13
+ type: expanded
+ permit: true
+ match: all
+ members:
+ regex:
+ - "15"
+ - "20"
+ - 25
+ - name: test_comm112
+ type: standard
+ permit: true
+ match: any
+ members:
+ route_target:
+ - "2.2.2.2:201"
+ route_origin:
+ - "500:500"
+ - "600:600"
+ - name: test_std
+ type: standard
+ permit: false
+ match: any
+ members:
+ route_origin:
+ - "5503:401"
+
+overridden_tests:
+ - name: overridden_test_case_01
+ description: Override created BGP properties
+ state: overridden
+ input:
+ - name: test_new_std
+ type: standard
+ permit: true
+ match: all
+ members:
+ route_target:
+ - "12.12.12.12:335"
+ - "13.13.13.13:445"
+ route_origin:
+ - "4301:4301"
+ - "501:501"
+ - name: test_new_expanded
+ type: expanded
+ permit: false
+ match: any
+ members:
+ regex:
+ - 22
+ - 23
+ - 24
+ - name: overridden_test_case_02
+ description: Override2 created BGP properties
+ state: overridden
+ input:
+ - name: test_new_expanded2
+ type: expanded
+ permit: true
+ members:
+ regex:
+ - 33
+ - "44"
+ - name: test_new_std
+ type: standard
+ permit: true
+ match: all
+ members:
+ route_target:
+ - "13.13.13.13:445"
+ route_origin:
+ - "501:501"
+ - name: test_new_expanded
+ type: expanded
+ permit: false
+ match: any
+ members:
+ regex:
+ - 22
+ - 23
+ - 24
+ - name: overridden_test_case_03
+ description: Override3 created BGP properties
+ state: overridden
+ input:
+ - name: test_new_std
+ type: standard
+ permit: true
+ match: all
+ members:
+ route_target:
+ - "13.13.13.13:445"
+ route_origin:
+ - "501:501"
+ - name: test_new_expanded
+ type: expanded
+ permit: false
+ match: any
+ members:
+ regex:
+ - 22
+ - 23
+ - 24
+ - name: overridden_test_case_04
+ description: Override4 created BGP properties
+ state: overridden
+ input:
+ - name: test_new_std
+ type: standard
+ permit: true
+ match: all
+ members:
+ route_origin:
+ - "502:502"
+
+deleted_all:
- name: del_test_case_04
description: Update created BGP properties
state: deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml
deleted file mode 100644
index 353861255..000000000
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main copy.yml
+++ /dev/null
@@ -1,316 +0,0 @@
----
-ansible_connection: httpapi
-module_name: sonic_bgp_neighbors
-
-bgp_as_1: 51
-bgp_as_2: 52
-
-vrf_1: VrfReg1
-vrf_2: VrfReg2
-
-preparations_tests:
- init_route_map:
- - route-map rmap_reg1 permit 11
- - route-map rmap_reg2 permit 11
- - route-map rmap_reg3 permit 11
- - route-map rmap_reg4 permit 11
- - route-map rmap_reg5 permit 11
- init_vrf:
- - "ip vrf {{vrf_1}}"
- - "ip vrf {{vrf_2}}"
- init_bgp:
- - bgp_as: "{{bgp_as_1}}"
- router_id: 111.2.2.41
- log_neighbor_changes: False
- - bgp_as: "{{bgp_as_2}}"
- router_id: 111.2.2.42
- log_neighbor_changes: True
- vrf_name: VrfReg1
-
-negative_tests:
- - name: negative_test_case_01
- description: allowas_in beyond value
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- neighbors:
- - neighbor: Ethernet12
- address_family:
- - afi: ipv4
- safi: unicast
- allowas_in:
- value: 11
- route_map:
- - name: rmap_reg1
- direction: in
- - name: rmap_reg1
- direction: out
- route_reflector_client: true
- route_server_client: true
- - name: negative_test_case_02
- description: BGP properties
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- neighbors:
- - neighbor: Ethernet12
- address_family:
- - afi: ipv4
- safi: unicast
- allowas_in:
- value: 11
- route_map:
- - name: rmap_reg1
- direction: in
- - name: rmap_reg2
- direction: in
- - name: rmap_reg1
- direction: out
- route_reflector_client: true
- route_server_client: true
-
-tests: "{{ merged_tests }}"
-
-deleted_tests:
- - name: test_case_del_01
- description: Delete peer group BGP properties
- state: deleted
- input:
- - bgp_as: "{{bgp_as_1}}"
- peergroup:
- - name: SPINE
- - name: SPINE1
- - bgp_as: "{{bgp_as_1}}"
- vrf_name: "{{vrf_1}}"
- peergroup:
- - name: SPINE
- - name: SPINE1
-
-
-
-merged_tests:
- - name: test_case_01
- description: BGP properties
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: Ethernet12
- remote_as: 5
- peer_group: SPINE
- advertisement_interval: 10
- timers:
- keepalive: 40
- holdtime: 50
- bfd: true
- capability:
- dynamic: true
- extended_nexthop: true
- - neighbor: 192.168.1.4
- - neighbor: 2::2
- - neighbor: Ethernet8
- - neighbor: 192.168.1.5
- remote_as: 6
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - neighbor: 3::3
- remote_as: 7
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - bgp_as: "{{bgp_as_1}}"
- vrf_name: "{{vrf_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: Ethernet24
- remote_as: 11
- peer_group: SPINE
- advertisement_interval: 10
- timers:
- keepalive: 40
- holdtime: 50
- bfd: true
- capability:
- dynamic: true
- extended_nexthop: true
- - neighbor: 192.168.2.2
- - neighbor: Ethernet28
- remote_as: 12
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - name: test_case_02
- description: Update BGP properties
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- peergroup:
- - name: SPINE
- - name: SPINE1
- neighbors:
- - neighbor: Ethernet12
- remote_as: 111
- peer_group: SPINE
- advertisement_interval: 11
- timers:
- keepalive: 41
- holdtime: 51
- bfd: false
- capability:
- dynamic: false
- extended_nexthop: false
- - neighbor: 192.168.1.4
- - neighbor: Ethernet8
- - neighbor: 192.168.1.5
- remote_as: 112
- peer_group: SPINE1
- advertisement_interval: 21
- timers:
- keepalive: 22
- holdtime: 23
- capability:
- dynamic: true
- - bgp_as: "{{bgp_as_1}}"
- vrf_name: "{{vrf_1}}"
- peergroup:
- - name: SPINE
- - name: SPINE1
- neighbors:
- - neighbor: Ethernet24
- remote_as: 213
- peer_group: SPINE1
- advertisement_interval: 44
- timers:
- keepalive: 55
- holdtime: 44
- bfd: false
- capability:
- dynamic: false
- extended_nexthop: false
- - neighbor: 192.168.2.2
- - neighbor: Ethernet28
- remote_as: 214
- peer_group: SPINE
- advertisement_interval: 45
- timers:
- keepalive: 33
- holdtime: 34
- capability:
- dynamic: false
- - neighbor: 3::3
- remote_as: 215
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - name: test_case_03
- description: BGP ipv6 properties
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: 2::2
- - neighbor: 11::11
- remote_as: external
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - neighbor: 3::3
- remote_as: 7
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - bgp_as: "{{bgp_as_1}}"
- vrf_name: "{{vrf_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: 192.168.2.2
- - neighbor: Ethernet28
- remote_as: 12
- peer_group: SPINE
- advertisement_interval: 20
- timers:
- keepalive: 30
- holdtime: 20
- capability:
- dynamic: true
- - name: test_case_04
- description: BGP remote-as properties
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: Ethernet8
- remote_as: internal
- - neighbor: 11::11
- remote_as: external
- - neighbor: 67.1.1.1
- remote_as: 7
- - bgp_as: "{{bgp_as_1}}"
- vrf_name: "{{vrf_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: Ethernet8
- remote_as: 1345
- - neighbor: 11::11
- remote_as: 2345
- - neighbor: 67.1.1.1
- remote_as: external
- - name: test_case_05
- description: BGP remote-as properties
- state: merged
- input:
- - bgp_as: "{{bgp_as_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: Ethernet8
- remote_as: external
- - neighbor: 11::11
- remote_as: internal
- - neighbor: 67.1.1.1
- remote_as: internal
- - bgp_as: "{{bgp_as_1}}"
- vrf_name: "{{vrf_1}}"
- peergroup:
- - name: SPINE
- neighbors:
- - neighbor: Ethernet8
- remote_as: internal
- - neighbor: 11::11
- remote_as: external
- - neighbor: 67.1.1.1
- remote_as: 1123 \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml
index 140eeeae5..dcc2b657f 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors/defaults/main.yml
@@ -116,10 +116,6 @@ deleted_tests:
prefix_list_out: p2
- afi: l2vpn
safi: evpn
- prefix_limit:
- max_prefixes: 4
- warning_threshold: 66
- restart_timer: 15
prefix_list_in: p2
prefix_list_out: p1
@@ -213,7 +209,7 @@ deleted_tests:
- neighbor: 3::3
peer_group: SPINE
- neighbor: 192.168.1.5
- peer_group: SPINE1
+ peer_group: SPINE
- bgp_as: "{{bgp_as_1}}"
vrf_name: "{{vrf_1}}"
peer_group:
@@ -230,7 +226,7 @@ deleted_tests:
- neighbor: "{{ interface4 }}"
peer_group: SPINE
- neighbor: "{{ interface3 }}"
- peer_group: SPINE1
+ peer_group: SPINE
bfd:
enabled: false
check_failure: false
@@ -468,7 +464,6 @@ merged_tests:
- neighbor: 192.168.1.5
remote_as:
peer_as: 112
- peer_group: SPINE1
advertisement_interval: 21
timers:
keepalive: 22
@@ -509,7 +504,6 @@ merged_tests:
- neighbor: "{{ interface3 }}"
remote_as:
peer_as: 212
- peer_group: SPINE1
advertisement_interval: 44
timers:
keepalive: 55
@@ -832,10 +826,6 @@ merged_tests:
prefix_list_out: p1
- afi: l2vpn
safi: evpn
- prefix_limit:
- max_prefixes: 3
- warning_threshold: 60
- restart_timer: 8
prefix_list_in: p1
prefix_list_out: p2
- name: test_case_09
@@ -872,9 +862,5 @@ merged_tests:
prefix_list_out: p2
- afi: l2vpn
safi: evpn
- prefix_limit:
- max_prefixes: 4
- warning_threshold: 66
- restart_timer: 15
prefix_list_in: p2
prefix_list_out: p1
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml
index dcb7b46e5..30b16bc41 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_bgp_neighbors_af/defaults/main.yml
@@ -382,10 +382,6 @@ tests:
prefix_list_out: p1
- afi: l2vpn
safi: evpn
- prefix_limit:
- max_prefixes: 3
- warning_threshold: 60
- restart_timer: 8
prefix_list_in: p1
prefix_list_out: p2
- name: test_case_06
@@ -421,10 +417,6 @@ tests:
prefix_list_out: p2
- afi: l2vpn
safi: evpn
- prefix_limit:
- max_prefixes: 4
- warning_threshold: 66
- restart_timer: 15
prefix_list_in: p2
prefix_list_out: p1
- name: test_case_07
@@ -460,9 +452,5 @@ tests:
prefix_list_out: p2
- afi: l2vpn
safi: evpn
- prefix_limit:
- max_prefixes: 4
- warning_threshold: 66
- restart_timer: 15
prefix_list_in: p2
prefix_list_out: p1
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/defaults/main.yml
new file mode 100644
index 000000000..77ef90f0e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/defaults/main.yml
@@ -0,0 +1,80 @@
+---
+ansible_connection: httpapi
+module_name: copp
+
+tests:
+ - name: test_case_01
+ description: Merge CoPP groups configuration
+ state: merged
+ input:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 1
+ trap_action: 'DROP'
+ queue: 1
+ cir: '45'
+ cbs: '45'
+ - copp_name: 'copp-2'
+ trap_priority: 2
+ trap_action: 'FORWARD'
+ queue: 2
+ cir: '90'
+ cbs: '90'
+ - name: test_case_02
+ description: Update CoPP groups configuration
+ state: merged
+ input:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 8
+ trap_action: 'FORWARD'
+ queue: 8
+ cir: '20'
+ cbs: '20'
+ - name: test_case_03
+ description: Replace CoPP groups configuration
+ state: replaced
+ input:
+ copp_groups:
+ - copp_name: 'copp-2'
+ trap_action: 'FORWARD'
+ cir: '60'
+ cbs: '60'
+ - copp_name: 'copp-3'
+ trap_priority: 3
+ trap_action: 'DROP'
+ queue: 3
+ cir: '70'
+ cbs: '70'
+ - copp_name: 'copp-4'
+ trap_priority: 5
+ trap_action: 'DROP'
+ queue: 5
+ cir: '75'
+ cbs: '75'
+ - name: test_case_04
+ description: Delete CoPP groups configuration
+ state: deleted
+ input:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 8
+ queue: 8
+ cir: '20'
+ cbs: '20'
+ - copp_name: 'copp-2'
+ - name: test_case_05
+ description: Overridden CoPP groups configuration
+ state: overridden
+ input:
+ copp_groups:
+ - copp_name: 'copp-5'
+ trap_priority: 1
+ trap_action: 'FORWARD'
+ queue: 1
+ cir: '15'
+ cbs: '15'
+ - name: test_case_06
+ description: Delete all CoPP groups
+ state: deleted
+ input: {}
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/meta/main.yaml
new file mode 100644
index 000000000..0b356217e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/main.yml
new file mode 100644
index 000000000..d7c2bfb11
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/main.yml
@@ -0,0 +1,11 @@
+- debug: msg="sonic_copp Test started ..."
+
+- set_fact:
+ base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
+
+- name: Preparation tests
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..2c01c9c08
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/preparation_tests.yaml
@@ -0,0 +1,5 @@
+- name: Delete old CoPP groups configuration
+ sonic_copp:
+ config: {}
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/tasks_template.yaml
new file mode 100644
index 000000000..cd5e02ef3
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_copp/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_copp:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_copp:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
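The template above runs each test case twice and hands both registered results to the common role's report tasks; the second run is expected to make no changes. A minimal explicit equivalent of that idempotency check, assuming the registered result exposes the standard changed flag (sketch only):

# Illustrative idempotency assertion; idempotent.facts.report.yaml in the common
# role is assumed to perform an equivalent comparison.
- name: "Assert that the second run of {{ item.name }} made no changes"
  ansible.builtin.assert:
    that:
      - not idempotent_task_output.changed
    fail_msg: "{{ item.name }} is not idempotent for state {{ item.state }}"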
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/defaults/main.yml
new file mode 100644
index 000000000..ab5c73af7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/defaults/main.yml
@@ -0,0 +1,350 @@
+---
+ansible_connection: httpapi
+module_name: dhcp_relay
+
+vrf1: "VrfReg1"
+vrf2: "VrfReg2"
+
+po1: "Portchannel 100"
+po2: "Portchannel 101"
+
+vlan1: "Vlan 100"
+vlan2: "Vlan 101"
+
+preparations_tests:
+ vrfs:
+ - name: '{{ vrf1 }}'
+ - name: '{{ vrf2 }}'
+ lag_interfaces:
+ - name: '{{ po1 }}'
+ - name: '{{ po2 }}'
+ vlans:
+ - vlan_id: 100
+ - vlan_id: 101
+ l3_interfaces:
+ - name: '{{ interface1 }}'
+ ipv4:
+ addresses:
+ - address: 100.1.1.1/24
+ ipv6:
+ addresses:
+ - address: 100::1/122
+ - name: '{{ interface2 }}'
+ ipv4:
+ addresses:
+ - address: 101.1.1.1/24
+ ipv6:
+ addresses:
+ - address: 101::1/122
+ - name: '{{ vlan1 }}'
+ ipv4:
+ addresses:
+ - address: 110.1.1.1/24
+ ipv6:
+ addresses:
+ - address: 110::1/122
+ - name: '{{ vlan2 }}'
+ ipv4:
+ addresses:
+ - address: 111.1.1.1/24
+ ipv6:
+ addresses:
+ - address: 111::1/122
+ - name: '{{ po1 }}'
+ ipv4:
+ addresses:
+ - address: 120.1.1.1/24
+ ipv6:
+ addresses:
+ - address: 120::1/122
+ - name: '{{ po2 }}'
+ ipv4:
+ addresses:
+ - address: 121.1.1.1/24
+ ipv6:
+ addresses:
+ - address: 121::1/122
+
+tests:
+ - name: test_case_01
+ description: Add DHCP and DHCPv6 relay configuration
+ state: merged
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.2
+ - address: 100.1.1.3
+ vrf_name: '{{ vrf1 }}'
+ vrf_select: true
+ source_interface: '{{ vlan2 }}'
+ link_select: true
+ max_hop_count: 8
+ policy_action: 'replace'
+ ipv6:
+ server_addresses:
+ - address: 100::2
+ - address: 100::3
+ vrf_name: '{{ vrf2 }}'
+ vrf_select: true
+ source_interface: '{{ vlan2 }}'
+ max_hop_count: 8
+
+ - name: test_case_02
+ description: Update DHCP and DHCPv6 relay configuration
+ state: merged
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.4
+ circuit_id: '%h:%p'
+ policy_action: 'discard'
+ vrf_select: false
+ ipv6:
+ vrf_select: false
+ - name: '{{ interface2 }}'
+ ipv4:
+ server_addresses:
+ - address: 101.1.1.2
+ circuit_id: '%h:%p'
+ max_hop_count: 8
+ ipv6:
+ server_addresses:
+ - address: 101::2
+ - address: 101::3
+ max_hop_count: 8
+
+ - name: test_case_03
+ description: Update DHCP and DHCPv6 relay configuration
+ state: merged
+ input:
+ - name: '{{ po1 }}'
+ ipv4:
+ server_addresses:
+ - address: 120.1.1.2
+ - address: 120.1.1.3
+ source_interface: '{{ vlan2 }}'
+ link_select: false
+ circuit_id: '%p'
+ max_hop_count: 8
+ - name: '{{ po2 }}'
+ ipv6:
+ server_addresses:
+ - address: 121::2
+ - address: 121::3
+ source_interface: '{{ vlan2 }}'
+ max_hop_count: 8
+
+ - name: test_case_04
+ description: Update DHCP and DHCPv6 relay configuration
+ state: merged
+ input:
+ - name: '{{ po1 }}'
+ ipv4:
+ link_select: true
+ circuit_id: '%i'
+ max_hop_count: 6
+ ipv6:
+ server_addresses:
+ - address: 120::2
+ - name: '{{ po2 }}'
+ ipv4:
+ server_addresses:
+ - address: 121.1.1.2
+ - name: '{{ vlan1 }}'
+ ipv4:
+ server_addresses:
+ - address: 110.1.1.2
+ - address: 110.1.1.3
+ - address: 110.1.1.4
+ - address: 110.1.1.5
+ circuit_id: '%i'
+ policy_action: 'append'
+
+ - name: test_case_05
+ description: Delete specific DHCP and DHCPv6 relay configurations
+ state: deleted
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.2
+ source_interface: '{{ vlan2 }}'
+ link_select: true
+ ipv6:
+ server_addresses:
+ - address: 100::2
+ source_interface: '{{ vlan2 }}'
+ - name: '{{ interface2 }}'
+ ipv4:
+ circuit_id: '%h:%p'
+ max_hop_count: 8
+ ipv6:
+ server_addresses:
+ - address: 101::3
+ max_hop_count: 8
+ - name: '{{ vlan1 }}'
+ ipv4:
+ policy_action: 'append'
+
+ - name: test_case_06
+ description: Delete all DHCP relay configuration for specified interface by deleting all DHCP server addresses
+ state: deleted
+ input:
+ - name: '{{ vlan1 }}'
+ ipv4:
+ server_addresses:
+ - address: 110.1.1.2
+ - address: 110.1.1.3
+ - address: 110.1.1.4
+ - address: 110.1.1.5
+
+ - name: test_case_07
+ description: Delete all DHCP relay configuration for specified interface by specifying only an empty DHCP server address
+ state: deleted
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address:
+
+ - name: test_case_08
+ description: Delete all DHCPv6 relay configuration for specified interface by specifying only an empty DHCPv6 server address
+ state: deleted
+ input:
+ - name: '{{ interface2 }}'
+ ipv6:
+ server_addresses:
+ - address:
+
+ - name: test_case_09
+ description: Delete all DHCP and DHCPv6 relay configuration for specified interface
+ state: deleted
+ input:
+ - name: '{{ po1 }}'
+ - name: '{{ po2 }}'
+
+ - name: test_case_10
+ description: Add DHCP and DHCPv6 relay configuration for replace
+ state: merged
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.2
+ - address: 100.1.1.3
+ vrf_name: '{{ vrf1 }}'
+ vrf_select: true
+ source_interface: '{{ vlan2 }}'
+ link_select: true
+ max_hop_count: 8
+ policy_action: 'replace'
+ ipv6:
+ server_addresses:
+ - address: 100::2
+ - address: 100::3
+ vrf_name: '{{ vrf2 }}'
+ vrf_select: true
+ source_interface: '{{ vlan2 }}'
+ max_hop_count: 8
+ - name: '{{ interface2 }}'
+ ipv4:
+ server_addresses:
+ - address: 101.1.1.2
+ - address: 101.1.1.3
+ vrf_name: '{{ vrf1 }}'
+ circuit_id: '%h:%p'
+ max_hop_count: 8
+ ipv6:
+ server_addresses:
+ - address: 101::2
+ - address: 101::3
+ vrf_name: '{{ vrf2 }}'
+ max_hop_count: 8
+ - name: '{{ po1 }}'
+ ipv4:
+ server_addresses:
+ - address: 120.1.1.2
+ - address: 120.1.1.3
+ source_interface: '{{ vlan2 }}'
+ link_select: false
+ circuit_id: '%p'
+ max_hop_count: 8
+
+ - name: test_case_11
+ description: Replace DHCP and DHCPv6 relay configurations
+ state: replaced
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.2
+ - address: 100.1.1.3
+ vrf_name: '{{ vrf2 }}'
+ vrf_select: true
+ source_interface: '{{ vlan2 }}'
+ max_hop_count: 8
+ policy_action: 'append'
+ ipv6:
+ server_addresses:
+ - address: 100::2
+ - address: 100::4
+ - address: 100::6
+ vrf_name: '{{ vrf2 }}'
+ vrf_select: true
+ - name: '{{ interface2 }}'
+ ipv4:
+ server_addresses:
+ - address: 101.1.1.2
+ - address: 101.1.1.4
+ - address: 101.1.1.6
+ circuit_id: '%h:%p'
+ max_hop_count: 8
+ ipv6:
+ server_addresses:
+ - address: 101::2
+ - address: 101::3
+ max_hop_count: 8
+ - name: '{{ po2 }}'
+ ipv4:
+ server_addresses:
+ - address: 120.1.1.2
+ - address: 120.1.1.3
+ source_interface: '{{ vlan2 }}'
+ link_select: false
+ circuit_id: '%p'
+ max_hop_count: 8
+
+ - name: test_case_12
+ description: Override DHCP and DHCPv6 relay configurations
+ state: overridden
+ input:
+ - name: '{{ interface1 }}'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.10
+ - address: 100.1.1.11
+ vrf_name: '{{ vrf2 }}'
+ vrf_select: true
+ source_interface: '{{ vlan2 }}'
+ max_hop_count: 12
+ policy_action: 'replace'
+ - name: '{{ interface2 }}'
+ ipv6:
+ server_addresses:
+ - address: 101::20
+ - address: 101::30
+ max_hop_count: 8
+ - name: '{{ po2 }}'
+ ipv6:
+ server_addresses:
+ - address: 121::2
+ - address: 121::3
+ source_interface: '{{ vlan2 }}'
+ max_hop_count: 8
+
+ - name: test_case_13
+ description: Delete all DHCP and DHCPv6 relay configurations
+ state: deleted
+ input: []
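The interface names above ({{ interface1 }}, {{ interface2 }}) are Jinja2 variables supplied by the test inventory, while the VRF, VLAN, and port-channel names are defined at the top of this defaults file. A hypothetical host_vars sketch for a device under test (names and values are illustrative assumptions, not taken from the collection):

# Hypothetical host_vars; real values depend on the platform's port naming.
interface1: "Ethernet0"
interface2: "Ethernet4"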
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..b21a7a4b9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/cleanup_tests.yaml
@@ -0,0 +1,30 @@
+---
+- name: Delete DHCP, DHCPv6 relay configurations
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete l3 configurations on test interfaces
+ dellemc.enterprise_sonic.sonic_l3_interfaces:
+ config: "{{ preparations_tests.l3_interfaces }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test VRFs
+ dellemc.enterprise_sonic.sonic_vrfs:
+ config: "{{ preparations_tests.vrfs }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test VLANs
+ dellemc.enterprise_sonic.sonic_vlans:
+ config: "{{ preparations_tests.vlans }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test lag interfaces
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config: "{{ preparations_tests.lag_interfaces }}"
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/main.yml
new file mode 100644
index 000000000..063a33d65
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_dhcp_relay Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..5101b7f4c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/preparation_tests.yaml
@@ -0,0 +1,38 @@
+---
+- name: Delete old DHCP, DHCPv6 relay configurations
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Initialize default interfaces
+ vars:
+ ansible_connection: network_cli
+ dellemc.enterprise_sonic.sonic_config:
+ commands: "{{ default_interface_cli }}"
+ register: output
+ ignore_errors: yes
+
+- name: Create VRFs
+ dellemc.enterprise_sonic.sonic_vrfs:
+ config: "{{ preparations_tests.vrfs }}"
+ state: merged
+ ignore_errors: yes
+
+- name: Create lag interfaces
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config: "{{ preparations_tests.lag_interfaces }}"
+ state: merged
+ ignore_errors: yes
+
+- name: Create VLANs
+ dellemc.enterprise_sonic.sonic_vlans:
+ config: "{{ preparations_tests.vlans }}"
+ state: merged
+ ignore_errors: yes
+
+- name: Configure l3 interfaces
+ dellemc.enterprise_sonic.sonic_l3_interfaces:
+ config: "{{ preparations_tests.l3_interfaces }}"
+ state: merged
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/tasks_template.yaml
new file mode 100644
index 000000000..c4a91213e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_relay/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_dhcp_relay:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/defaults/main.yml
new file mode 100644
index 000000000..bf3e70d0e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/defaults/main.yml
@@ -0,0 +1,133 @@
+---
+ansible_connection: httpapi
+module_name: sonic_dhcp_snooping
+
+preparations_tests:
+ init:
+ - 'interface vlan 1'
+ - 'interface vlan 2'
+ - 'interface vlan 3'
+ - 'interface vlan 4'
+ - 'interface portchannel 1'
+ - 'interface portchannel 2'
+ - 'interface portchannel 3'
+ cleanup:
+ - 'no interface vlan 1'
+ - 'no interface vlan 2'
+ - 'no interface vlan 3'
+ - 'no interface vlan 4'
+ - 'no interface portchannel 1'
+ - 'no interface portchannel 2'
+ - 'no interface portchannel 3'
+
+tests:
+ - name: test_case_01_merge_add
+ description: Add DHCPv4 and DHCPv6 snooping configuration
+ state: merged
+ input:
+ afis:
+ - afi: 'ipv4'
+ enabled: true
+ vlans:
+ - '1'
+ trusted:
+ - intf_name: 'Ethernet1'
+ source_bindings:
+ - mac_addr: "12:12:12:12:12:12"
+ ip_addr: "2.2.2.2"
+ vlan_id: 1
+ intf_name: "Ethernet3"
+ - afi: 'ipv6'
+ enabled: true
+ verify_mac: false
+ vlans:
+ - '2'
+ - '3'
+ trusted:
+ - intf_name: 'Ethernet2'
+ - intf_name: 'PortChannel1'
+ - name: test_case_02_merge_update
+ description: Update DHCPv4 and DHCPv6 snooping configuration
+ state: merged
+ input:
+ afis:
+ - afi: 'ipv4'
+ trusted:
+ - intf_name: 'Ethernet2'
+ source_bindings:
+ - mac_addr: "12:12:12:12:12:12"
+ ip_addr: "2.2.2.2"
+ vlan_id: 2
+ intf_name: 'Ethernet3'
+ - mac_addr: "14:14:14:14:14:14"
+ ip_addr: "4.4.4.4"
+ vlan_id: 4
+ intf_name: 'Ethernet4'
+ - afi: 'ipv6'
+ vlans:
+ - '2'
+ - '3'
+ - '4'
+ trusted:
+ - intf_name: 'PortChannel3'
+ - name: test_case_03_override
+ description: Override DHCPv4 and DHCPv6 snooping configuration
+ state: overridden
+ input:
+ afis:
+ - afi: 'ipv4'
+ enabled: true
+ vlans: ['1', '2']
+ source_bindings:
+ - mac_addr: "12:12:12:12:12:12"
+ ip_addr: "3.3.3.3"
+ vlan_id: 2
+ intf_name: 'Ethernet3'
+ trusted:
+ - intf_name: 'Ethernet1'
+ - intf_name: 'Ethernet2'
+ - afi: 'ipv6'
+ verify_mac: true
+ source_bindings:
+ - mac_addr: "12:12:12:12:12:12"
+ ip_addr: "2002::2"
+ vlan_id: 3
+ intf_name: 'Ethernet3'
+ trusted:
+ - intf_name: 'Ethernet1'
+ - name: test_case_04_replace
+ description: Replace DHCPv4 and DHCPv6 snooping configuration
+ state: replaced
+ input:
+ afis:
+ - afi: 'ipv4'
+ verify_mac: false
+ vlans: ['3']
+ trusted:
+ - intf_name: 'Ethernet1'
+ - intf_name: 'Ethernet2'
+ - afi: 'ipv6'
+ vlans: ['1', '4']
+ trusted:
+ - intf_name: 'PortChannel1'
+ - intf_name: 'PortChannel2'
+ enabled: true
+ - name: test_case_05_delete_afi
+ description: Delete DHCP snooping configuration for specific AFI
+ state: deleted
+ input:
+ afis:
+ - afi: 'ipv6'
+ - name: test_case_06_delete_subsettings
+ description: Delete DHCP snooping configuration for each section
+ state: deleted
+ input:
+ afis:
+ - afi: 'ipv4'
+ source_bindings: []
+ trusted: []
+ vlans: []
+ - name: test_case_07_delete_all
+ description: Delete all DHCP snooping configuration
+ state: deleted
+ input: {} \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..756783cef
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/cleanup_tests.yaml
@@ -0,0 +1,7 @@
+---
+- name: "cleanup dhcp test environment"
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.cleanup }}"
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/main.yml
new file mode 100644
index 000000000..bbd8846fc
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_dhcp_snooping Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..32038e703
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/preparation_tests.yaml
@@ -0,0 +1,12 @@
+---
+- name: "initialize dhcp test environment"
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.init }}"
+
+- name: Delete old DHCP, DHCPv6 snooping configurations
+ dellemc.enterprise_sonic.sonic_dhcp_snooping:
+ config: {}
+ state: deleted
+ # ignore_errors: yes \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/tasks_template.yaml
new file mode 100644
index 000000000..9b1339dc9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_dhcp_snooping/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_dhcp_snooping:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_dhcp_snooping:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml
index d391e690d..7bb6dc33f 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/defaults/main.yml
@@ -29,73 +29,94 @@ tests:
description: Update interface parameters
state: merged
input:
- - name: "{{ interface1 }}"
- description: ansible Ethernet4 descr
+ - name: "{{ interface7 }}"
+ description: ansible Ethernet96
mtu: 6445
enabled: false
- name: test_case_02
description: Update interface parameters
state: merged
input:
- - name: "{{ interface1 }}"
- description: ansible Ethernet4 descr
+ - name: "{{ interface7 }}"
+ description: ansible Ethernet96
mtu: 6444
enabled: true
- - name: "{{ interface3 }}"
- description: ansible Ethernet12 descr
+ - name: "{{ interface8 }}"
+ description: ansible Ethernet100
mtu: 6000
enabled: true
- - name: "{{ interface2 }}"
- description: ansible Ethernet8 descr
+ - name: "{{ interface10 }}"
+ description: ansible Ethernet108
mtu: 5666
enabled: false
- - name: "{{ interface4 }}"
- description: ansible Ethernet16 descr
+ - name: "{{ interface9 }}"
+ description: ansible Ethernet104
mtu: 5222
enabled: true
- name: test_case_03
- description: Update interface parameters
+ description: Configure interface parameter speed
+ state: merged
+ input:
+ - name: "{{ interface7 }}"
+ speed: SPEED_40GB
+ - name: test_case_04
+ description: Configure interface parameters auto_negotiate and advertised_speed
+ state: merged
+ input:
+ - name: "{{ interface10 }}"
+ auto_negotiate: true
+ advertised_speed:
+ - 100000
+ - 40000
+ - name: test_case_05
+ description: Configure interface parameter FEC
+ state: merged
+ input:
+ - name: "{{ interface8 }}"
+ fec: FEC_AUTO
+ - name: test_case_06
+ description: Delete interface parameters
state: deleted
input:
- - name: "{{ interface1 }}"
+ - name: "{{ interface7 }}"
description:
- - name: "{{ interface3 }}"
- mtu:
- - name: "{{ interface2 }}"
+ - name: "{{ interface8 }}"
+ mtu: 6000
+ - name: "{{ interface10 }}"
enabled:
- - name: "{{ interface4 }}"
- - name: test_case_04
+ - name: "{{ interface9 }}"
+ - name: test_case_07
description: Update interface parameters
state: merged
input:
- - name: "{{ interface1 }}"
- description: ansible Ethernet4 descr
+ - name: "{{ interface7 }}"
+ description: ansible Ethernet96
mtu: 6444
enabled: true
- - name: "{{ interface3 }}"
- description: ansible Ethernet12 descr
- - name: "{{ interface4 }}"
- description: ansible eth56 descr
+ - name: "{{ interface8 }}"
+ description: ansible Ethernet100
+ - name: "{{ interface9 }}"
+ description: ansible eth60
# Loopback test cases started
- - name: test_case_05
+ - name: test_case_08
description: Loopback interface parameters
state: merged
input:
- name: "{{ lo1 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os
- name: "{{ lo2 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os
- - name: test_case_06
+ - name: test_case_09
description: Loopback delete interface parameters
state: deleted
input:
- name: "{{ lo1 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os
- - name: test_case_07
+ - name: test_case_10
description: Loopback delete interface parameters
state: deleted
input:
- name: "{{ lo1 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os
- name: "{{ lo2 }}" # Loopback: mtu, desc, enabled not configurable in sonic-os
# Vlan test cases started
- - name: test_case_08
+ - name: test_case_11
description: Update interface parameters
state: merged
input:
@@ -103,14 +124,13 @@ tests:
mtu: 5000
- name: "Vlan{{ vlan2 }}"
mtu: 5001
- - name: test_case_09
- description: Update interface parameters
+ - name: test_case_12
+ description: Delete interface parameters
state: deleted
input:
- name: "Vlan{{ vlan1 }}"
- mtu:
- - name: "Vlan{{ vlan2 }}"
- - name: test_case_10
+ mtu: 5000
+ - name: test_case_13
description: Update interface parameters
state: merged
input:
@@ -119,7 +139,7 @@ tests:
- name: "Vlan{{ vlan2 }}"
mtu: 5113
# Portchannel testcase started
- - name: test_case_12
+ - name: test_case_14
description: Update interface parameters
state: merged
input:
@@ -130,13 +150,16 @@ tests:
description: ansible PortChannel51 descr
mtu: 5454
enabled: true
- - name: test_case_13
- description: Update interface parameters
+ - name: test_case_15
+ description: Delete interface parameters
state: deleted
input:
- name: "{{ po1 }}"
+ mtu: 3434
+ enabled: true
- name: "{{ po2 }}"
- - name: test_case_14
+ mtu: 5454
+ - name: test_case_16
description: Update interface parameters
state: merged
input:
@@ -147,7 +170,60 @@ tests:
description: ansible PortChannel51 descr
mtu: 5454
enabled: true
- - name: test_case_15
- description: Update interface parameters
- state: deleted
- input: []
+ - name: test_case_17
+ description: Update interface parameters descr and mtu
+ state: merged
+ input:
+ - name: "{{ interface7 }}"
+ description: Ansible Interface1 descr
+ mtu: 6500
+ enabled: true
+ - name: "{{ interface9 }}"
+ description: ansible Interface2 descr
+ mtu: 7500
+ enabled: true
+ - name: test_case_18
+ description: Replace interface mtu parameters
+ state: replaced
+ input:
+ - name: "{{ interface7 }}"
+ mtu: 3300
+ auto_negotiate: true
+ advertised_speed:
+ - 100000
+ - 40000
+ - name: "{{ interface9 }}"
+ mtu: 3300
+ auto_negotiate: true
+ advertised_speed:
+ - 100000
+ - name: test_case_19
+ description: Replace interface parameters
+ state: replaced
+ input:
+ - name: "{{ interface7 }}"
+ mtu: 3500
+ auto_negotiate: true
+ advertised_speed:
+ - 40000
+ - name: "{{ interface9 }}"
+ description: Ansible Interface2
+ mtu: 3500
+ auto_negotiate: true
+ advertised_speed:
+ - 100000
+ - 40000
+ - name: test_case_20
+ description: Replace interface parameters
+ state: replaced
+ input:
+ - name: "{{ interface9 }}"
+ description: Ansible Interface2
+ - name: test_case_21
+ description: Override interface parameters
+ state: overridden
+ input:
+ - name: "{{ interface7 }}"
+ description: Interface1
+ mtu: 3300
+ enabled: true
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml
index debf1a6ce..4b928adc7 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_interfaces/tasks/tasks_template.yaml
@@ -18,4 +18,4 @@
- import_role:
name: common
- tasks_from: idempotent.facts.report.yaml \ No newline at end of file
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/defaults/main.yml
new file mode 100644
index 000000000..b4d67019a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/defaults/main.yml
@@ -0,0 +1,41 @@
+---
+ansible_connection: httpapi
+module_name: ip_neighbor
+
+tests:
+ - name: test_case_01
+ description: Configure some IP neighbor global parameters
+ state: merged
+ input:
+ ipv4_arp_timeout: 1200
+ ipv4_drop_neighbor_aging_time: 600
+ - name: test_case_02
+ description: Configure some IP neighbor global parameters
+ state: merged
+ input:
+ ipv6_drop_neighbor_aging_time: 600
+ ipv6_nd_cache_expiry: 1200
+ num_local_neigh: 1000
+ - name: test_case_03
+ description: Delete some IP neighbor global parameters
+ state: deleted
+ input:
+ ipv4_arp_timeout: 0
+ ipv4_drop_neighbor_aging_time: 0
+ - name: test_case_04
+ description: Change some IP neighbor global parameters
+ state: replaced
+ input:
+ ipv4_drop_neighbor_aging_time: 800
+ ipv6_drop_neighbor_aging_time: 800
+ - name: test_case_05
+ description: Reset IP neighbor global configuration, and configure some
+ state: overridden
+ input:
+ ipv4_arp_timeout: 1800
+ ipv4_drop_neighbor_aging_time: 800
+ num_local_neigh: 1100
+ - name: test_case_06
+ description: Clean up - reset all IP neighbor global configuration
+ state: deleted
+ input: {}
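These global parameters can also be applied outside the regression harness. A minimal standalone task using the test_case_01 values, assuming the sonic_ip_neighbor module name implied by the role (sketch only):

# Sketch: apply the test_case_01 values directly, without the role's loop template.
- name: Configure IP neighbor global timers
  dellemc.enterprise_sonic.sonic_ip_neighbor:
    config:
      ipv4_arp_timeout: 1200
      ipv4_drop_neighbor_aging_time: 600
    state: merged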
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/meta/main.yaml
new file mode 100644
index 000000000..611fd54d2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common } \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/main.yml
new file mode 100644
index 000000000..9ed8dc1bf
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/main.yml
@@ -0,0 +1,10 @@
+- debug: msg="sonic_ip_neighbor Test started ..."
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: Display all variables/facts known for a host
+ debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
+
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/tasks_template.yaml
new file mode 100644
index 000000000..35548591e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ip_neighbor/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_ip_neighbor:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_ip_neighbor:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/defaults/main.yml
new file mode 100644
index 000000000..604aae22f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/defaults/main.yml
@@ -0,0 +1,214 @@
+---
+ansible_connection: httpapi
+module_name: l2_acls
+
+tests:
+ - name: test_case_01
+ description: Add L2 ACL
+ state: merged
+ input:
+ - name: 'acl1'
+ remark: 'L2 ACL 1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '11:11:11:11:11:11'
+ destination:
+ host: '11:11:11:22:22:22'
+ remark: 'Rule1'
+ - sequence_num: 2
+ action: 'transit'
+ source:
+ address: '00:00:10:00:00:00'
+ address_mask: '00:00:ff:ff:00:00'
+ destination:
+ any: true
+ vlan_id: 100
+ - sequence_num: 3
+ action: 'transit'
+ source:
+ any: true
+ destination:
+ address: '00:00:00:00:10:00'
+ address_mask: '00:00:00:00:ff:ff'
+ ethertype:
+ value: '0x0842'
+
+ - name: test_case_02
+ description: Add new rules to existing L2 ACL and add new L2 ACLs
+ state: merged
+ input:
+ - name: 'acl1'
+ remark: 'Remark_ACL1'
+ rules:
+ - sequence_num: 4
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ arp: true
+ - sequence_num: 5
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+ vlan_tag_format:
+ multi_tagged: true
+ remark: 'VLAN_multi_tagged'
+ - name: 'acl2'
+ remark: 'Remark_ACL2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv6: true
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ dei: 1
+ - sequence_num: 3
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ pcp:
+ value: 4
+ mask: 6
+ - sequence_num: 4
+ action: 'do-not-nat'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv4: true
+
+ - name: test_case_03
+ description: Replace specified L2 ACL with provided configuration and add new L2 ACL
+ state: replaced
+ input:
+ - name: 'acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ host: '11:11:11:22:22:22'
+ - sequence_num: 2
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv4: true
+ vlan_id: 100
+ - name: 'acl3'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '22:22:22:22:22:22'
+ destination:
+ any: true
+ pcp:
+ traffic_type: 'ca'
+ - sequence_num: 2
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ remark: 'Deny_All'
+
+ - name: test_case_04
+ description: Override existing L2 ACL configuration with provided L2 ACL configuration
+ state: overridden
+ input:
+ - name: 'acl1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '33:33:33:33:33:33'
+ destination:
+ host: '44:44:44:44:44:44'
+ - name: 'test-acl'
+ remark: 'test_mac_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '22:22:22:22:22:22'
+ destination:
+ any: true
+ vlan_id: 20
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ value: '0x88cc'
+ remark: 'LLDP'
+ - sequence_num: 3
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ address: '00:00:10:00:00:00'
+ address_mask: '00:00:ff:ff:00:00'
+ pcp:
+ value: 4
+ mask: 6
+ - name: 'test-acl-1'
+ remark: 'test_mac_acl_1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ arp: true
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+
+ - name: test_case_05
+ description: Delete specified L2 ACLs, ACL remark and ACL rule entries
+ state: deleted
+ input:
+ - name: 'acl1'
+ - name: 'test-acl'
+ rules:
+ - sequence_num: 3
+ - name: 'test-acl-1'
+ remark: 'test_mac_acl_1'
+ rules:
+ - sequence_num: 2
+
+ - name: test_case_06
+ description: Delete all L2 ACLs
+ state: deleted
+ input: []
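For reference, the role's tasks_template (added below) renders each entry above as a single sonic_l2_acls task. A minimal standalone sketch of test_case_01, assuming a host reachable over the httpapi connection declared at the top of this file, would be:

    - name: Add L2 ACL
      dellemc.enterprise_sonic.sonic_l2_acls:
        config:
          - name: 'acl1'
            remark: 'L2 ACL 1'
            rules:
              - sequence_num: 1
                action: 'permit'        # permit frames matching the source/destination hosts
                source:
                  host: '11:11:11:11:11:11'
                destination:
                  host: '11:11:11:22:22:22'
                remark: 'Rule1'
        state: merged                   # merged adds this ACL without disturbing other ACLs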
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..646e0bfa9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/cleanup_tests.yaml
@@ -0,0 +1,6 @@
+---
+- name: Delete L2 ACLs
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/main.yml
new file mode 100644
index 000000000..bf07d028a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_l2_acls Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..ceab447fc
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/preparation_tests.yaml
@@ -0,0 +1,18 @@
+---
+- name: Delete old interface access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete old L2 ACLs
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete old L3 ACLs
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/tasks_template.yaml
new file mode 100644
index 000000000..19fb375fd
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_acls/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml
index 8117a89cc..b24cec7bd 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l2_interfaces/defaults/main.yml
@@ -13,6 +13,48 @@ preparations_tests:
- vlan_id: 400
- vlan_id: 401
- vlan_id: 402
+ - vlan_id: 605
+ - vlan_id: 606
+ - vlan_id: 607
+ - vlan_id: 609
+ - vlan_id: 610
+ - vlan_id: 611
+ - vlan_id: 612
+ - vlan_id: 613
+ - vlan_id: 615
+ - vlan_id: 616
+ - vlan_id: 617
+ - vlan_id: 619
+ - vlan_id: 620
+ - vlan_id: 621
+ - vlan_id: 622
+ - vlan_id: 624
+ - vlan_id: 625
+ - vlan_id: 626
+ - vlan_id: 627
+ - vlan_id: 628
+ - vlan_id: 629
+ - vlan_id: 634
+ - vlan_id: 635
+ - vlan_id: 636
+ - vlan_id: 637
+ - vlan_id: 639
+ - vlan_id: 640
+ - vlan_id: 642
+ - vlan_id: 643
+ - vlan_id: 644
+ - vlan_id: 646
+ - vlan_id: 647
+ - vlan_id: 649
+ - vlan_id: 650
+ - vlan_id: 651
+ - vlan_id: 653
+ - vlan_id: 654
+ - vlan_id: 655
+ - vlan_id: 656
+ - vlan_id: 658
+ - vlan_id: 659
+ - vlan_id: 660
delete_port_configurations:
- name: "{{ interface1 }}"
- name: "{{ interface2 }}"
@@ -61,14 +103,73 @@ tests:
- vlan: 503
access:
vlan: 402
- # delete test cases started
- name: test_case_03
+ description: Add trunk vlan range base config
+ state: merged
+ input:
+ - name: "{{ interface5 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 605
+ - vlan: 611
+ - vlan: 617
+ - vlan: 619-620
+ - vlan: 626-627
+ - vlan: 636-637
+ - vlan: 639
+ - vlan: 643
+ - vlan: 647
+ - vlan: 649-650
+ - vlan: 654-655
+ - vlan: 659-660
+ access:
+ vlan: 611
+ - name: test_case_04
+ description: Add trunk vlan range overlay lower half
+ state: merged
+ input:
+ - name: "{{ interface5 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 605-607
+ - vlan: 609-613
+ - vlan: 615-617
+ - vlan: 619-622
+ - vlan: 624-629
+ - vlan: 634-637
+ - vlan: 639-640
+ access:
+ vlan: 611
+ - name: test_case_05
+ description: Add trunk vlan range overlay all
+ state: merged
+ input:
+ - name: "{{ interface5 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 605-607
+ - vlan: 609-613
+ - vlan: 615-617
+ - vlan: 619-622
+ - vlan: 624-629
+ - vlan: 634-637
+ - vlan: 639-640
+ - vlan: 642-644
+ - vlan: 646-647
+ - vlan: 649-651
+ - vlan: 653-656
+ - vlan: 658-660
+ access:
+ vlan: 611
+ # delete test cases started
+ - name: test_case_06
description: Delete Access VLAN
state: deleted
input:
- name: "{{ interface1 }}"
access:
- - name: test_case_04
+ vlan:
+ - name: test_case_07
description: Delete specific trunk VLANs
state: deleted
input:
@@ -76,26 +177,147 @@ tests:
trunk:
allowed_vlans:
- vlan: 502
- - name: test_case_05
+ - name: test_case_08
description: Delete access VLANs from both associations
state: deleted
input:
- name: "{{ interface3 }}"
access:
vlan:
- - name: test_case_06
+ - name: test_case_09
description: Delete all trunk VLANs
state: deleted
input:
- name: "{{ interface3 }}"
trunk:
allowed_vlans:
- - name: test_case_07
+ - name: test_case_10
description: Delete all associations in specific interface
state: deleted
input:
- name: "{{ interface2 }}"
- - name: test_case_08
+ - name: test_case_11
+ description: Delete trunk vlan range overlay base
+ state: deleted
+ input:
+ - name: "{{ interface5 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 605
+ - vlan: 611
+ - vlan: 617
+ - vlan: 619-620
+ - vlan: 626-627
+ - vlan: 636-637
+ - vlan: 639
+ - vlan: 643
+ - vlan: 647
+ - vlan: 649-650
+ - vlan: 654-655
+ - vlan: 659-660
+ access:
+ vlan: 611
+ - name: test_case_12
+ description: Delete trunk vlan range overlay lower half
+ state: deleted
+ input:
+ - name: "{{ interface5 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 605-607
+ - vlan: 609-613
+ - vlan: 615-617
+ - vlan: 619-622
+ - vlan: 624-629
+ - vlan: 634-637
+ - vlan: 639-640
+ access:
+ vlan: 611
+ - name: test_case_13
+ description: Delete trunk vlan range overlay all
+ state: deleted
+ input:
+ - name: "{{ interface5 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 605-607
+ - vlan: 609-613
+ - vlan: 615-617
+ - vlan: 619-622
+ - vlan: 624-629
+ - vlan: 634-637
+ - vlan: 639-640
+ - vlan: 642-644
+ - vlan: 646-647
+ - vlan: 649-651
+ - vlan: 653-656
+ - vlan: 658-660
+ access:
+ vlan: 611
+ # Base config for replace test case
+ - name: test_case_14
+ description: Add access and trunk VLANs
+ state: merged
+ input:
+ - name: "{{ interface1 }}"
+ access:
+ vlan: 400
+ trunk:
+ allowed_vlans:
+ - vlan: 600-650
+ - name: "{{ interface2 }}"
+ access:
+ vlan: 400
+ trunk:
+ allowed_vlans:
+ - vlan: 600-650
+ - name: "{{ interface3 }}"
+ access:
+ vlan: 400
+ trunk:
+ allowed_vlans:
+ - vlan: 600-650
+ - name: test_case_15
+ description: Replace access and trunk VLANs
+ state: replaced
+ input:
+ - name: "{{ interface2 }}"
+ trunk:
+ allowed_vlans:
+ - vlan: 500
+ - vlan: 502-505
+ - vlan: 620-630
+ - name: "{{ interface3 }}"
+ access:
+ vlan: 405
+ - name: PortChannel100
+ - name: PortChannel101
+ access:
+ vlan: 400
+ trunk:
+ allowed_vlans:
+ - vlan: 620-630
+ - name: test_case_16
+ description: Override access and trunk VLANs
+ state: overridden
+ input:
+ - name: "{{ interface1 }}"
+ access:
+ vlan: 400
+ trunk:
+ allowed_vlans:
+ - vlan: 500-505
+ - vlan: 600-605
+ - name: "{{ interface2 }}"
+ access:
+ vlan: 400
+ - name: PortChannel100
+ trunk:
+ allowed_vlans:
+ - vlan: 500
+ - vlan: 600
+ - vlan: 605
+ - name: test_case_17
description: Delete All associations in all interfaces
state: deleted
input: []
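The new trunk test cases above exercise the module's VLAN range notation (for example "vlan: 619-620"); a hedged sketch of one merged task, with the interface name as a placeholder for "{{ interface5 }}", is:

    - name: Add trunk VLAN ranges
      dellemc.enterprise_sonic.sonic_l2_interfaces:
        config:
          - name: Ethernet20            # placeholder for "{{ interface5 }}"
            access:
              vlan: 611                 # untagged VLAN
            trunk:
              allowed_vlans:
                - vlan: 605             # single VLAN
                - vlan: 619-620         # contiguous range
        state: merged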
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/defaults/main.yml
new file mode 100644
index 000000000..2c3c23b3d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/defaults/main.yml
@@ -0,0 +1,476 @@
+---
+ansible_connection: httpapi
+module_name: l3_acls
+
+tests:
+ - name: test_case_01
+ description: Add IPv4 ACL
+ state: merged
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl1'
+ remark: 'IPv4 ACL 1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ host: '192.168.1.2'
+ destination:
+ host: '192.168.2.2'
+ remark: 'Host-Rule'
+ - sequence_num: 2
+ action: 'transit'
+ protocol:
+ name: 'icmp'
+ source:
+ prefix: '192.168.0.0/16'
+ destination:
+ any: true
+ protocol_options:
+ icmp:
+ type: 8
+ vlan_id: 100
+
+ - name: test_case_02
+ description: Add IPv6 ACL
+ state: merged
+ input:
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ remark: 'IPv6 ACL 1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ipv6'
+ source:
+ prefix: '192::/64'
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ port_number:
+ gt: 1024
+ destination:
+ host: '192::2'
+ port_number:
+ eq: 80
+
+ - name: test_case_03
+ description: Add new rules to existing L3 ACLs and add new L3 ACLs
+ state: merged
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl1'
+ rules:
+ - sequence_num: 3
+ action: 'deny'
+ protocol:
+ number: 17
+ source:
+ host: '192.168.1.2'
+ destination:
+ prefix: '192.168.1.0/24'
+ port_number:
+ lt: 1024
+ remark: "Drop UDP"
+ - name: 'ip-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ ack: true
+ syn: true
+ fin: true
+ - sequence_num: 2
+ action: 'permit'
+ protocol:
+ number: 2
+ source:
+ any: true
+ destination:
+ any: true
+ dscp:
+ voice_admit: true
+ - sequence_num: 3
+ action: 'discard'
+ protocol:
+ name: 'icmp'
+ source:
+ any: true
+ destination:
+ any: true
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ rules:
+ - sequence_num: 3
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ destination:
+ host: '100::1'
+ port_number:
+ range:
+ begin: 1024
+ end: 2048
+ - name: 'ipv6-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'icmpv6'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ icmpv6:
+ type: 128
+ code: 0
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ipv6'
+ source:
+ host: '100::1'
+ destination:
+ any: true
+ vlan_id: 200
+
+ - name: test_case_04
+ description: Replace specified L3 ACLs with provided configuration and add new L3 ACLs
+ state: replaced
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ established: true
+ remark: 'TCP established'
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+ - name: 'ip-acl3'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ host: '192.168.2.2'
+ dscp:
+ value: 4
+ - sequence_num: 2
+ action: 'discard'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ psh: true
+ urg: true
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ host: '100::2'
+ port_number:
+ eq: 80
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ port_number:
+ gt: 1024
+ destination:
+ any: true
+ port_number:
+ gt: 1024
+
+ - name: test_case_05
+ description: Override existing L3 ACL configuration with provided L3 ACL configuration
+ state: overridden
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'icmp'
+ source:
+ host: '100.1.1.2'
+ destination:
+ host: '100.1.2.2'
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+ - name: 'test-acl-ipv4-udp'
+ remark: 'UDP'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ port_number:
+ lt: 1000
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'discard'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ destination:
+ any: true
+
+ - name: test_case_06
+ description: Add new L3 ACLs
+ state: merged
+ input:
+ - address_family: 'ipv6'
+ acls:
+ - name: 'test-acl-ipv6'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ipv6'
+ source:
+ any: true
+ destination:
+ any: true
+ dscp:
+ value: 8
+ - sequence_num: 2
+ action: 'discard'
+ protocol:
+ name: 'ipv6'
+ source:
+ any: true
+ destination:
+ any: true
+ vlan_id: 100
+ - name: 'test-acl-ipv6-tcp'
+ remark: 'TCP'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ prefix: '200::/64'
+ port_number:
+ range:
+ begin: 3000
+ end: 10000
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ not_psh: true
+ not_urg: true
+
+ - name: test_case_07
+ description: Delete specified L3 ACLs, ACL remark and ACL rule entries
+ state: deleted
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl'
+ - name: 'test-acl-ipv4-udp'
+ rules:
+ - sequence_num: 2
+ - address_family: 'ipv6'
+ acls:
+ - name: 'test-acl-ipv6-tcp'
+ remark: 'TCP'
+ rules:
+ - sequence_num: 1
+
+ - name: test_case_08
+ description: Add new L3 ACLs
+ state: merged
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'test-acl-ipv4'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ prefix: '100.1.1.0/24'
+ destination:
+ prefix: '100.1.2.0/24'
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+
+ - name: test_case_09
+ description: Delete all IPv6 ACLs
+ state: deleted
+ input:
+ - address_family: 'ipv6'
+
+ - name: test_case_10
+ description: Add new IPv6 ACLs
+ state: merged
+ input:
+ - address_family: 'ipv6'
+ acls:
+ - name: 'acl-IPv6'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ipv6'
+ source:
+ prefix: '100:1:1::/64'
+ destination:
+ prefix: '100:1:2::/64'
+ vlan_id: 10
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ prefix: '100:1:1::/64'
+ port_number:
+ gt: 200
+ destination:
+ prefix: '100:1:2::/64'
+ port_number:
+ gt: 200
+ - name: 'acl-IPv6-1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'icmpv6'
+ source:
+ any: true
+ destination:
+ any: true
+
+ - name: test_case_11
+ description: Delete all IPv4 ACLs
+ state: deleted
+ input:
+ - address_family: 'ipv4'
+
+ - name: test_case_12
+ description: Add new IPv4 ACLs
+ state: merged
+ input:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'acl-IPv4'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ prefix: '100.0.0.0/8'
+ destination:
+ host: '101.1.1.2'
+ port_number:
+ eq: 80
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+ - name: 'acl-IPv4-1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ any: true
+ dscp:
+ af11: true
+
+ - name: test_case_13
+ description: Delete all L3 ACLs
+ state: deleted
+ input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..297ad05a2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/cleanup_tests.yaml
@@ -0,0 +1,6 @@
+---
+- name: Delete L3 ACLs
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/main.yml
new file mode 100644
index 000000000..dffa26c5d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_l3_acls Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..ceab447fc
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/preparation_tests.yaml
@@ -0,0 +1,18 @@
+---
+- name: Delete old interface access-group configurations
+ dellemc.enterprise_sonic.sonic_acl_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete old L2 ACLs
+ dellemc.enterprise_sonic.sonic_l2_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete old L3 ACLs
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/tasks_template.yaml
new file mode 100644
index 000000000..a57059008
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_acls/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_l3_acls:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml
index de6322049..5c5d5a74d 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/defaults/main.yml
@@ -6,8 +6,6 @@ preparations_tests:
delete_port_configurations:
- name: "{{ interface1 }}"
- name: "{{ interface2 }}"
- - name: "{{ interface3 }}"
- - name: "{{ interface4 }}"
init_loopback:
- "interface Loopback 100"
- "interface Loopback 101"
@@ -149,24 +147,28 @@ tests:
- address: 92::1/16
- address: 93::1/16
- name: test_case_07
- description: Update interface parameters
+ description: Delete interface addresses
state: deleted
input:
- name: "{{ interface1 }}"
ipv4:
addresses:
- - address: 82.1.1.1/16
+ - address: 83.1.1.1/16
- name: "{{ interface2 }}"
ipv6:
enabled: false
addresses:
- address: 91::1/16
+ - name: Loopback100
+ ipv4:
+ addresses:
+ - address: 103.1.1.1/32
- name: vlan 102
ipv4:
anycast_addresses:
- 1.1.1.1/16
- name: test_case_08
- description: Update interface parameters
+ description: Delete interface parameters
state: deleted
input:
- name: "{{ interface1 }}"
@@ -201,7 +203,7 @@ tests:
- address: 3041::1/16
- address: 3042::1/16
- name: test_case_10
- description: Naming tests
+ description: Update interface parameters
state: merged
input:
- name: vlan 501
@@ -232,13 +234,109 @@ tests:
- address: 3051::1/16
- address: 3052::1/16
- name: test_case_11
- description: Naming tests
+ description: Update l3 interface parameters
+ state: merged
+ input:
+ - name: '{{ interface1 }}'
+ ipv6:
+ enabled: true
+ addresses:
+ - address: 213::1/64
+ ipv4:
+ addresses:
+ - address: 213.1.1.1/24
+ - name: '{{ interface3 }}'
+ ipv4:
+ addresses:
+ - address: 215.1.1.1/24
+ - address: 216.1.1.1/24
+ secondary: true
+ - name: test_case_12
+ description: Replace interface parameters
+ state: replaced
+ input:
+ - name: '{{ interface2 }}'
+ ipv6:
+ enabled: true
+ addresses:
+ - address: 251::1/64
+ - address: 252::1/64
+ - name: '{{ interface3 }}'
+ ipv4:
+ addresses:
+ - address: 222.1.1.1/24
+ - name: vlan 101
+ ipv4:
+ anycast_addresses:
+ - 15.16.17.18/16
+ - name: test_case_13
+ description: Replace interface parameters
+ state: replaced
+ input:
+ - name: vlan 501
+ ipv4:
+ addresses:
+ - address: 105.2.2.2/16
+ - name: test_case_14
+ description: Override interface parameters
+ state: overridden
+ input:
+ - name: vlan 501
+ ipv6:
+ enabled: true
+ addresses:
+ - address: 1053::1/64
+ - address: 1054::1/64
+ - address: 1055::1/64
+ - address: 1056::2/64
+ - name: Loopback101
+ ipv4:
+ addresses:
+ - address: 152.1.1.1/32
+ - address: 153.1.1.1/32
+ secondary: true
+ - name: vlan 100
+ ipv4:
+ anycast_addresses:
+ - 20.21.22.23/16
+ - 85.1.1.12/16
+ ipv6:
+ enabled: false
+ - name: test_case_15
+ description: Override2 interface parameters
+ state: overridden
+ input:
+ - name: vlan 501
+ ipv6:
+ enabled: true
+ addresses:
+ - address: 1053::1/64
+ - address: 1054::1/64
+ - address: 1055::1/64
+ - address: 1056::2/64
+ - address: 1057::2/64
+ - name: Loopback101
+ ipv4:
+ addresses:
+ - address: 152.1.1.1/32
+ - address: 153.1.1.1/32
+ secondary: true
+ - name: vlan 100
+ ipv4:
+ anycast_addresses:
+ - 20.21.22.23/16
+ - 85.1.1.12/16
+ ipv6:
+ enabled: false
+ - name: test_case_16
+ description: Delete all interfaces config
state: deleted
input:
+ - name: vlan 100
+ - name: vlan 101
+ - name: vlan 102
- name: vlan 501
- name: lo101
+ - name: lo102
- name: portchannel 100 # po100 or portchannel100
- - name: test_case_12
- description: Update interface parameters
- state: deleted
- input: []
+ - name: portchannel 101
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml
index 66700d53e..48fc64d5c 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_l3_interfaces/tasks/preparation_tests.yaml
@@ -8,11 +8,6 @@
config: []
state: deleted
ignore_errors: yes
-- name: Deletes old l3 interfaces
- sonic_l3_interfaces:
- config: []
- state: deleted
- ignore_errors: yes
- name: "initialize default interfaces"
vars:
ansible_connection: network_cli
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml
index 3f77cabab..c320445e6 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lag_interfaces/defaults/main.yml
@@ -8,6 +8,8 @@ preparations_tests:
- name: "{{ interface2 }}"
- name: "{{ interface3 }}"
- name: "{{ interface4 }}"
+ - name: "{{ interface5 }}"
+ - name: "{{ interface6 }}"
tests:
# Ethernet testcases started...
@@ -77,13 +79,49 @@ tests:
interfaces:
- member: "{{ interface3 }}"
- name: test_case_06
+ description: Replace portchannel configuration
+ state: replaced
+ input:
+ - name: portchannel 40
+ members:
+ interfaces:
+ - member: "{{ interface5 }}"
+ - name: po41
+ members:
+ interfaces:
+ - member: "{{ interface6 }}"
+ - name: test_case_07
+ description: Override portchannel configuration
+ state: overridden
+ input:
+ - name: portchannel 40
+ members:
+ interfaces:
+ - member: "{{ interface1 }}"
+ - name: po41
+ members:
+ interfaces:
+ - member: "{{ interface2 }}"
+ - name: test_case_08
+ description: Override all portchannel configuration
+ state: overridden
+ input:
+ - name: portchannel 42
+ members:
+ interfaces:
+ - member: "{{ interface5 }}"
+ - name: po43
+ members:
+ interfaces:
+ - member: "{{ interface6 }}"
+ - name: test_case_09
description: Create standalone portchannels
state: merged
input:
- name: portchannel42
- name: portchannel 12
- name: po10
- - name: test_case_07
+ - name: test_case_10
description: Update interface parameters
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/defaults/main.yml
new file mode 100644
index 000000000..ab33abe28
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/defaults/main.yml
@@ -0,0 +1,47 @@
+---
+ansible_connection: httpapi
+module_name: lldp_global
+
+
+tests:
+ - name: test_case_01
+ description: Add Global LLDP configuration
+ state: merged
+ input:
+ enable: true
+ multiplier: 9
+ system_name: CR_sonic
+ hello_time: 18
+ mode: receive
+ system_description: Sonic_CR1_device
+ tlv_select:
+ system_capabilities: false
+
+ - name: test_case_02
+ description: Update Global LLDP configuration
+ state: merged
+ input:
+ multiplier: 10
+ system_name: CR1_sonic
+ hello_time: 200
+ tlv_select:
+ system_capabilities: true
+
+ - name: test_case_03
+ description: Delete specific global lldp configurations
+ state: deleted
+ input:
+ multiplier: 10
+ system_name: CR1_sonic
+
+ - name: test_case_04
+ description: Delete default global lldp configurations
+ state: deleted
+ input:
+ tlv_select:
+ system_capabilities: true
+
+test_delete_all:
+ - name: del_all_test_case_05
+ description: Delete all global lldp configurations
+ state: deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..e2ca40667
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/cleanup_tests.yaml
@@ -0,0 +1,5 @@
+- name: Delete global LLDP configurations
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/main.yml
new file mode 100644
index 000000000..a5c0b020c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_lldp_global Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "test_delete_all {{ module_name }} stated ..."
+ ansible.builtin.include_tasks: tasks_template_del.yaml
+ loop: "{{ test_delete_all }}"
+ when: test_delete_all is defined
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..aaadc7dc2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/preparation_tests.yaml
@@ -0,0 +1,6 @@
+---
+- name: Delete old global LLDP configurations
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config: {}
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template.yaml
new file mode 100644
index 000000000..9e90e5468
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template_del.yaml
new file mode 100644
index 000000000..383e72b84
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_lldp_global/tasks/tasks_template_del.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_lldp_global:
+ config:
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/defaults/main.yml
new file mode 100644
index 000000000..456635fcb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/defaults/main.yml
@@ -0,0 +1,127 @@
+---
+ansible_connection: httpapi
+module_name: logging
+
+po1: Portchannel 100
+vlan1: Vlan 100
+lo1: Loopback 100
+mgmt: Management 0
+
+logging_ip_server_1: 10.11.0.1
+logging_ip_server_2: 10.11.0.2
+logging_ip_server_3: 10.11.0.3
+logging_ip_server_4: 10.11.0.4
+logging_ip_server_5: 10.11.0.5
+logging_ip_server_6: 10.11.0.6
+logging_host_server: logging.dell.com
+
+tests:
+ - name: test_case_01
+ description: Create a logging remote server
+ state: merged
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_1 }}"
+ source_interface: "{{ interface1 }}"
+ remote_port: 616
+ message_type: event
+ vrf: Vrf_logging_1
+
+ - name: test_case_02
+ description: Create several logging remote servers
+ state: merged
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_2 }}"
+ source_interface: "{{ po1 }}"
+ - host: "{{ logging_ip_server_3 }}"
+ source_interface: "{{ vlan1 }}"
+ remote_port: 818
+ message_type: event
+ vrf: Vrf_logging_2
+ - host: "{{ logging_ip_server_4 }}"
+ source_interface: "{{ mgmt }}"
+ message_type: event
+ vrf: Vrf_logging_1
+ - host: "{{ logging_host_server }}"
+ source_interface: "{{ lo1 }}"
+ message_type: log
+ vrf: Vrf_logging_1
+
+ - name: test_case_03
+ description: Replace logging remote server
+ state: replaced
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_3 }}"
+ source_interface: "{{ interface2 }}"
+ remote_port: 838
+
+ - name: test_case_04
+ description: Replace logging remote servers
+ state: replaced
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_3 }}"
+ source_interface: "{{ interface3 }}"
+ remote_port: 838
+ message_type: event
+ - host: "{{ logging_ip_server_4 }}"
+ source_interface: "{{ interface4 }}"
+ - host: "{{ logging_ip_server_6 }}"
+ source_interface: "{{ interface4 }}"
+ remote_port: 868
+ message_type: event
+
+ - name: test_case_05
+ description: Overridden logging remote servers
+ state: overridden
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_2 }}"
+ source_interface: "{{ interface3 }}"
+ - host: "{{ logging_ip_server_5 }}"
+ source_interface: "{{ interface4 }}"
+ remote_port: 858
+ message_type: event
+
+ - name: test_case_06
+ description: Create several logging remote servers
+ state: merged
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_1 }}"
+ source_interface: "{{ interface1 }}"
+ remote_port: 616
+ message_type: event
+ vrf: Vrf_logging_1
+ - host: "{{ logging_ip_server_3 }}"
+ source_interface: "{{ vlan1 }}"
+ remote_port: 818
+ message_type: event
+ vrf: Vrf_logging_2
+ - host: "{{ logging_host_server }}"
+ source_interface: "{{ lo1 }}"
+ message_type: log
+ vrf: Vrf_logging_1
+
+ - name: test_case_07
+ description: Delete a logging remote server
+ state: deleted
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_1 }}"
+
+ - name: test_case_08
+ description: Delete several logging remote servers
+ state: deleted
+ input:
+ remote_servers:
+ - host: "{{ logging_ip_server_2 }}"
+ - host: "{{ logging_ip_server_3 }}"
+ - host: "{{ logging_host_server }}"
+
+ - name: test_case_09
+ description: Delete all logging configurations
+ state: deleted
+ input: {}
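Each entry above is likewise fed to one sonic_logging task by the role's tasks_template; a minimal hedged sketch of test_case_01, with the source interface as a placeholder for "{{ interface1 }}", is:

    - name: Create a logging remote server
      dellemc.enterprise_sonic.sonic_logging:
        config:
          remote_servers:
            - host: 10.11.0.1               # logging_ip_server_1
              source_interface: Ethernet0   # placeholder for "{{ interface1 }}"
              remote_port: 616
              message_type: event
              vrf: Vrf_logging_1
        state: merged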
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/meta/main.yaml
new file mode 100644
index 000000000..611fd54d2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common } \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..c9800850d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/cleanup_tests.yaml
@@ -0,0 +1,28 @@
+- name: Delete loopback
+ sonic_interfaces:
+ config:
+ - name: Loopback100
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete portchannel
+ sonic_lag_interfaces:
+ config:
+ - name: PortChannel100
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete vlan
+ sonic_vlans:
+ config:
+ - vlan_id: 100
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete VRFs
+ sonic_vrfs:
+ config:
+ - name: Vrf_logging_1
+ - name: Vrf_logging_2
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/main.yml
new file mode 100644
index 000000000..63ee885ce
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/main.yml
@@ -0,0 +1,16 @@
+- debug: msg="sonic_logging Test started ..."
+
+- name: Preparations tests
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup test {{ module_name }} started"
+ include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
+
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..f360925d6
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/preparation_tests.yaml
@@ -0,0 +1,34 @@
+- name: Delete existing logging configurations
+ sonic_logging:
+ config: {}
+ state: deleted
+ ignore_errors: yes
+
+- name: Initialize loopback
+ sonic_interfaces:
+ config:
+ - name: Loopback100
+ state: merged
+ ignore_errors: yes
+
+- name: Initialize portchannel
+ sonic_lag_interfaces:
+ config:
+ - name: PortChannel100
+ state: merged
+ ignore_errors: yes
+
+- name: Initialize vlan
+ sonic_vlans:
+ config:
+ - vlan_id: 100
+ state: merged
+ ignore_errors: yes
+
+- name: Create VRFs
+ sonic_vrfs:
+ config:
+ - name: Vrf_logging_1
+ - name: Vrf_logging_2
+ state: merged
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/tasks_template.yaml
new file mode 100644
index 000000000..7ca17b24b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_logging/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_logging:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_logging:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/defaults/main.yml
new file mode 100644
index 000000000..fd092beab
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/defaults/main.yml
@@ -0,0 +1,101 @@
+---
+ansible_connection: httpapi
+module_name: sonic_mac
+
+vrf_1: VrfReg1
+
+preparations_tests:
+ init_vlan:
+ - 'interface Vlan 1'
+ - 'interface Vlan 2'
+ - 'interface Vlan 3'
+tests:
+ - name: test_case_01
+ description: Configure MACs
+ state: merged
+ input:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 50
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 1
+ interface: '{{interface1}}'
+ - mac_address: '00:33:33:33:33:33'
+ vlan_id: 2
+ interface: '{{interface2}}'
+ - mac_address: '00:00:4e:00:24:af'
+ vlan_id: 3
+ interface: '{{interface3}}'
+ - name: test_case_02
+ description: Modify MAC configurations
+ state: merged
+ input:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 25
+ dampening_interval: 10
+ dampening_threshold: 40
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 1
+ interface: '{{interface2}}'
+ - mac_address: '00:33:33:33:33:33'
+ vlan_id: 2
+ interface: '{{interface1}}'
+ - name: test_case_03
+ description: Replace MAC configurations
+ state: replaced
+ input:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 45
+ dampening_interval: 30
+ dampening_threshold: 60
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 1
+ interface: '{{interface1}}'
+ - mac_address: '00:44:44:44:44:44'
+ vlan_id: 2
+ interface: '{{interface3}}'
+ - name: test_case_04
+ description: Override MAC configurations
+ state: overridden
+ input:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 10
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:11:11:11:11:11'
+ vlan_id: 1
+ interface: '{{interface1}}'
+ - mac_address: '00:22:22:22:22:22'
+ vlan_id: 2
+ interface: '{{interface2}}'
+ - mac_address: '00:00:33:33:33:33'
+ vlan_id: 3
+ interface: '{{interface3}}'
+ - name: test_case_05
+ description: Delete MAC configurations
+ state: deleted
+ input:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 10
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:11:11:11:11:11'
+ vlan_id: 1
+ interface: '{{interface1}}'
+ - mac_address: '00:00:33:33:33:33'
+ vlan_id: 3
+ - name: test_case_06
+ description: Delete all MAC configurations
+ state: deleted
+ input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/meta/main.yaml
new file mode 100644
index 000000000..0b356217e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..bcb4847a8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/cleanup_tests.yaml
@@ -0,0 +1,5 @@
+- name: Delete trunk Vlans
+ sonic_l2_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/main.yml
new file mode 100644
index 000000000..b4e9498f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: msg="sonic_mac test started ..."
+
+- set_fact:
+ base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
+
+- name: Preparation tests
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: Cleanup tests
+ include_tasks: cleanup_tests.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..7dcd8a9a9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/preparation_tests.yaml
@@ -0,0 +1,37 @@
+- name: Initialize Vlans
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.init_vlan }}"
+
+- name: Add trunk Vlans
+ sonic_l2_interfaces:
+ config:
+ - name: "{{interface1}}"
+ trunk:
+ allowed_vlans:
+ - vlan: 1
+ - vlan: 2
+ - name: "{{interface2}}"
+ trunk:
+ allowed_vlans:
+ - vlan: 1
+ - vlan: 2
+ - vlan: 3
+ - name: "{{interface3}}"
+ trunk:
+ allowed_vlans:
+ - vlan: 2
+ - vlan: 3
+
+- name: Delete VRF configurations
+ sonic_vrfs:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete MAC configurations
+ sonic_mac:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/tasks_template.yaml
new file mode 100644
index 000000000..8bf357322
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mac/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name }} , {{ item.description }}"
+ sonic_mac:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ sonic_mac:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml
index a2df2d365..743ae9bec 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/defaults/main.yml
@@ -7,6 +7,21 @@ preparations_tests:
- vlan_id: 5
- vlan_id: 6
- vlan_id: 2
+ - vlan_id: 11
+ - vlan_id: 12
+ - vlan_id: 13
+ - vlan_id: 14
+ - vlan_id: 15
+ - vlan_id: 21
+ - vlan_id: 22
+ - vlan_id: 23
+ - vlan_id: 24
+ - vlan_id: 25
+ - vlan_id: 31
+ - vlan_id: 32
+ - vlan_id: 33
+ - vlan_id: 34
+ - vlan_id: 35
add_lag_interfaces:
- name: Po10
- name: Po11
@@ -24,7 +39,9 @@ merged_tests:
peer_link: "{{ interface1 }}"
keepalive: 3
session_timeout: 300
+ delay_restore: 480
system_mac: 00:00:00:01:01:01
+ gateway_mac: 00:00:00:02:02:02
- name: test_case_02
description: Update created MCLAG properties
@@ -36,6 +53,7 @@ merged_tests:
peer_link: "{{ interface2 }}"
keepalive: 2
session_timeout: 350
+ delay_restore: 600
system_mac: 00:00:00:11:11:11
- name: test_case_03
@@ -47,11 +65,37 @@ merged_tests:
vlans:
- vlan: vlan4
- vlan: vlan5
+ - vlan: vlan12-13
+ - vlan: vlan21-22
+ - vlan: vlan35
+ peer_gateway:
+ vlans:
+ - vlan: vlan4
+ - vlan: vlan5
+ - vlan: vlan12-13
+ - vlan: vlan21-22
+ - vlan: vlan35
members:
portchannels:
- lag: Po10
- lag: Po11
+ - name: test_case_04
+ description: Update MCLAG properties - associate further vlans
+ state: merged
+ input:
+ domain_id: 1
+ unique_ip:
+ vlans:
+ - vlan: vlan11-15
+ - vlan: vlan21-25
+ - vlan: vlan31-35
+ peer_gateway:
+ vlans:
+ - vlan: vlan11-15
+ - vlan: vlan21-25
+ - vlan: vlan31-35
+
delete_all:
- name: del_all_test_case_01
description: Delete MCLAG properties
@@ -68,11 +112,23 @@ updated_tests:
peer_link: "{{ interface3 }}"
keepalive: 3
session_timeout: 300
+ delay_restore: 450
system_mac: 00:00:00:01:01:01
+ gateway_mac: 00:00:00:03:03:03
unique_ip:
vlans:
- vlan: vlan2
- vlan: vlan6
+ - vlan: vlan11-15
+ - vlan: vlan21-25
+ - vlan: vlan31-35
+ peer_gateway:
+ vlans:
+ - vlan: vlan2
+ - vlan: vlan6
+ - vlan: vlan11-15
+ - vlan: vlan21-25
+ - vlan: vlan31-35
members:
portchannels:
- lag: Po13
@@ -88,10 +144,21 @@ updated_tests:
peer_link: "{{ interface3 }}"
keepalive: 3
session_timeout: 300
+ delay_restore: 450
system_mac: 00:00:00:01:01:01
+ gateway_mac: 00:00:00:03:03:03
unique_ip:
vlans:
- vlan: vlan2
+ - vlan: vlan12-14
+ - vlan: vlan23-25
+ - vlan: vlan31-34
+ peer_gateway:
+ vlans:
+ - vlan: vlan2
+ - vlan: vlan12-14
+ - vlan: vlan23-25
+ - vlan: vlan31-34
members:
portchannels:
- lag: Po13
@@ -103,5 +170,120 @@ updated_tests:
domain_id: 2
unique_ip:
vlans:
+ peer_gateway:
+ vlans:
+ members:
+ portchannels:
+
+replaced_overridden_tests:
+ - name: test_case_06
+ description: Create new MCLAG with all properties including VLANs and Portchannels
+ state: merged
+ input:
+ domain_id: 2
+ source_address: 3.3.3.5
+ peer_address: 1.1.1.3
+ peer_link: "{{ interface3 }}"
+ keepalive: 3
+ session_timeout: 300
+ delay_restore: 450
+ system_mac: 00:00:00:01:01:01
+ gateway_mac: 00:00:00:03:03:03
+ unique_ip:
+ vlans:
+ - vlan: vlan2
+ - vlan: vlan11-15
+ peer_gateway:
+ vlans:
+ - vlan: vlan2
+ - vlan: vlan11-15
+ members:
+ portchannels:
+ - lag: Po10
+ - lag: Po11
+
+ - name: test_case_07
+ description: Replace MCLAG Portchannels and VLANs
+ state: replaced
+ input:
+ domain_id: 2
+ unique_ip:
+ vlans:
+ - vlan: vlan2
+ - vlan: vlan11-13
+ - vlan: vlan21-25
+ peer_gateway:
+ vlans:
+ - vlan: vlan12-13
+ - vlan: vlan22-24
+ members:
+ portchannels:
+ - lag: Po11
+ - lag: Po12
+
+ - name: test_case_08
+ description: Replace MCLAG domain ID and properties
+ state: replaced
+ input:
+ domain_id: 20
+ source_address: 3.3.3.5
+ peer_address: 1.1.1.3
+ peer_link: "{{ interface3 }}"
+ keepalive: 3
+ session_timeout: 300
+ delay_restore: 480
+ gateway_mac: 00:00:00:12:12:12
+ unique_ip:
+ vlans:
+ - vlan: vlan11-13
+ - vlan: vlan21-25
+ peer_gateway:
+ vlans:
+ - vlan: vlan12-13
members:
portchannels:
+ - lag: Po10
+ - lag: Po12
+
+ - name: test_case_09
+ description: Replace MCLAG properties
+ state: replaced
+ input:
+ domain_id: 20
+ source_address: 3.3.3.5
+ peer_address: 1.1.1.3
+ peer_link: "{{ interface3 }}"
+ system_mac: 00:00:00:01:01:01
+ gateway_mac: 00:00:00:03:03:03
+ unique_ip:
+ vlans:
+ - vlan: vlan11-15
+ peer_gateway:
+ vlans:
+ - vlan: vlan11-15
+ members:
+ portchannels:
+ - lag: Po10
+ - lag: Po11
+
+ - name: test_case_10
+ description: Override MCLAG properties including VLANs and Portchannels
+ state: overridden
+ input:
+ domain_id: 10
+ source_address: 3.3.3.5
+ peer_address: 1.1.1.3
+ peer_link: "{{ interface3 }}"
+ keepalive: 3
+ unique_ip:
+ vlans:
+ - vlan: vlan11-12
+ - vlan: vlan31-32
+ peer_gateway:
+ vlans:
+ - vlan: vlan11-12
+ - vlan: vlan31-32
+ members:
+ portchannels:
+ - lag: Po10
+ - lag: Po12
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..502489684
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/cleanup_tests.yaml
@@ -0,0 +1,18 @@
+---
+- name: Delete MCLAG configuration
+ dellemc.enterprise_sonic.sonic_mclag:
+ config:
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test VLANs
+ dellemc.enterprise_sonic.sonic_vlans:
+ config: "{{ preparations_tests.add_vlans_input }}"
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete test lag interfaces
+ dellemc.enterprise_sonic.sonic_lag_interfaces:
+ config: "{{ preparations_tests.add_lag_interfaces }}"
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml
index 071ef9495..b8d2efa83 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_mclag/tasks/main.yml
@@ -16,7 +16,13 @@
include_tasks: tasks_template.yaml
loop: "{{ updated_tests }}"
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ replaced_overridden_tests }}"
+
+- name: Cleanup test
+ include_tasks: cleanup_tests.yaml
+
- name: Display all variables/facts known for a host
debug:
var: hostvars[inventory_hostname].ansible_facts.test_reports
-
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml
index 860297d6c..d5538e51c 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_ntp/defaults/main.yml
@@ -3,6 +3,7 @@ ansible_connection: httpapi
module_name: ntp
po1: Portchannel 100
+po2: Portchannel 200
vlan1: Vlan 100
lo1: Loopback 100
@@ -11,15 +12,18 @@ mgmt_vrf: mgmt
ntp_ip_server_1: 10.11.0.1
ntp_ip_server_2: 10.11.0.2
ntp_ip_server_3: 10.11.0.3
+ntp_ip_server_4: 10.11.0.4
ntp_host_server: pool.ntp.org
preparations_tests:
delete_interfaces:
- "no interface {{ po1 }}"
+ - "no interface {{ po2 }}"
- "no interface {{ vlan1 }}"
- "no interface {{ lo1 }}"
init_interfaces:
- "interface {{ po1 }}"
+ - "interface {{ po2 }}"
- "interface {{ vlan1 }}"
- "interface {{ lo1 }}"
delete_mgmt_vrf:
@@ -81,22 +85,54 @@ tests:
- address: "{{ ntp_ip_server_3 }}"
minpoll: 7
maxpoll: 10
+ prefer: true
- name: test_case_06
+ description: Replace NTP source interfaces and some servers
+ state: replaced
+ input:
+ source_interfaces:
+ - "{{ interface2 }}"
+ - "{{ po2 }}"
+ servers:
+ - address: "{{ ntp_ip_server_4 }}"
+ minpoll: 5
+ maxpoll: 8
+ - address: "{{ ntp_ip_server_3 }}"
+ minpoll: 5
+ maxpoll: 8
+ prefer: true
+
+ - name: test_case_07
+ description: Configure NTP source interfaces and server prefer to false
+ state: merged
+ input:
+ source_interfaces:
+ - "{{ interface1 }}"
+ - "{{ po1 }}"
+ - "{{ vlan1 }}"
+ - "{{ lo1 }}"
+ servers:
+ - address: "{{ ntp_ip_server_3 }}"
+ minpoll: 6
+ maxpoll: 10
+ prefer: false
+
+ - name: test_case_08
description: Delete a NTP source interface
state: deleted
input:
source_interfaces:
- "{{ interface1 }}"
- - name: test_case_07
+ - name: test_case_09
description: Delete a NTP server
state: deleted
input:
servers:
- address: "{{ ntp_ip_server_1 }}"
- - name: test_case_08
+ - name: test_case_10
description: Delete several NTP source interfaces
state: deleted
input:
@@ -104,7 +140,7 @@ tests:
- "{{ interface2 }}"
- "{{ po1 }}"
- - name: test_case_09
+ - name: test_case_11
description: Delete several NTP servers
state: deleted
input:
@@ -112,7 +148,7 @@ tests:
- address: "{{ ntp_ip_server_1 }}"
- address: "{{ ntp_ip_server_3 }}"
- - name: test_case_10
+ - name: test_case_12
description: Delete NTP source interfaces and servers
state: deleted
input:
@@ -124,25 +160,25 @@ tests:
- address: "{{ ntp_ip_server_1 }}"
- address: "{{ ntp_host_server }}"
- - name: test_case_11
+ - name: test_case_13
description: Configure NTP VRF
state: merged
input:
vrf: "{{ mgmt_vrf }}"
- - name: test_case_12
+ - name: test_case_14
description: Delete NTP VRF
state: deleted
input:
vrf: "{{ mgmt_vrf }}"
- - name: test_case_13
+ - name: test_case_15
description: Enable NTP authentication
state: merged
input:
enable_ntp_auth: true
- - name: test_case_14
+ - name: test_case_16
description: Create NTP authentication keys
state: merged
input:
@@ -156,7 +192,7 @@ tests:
key_value: U2FsdGVkX1/wWVxmcp59mJQO6uzhFEHIxScdCbIqJh4=
encrypted: true
- - name: test_case_15
+ - name: test_case_17
description: Configure NTP trusted keys
state: merged
input:
@@ -164,7 +200,7 @@ tests:
- 2
- 6
- - name: test_case_16
+ - name: test_case_18
description: Create NTP servers with key
state: merged
input:
@@ -174,7 +210,7 @@ tests:
minpoll: 6
maxpoll: 9
- - name: test_case_17
+ - name: test_case_19
description: Delete NTP trusted keys
state: deleted
input:
@@ -182,14 +218,14 @@ tests:
- 2
- 6
- - name: test_case_18
+ - name: test_case_20
description: Delete NTP server
state: deleted
input:
servers:
- address: "{{ ntp_ip_server_1 }}"
- - name: test_case_19
+ - name: test_case_21
description: Delete NTP authentication keys
state: deleted
input:
@@ -197,13 +233,52 @@ tests:
- key_id: 2
- key_id: 6
- - name: test_case_20
+ - name: test_case_22
description: Delete NTP authentication
state: deleted
input:
enable_ntp_auth: true
- - name: test_case_21
+ - name: test_case_23
+ description: Overridden NTP configuration
+ state: overridden
+ input:
+ enable_ntp_auth: false
+ source_interfaces:
+ - "{{ interface1 }}"
+ - "{{ interface2 }}"
+ - "{{ po1 }}"
+ - "{{ po2 }}"
+ servers:
+ - address: "{{ ntp_ip_server_1 }}"
+ minpoll: 6
+ maxpoll: 9
+ - address: "{{ ntp_ip_server_4 }}"
+ minpoll: 5
+ maxpoll: 8
+ - address: "{{ ntp_ip_server_3 }}"
+ minpoll: 5
+ maxpoll: 8
+ prefer: true
+
+ - name: test_case_24
+ description: Replace more NTP configuration
+ state: replaced
+ input:
+ enable_ntp_auth: true
+ source_interfaces:
+ - "{{ interface2 }}"
+ - "{{ po2 }}"
+ servers:
+ - address: "{{ ntp_ip_server_4 }}"
+ minpoll: 6
+ maxpoll: 8
+ - address: "{{ ntp_ip_server_3 }}"
+ minpoll: 5
+ maxpoll: 8
+ prefer: true
+
+ - name: test_case_25
description: Delete all NTP configurations
state: deleted
input: {}
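Note: like the other roles in this suite, these entries are presumably applied one at a time by the role's tasks_template.yaml, with config taken from the entry's input and state from the entry's state. As a rough standalone sketch, test_case_06 expands to something like the task below (Ethernet4 stands in for the undisclosed interface2 value; the remaining values come from the defaults above):

  - name: Replace NTP source interfaces and some servers
    dellemc.enterprise_sonic.sonic_ntp:
      config:
        source_interfaces:
          - Ethernet4
          - Portchannel 200
        servers:
          - address: 10.11.0.4
            minpoll: 5
            maxpoll: 8
          - address: 10.11.0.3
            minpoll: 5
            maxpoll: 8
            prefer: true
      state: replaced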
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/defaults/main.yml
new file mode 100644
index 000000000..104b5af04
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/defaults/main.yml
@@ -0,0 +1,122 @@
+---
+ansible_connection: httpapi
+module_name: pki
+
+preparations_tests:
+ delete_pki:
+ - name: delete_all
+ description: Delete PKI
+ state: deleted
+ input:
+tests:
+ - name: test_case_01
+ description: Add new PKI configuration
+ state: merged
+ input:
+ security_profiles:
+ - profile_name: mysp
+ - profile_name: myspb
+ ocsp_responder_list:
+ - http://example.com/ocsp
+ trust_stores:
+ - name: myts
+ ca_name: CA2
+
+ - name: test_case_02
+ description: Update specific params of security-profile
+ state: merged
+ input:
+ security_profiles:
+ - profile_name: mysp
+ cdp_list:
+ - http://example.com/cdp
+ ocsp_responder_list:
+ - http://example.com/ocspb
+ - http://example.com/ocspc
+ trust_store: myts
+ revocation_check: false
+ - profile_name: mysp2
+ trust_store: myts2
+ trust_stores:
+ - name: myts
+ ca_name:
+ - CA2
+ - CA
+ - name: myts2
+ ca_name: CA
+ - name: test_case_03
+ description: Delete specific params of security-profile
+ state: deleted
+ input:
+ security_profiles:
+ - profile_name: mysp
+ cdp_list:
+ - http://example.com/cdp
+ - profile_name: mysp2
+ trust_store: myts2
+ - name: test_case_04
+ description: Delete all security_profiles configurations
+ state: deleted
+ input:
+ security_profiles:
+ - profile_name: mysp
+ - profile_name: myspb
+
+ - name: test_case_05
+ description: Merge parameters of security_profiles configurations
+ state: merged
+ input:
+ security_profiles:
+ - profile_name: mysp
+ revocation_check: false
+ peer_name_check: true
+ - profile_name: myspb
+ ocsp_responder_list:
+ - http://example.com/ocsp
+ trust_store: myts
+ key_usage_check: true
+
+ - name: test_case_06
+ description: Replace some parameters of security_profiles
+ state: replaced
+ input:
+ security_profiles:
+ - profile_name: mysp
+ revocation_check: true
+ peer_name_check: false
+ cdp_list:
+ - http://example.com/cdp
+ ocsp_responder_list:
+ - http://example.com/ocspb
+ - http://example.com/ocspc
+ trust_store: myts
+
+ - name: test_case_07
+ description: Replace CA of trust_store
+ state: replaced
+ input:
+ trust_stores:
+ - name: myts
+ ca_name: CA
+
+ - name: test_case_08
+ description: Override parameters of security_profiles and trust_stores
+ state: overridden
+ input:
+ security_profiles:
+ - profile_name: newsp
+ revocation_check: false
+ peer_name_check: true
+ - profile_name: newspb
+ ocsp_responder_list:
+ - http://example.com/ocsp
+ key_usage_check: true
+ trust_store: newts
+ trust_stores:
+ - name: newts
+ ca_name: CA
+
+test_delete_all:
+ - name: test_case_09
+ description: Delete all PKI configurations
+ state: deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/meta/main.yaml
new file mode 100644
index 000000000..0b356217e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..b1ffad57e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/cleanup_tests.yaml
@@ -0,0 +1,6 @@
+- name: Deletes old PKI configuration
+ sonic_pki:
+ config: {}
+ state: deleted
+ ignore_errors: yes
+
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/main.yml
new file mode 100644
index 000000000..bd20b1292
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/main.yml
@@ -0,0 +1,16 @@
+- debug: msg="sonic_pki Test started ..."
+
+- set_fact:
+ base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
+
+- name: Preparations test
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "test_delete_all {{ module_name }} stated ..."
+ include_tasks: tasks_template_del.yaml
+ loop: "{{ test_delete_all }}"
+ when: test_delete_all is defined
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..241e75a81
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/preparation_tests.yaml
@@ -0,0 +1,5 @@
+- name: Deletes old PKI configurations
+ sonic_pki:
+ config: {}
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template.yaml
new file mode 100644
index 000000000..ff6bce99e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_pki:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_pki:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
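Note: rendered against test_case_01 from this role's defaults, the first task in the template above is roughly equivalent to the following standalone task (a sketch; the common action.facts.report.yaml bookkeeping step is omitted):

  - name: "test_case_01 , Add new PKI configuration"
    sonic_pki:
      config:
        security_profiles:
          - profile_name: mysp
          - profile_name: myspb
            ocsp_responder_list:
              - http://example.com/ocsp
        trust_stores:
          - name: myts
            ca_name: CA2
      state: merged
    register: action_task_output
    ignore_errors: yes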
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template_del.yaml
new file mode 100644
index 000000000..4b914364e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_pki/tasks/tasks_template_del.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_pki:
+ state: "{{ item.state }}"
+ config:
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_pki:
+ state: "{{ item.state }}"
+ config:
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml
index 402088be5..dd31e4afc 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/defaults/main.yml
@@ -2,40 +2,21 @@
ansible_connection: httpapi
module_name: port_breakout
-preparations_tests:
- delete_port_breakouts:
- - "no interface breakout port 1/97"
- - "no interface breakout port 1/98"
- - "no interface breakout port 1/99"
- - "no interface breakout port 1/100"
- - "no interface breakout port 1/101"
- - "no interface breakout port 1/102"
-
-tests_cli:
- - name: cli_test_case_01
- description: Configure breakout mode for ports
- state: merged
- input:
- - name: 1/97
- mode: 4x25G
- - name: 1/98
- mode: 1x40G
-
tests:
- name: test_case_01
description: Configure breakout mode for ports
state: merged
input:
- name: 1/97
- mode: 4x25G
- - name: 1/98
mode: 1x40G
+ - name: 1/98
+ mode: 1x50G
- name: 1/99
- mode: 4x25G
+ mode: 1x100G
- name: 1/100
- mode: 4x10G
+ mode: 2x50G
- name: 1/101
- mode: 1x40G
+ mode: 4x10G
- name: 1/102
mode: 4x25G
- name: test_case_02
@@ -43,7 +24,7 @@ tests:
state: merged
input:
- name: 1/97
- mode: 1x40G
+ mode: 2x50G
- name: 1/98
mode: 4x10G
- name: test_case_03
@@ -52,6 +33,22 @@ tests:
input:
- name: 1/98
- name: test_case_04
+ description: Replace breakout mode for ports
+ state: replaced
+ input:
+ - name: 1/97
+ mode: 4x10G
+ - name: 1/98
+ mode: 4x10G
+ - name: test_case_05
+ description: Override breakout mode for ports
+ state: overridden
+ input:
+ - name: 1/100
+ mode: 4x10G
+ - name: 1/101
+ mode: 2x50G
+ - name: test_case_06
description: deleting all the port breakout modes
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml
index fc7e76dda..6dbbc8263 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/cleanup_tests.yaml
@@ -1,4 +1,4 @@
-- name: Deletes old bgp
+- name: Deletes old port breakout configuration
sonic_port_breakout:
config: []
state: deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml
index 2dea65315..5e2e87c05 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/main.yml
@@ -1,31 +1,15 @@
- debug: msg="sonic_port_breakout Test started ..."
-- set_fact:
- base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
-
-- name: Preparations test
+- name: Preparation test
include_tasks: preparation_tests.yaml
-- name: "Test {{ module_name }} CLI validation started ..."
+- name: "Test {{ module_name }} started ..."
include_tasks: tasks_template.yaml
- loop: "{{ tests_cli }}"
-
-- name: "Test CLI validation started ..."
- include_role:
- name: common
- tasks_from: cli_tasks_template.yaml
- loop: "{{ tests_cli }}"
-
-# - name: Preparations test
-# include_tasks: preparation_tests.yaml
-
-# - name: "Test {{ module_name }} started ..."
-# include_tasks: tasks_template.yaml
-# loop: "{{ tests }}"
+ loop: "{{ tests }}"
-# - name: Cleanup tests
-# include_tasks: cleanup_tests.yaml
+- name: Cleanup tests
+ include_tasks: cleanup_tests.yaml
-# - name: Display all variables/facts known for a host
-# debug:
-# var: hostvars[inventory_hostname].ansible_facts.test_reports
+- name: Display the full test report
+ debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml
index 04ab1b456..958a69f27 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/tasks/tasks_template.yaml
@@ -8,14 +8,18 @@
- import_role:
name: common
tasks_from: action.facts.report.yaml
-
+
- name: "{{ item.name}} , {{ item.description}} Idempotent"
sonic_port_breakout:
config: "{{ item.input }}"
state: "{{ item.state }}"
register: idempotent_task_output
ignore_errors: yes
-
+
- import_role:
name: common
tasks_from: idempotent.facts.report.yaml
+
+- name: "Pause before the next test"
+ ansible.builtin.pause:
+ seconds: 10
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg
deleted file mode 100644
index c08c5950c..000000000
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_breakout/templates/cli_test_case_01.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-interface breakout port 1/97 mode 4x25G
-interface breakout port 1/98 mode 1x40G
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/defaults/main.yml
new file mode 100644
index 000000000..89babf8d9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/defaults/main.yml
@@ -0,0 +1,82 @@
+---
+ansible_connection: httpapi
+module_name: port_group
+
+pg1: 1
+pg2: 2
+pg16: 16
+pg20: 20
+
+preparations_tests:
+ delete_port_group_configurations:
+ - "no port-group {{ pg1 }} speed"
+ - "no port-group {{ pg2 }} speed"
+ - "no port-group {{ pg16 }} speed"
+ - "no port-group {{ pg20 }} speed"
+
+tests:
+ - name: test_case_01
+ description: Configure port group speeds
+ state: merged
+ input:
+ - id: "{{ pg1 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg2 }}"
+ speed: SPEED_25GB
+ - id: "{{ pg16 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg20 }}"
+ speed: SPEED_25GB
+ - name: test_case_02
+ description: Delete some port group speeds
+ state: deleted
+ input:
+ - id: "{{ pg1 }}"
+ - id: "{{ pg2 }}"
+ - name: test_case_03
+ description: Set all port groups to default speeds
+ state: deleted
+ input:
+ - id:
+ - name: test_case_04
+ description: Set some port group speeds
+ state: merged
+ input:
+ - id: "{{ pg16 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg20 }}"
+ speed: SPEED_25GB
+ - name: test_case_05
+ description: Replace some port group speeds
+ state: replaced
+ input:
+ - id: "{{ pg1 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg20 }}"
+ speed: SPEED_10GB
+ - name: test_case_06
+ description: Replace more port group speeds
+ state: replaced
+ input:
+ - id: "{{ pg1 }}"
+ speed: SPEED_25GB
+ - id: "{{ pg2 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg16 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg20 }}"
+ speed: SPEED_10GB
+ - name: test_case_07
+ description: Override port group speeds
+ state: overridden
+ input:
+ - id: "{{ pg1 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg2 }}"
+ speed: SPEED_10GB
+ - id: "{{ pg20 }}"
+ speed: SPEED_25GB
+ - name: test_case_08
+ description: Clean up - reset all port group speeds
+ state: deleted
+ input: []
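Note: as in the other roles here, tasks/tasks_template.yaml (added below) feeds each entry directly to the module; with the pgN variables expanded, test_case_01 is roughly equivalent to this standalone task:

  - name: Configure port group speeds
    sonic_port_group:
      config:
        - id: "1"
          speed: SPEED_10GB
        - id: "2"
          speed: SPEED_25GB
        - id: "16"
          speed: SPEED_10GB
        - id: "20"
          speed: SPEED_25GB
      state: merged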
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/meta/main.yaml
new file mode 100644
index 000000000..611fd54d2
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common } \ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..af476de06
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/cleanup_tests.yaml
@@ -0,0 +1,7 @@
+- name: Reset port groups
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.delete_port_group_configurations }}"
+ register: output
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/main.yml
new file mode 100644
index 000000000..8c430cce9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/main.yml
@@ -0,0 +1,16 @@
+- debug: msg="sonic_port_group Test started ..."
+
+- name: Preparations tests
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup test {{ module_name }} started"
+ include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
+
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..b9104df7b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/preparation_tests.yaml
@@ -0,0 +1,7 @@
+- name: Initialize port groups
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.delete_port_group_configurations }}"
+ register: output
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/tasks_template.yaml
new file mode 100644
index 000000000..936cadb54
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_port_group/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_port_group:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_port_group:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml
index be5199910..6df415c3c 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_prefix_lists/defaults/main.yml
@@ -83,40 +83,98 @@ tests:
le: 29
- name: test_case_05
- description: Delete prefixes from existing prefix sets
- state: deleted
+ description: Override prefix list configuration
+ state: overridden
input:
- afi: ipv4
- name: pf2
+ name: pf6
+ prefixes:
+ - action: deny
+ sequence: 10
+ prefix: 1.2.3.0/24
+ ge: 25
+ le: 27
+ - afi: ipv4
+ name: pf7
prefixes:
- action: deny
prefix: 10.20.30.0/24
sequence: 20
ge: 26
+ - action: permit
+ prefix: 10.20.30.128/25
+ sequence: 50
+ ge: 27
+ le: 29
- afi: ipv4
- name: pf3
+ name: pf8
+ prefixes:
+ - action: deny
+ prefix: 1.2.3.128/25
+ sequence: 30
+ le: 27
+
+ - name: test_case_06
+ description: Replace prefix list configuration
+ state: replaced
+ input:
+ - afi: ipv4
+ name: pf6
prefixes:
- action: permit
- prefix: 1.2.3.192/26
- sequence: 40
- ge: 28
- le: 30
+ sequence: 10
+ prefix: 1.2.3.0/24
+ ge: 25
+ le: 27
+ - action: deny
+ sequence: 11
+ prefix: 1.2.4.0/24
+ ge: 26
+ le: 28
- afi: ipv4
- name: pf5
+ name: pf7
+ prefixes:
+ - action: deny
+ prefix: 10.20.30.128/25
+ sequence: 50
+ ge: 27
+ le: 29
+ - afi: ipv6
+ name: pf9
prefixes:
- action: permit
- prefix: 15.25.35.0/24
- sequence: 15
+ sequence: 32
+ prefix: 60:70::/64
- - name: test_case_06
+ - name: test_case_07
+ description: Delete prefixes from existing prefix sets
+ state: deleted
+ input:
+ - afi: ipv4
+ name: pf6
+ prefixes:
+ - action: permit
+ sequence: 10
+ prefix: 1.2.3.0/24
+ ge: 25
+ le: 27
+ - afi: ipv4
+ name: pf7
+ prefixes:
+ - action: deny
+ prefix: 10.20.30.0/24
+ sequence: 20
+ ge: 26
+
+ - name: test_case_08
description: Delete prefix sets from the existing configuration
state: deleted
input:
- - name: pf1
- - name: pf4
+ - name: pf6
+ - name: pf9
afi: ipv6
- - name: test_case_07
+ - name: test_case_09
description: Delete all prefix set configuration
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml
index 6a79dc88d..9ef0e3094 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_radius_server/defaults/main.yml
@@ -108,7 +108,79 @@ tests:
source_interface: "{{ interface3 }}"
retransmit: 9
-test_delete_all:
- name: test_case_06
- description: delete all the configurations of radius server
+ description: Replace some configuration of radius servers
+ state: replaced
+ input:
+ auth_type: mschapv2
+ timeout: 36
+ nas_ip: 11.11.11.22
+ retransmit: 5
+ statistics: true
+ servers:
+ host:
+ - name: my_host
+ auth_type: chap
+ port: 55
+ timeout: 12
+ priority: 3
+ source_interface: "{{ interface3 }}"
+
+ - name: test_case_07
+ description: Replace hosts of radius servers
+ state: replaced
+ input:
+ auth_type: mschapv2
+ timeout: 36
+ nas_ip: 11.11.11.22
+ retransmit: 5
+ statistics: true
+ servers:
+ host:
+ - name: my_host
+ auth_type: chap
+ port: 55
+ timeout: 21
+ priority: 3
+ source_interface: "{{ interface3 }}"
+ - name: 20.21.22.23
+ auth_type: pap
+ port: 50
+ timeout: 38
+ priority: 4
+ source_interface: "{{ interface2 }}"
+ - name: 18.21.22.23
+ auth_type: chap
+ port: 20
+ timeout: 19
+ priority: 8
+ source_interface: "{{ interface1 }}"
+
+ - name: test_case_08
+ description: Override configuration of radius server
+ state: overridden
+ input:
+ auth_type: mschapv2
+ timeout: 20
+ nas_ip: 10.10.10.20
+ retransmit: 3
+ servers:
+ host:
+ - name: 10.11.11.11
+ auth_type: pap
+ port: 55
+ timeout: 12
+ priority: 3
+ retransmit: 8
+ source_interface: "{{ interface2 }}"
+ - name: your_host
+ auth_type: chap
+ port: 50
+ timeout: 30
+ priority: 6
+ source_interface: "{{ interface3 }}"
+
+test_delete_all:
+ - name: test_case_09
+ description: Delete all the configurations of radius server
state: deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/defaults/main.yml
new file mode 100644
index 000000000..b0fc48be8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/defaults/main.yml
@@ -0,0 +1,1060 @@
+---
+ansible_connection: httpapi
+module_name: sonic_route_maps
+
+preparation_tests:
+ init_vrfs:
+ - 'ip vrf Vrf1'
+ init_prefix_lists:
+ - 'ip prefix-list ip_pfx_list1 seq 40 permit 1.2.3.0/24'
+ - 'ip prefix-list ip_pfx_list2 seq 50 permit 5.6.7.0/24'
+ - 'ip prefix-list ip_pfx_list3 seq 89 permit 12.13.14.0/24'
+ - 'ipv6 prefix-list ipv6_pfx_list1 seq 40 permit 30:40::/64'
+ - 'ipv6 prefix-list ipv6_pfx_list2 seq 75 deny 20:80::/70'
+ init_bgp_communities:
+ - 'bgp community-list standard bgp_comm_list1 permit 67:35'
+ - 'bgp community-list standard bgp_comm_list2 permit local-as'
+ - 'bgp community-list standard bgp_comm_list3 permit 25:34'
+ init_bgp_extcommunities:
+ - 'bgp extcommunity-list standard bgp_ext_comm1 permit rt 35:45'
+ - 'bgp extcommunity-list standard bgp_ext_comm2 permit soo 21:43'
+ init_bgp_as_paths:
+ - 'bgp as-path-list bgp_as1 permit "34"'
+ - 'bgp as-path-list bgp_as2 permit "97"'
+ - 'bgp as-path-list bgp_as3 permit "270"'
+ init_interfaces:
+ - 'interface Vlan7'
+ - 'interface PortChannel 14'
+
+cleanup_tests:
+ delete_vrfs:
+ - 'no ip vrf Vrf1'
+ delete_prefix_lists:
+ - 'no ip prefix-list ip_pfx_list1 seq 40 permit 1.2.3.0/24'
+ - 'no ip prefix-list ip_pfx_list2 seq 50 permit 5.6.7.0/24'
+ - 'no ip prefix-list ip_pfx_list3 seq 89 permit 12.13.14.0/24'
+ - 'no ipv6 prefix-list ipv6_pfx_list1 seq 40 permit 30:40::/64'
+ - 'no ipv6 prefix-list ipv6_pfx_list2 seq 75 deny 20:80::/70'
+ delete_bgp_communities:
+ - 'no bgp community-list standard bgp_comm_list1 permit 67:35'
+ - 'no bgp community-list standard bgp_comm_list2 permit local-as'
+ - 'no bgp community-list standard bgp_comm_list3 permit 25:34'
+ delete_bgp_extcommunities:
+ - 'no bgp extcommunity-list standard bgp_ext_comm1 permit rt 35:45'
+ - 'no bgp extcommunity-list standard bgp_ext_comm2 permit soo 21:43'
+ delete_bgp_as_paths:
+ - 'no bgp as-path-list bgp_as1 permit "34"'
+ - 'no bgp as-path-list bgp_as2 permit "97"'
+ - 'no bgp as-path-list bgp_as3 permit "270"'
+ delete_interfaces:
+ - 'no interface Vlan7'
+ - 'no interface PortChannel 14'
+
+tests:
+ - name: test_case_01_merged_base_rm1_80_match
+ description: Add initial route map match configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as1
+ community: bgp_comm_list1
+ evpn:
+ default_route: true
+ vni: 735
+ ext_comm: bgp_ext_comm1
+ interface: Ethernet8
+ ip:
+ address: ip_pfx_list1
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ ip: 10.20.30.40
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+
+ - name: test_case_02_merged_base_rm1_80_set
+ description: Add initial route map set configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ as_path_prepend: 200,315,7135
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ community_attributes:
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ - additive
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: true
+ local_preference: 635
+ metric:
+ value: 870
+ origin: egp
+ weight: 93471
+ - name: test_case_03_merged_base_other_route_maps
+ description: Add initial route map match configuration for other maps
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ evpn:
+ route_type: multicast
+ origin: incomplete
+ peer:
+ interface: Ethernet16
+ source_protocol: ospf
+ set:
+ community:
+ community_attributes:
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ evpn:
+ route_type: prefix
+ source_protocol: static
+ set:
+ metric:
+ rtt_action: subtract
+ - name: test_case_04_merged_modify_rm1_80_match
+ description: Modify route map match configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ route_type: prefix
+ vni: 850
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ peer:
+ interface: PortChannel14
+ - name: test_case_05_merged_modify_rm1_80_set
+ description: Modify route map set configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ as_path_prepend: 188,257
+ community:
+ community_number:
+ - 45:736
+ ipv6_next_hop:
+ prefer_global: false
+ metric:
+ rtt_action: add
+ - name: test_case_06_merged_modify_other_route_maps
+ description: Modify route map configuration for other maps
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ source_protocol: ospf
+ origin: incomplete
+ peer:
+ ip: 5.6.7.8
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric:
+ value: 8000
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ local_preference: 14783
+ source_protocol: bgp
+ - name: test_case_07_deleted_individual_attributes
+ description: Delete at least one attribute of each type
+ state: deleted
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ default_route: true
+ route_type: prefix
+ vni: 850
+ ext_comm: bgp_ext_comm1
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ interface: PortChannel14
+ source_vrf: Vrf1
+ tag: 7284
+ set:
+ as_path_prepend: 188,257
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_number:
+ - "35:58"
+ community_attributes:
+ - local_as
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - 10.73.14.9:78
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: false
+ local_preference: 635
+ metric:
+ rtt_action: add
+ origin: egp
+ weight: 93471
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ evpn:
+ route_type: multicast
+ origin: igp
+ source_protocol: connected
+ set:
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ peer:
+ ip: 5.6.7.8
+ source_protocol: ospf
+ set:
+ metric:
+ value: 8000
+ ipv6_next_hop:
+ prefer_global: true
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: bgp
+ set:
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ source_protocol: static
+ - name: test_case_08_merged_restore_rm1_80_match
+ description: Restore route map match configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ default_route: true
+ route_type: prefix
+ vni: 850
+ ext_comm: bgp_ext_comm1
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ interface: PortChannel14
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+ - name: test_case_09_merged_restore_rm1_80_set
+ description: Restore route map set configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ as_path_prepend: 188,257
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_attributes:
+ - additive
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ - "45:736"
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: false
+ local_preference: 635
+ metric:
+ rtt_action: add
+ origin: egp
+ weight: 93471
+ - name: test_case_10_merged_restore_other_route_maps
+ description: Restore route map configuration for other maps
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet16
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ origin: incomplete
+ peer:
+ ip: 5.6.7.8
+ source_protocol: ospf
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric:
+ value: 8000
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ local_preference: 14783
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ evpn:
+ route_type: prefix
+ source_protocol: static
+ set:
+ metric:
+ rtt_action: subtract
+ - name: test_case_11_replaced_dict_and_list
+ description: Replaced state handling for dict and list attributes
+ state: replaced
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ evpn:
+ route_type: multicast
+ ip:
+ address: ip_pfx_list1
+ set:
+ community:
+ community_attributes:
+ - no_advertise
+ community_number:
+ - "25:25"
+ extcommunity:
+ rt:
+ - "20:20"
+ soo:
+ - "45:55"
+ ipv6_next_hop:
+ global_addr: 30::30
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ set:
+ ipv6_next_hop:
+ global_addr: 45::90
+ - name: test_case_12_merged_restore_rm1_80_match
+ description: Restore route map match configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ default_route: true
+ route_type: prefix
+ vni: 850
+ ext_comm: bgp_ext_comm1
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ interface: PortChannel14
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+ - name: test_case_13_merged_restore_rm1_80_set
+ description: Restore route map set configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ as_path_prepend: 188,257
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_attributes:
+ - additive
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ - "45:736"
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: false
+ local_preference: 635
+ metric:
+ rtt_action: add
+ origin: egp
+ weight: 93471
+ - name: test_case_14_merged_restore_other_route_maps
+ description: Restore route map configuration for other replaced maps (rm2 "set")
+ state: merged
+ input:
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric:
+ value: 8000
+ - name: test_case_15_replaced_top_level_match_attr_rm1_80
+ description: Replaced state handling for top level match attributes in rm1 80
+ state: replaced
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as3
+ ext_comm: bgp_ext_comm2
+ - name: test_case_16_replaced_top_level_attr_other_route_maps
+ description: Replaced state handling for top level attributes in other route maps
+ state: replaced
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ origin: egp
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ interface: Ethernet8
+ ipv6:
+ address: ipv6_pfx_list1
+ origin: egp
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ set:
+ as_path_prepend: 375,94
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ local_preference: 14783
+ set:
+ comm_list_delete: bgp_comm_list1
+ - name: test_case_17_deleted_route_map_statement
+ description: Delete entire route map "statements" (can be sub route maps sharing a map name)
+ state: deleted
+ input:
+ - map_name: rm1
+ sequence_num: 3047
+ - map_name: rm2
+ sequence_num: 100
+ - name: test_case_18_merged_restore_deleted_route_maps
+ description: Restore route map configuration for deleted maps
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet16
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ origin: incomplete
+ peer:
+ ip: 5.6.7.8
+ source_protocol: ospf
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric:
+ value: 8000
+ - name: test_case_19_deleted_entire_route_maps
+ description: Delete entire route maps (single statement and multiple statement)
+ state: deleted
+ input:
+ - map_name: rm3
+ - map_name: rm1
+ - name: test_case_20_merged_restore_rm1_80_match
+ description: Restore route map match configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ default_route: true
+ route_type: prefix
+ vni: 850
+ ext_comm: bgp_ext_comm1
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ interface: PortChannel14
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+ - name: test_case_21_merged_restore_rm1_80_set
+ description: Restore route map set configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ as_path_prepend: 188,257
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_attributes:
+ - additive
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ - "45:736"
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: false
+ local_preference: 635
+ metric:
+ rtt_action: add
+ origin: egp
+ weight: 93471
+ - name: test_case_22_merged_restore_deleted_rm1_3047_and_rm3_route_map
+ description: Restore deleted route maps rm1 3047 and rm3
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet16
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ local_preference: 14783
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - name: test_case_23_overridden_existing_route_map_subset
+ description: Override with an existing configured route map subset
+ state: overridden
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ evpn:
+ vni: 735
+ - name: test_case_24_merged_restore_rm1_80_match
+ description: Restore route map match configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ as_path: bgp_as2
+ community: bgp_comm_list3
+ evpn:
+ default_route: true
+ route_type: prefix
+ vni: 850
+ ext_comm: bgp_ext_comm1
+ interface: Vlan7
+ ip:
+ address: ip_pfx_list2
+ next_hop: ip_pfx_list3
+ ipv6:
+ address: ipv6_pfx_list1
+ local_preference: 8000
+ metric: 400
+ origin: egp
+ peer:
+ interface: PortChannel14
+ source_protocol: bgp
+ source_vrf: Vrf1
+ tag: 7284
+ - name: test_case_25_merged_restore_rm1_80_set
+ description: Restore route map set configuration for map rm1 80
+ state: merged
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ set:
+ as_path_prepend: 188,257
+ comm_list_delete: bgp_comm_list2
+ community:
+ community_attributes:
+ - additive
+ - local_as
+ - no_advertise
+ - no_export
+ - no_peer
+ community_number:
+ - "35:58"
+ - "79:150"
+ - "308:650"
+ - "45:736"
+ extcommunity:
+ rt:
+ - "30:40"
+ soo:
+ - "10.73.14.9:78"
+ ip_next_hop: 10.48.16.18
+ ipv6_next_hop:
+ global_addr: 30::30
+ prefer_global: false
+ local_preference: 635
+ metric:
+ rtt_action: add
+ origin: egp
+ weight: 93471
+ - name: test_case_26_merged_restore_deleted_rm1_3047_and_rm3_route_map
+ description: Restore deleted route maps rm1 3047 and rm3
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet16
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ local_preference: 14783
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - name: test_case_27_overridden_modified_route_map_subset
+ description: Override with a change to existing configured route map configuration
+ state: overridden
+ input:
+ - map_name: rm1
+ action: permit
+ sequence_num: 80
+ match:
+ ip:
+ address: ip_pfx_list2
+ - name: test_case_28_merged_restore_some_deleted_route_maps
+ description: Restore some deleted route map configuration
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet16
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ origin: incomplete
+ peer:
+ ip: 5.6.7.8
+ source_protocol: ospf
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric:
+ value: 8000
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ local_preference: 14783
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ evpn:
+ route_type: prefix
+ source_protocol: static
+ set:
+ metric:
+ rtt_action: subtract
+ - name: test_case_29_overridden_new_route_map_configuration
+ description: Override by specifying previously unconfigured attributes
+ state: overridden
+ input:
+ - map_name: rm5
+ action: permit
+ sequence_num: 250
+ match:
+ interface: Ethernet24
+ set:
+ as_path_prepend: 150,275
+ metric:
+ value: 7249
+ - name: test_case_30_merged_restore_some_deleted_route_maps
+ description: Restore some deleted route map configuration
+ state: merged
+ input:
+ - map_name: rm1
+ action: deny
+ sequence_num: 3047
+ match:
+ as_path: bgp_as3
+ evpn:
+ route_type: multicast
+ ext_comm: bgp_ext_comm2
+ origin: igp
+ peer:
+ interface: Ethernet16
+ source_protocol: connected
+ set:
+ community:
+ community_attributes:
+ - local_as
+ - no_advertise
+ metric:
+ rtt_action: subtract
+ origin: incomplete
+ - map_name: rm2
+ action: permit
+ sequence_num: 100
+ match:
+ interface: Ethernet16
+ origin: incomplete
+ peer:
+ ip: 5.6.7.8
+ source_protocol: ospf
+ set:
+ as_path_prepend: 200,300,400
+ ipv6_next_hop:
+ global_addr: 37::58
+ prefer_global: true
+ metric:
+ value: 8000
+ - map_name: rm3
+ action: deny
+ sequence_num: 285
+ match:
+ evpn:
+ route_type: macip
+ local_preference: 14783
+ origin: igp
+ peer:
+ ipv6: 87:95:15::53
+ source_protocol: bgp
+ set:
+ community:
+ community_attributes:
+ - none
+ metric:
+ rtt_action: set
+ origin: igp
+ call: rm1
+ - map_name: rm4
+ action: permit
+ sequence_num: 480
+ match:
+ evpn:
+ route_type: prefix
+ source_protocol: static
+ set:
+ metric:
+ rtt_action: subtract
+ - name: test_case_31_deleted_all
+ description: Delete all route map configuration
+ state: deleted
+ input: []
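Note: these entries are likewise applied one at a time through tasks/tasks_template.yaml (added further below); as a standalone sketch, the override in test_case_29 corresponds roughly to:

  - name: Override by specifying previously unconfigured attributes
    sonic_route_maps:
      config:
        - map_name: rm5
          action: permit
          sequence_num: 250
          match:
            interface: Ethernet24
          set:
            as_path_prepend: 150,275
            metric:
              value: 7249
      state: overridden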
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/meta/main.yaml
new file mode 100644
index 000000000..0b356217e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..dce856395
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/cleanup_tests.yaml
@@ -0,0 +1,42 @@
+- name: "Delete all route maps"
+ vars:
+ ansible_connection: httpapi
+ sonic_route_maps:
+ config: []
+ state: deleted
+
+- name: Remove test vrfs
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ cleanup_tests.delete_vrfs }}"
+
+- name: Remove test prefix lists
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ cleanup_tests.delete_prefix_lists }}"
+
+- name: Remove test BGP communities
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ cleanup_tests.delete_bgp_communities }}"
+
+- name: Remove test BGP extended communities
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ cleanup_tests.delete_bgp_extcommunities }}"
+
+- name: Remove test BGP AS paths
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ cleanup_tests.delete_bgp_as_paths }}"
+
+- name: Remove test interfaces
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ cleanup_tests.delete_interfaces }}"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/main.yml
new file mode 100644
index 000000000..b3e9671f8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/main.yml
@@ -0,0 +1,15 @@
+- debug: msg="sonic_route_maps Test started ..."
+
+- name: Preparation test
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup {{ module_name }} started ..."
+ include_tasks: cleanup_tests.yaml
+
+- name: Display Test Report Output
+ debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..cf27f70f1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/preparation_tests.yaml
@@ -0,0 +1,42 @@
+- name: "Delete old route maps"
+ vars:
+ ansible_connection: httpapi
+ sonic_route_maps:
+ config: []
+ state: deleted
+
+- name: Initialize vrfs
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparation_tests.init_vrfs }}"
+
+- name: Initialize prefix lists
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparation_tests.init_prefix_lists }}"
+
+- name: Initialize BGP communities
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparation_tests.init_bgp_communities }}"
+
+- name: Initialize BGP extended communities
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparation_tests.init_bgp_extcommunities }}"
+
+- name: Initialize BGP AS paths
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparation_tests.init_bgp_as_paths }}"
+
+- name: Initialize interfaces
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparation_tests.init_interfaces }}"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/tasks_template.yaml
new file mode 100644
index 000000000..f36c29694
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_route_maps/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_route_maps:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_route_maps:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml
index 64d2485fe..67235ac5d 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/defaults/main.yml
@@ -4,11 +4,24 @@ module_name: static_routes
vrf_1: VrfReg1
vrf_2: VrfReg2
+vrf_3: VrfReg3
+vrf_4: VrfReg4
+vrf_5: VrfReg5
preparations_tests:
init_vrf:
- 'ip vrf {{vrf_1}}'
- 'ip vrf {{vrf_2}}'
+ - 'ip vrf {{vrf_3}}'
+ - 'ip vrf {{vrf_4}}'
+ - 'ip vrf {{vrf_5}}'
+ delete_vrf:
+ - 'no ip vrf {{vrf_1}}'
+ - 'no ip vrf {{vrf_2}}'
+ - 'no ip vrf {{vrf_3}}'
+ - 'no ip vrf {{vrf_4}}'
+ - 'no ip vrf {{vrf_5}}'
+
tests:
- name: test_case_01
description: Add new static routes configuration
@@ -106,10 +119,10 @@ tests:
tag: 10
track: 11
- name: test_case_03
- description: Delete static routes metric, tag, and track configuration
- state: deleted
+ description: Override static routes configuration
+ state: overridden
input:
- - vrf_name: 'default'
+ - vrf_name: '{{vrf_3}}'
static_list:
- prefix: '2.0.0.0/8'
next_hops:
@@ -123,50 +136,139 @@ tests:
metric: 8
tag: 10
track: 12
- - vrf_name: '{{vrf_1}}'
+ - vrf_name: '{{vrf_4}}'
static_list:
- - prefix: '3.0.0.0/8'
+ - prefix: '7.0.0.0/8'
next_hops:
- index:
blackhole: True
interface: '{{ interface1 }}'
nexthop_vrf: '{{vrf_2}}'
- next_hop: '5.0.0.0'
+ next_hop: '9.0.0.0'
metric: 11
tag: 22
track: 33
+ - vrf_name: '{{vrf_5}}'
+ static_list:
+ - prefix: '9.0.0.0/8'
+ next_hops:
- index:
- interface: '{{ interface1 }}'
- nexthop_vrf: '{{vrf_2}}'
+ interface: '{{ interface2 }}'
+ nexthop_vrf: '{{vrf_1}}'
+ metric: 7
+ tag: 9
+ track: 11
+ - prefix: '6.0.0.0/8'
+ next_hops:
+ - index:
+ nexthop_vrf: '{{vrf_1}}'
next_hop: '4.0.0.0'
+ metric: 4
+ tag: 16
+ track: 28
- name: test_case_04
+ description: Replace static routes configuration
+ state: replaced
+ input:
+ - vrf_name: '{{vrf_3}}'
+ static_list:
+ - prefix: '5.0.0.0/8'
+ next_hops:
+ - index:
+ interface: '{{ interface1 }}'
+ metric: 12
+ tag: 18
+ track: 32
+ - index:
+ next_hop: '6.0.0.0'
+ metric: 4
+ tag: 5
+ track: 6
+ - vrf_name: '{{vrf_4}}'
+ static_list:
+ - prefix: '7.0.0.0/8'
+ next_hops:
+ - index:
+ blackhole: True
+ interface: '{{ interface4 }}'
+ nexthop_vrf: '{{vrf_1}}'
+ next_hop: '12.0.0.0'
+ metric: 15
+ tag: 25
+ track: 35
+ - vrf_name: '{{vrf_5}}'
+ static_list:
+ - prefix: '9.0.0.0/8'
+ next_hops:
+ - index:
+ interface: '{{ interface2 }}'
+ nexthop_vrf: '{{vrf_1}}'
+ metric: 8
+ tag: 10
+ track: 12
+ - name: test_case_05
+ description: Delete static routes metric, tag, and track configuration
+ state: deleted
+ input:
+ - vrf_name: '{{vrf_3}}'
+ static_list:
+ - prefix: '5.0.0.0/8'
+ next_hops:
+ - index:
+ interface: '{{ interface1 }}'
+ metric: 12
+ tag: 18
+ track: 32
+ - index:
+ next_hop: '6.0.0.0'
+ metric: 4
+ tag: 5
+ track: 6
+ - vrf_name: '{{vrf_4}}'
+ static_list:
+ - prefix: '7.0.0.0/8'
+ next_hops:
+ - index:
+ blackhole: True
+ interface: '{{ interface4 }}'
+ nexthop_vrf: '{{vrf_1}}'
+ next_hop: '12.0.0.0'
+ metric: 15
+ tag: 25
+ track: 35
+ - index:
+ blackhole: True
+ interface: '{{ interface1 }}'
+ nexthop_vrf: '{{vrf_2}}'
+ next_hop: '9.0.0.0'
+ - name: test_case_06
description: Delete static route index configuration
state: deleted
input:
- - vrf_name: 'default'
+ - vrf_name: '{{vrf_3}}'
static_list:
- - prefix: '2.0.0.0/8'
+ - prefix: '5.0.0.0/8'
next_hops:
- index:
interface: '{{ interface1 }}'
- index:
- next_hop: '3.0.0.0'
- - vrf_name: '{{vrf_2}}'
+ next_hop: '6.0.0.0'
+ - vrf_name: '{{vrf_5}}'
static_list:
- - prefix: '1.0.0.0/8'
+ - prefix: '6.0.0.0/8'
next_hops:
- index:
- interface: '{{ interface3 }}'
- next_hop: '2.0.0.0'
- - name: test_case_05
+ nexthop_vrf: '{{vrf_1}}'
+ next_hop: '4.0.0.0'
+ - name: test_case_07
description: Delete static route prefix configuration
state: deleted
input:
- - vrf_name: '{{vrf_1}}'
- - vrf_name: '{{vrf_2}}'
+ - vrf_name: '{{vrf_3}}'
+ - vrf_name: '{{vrf_4}}'
static_list:
- prefix: '7.0.0.0/8'
- - name: test_case_06
+ - name: test_case_08
description: Delete all static routes configuration
state: deleted
input: []
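
For reference, each entry in static_list above carries a prefix and a list of next_hops; the index dictionary identifies the next hop (blackhole, interface, nexthop_vrf, next_hop) while metric, tag and track sit alongside it. A minimal standalone sketch using values from the test data, with connection settings assumed to come from inventory as in the regression role:

```yaml
# Minimal sketch only; assumes ansible_connection/credentials are provided by
# inventory and that the VRF VrfReg3 already exists on the device.
- name: Override static routes in a test VRF
  dellemc.enterprise_sonic.sonic_static_routes:
    config:
      - vrf_name: VrfReg3
        static_list:
          - prefix: '2.0.0.0/8'
            next_hops:
              - index:
                  next_hop: '3.0.0.0'   # plain next-hop address, for illustration
                metric: 8
                tag: 10
                track: 12
    state: overridden
```
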
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..8601ad94e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/cleanup_tests.yaml
@@ -0,0 +1,7 @@
+- name: Delete VRFs
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.delete_vrf }}"
+ register: output
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml
index c87965de1..aa518f627 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/main.yml
@@ -9,3 +9,10 @@
- name: "Test {{ module_name }} started ..."
include_tasks: tasks_template.yaml
loop: "{{ tests }}"
+
+- name: "Cleanup test {{ module_name }} started"
+ include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/defaults/main.yml
new file mode 100644
index 000000000..2b503453c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/defaults/main.yml
@@ -0,0 +1,534 @@
+---
+ansible_connection: httpapi
+module_name: stp
+
+tests:
+ - name: test_case_01
+ description: Add STP configuration for mst protocol
+ state: merged
+ input:
+ global:
+ enabled_protocol: mst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 15
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ edge_port: true
+ link_type: shared
+ guard: loop
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 20
+ port_priority: 30
+ stp_enable: true
+ - intf_name: '{{ interface2 }}'
+ edge_port: true
+ link_type: point-to-point
+ guard: root
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 40
+ port_priority: 50
+ stp_enable: true
+ - intf_name: '{{ interface3 }}'
+ edge_port: true
+ link_type: shared
+ guard: none
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 60
+ port_priority: 70
+ stp_enable: true
+ mstp:
+ mst_name: mst1
+ revision: 1
+ max_hop: 3
+ hello_time: 6
+ max_age: 9
+ fwd_delay: 12
+ mst_instances:
+ - mst_id: 1
+ bridge_priority: 2048
+ vlans:
+ - 1
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 60
+ port_priority: 65
+ - mst_id: 2
+ bridge_priority: 1024
+ vlans:
+ - 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 50
+ port_priority: 55
+ - name: test_case_02
+ description: Replace at mst instance and interface level
+ state: replaced
+ input:
+ mstp:
+ mst_instances:
+ - mst_id: 1
+ bridge_priority: 1024
+ vlans:
+ - 2-3
+ - mst_id: 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 52
+ port_priority: 57
+ - intf_name: '{{ interface3 }}'
+ cost: 40
+ port_priority: 45
+ - mst_id: 3
+ bridge_priority: 3072
+ - name: test_case_03
+ description: Replace at mstp level
+ state: replaced
+ input:
+ mstp:
+ mst_name: mst2
+ revision: 2
+ max_hop: 2
+ hello_time: 8
+ max_age: 10
+ fwd_delay: 14
+ - name: test_case_04
+ description: Modify mst configuration
+ state: merged
+ input:
+ mstp:
+ mst_name: mst1
+ revision: 3
+ max_hop: 3
+ hello_time: 9
+ max_age: 11
+ fwd_delay: 15
+ mst_instances:
+ - mst_id: 1
+ vlans:
+ - 1
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 70
+ port_priority: 75
+ - mst_id: 2
+ bridge_priority: 2048
+ vlans:
+ - 1-3
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 52
+ port_priority: 57
+ - intf_name: '{{ interface3 }}'
+ cost: 45
+ port_priority: 47
+ - name: test_case_05
+ description: Delete mstp attributes
+ state: deleted
+ input:
+ mstp:
+ mst_name: mst1
+ revision: 3
+ max_hop: 3
+ hello_time: 9
+ max_age: 11
+ fwd_delay: 15
+ mst_instances:
+ - mst_id: 1
+ - mst_id: 2
+ bridge_priority: 2048
+ vlans:
+ - 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 52
+ port_priority: 57
+ - intf_name: '{{ interface3 }}'
+ - name: test_case_06
+ description: Delete global mstp configuration
+ state: deleted
+ input:
+ global:
+ enabled_protocol: mst
+ - name: test_case_07
+ description: Add STP configuration for pvst protocol
+ state: merged
+ input:
+ global:
+ enabled_protocol: pvst
+ bpdu_filter: true
+ root_guard_timeout: 25
+ portfast: true
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ pvst:
+ - vlan_id: 1
+ hello_time: 4
+ max_age: 6
+ fwd_delay: 8
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 10
+ port_priority: 50
+ - vlan_id: 2
+ hello_time: 5
+ max_age: 7
+ fwd_delay: 9
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 20
+ port_priority: 60
+ - intf_name: '{{ interface3 }}'
+ cost: 30
+ port_priority: 70
+ - name: test_case_08
+ description: Replace pvst configuration
+ state: replaced
+ input:
+ pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ - vlan_id: 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 2
+ port_priority: 60
+ - intf_name: '{{ interface3 }}'
+ cost: 31
+ port_priority: 71
+ - name: test_case_09
+ description: Modify pvst configuration
+ state: merged
+ input:
+ pvst:
+ - vlan_id: 1
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 11
+ port_priority: 51
+ - vlan_id: 2
+ hello_time: 3
+ max_age: 9
+ fwd_delay: 11
+ bridge_priority: 4096
+ - name: test_case_10
+ description: Delete pvst attributes
+ state: deleted
+ input:
+ pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 11
+ - vlan_id: 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ - intf_name: '{{ interface3 }}'
+ cost: 31
+ port_priority: 71
+ - name: test_case_11
+ description: Delete pvst and global pvst configuration
+ state: deleted
+ input:
+ global:
+ enabled_protocol: pvst
+ - name: test_case_12
+ description: Add STP configuration for rapid-pvst protocol
+ state: merged
+ input:
+ global:
+ enabled_protocol: rapid_pvst
+ bpdu_filter: true
+ root_guard_timeout: 25
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ rapid_pvst:
+ - vlan_id: 1
+ hello_time: 4
+ max_age: 6
+ fwd_delay: 8
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 10
+ port_priority: 50
+ - vlan_id: 2
+ hello_time: 5
+ max_age: 7
+ fwd_delay: 9
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 20
+ port_priority: 60
+ - intf_name: '{{ interface3 }}'
+ cost: 30
+ port_priority: 70
+ - name: test_case_13
+ description: Replace rapid-pvst configuration
+ state: replaced
+ input:
+ rapid_pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ - vlan_id: 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 2
+ port_priority: 60
+ - intf_name: '{{ interface3 }}'
+ cost: 31
+ port_priority: 71
+ - name: test_case_14
+ description: Modify rapid-pvst configuration
+ state: merged
+ input:
+ rapid_pvst:
+ - vlan_id: 1
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 11
+ port_priority: 51
+ - vlan_id: 2
+ hello_time: 3
+ max_age: 9
+ fwd_delay: 11
+ bridge_priority: 4096
+ - name: test_case_15
+ description: Delete rapid-pvst attributes
+ state: deleted
+ input:
+ rapid_pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 11
+ port_priority: 51
+ - vlan_id: 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ - intf_name: '{{ interface3 }}'
+ cost: 31
+ port_priority: 71
+ - name: test_case_16
+ description: Delete rapid-pvst and global pvst configuration
+ state: deleted
+ input:
+ global:
+ enabled_protocol: rapid_pvst
+ - name: test_case_17
+ description: Configure STP global and interfaces
+ state: merged
+ input:
+ global:
+ enabled_protocol: mst
+ disabled_vlans:
+ - 4
+ bpdu_filter: true
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ edge_port: true
+ link_type: shared
+ guard: loop
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 20
+ port_priority: 30
+ stp_enable: true
+ - intf_name: '{{ interface2 }}'
+ edge_port: true
+ link_type: point-to-point
+ guard: root
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 40
+ port_priority: 50
+ stp_enable: true
+ - intf_name: '{{ interface3 }}'
+ edge_port: true
+ link_type: shared
+ guard: none
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 60
+ port_priority: 70
+ stp_enable: true
+ - name: test_case_18
+ description: Modify STP interface
+ state: merged
+ input:
+ global:
+ disabled_vlans:
+ - 4-6
+ bpdu_filter: false
+ hello_time: 7
+ max_age: 20
+ fwd_delay: 30
+ bridge_priority: 8192
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ edge_port: false
+ link_type: point-to-point
+ guard: root
+ bpdu_guard: false
+ bpdu_filter: false
+ uplink_fast: false
+ shutdown: false
+ cost: 28
+ port_priority: 32
+ stp_enable: false
+ - name: test_case_19
+ description: Replace STP interface
+ state: replaced
+ input:
+ interfaces:
+ - intf_name: '{{ interface3 }}'
+ cost: 80
+ port_priority: 90
+ - name: test_case_20
+ description: Delete STP interfaces
+ state: deleted
+ input:
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ edge_port: true
+ link_type: point-to-point
+ guard: root
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 40
+ port_priority: 50
+ stp_enable: true
+ - intf_name: '{{ interface3 }}'
+ - name: test_case_21
+ description: Replace at global level
+ state: replaced
+ input:
+ global:
+ enabled_protocol: mst
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ - intf_name: '{{ interface2 }}'
+ - intf_name: '{{ interface3 }}'
+ - name: test_case_22
+ description: Override STP configuration
+ state: overridden
+ input:
+ global:
+ enabled_protocol: mst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 15
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ edge_port: true
+ link_type: shared
+ guard: loop
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 20
+ port_priority: 30
+ stp_enable: true
+ - intf_name: '{{ interface2 }}'
+ edge_port: true
+ link_type: point-to-point
+ guard: root
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 40
+ port_priority: 50
+ stp_enable: true
+ - intf_name: '{{ interface3 }}'
+ edge_port: true
+ link_type: shared
+ guard: none
+ bpdu_guard: true
+ bpdu_filter: true
+ uplink_fast: true
+ shutdown: true
+ cost: 60
+ port_priority: 70
+ stp_enable: true
+ mstp:
+ mst_name: mst1
+ revision: 1
+ max_hop: 3
+ hello_time: 6
+ max_age: 9
+ fwd_delay: 12
+ mst_instances:
+ - mst_id: 1
+ bridge_priority: 2048
+ vlans:
+ - 1
+ interfaces:
+ - intf_name: '{{ interface1 }}'
+ cost: 60
+ port_priority: 65
+ - mst_id: 2
+ bridge_priority: 1024
+ vlans:
+ - 2
+ interfaces:
+ - intf_name: '{{ interface2 }}'
+ cost: 50
+ port_priority: 55
+ - name: test_case_23
+ description: Delete all STP configuration
+ state: deleted
+ input: {}
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/meta/main.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/meta/main.yaml
new file mode 100644
index 000000000..0b356217e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/meta/main.yaml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..5ccc027bb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/cleanup_tests.yaml
@@ -0,0 +1,11 @@
+- name: Delete L2 interfaces configuration
+ sonic_l2_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete VLANs configuration
+ sonic_vlans:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/main.yml
new file mode 100644
index 000000000..470df5a0b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/main.yml
@@ -0,0 +1,14 @@
+- debug: msg="sonic_stp Test started ..."
+
+- set_fact:
+ base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
+
+- name: Preparations test
+ include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started ..."
+ include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: Cleanup tests
+ include_tasks: cleanup_tests.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..1c402e232
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/preparation_tests.yaml
@@ -0,0 +1,47 @@
+- name: Delete STP configuration
+ sonic_stp:
+ config: {}
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete L2 interfaces configuration
+ sonic_l2_interfaces:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Delete VLANs configuration
+ sonic_vlans:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: Add VLANs
+ sonic_vlans:
+ config:
+ - vlan_id: 1
+ - vlan_id: 2
+ - vlan_id: 3
+ - vlan_id: 4
+ - vlan_id: 5
+ - vlan_id: 6
+ state: merged
+ ignore_errors: yes
+
+- name: Add L2 interfaces configuration
+ sonic_l2_interfaces:
+ config:
+ - name: '{{ interface1 }}'
+ trunk:
+ allowed_vlans:
+ - vlan: 1-3
+ - name: '{{ interface2 }}'
+ trunk:
+ allowed_vlans:
+ - vlan: 1-3
+ - name: '{{ interface3 }}'
+ trunk:
+ allowed_vlans:
+ - vlan: 1-3
+ state: merged
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template.yaml
new file mode 100644
index 000000000..11f787418
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template.yaml
@@ -0,0 +1,21 @@
+- name: "{{ item.name}} , {{ item.description}}"
+ sonic_stp:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name}} , {{ item.description}} Idempotent"
+ sonic_stp:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
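
Each case in this template is applied twice: the first run registers action_task_output and the second re-applies the same input and registers idempotent_task_output, with both results handed to the common reporting role. Where that role is unavailable, an equivalent inline check might look like the hedged sketch below (the exact verification performed by idempotent.facts.report.yaml is assumed, not shown here):

```yaml
# Hypothetical inline assertion; the common role is assumed to perform a
# comparable "no changes on re-apply" check.
- name: Assert the second run made no changes
  ansible.builtin.assert:
    that:
      - idempotent_task_output is not changed
    fail_msg: "{{ item.name }} is not idempotent for state={{ item.state }}"
```
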
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template_del.yaml
index bbf2331df..bbf2331df 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_static_routes/tasks/tasks_template_del.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_stp/tasks/tasks_template_del.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml
index c5146db97..4f0bda521 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_system/defaults/main.yml
@@ -25,7 +25,7 @@ tests:
anycast_address:
mac_address: 00:09:5B:EC:EE:F2
- - name: del_test_case_01
+ - name: test_case_04
description: Delete System properties
state: deleted
input:
@@ -34,13 +34,41 @@ tests:
anycast_address:
ipv4: false
- - name: del_test_case_02
+ - name: test_case_05
description: Delete System associated anycast mac address
state: deleted
input:
anycast_address:
mac_address: 00:09:5B:EC:EE:F2
+ - name: test_case_06
+ description: Override System configuration
+ state: overridden
+ input:
+ hostname: SONIC-ov
+ interface_naming: standard
+ anycast_address:
+ ipv4: true
+ mac_address: 00:09:5B:EC:EE:F2
+
+ - name: test_case_07
+ description: Replace some System configuration
+ state: replaced
+ input:
+ anycast_address:
+ ipv4: true
+ ipv6: false
+ mac_address: 00:09:5B:EC:EE:F2
+
+ - name: test_case_08
+ description: Replace System configuration
+ state: replaced
+ input:
+ hostname: SONIC
+ interface_naming: native
+ anycast_address:
+ ipv4: true
+
test_delete_all:
- name: del_all_test_case_01
description: Delete System properties
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml
index 0f9b3e3c4..e964fcf60 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_tacacs_server/defaults/main.yml
@@ -64,7 +64,7 @@ tests:
servers:
host:
- name: test_case_05
- description: merge parameter of tacacs servers
+ description: Merge parameter of tacacs servers
state: merged
input:
servers:
@@ -85,7 +85,67 @@ tests:
timeout: 14
priority: 4
-test_delete_all:
- name: test_case_06
- description: delete all the configurations of tacacs server
+ description: Replace some parameter of tacacs servers
+ state: replaced
+ input:
+ auth_type: mschap
+ source_interface: "{{ interface3 }}"
+ timeout: 36
+ servers:
+ host:
+ - name: my_host
+ auth_type: chap
+ port: 55
+ timeout: 12
+ priority: 3
+
+ - name: test_case_07
+ description: Replace hosts of tacacs servers
+ state: replaced
+ input:
+ auth_type: mschap
+ source_interface: "{{ interface3 }}"
+ timeout: 36
+ servers:
+ host:
+ - name: my_host
+ auth_type: chap
+ port: 55
+ timeout: 12
+ priority: 3
+ - name: 20.21.22.23
+ auth_type: login
+ port: 50
+ timeout: 38
+ priority: 4
+ - name: 18.21.22.23
+ auth_type: chap
+ port: 20
+ timeout: 19
+ priority: 8
+
+ - name: test_case_08
+ description: Override parameter of tacacs servers
+ state: overridden
+ input:
+ auth_type: chap
+ source_interface: "{{ interface2 }}"
+ timeout: 20
+ servers:
+ host:
+ - name: 10.11.11.11
+ auth_type: pap
+ port: 55
+ timeout: 12
+ priority: 3
+ - name: your_host
+ auth_type: login
+ port: 50
+ timeout: 30
+ priority: 6
+
+test_delete_all:
+ - name: test_case_09
+ description: Delete all the configurations of tacacs server
state: deleted
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml
index 342cb2d40..069b1d729 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/defaults/main.yml
@@ -2,23 +2,6 @@
ansible_connection: httpapi
module_name: users
-tests_cli:
- - name: cli_test_case_01
- description: Configure users
- state: merged
- expected_cli:
- - username sysadmin password
- - username operator1 password
- input:
- - name: sysadmin
- role: admin
- password: admin
- update_password: always
- - name: operator1
- role: operator
- password: admin
- update_password: always
-
tests_single_run:
- name: test_case_sr_01
description: Configure users
@@ -53,9 +36,8 @@ tests_single_run:
- name: user2
role: operator
password: admin
- update_password: on_create
-
-
+ update_password: on_create
+
tests:
- name: test_case_01
description: Configure users
@@ -74,7 +56,7 @@ tests:
password: admin
update_password: on_create
- name: test_case_02
- description: Configure users role
+ description: Update user roles
state: merged
input:
- name: user1
@@ -86,11 +68,43 @@ tests:
password: admin
update_password: on_create
- name: test_case_03
+ description: Replace user roles
+ state: replaced
+ input:
+ - name: user2
+ role: secadmin
+ password: admin
+ update_password: on_create
+ - name: user3
+ role: netadmin
+ password: admin
+ update_password: on_create
+ - name: test_case_04
+ description: Override users configuration with existing user
+ state: overridden
+ input:
+ - name: user2
+ role: secadmin
+ password: admin
+ update_password: on_create
+ - name: test_case_05
+ description: Override users configuration
+ state: overridden
+ input:
+ - name: user4
+ role: admin
+ password: admin
+ update_password: on_create
+ - name: user5
+ role: operator
+ password: admin
+ update_password: on_create
+ - name: test_case_06
description: Delete user
state: deleted
input:
- - name: user1
- - name: test_case_04
- description: Update users role
+ - name: user4
+ - name: test_case_07
+ description: Delete all users configurations
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml
deleted file mode 100644
index 1e9bfc240..000000000
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/cli_tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: Test sonic multiple command with wait_for
- vars:
- ansible_connection: network_cli
- sonic_command:
- commands:
- - show running-configuration
- register: cli_contains_output
-
-- set_fact:
- cli_contains_condition: "{{ 'username operator1 password' in cli_contains_output.stdout.0 }}"
-
-- import_role:
- name: common
-    tasks_from: cli.contains.test.facts.report.yaml
\ No newline at end of file
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml
index 987174b91..5b1b54c77 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_users/tasks/main.yml
@@ -3,9 +3,6 @@
- set_fact:
base_cfg_path: "{{ playbook_dir + '/roles/' + role_name + '/' + 'templates/' }}"
-# - name: CLI test test
-# include_tasks: cli_tests.yaml
-
- name: Preparations test
include_tasks: preparation_tests.yaml
@@ -19,21 +16,3 @@
- name: "Test {{ module_name }} started ..."
include_tasks: tasks_template.yaml
loop: "{{ tests }}"
-
-# - name: "Test CLI validation started ..."
-# include_role:
-# name: common
-# tasks_from: cli_tasks_template.yaml
-# loop: "{{ tests_cli }}"
-
-# - name: Preparations test
-# include_tasks: preparation_tests.yaml
-
-
-
-# - name: Preparations test
-# include_tasks: preparation_tests.yaml
-
-# - name: Display all variables/facts known for a host
-# debug:
-# var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/defaults/main.yml
new file mode 100644
index 000000000..1ba48941e
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/defaults/main.yml
@@ -0,0 +1,195 @@
+---
+
+po1: PortChannel2
+po2: PortChannel4
+
+ansible_connection: httpapi
+module_name: vlan_mapping
+
+preparations_tests:
+ delete_port_configurations:
+ - name: "{{ interface2 }}"
+ - name: "{{ interface4 }}"
+ - name: "{{ interface6 }}"
+ init_portchannel:
+ - "interface Portchannel 2"
+ - "interface Portchannel 4"
+
+tests:
+ - name: test_case_01
+ description: Add vlan mapping configurations
+ state: merged
+ input:
+ - name: '{{ interface2 }}'
+ mapping:
+ - service_vlan: 2755
+ vlan_ids:
+ - 392
+ dot1q_tunnel: false
+ inner_vlan: 590
+ - service_vlan: 2855
+ vlan_ids:
+ - 393
+ - 395
+ dot1q_tunnel: true
+ priority: 6
+ - name: test_case_02
+ description: Add vlan mapping configurations
+ state: merged
+ input:
+ - name: '{{ interface4 }}'
+ mapping:
+ - service_vlan: 2567
+ vlan_ids:
+ - 300
+ dot1q_tunnel: true
+ priority: 3
+ - service_vlan: 2436
+ vlan_ids:
+ - 400-402
+ - 412
+ - 420
+ - 422
+ - 430-431
+ dot1q_tunnel: true
+ - name: '{{ po1 }}'
+ mapping:
+ - service_vlan: 3000
+ dot1q_tunnel: true
+ vlan_ids:
+ - 506-512
+ - 561
+ priority: 5
+ - name: test_case_03
+ description: Update existing vlan mapping configurations
+ state: merged
+ input:
+ - name: '{{ interface2 }}'
+ mapping:
+ - service_vlan: 2755
+ priority: 3
+ - service_vlan: 2855
+ vlan_ids:
+ - 397
+ - 399
+ dot1q_tunnel: true
+ - name: '{{ po1 }}'
+ mapping:
+ - service_vlan: 3000
+ dot1q_tunnel: true
+ vlan_ids:
+ - 506-514
+ - 501
+ - 561
+ priority: 1
+ - name: test_case_04
+ description: Update existing and add new vlan mapping configurations
+ state: merged
+ input:
+ - name: '{{ interface2 }}'
+ mapping:
+ - service_vlan: 2758
+ vlan_ids:
+ - 2857
+ - service_vlan: 2855
+ priority: 2
+ dot1q_tunnel: true
+ - name: '{{ po2 }}'
+ mapping:
+ - service_vlan: 3200
+ dot1q_tunnel: true
+ vlan_ids:
+ - 576-584
+ - 591
+ - name: test_case_05
+ description: Replace vlan mapping configurations
+ state: replaced
+ input:
+ - name: '{{ interface2 }}'
+ mapping:
+ - service_vlan: 2768
+ vlan_ids:
+ - 2923
+ - name: '{{ interface4 }}'
+ mapping:
+ - service_vlan: 2567
+ vlan_ids:
+ - 310
+ - service_vlan: 2436
+ vlan_ids:
+ - 400-402
+ - 422
+ - 430-431
+ dot1q_tunnel: true
+ priority: 1
+ - name: '{{ po1 }}'
+ mapping:
+ - service_vlan: 3000
+ dot1q_tunnel: true
+ vlan_ids:
+ - 506-512
+ - 561
+ priority: 7
+ - name: test_case_06
+ description: Delete vlan mapping configurations
+ state: deleted
+ input:
+ - name: '{{ interface2 }}'
+ - name: '{{ interface4 }}'
+ mapping:
+ - service_vlan: 2567
+ - service_vlan: 2436
+ vlan_ids:
+ - 422
+ - 400-405
+ priority: 1
+ - name: '{{ po1 }}'
+ mapping:
+ - service_vlan: 3000
+ priority: 7
+ - name: '{{ po2 }}'
+ mapping:
+ - service_vlan: 3200
+ dot1q_tunnel: true
+ vlan_ids:
+ - 578-582
+ - name: test_case_07
+ description: Add vlan mapping configurations
+ state: merged
+ input:
+ - name: '{{ interface2 }}'
+ mapping:
+ - service_vlan: 2755
+ vlan_ids:
+ - 392
+ dot1q_tunnel: false
+ inner_vlan: 590
+ - service_vlan: 2855
+ vlan_ids:
+ - 393
+ - 395
+ dot1q_tunnel: true
+ priority: 6
+ - name: test_case_08
+ description: Override vlan mapping configurations
+ state: overridden
+ input:
+ - name: '{{ interface2 }}'
+ mapping:
+ - service_vlan: 2754
+ vlan_ids:
+ - 392
+ dot1q_tunnel: false
+ inner_vlan: 590
+ - name: '{{ interface6 }}'
+ mapping:
+ - service_vlan: 2700
+ vlan_ids:
+ - 132-145
+ - 120
+ dot1q_tunnel: true
+ priority: 3
+ - name: test_case_09
+ description: Delete all vlan mapping configurations
+ state: deleted
+ input: []
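
The mapping entries above pair a service_vlan with one or more vlan_ids (single IDs or ranges such as 400-402), together with the optional dot1q_tunnel, priority and inner_vlan settings. A minimal standalone sketch mirroring test_case_01, where the interface name is an illustrative assumption (the regression role resolves it from inventory variables such as interface2):

```yaml
# Minimal sketch; 'Eth1/2' is an assumed interface name.
- name: Map customer VLANs to service VLANs on one interface
  dellemc.enterprise_sonic.sonic_vlan_mapping:
    config:
      - name: Eth1/2
        mapping:
          - service_vlan: 2755
            vlan_ids:
              - 392
            dot1q_tunnel: false
            inner_vlan: 590
          - service_vlan: 2855
            vlan_ids:
              - 393
              - 395
            dot1q_tunnel: true
            priority: 6
    state: merged
```
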
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/meta/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/meta/main.yml
new file mode 100644
index 000000000..d0ceaf6f5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/meta/main.yml
@@ -0,0 +1,5 @@
+---
+collections:
+ - dellemc.enterprise_sonic
+dependencies:
+ - { role: common }
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/cleanup_tests.yaml
new file mode 100644
index 000000000..4d7828e08
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/cleanup_tests.yaml
@@ -0,0 +1,6 @@
+---
+- name: Delete vlan mapping configurations
+ dellemc.enterprise_sonic.sonic_vlan_mapping:
+ config: []
+ state: deleted
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/main.yml
new file mode 100644
index 000000000..45178b754
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/main.yml
@@ -0,0 +1,17 @@
+---
+- ansible.builtin.debug:
+ msg: "sonic_vlan_mapping Test started ..."
+
+- name: "Preparations for {{ module_name }}"
+ ansible.builtin.include_tasks: preparation_tests.yaml
+
+- name: "Test {{ module_name }} started"
+ ansible.builtin.include_tasks: tasks_template.yaml
+ loop: "{{ tests }}"
+
+- name: "Cleanup of {{ module_name }}"
+ ansible.builtin.include_tasks: cleanup_tests.yaml
+
+- name: Display all variables/facts known for a host
+ ansible.builtin.debug:
+ var: hostvars[inventory_hostname].ansible_facts.test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/preparation_tests.yaml
new file mode 100644
index 000000000..c73996e30
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/preparation_tests.yaml
@@ -0,0 +1,21 @@
+---
+- name: Delete old vlan mapping configurations
+ dellemc.enterprise_sonic.sonic_vlan_mapping:
+ config: []
+ state: deleted
+ ignore_errors: yes
+
+- name: "initialize default interfaces"
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ default_interface_cli }}"
+ register: output
+ ignore_errors: yes
+
+- name: "initialize init_portchannel"
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.init_portchannel }}"
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/tasks_template.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/tasks_template.yaml
new file mode 100644
index 000000000..1e64a42bf
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlan_mapping/tasks/tasks_template.yaml
@@ -0,0 +1,22 @@
+---
+- name: "{{ item.name }} , {{ item.description }}"
+ dellemc.enterprise_sonic.sonic_vlan_mapping:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: action_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: action.facts.report.yaml
+
+- name: "{{ item.name }} , {{ item.description }} Idempotent"
+ dellemc.enterprise_sonic.sonic_vlan_mapping:
+ config: "{{ item.input }}"
+ state: "{{ item.state }}"
+ register: idempotent_task_output
+ ignore_errors: yes
+
+- ansible.builtin.import_role:
+ name: common
+ tasks_from: idempotent.facts.report.yaml
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml
index b7deed026..62f6dcc74 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vlans/defaults/main.yml
@@ -8,7 +8,9 @@ preparations_tests:
- vlan_id: 501
- vlan_id: 502
- vlan_id: 503
-
+ - vlan_id: 504
+ - vlan_id: 505
+ - vlan_id: 506
tests:
# merge test cases started
@@ -36,7 +38,54 @@ tests:
- vlan_id: 500
description: "modified vlan500 description"
- vlan_id: 501
+ # replace test cases started
- name: test_case_04
+ description: Replace VLANs
+ state: replaced
+ input:
+ - vlan_id: 500
+ - vlan_id: 502
+ description: "modified vlan502 description"
+ # overridden test cases started
+ - name: test_case_05
+ description: Override VLANs configuration
+ state: overridden
+ input:
+ - vlan_id: 501
+ - vlan_id: 502
+ description: "overridden vlan502 description"
+ - vlan_id: 504
+ - name: test_case_06
+ description: Override VLANs
+ state: overridden
+ input:
+ - vlan_id: 505
+ - vlan_id: 506
+ description: "overridden vlan506 description"
+ - name: test_case_07
+      description: Create VLANs for the following replaced and overridden cases
+ state: merged
+ input:
+ - vlan_id: 510
+ description: "new vlan510 description"
+ - vlan_id: 511
+ description: "new vlan511 description"
+ - vlan_id: 512
+ description: "new vlan512 description"
+ # more replace test cases started
+ - name: test_case_08
+ description: Replace VLANs
+ state: replaced
+ input:
+ - vlan_id: 510
+ # more overridden test cases started
+ - name: test_case_09
+ description: Override VLANs configuration
+ state: overridden
+ input:
+ - vlan_id: 511
+ # Clean up
+ - name: test_case_10
description: Delete specific trunk VLANs
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml
index afbfd754c..e4f79f4a8 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/defaults/main.yml
@@ -4,6 +4,7 @@ module_name: vrf
vrf_1: VrfReg1
vrf_2: VrfReg2
+vrf_3: VrfReg3
po1: Portchannel 100
vlan1: Vlan 100
@@ -13,6 +14,10 @@ po2: Portchannel 101
vlan2: Vlan 101
looopback2: Loopback 101
+po3: Portchannel 103
+vlan3: Vlan 103
+looopback3: Loopback 103
+
preparations_tests:
delete_interfaces:
- "no interface {{ po1 }}"
@@ -21,6 +26,9 @@ preparations_tests:
- "no interface {{ po2 }}"
- "no interface {{ vlan2 }}"
- "no interface {{ looopback2 }}"
+ - "no interface {{ po3 }}"
+ - "no interface {{ vlan3 }}"
+ - "no interface {{ looopback3 }}"
init_interfaces:
- "interface {{ po1 }}"
- "interface {{ vlan1 }}"
@@ -28,20 +36,9 @@ preparations_tests:
- "interface {{ po2 }}"
- "interface {{ vlan2 }}"
- "interface {{ looopback2 }}"
-
-tests_cli:
- - name: cli_test_case_01
- description: creates VRF properties
- state: merged
- input:
- - name: "{{ vrf_1 }}"
- members:
- interfaces:
- - name: "{{ interface1 }}"
- - name: "{{ po1 }}"
- - name: "{{ vlan1 }}"
- - name: "{{ looopback1 }}"
- - name: "{{ vrf_2 }}"
+ - "interface {{ po3 }}"
+ - "interface {{ vlan3 }}"
+ - "interface {{ looopback3 }}"
tests:
- name: test_case_01
@@ -87,7 +84,7 @@ tests:
- name: "{{ vlan2 }}"
- name: "{{ looopback2 }}"
- - name: del_test_case_04
+ - name: test_case_04
description: Delete VRF properties
state: deleted
input:
@@ -101,7 +98,7 @@ tests:
- name: "{{ interface2 }}"
- name: "{{ po2 }}"
- - name: del_test_case_05
+ - name: test_case_05
description: Delete VRF properties
state: deleted
input:
@@ -109,7 +106,7 @@ tests:
members:
interfaces:
- - name: del_test_case_06
+ - name: test_case_06
description: Delete VRF properties
state: deleted
input:
@@ -133,7 +130,58 @@ tests:
- name: "{{ vlan2 }}"
- name: "{{ looopback2 }}"
- - name: del_test_case_08
+ - name: test_case_08
+    description: Override VRF properties
+ state: overridden
+ input:
+ - name: "{{ vrf_3 }}"
+ members:
+ interfaces:
+ - name: "{{ interface3 }}"
+ - name: "{{ vlan3 }}"
+ - name: "{{ looopback3 }}"
+ - name: "{{ vrf_1 }}"
+ members:
+ interfaces:
+ - name: "{{ interface1 }}"
+ - name: "{{ po3 }}"
+
+ - name: test_case_09
+ description: Replace VRF properties
+ state: replaced
+ input:
+ - name: "{{ vrf_3 }}"
+ members:
+ interfaces:
+ - name: "{{ interface3 }}"
+ - name: "{{ looopback3 }}"
+ - name: "{{ vrf_1 }}"
+ members:
+ interfaces:
+ - name: "{{ interface2 }}"
+ - name: "{{ po2 }}"
+
+ - name: test_case_10
+ description: Replace VRF properties with new VRF
+ state: replaced
+ input:
+ - name: "{{ vrf_3 }}"
+ members:
+ interfaces:
+ - name: "{{ interface3 }}"
+ - name: "{{ vlan3 }}"
+ - name: "{{ vrf_2 }}"
+ members:
+ interfaces:
+ - name: "{{ interface1 }}"
+ - name: "{{ vlan2 }}"
+ - name: "{{ vrf_1 }}"
+ members:
+ interfaces:
+ - name: "{{ interface2 }}"
+ - name: "{{ po2 }}"
+
+ - name: test_case_11
description: Delete VRF properties
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml
deleted file mode 100644
index c6a26a684..000000000
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/cleanup_tests.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-- name: Deletes old vrf
- sonic_vrfs:
- config: []
- state: deleted
- ignore_errors: yes
-
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml
index 8e165bc67..88ceae783 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/main.yml
@@ -6,29 +6,14 @@
- name: Preparations test
include_tasks: preparation_tests.yaml
-- name: "Test {{ module_name }} CLI validation started ..."
- include_tasks: tasks_template.yaml
- loop: "{{ tests_cli }}"
-
-- name: "Test CLI validation started ..."
- include_role:
- name: common
- tasks_from: cli_tasks_template.yaml
- loop: "{{ tests_cli }}"
-
-- name: Preparations test
- include_tasks: preparation_tests.yaml
-
-- name: Cleanup tests
- include_tasks: cleanup_tests.yaml
-
- name: "Test {{ module_name }} started ..."
include_tasks: tasks_template.yaml
loop: "{{ tests }}"
-- name: Cleanup tests
- include_tasks: cleanup_tests.yaml
-
-# - name: Display all variables/facts known for a host
-# debug:
-# var: hostvars[inventory_hostname].ansible_facts.test_reports
+- name: "Delete interfaces"
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.delete_interfaces }}"
+ register: output
+ ignore_errors: yes
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml
index c6a33af48..b05800722 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/tasks/preparation_tests.yaml
@@ -1,13 +1,3 @@
-- name: Delete existing mclag
- sonic_mclag:
- config:
- state: deleted
- ignore_errors: yes
-- name: Deletes old vxlans
- sonic_vxlans:
- config: []
- state: deleted
- ignore_errors: yes
- name: "initialize default interfaces"
vars:
ansible_connection: network_cli
@@ -15,10 +5,12 @@
commands: "{{ default_interface_cli }}"
register: output
ignore_errors: yes
-- name: Deletes old VRFs
- sonic_vrfs:
- config: []
- state: deleted
+- name: "delete interfaces"
+ vars:
+ ansible_connection: network_cli
+ sonic_config:
+ commands: "{{ preparations_tests.delete_interfaces }}"
+ register: output
ignore_errors: yes
- name: "initialize interfaces"
vars:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg
index 35cd6ded0..90695d538 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vrfs/templates/cli_test_case_01.cfg
@@ -4,7 +4,7 @@ interface Vlan100
ip vrf forwarding VrfReg1
interface Loopback 100
ip vrf forwarding VrfReg1
-interface PortChannel 100
+interface PortChannel100
ip vrf forwarding VrfReg1
interface {{ interface1 }}
ip vrf forwarding VrfReg1
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml
index f2687a09d..6a330afa7 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/sonic_vxlan/defaults/main.yml
@@ -45,7 +45,7 @@ tests_cli:
tests:
- name: test_case_01
- description: creates Vxlan properties
+    description: Create VXLAN properties
state: merged
input:
- name: vtep1
@@ -56,12 +56,12 @@ tests:
- name: vtep1
source_ip: 1.1.1.1
primary_ip: 2.2.2.2
- evpn_nvo: nvo5
- name: test_case_03
description: Update VRF properties
state: merged
input:
- name: vtep1
+ evpn_nvo: nvo5
vlan_map:
- vni: 101
vlan: "{{vlan1}}"
@@ -87,35 +87,67 @@ tests:
vrf_map:
- vni: 102
vrf: "{{vrf2}}"
- - name: del_test_case_06
- description: Delete VRF properties
- state: deleted
+ - name: test_case_06
+ description: Override VXLAN properties
+ state: overridden
input:
- - name: vtep1
- source_ip: 1.1.1.1
- primary_ip: 2.2.2.2
+ - name: vtep2
+ source_ip: 3.3.3.3
+ primary_ip: 4.4.4.4
evpn_nvo: nvo5
vlan_map:
- vni: 101
vlan: "{{vlan1}}"
- - vni: 102
+ vrf_map:
+ - vni: 101
+ vrf: "{{vrf1}}"
+ - name: test_case_07
+ description: Replace VXLAN properties
+ state: replaced
+ input:
+ - name: vtep2
+ source_ip: 5.5.5.5
+ primary_ip: 6.6.6.6
+ evpn_nvo: nvo6
+ vlan_map:
+ - vni: 101
vlan: "{{vlan2}}"
+ - vni: 102
+ vlan: "{{vlan1}}"
vrf_map:
- vni: 101
+ vrf: "{{vrf2}}"
+ - vni: 102
vrf: "{{vrf1}}"
+ - name: test_case_08
+    description: Delete VXLAN properties
+ state: deleted
+ input:
+ - name: vtep2
+ source_ip: 5.5.5.5
+ primary_ip: 6.6.6.6
+ evpn_nvo: nvo6
+ vlan_map:
+ - vni: 101
+ vlan: "{{vlan2}}"
- vni: 102
+ vlan: "{{vlan1}}"
+ vrf_map:
+ - vni: 101
vrf: "{{vrf2}}"
- - name: del_test_case_07
+ - vni: 102
+ vrf: "{{vrf1}}"
+ - name: test_case_09
description: Delete VRF properties
state: deleted
input:
- - name: vtep1
- - name: test_case_08
- description: Recreate Vxlan
+ - name: vtep2
+ - name: test_case_10
+ description: Recreate VXLAN
state: merged
input:
- name: vtep1
- - name: del_test_case_09
+ - name: test_case_11
description: Delete VRF properties
state: deleted
input: []
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2 b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2
index 0695c86bc..6937eb8a9 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/roles/test_reports/templates/regression_html_report.j2
@@ -176,23 +176,21 @@ color: red;
</style>
<h2 style="color:blue;">Regression report </h2>
-<h3 style="color:blue;"> Date: <i style="color:black;">{{ date}} </i> Time: <i style="color:black;">{{ time }}</i></h3>
-
+<h3 style="color:blue;"> Date: <i style="color:black;">{{ date }} </i> Time: <i style="color:black;">{{ time }}</i></h3>
+<h3 style="color:blue;">Summary report </h3>
+<table class="table table-striped table-bordered">
+ <thead>
+ <tr>
+ <th>Module</th>
+ <th>Total testcases</th>
+ <th>Passed</th>
+ <th>Failed</th>
+ </tr>
+ </thead>
+ <tbody>
{% set complete_passed = [0] %}
{% set complete_failed = [0] %}
{% set complete_total = [0] %}
-<h3 style="color:blue;">Summary report </h3>
-<table class="table table-striped table-bordered">
- <thead>
- <tr>
- <th>Module</th>
- <th>Total testcases</th>
- <th>Passed</th>
- <th>Failed</th>
- </tr>
- </thead>
-<tbody>
-
{% for module_name, test_data_list in ansible_facts.test_reports.items() %}
{% set passed = [0] %}
{% set failed = [0] %}
@@ -200,106 +198,85 @@ color: red;
{% for testcase_name, test_data in test_data_list.items() %}
{% if total.append(total.pop() + 1) %}{% endif %}
{% if complete_total.append(complete_total.pop() + 1) %}{% endif %}
-
{% if 'Passed' in test_data.status %}
{% if passed.append(passed.pop() + 1) %}{% endif %}
{% if complete_passed.append(complete_passed.pop() + 1) %}{% endif %}
{% endif %}
-
{% if 'Failed' in test_data.status %}
{% if failed.append(failed.pop() + 1) %}{% endif %}
{% if complete_failed.append(complete_failed.pop() + 1) %}{% endif %}
{% endif %}
{% endfor %}
-<tr>
-<td>{{ module_name }}</td>
-<td>{{ total[0] }}</td>
-<td>{{ passed[0] }}</td>
-<td>{{ failed[0] }}</td>
-</tr>
+ <tr>
+ <td>{{ module_name }}</td>
+ <td>{{ total[0] }}</td>
+ <td>{{ passed[0] }}</td>
+ <td>{{ failed[0] }}</td>
+ </tr>
{% endfor %}
-</tbody>
+ </tbody>
</table>
<h2> Testcase Complete Summary: Total: {{ complete_total[0] }}, Passed: {{ complete_passed[0] }}, Failed: {{ complete_failed[0] }} </h2>
-
-
</br>
</br>
</br>
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
{% for module_name, test_data_list in ansible_facts.test_reports.items() %}
{% set passed = [0] %}
{% set failed = [0] %}
{% set total = [0] %}
-
{% for testcase_name, test_data in test_data_list.items() %}
{% if total.append(total.pop() + 1) %}{% endif %}
-
{% if 'Passed' in test_data.status %}
{% if passed.append(passed.pop() + 1) %}{% endif %}
{% endif %}
-
{% if 'Failed' in test_data.status %}
{% if failed.append(failed.pop() + 1) %}{% endif %}
{% endif %}
{% endfor %}
-<h3 style="color:blue;">Detailed report of <i style="color:black;"> {{module_name }} </i> </h3>
+<h3 style="color:blue;">Detailed report of <i style="color:black;"> {{ module_name }} </i> </h3>
<h2> Testcase summary: Total: {{ total[0] }}, Passed: {{ passed[0] }}, Failed: {{ failed[0] }} </h2>
<table class="table table-striped table-bordered">
- <thead>
- <tr>
- <th>Testcase name</th>
- <th>Status</th>
- <th>User Input</th>
- <th>Commands</th>
- <th>Before</th>
- <th>After</th>
- <th>Module exception</th>
- </tr>
- </thead>
-<tbody>
+ <thead>
+ <tr>
+ <th>Testcase name</th>
+ <th>Status</th>
+ <th>User Input</th>
+ <th>Commands</th>
+ <th>Before</th>
+ <th>After</th>
+ <th>Module exception</th>
+ </tr>
+ </thead>
+ <tbody>
{% for name, test_data in test_data_list.items() %}
-<tr>
-<td>{{ name}}</td>
-<td>{{ test_data.status | default('Template Error')}}</td>
+ <tr>
+ <td>{{ name }}</td>
+ <td>{{ test_data.status | default('Template Error') }}</td>
{% if 'Passed' in test_data.status %}
-<td></td>
-<td></td>
-<td></td>
-<td></td>
-<td></td>
-<td></td>
+{% if ansible_verbosity >= 3 %}
+ <td><pre>Input: {{ test_data.configs | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>Commands: {{ test_data.commands | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>Before: {{ test_data.before | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>After: {{ test_data.after | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>Error: {{ test_data.module_stderr | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+{% else %}
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+ <td></td>
+{% endif %}
{% else %}
-<td>Input: {{ test_data.configs | default('Template Error') | to_nice_json(indent=3) }}</td>
-<td>Commands: {{ test_data.commands | default('Template Error') | to_nice_json(indent=3) }}</td>
-<td>Before: {{ test_data.before | default('Template Error') | to_nice_json(indent=3) }}</td>
-<td>After: {{ test_data.after | default('Template Error') | to_nice_json(indent=3) }}</td>
-<td>Error: {{ test_data.module_stderr | default('Template Error') | to_nice_json(indent=3) }}</td>
+ <td><pre>Input: {{ test_data.configs | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>Commands: {{ test_data.commands | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>Before: {{ test_data.before | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>After: {{ test_data.after | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
+ <td><pre>Error: {{ test_data.module_stderr | default('Template Error') | to_nice_json(indent=3) }}</pre></td>
{% endif %}
-</tr>
+ </tr>
{% endfor %}
-</tbody>
+ </tbody>
</table>
{% endfor %}
@@ -308,21 +285,21 @@ color: red;
<script>
window.onload = function() {
-var ele = document.getElementsByTagName('table');
-for(var tb = 0; tb < ele.length;tb++) {
- tbody = ele[tb].getElementsByTagName('tbody')
- for(var tbd = 0; tbd < tbody.length;tbd++) {
- tr = tbody[tbd].getElementsByTagName('tr');
- for (var trs = 0; trs < tr.length;trs++) {
- td = tr[trs].getElementsByTagName('td')
- if(td[1].innerHTML === 'Passed') {
- td[1].className ='passed';
- }
- if(td[1].innerHTML === 'Failed') {
- td[1].className ='failed';
- }
- }
- }
- }
+ var ele = document.getElementsByTagName('table');
+ for(var tb = 0; tb < ele.length;tb++) {
+ tbody = ele[tb].getElementsByTagName('tbody')
+ for(var tbd = 0; tbd < tbody.length;tbd++) {
+ tr = tbody[tbd].getElementsByTagName('tr');
+ for (var trs = 0; trs < tr.length;trs++) {
+ td = tr[trs].getElementsByTagName('td')
+ if(td[1].innerHTML === 'Passed') {
+ td[1].className ='passed';
+ }
+ if(td[1].innerHTML === 'Failed') {
+ td[1].className ='failed';
+ }
+ }
+ }
+ }
}
</script>
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml
index c34c286cc..e3f6c492b 100644
--- a/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/regression/test.yaml
@@ -25,6 +25,8 @@
- sonic_bgp_af
- sonic_bgp_neighbors
- sonic_bgp_neighbors_af
+ - sonic_dhcp_snooping
+ - sonic_vlan_mapping
- sonic_vrfs
- sonic_vxlan
- sonic_port_breakout
@@ -36,4 +38,17 @@
- sonic_prefix_lists
- sonic_static_routes
- sonic_ntp
+ - sonic_logging
+ - sonic_ip_neighbor
+ - sonic_port_group
+ - sonic_dhcp_relay
+ - sonic_acl_interfaces
+ - sonic_l2_acls
+ - sonic_l3_acls
+ - sonic_lldp_global
+ - sonic_mac
+ - sonic_bfd
+ - sonic_copp
+ - sonic_route_maps
+ - sonic_stp
- test_reports
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.15.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..c2cf4ded5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.15.txt
@@ -0,0 +1 @@
+plugins/action/sonic.py action-plugin-docs #action plugin for base class
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.16.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..c2cf4ded5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.16.txt
@@ -0,0 +1 @@
+plugins/action/sonic.py action-plugin-docs #action plugin for base class
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.17.txt b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.17.txt
new file mode 100644
index 000000000..c2cf4ded5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/sanity/ignore-2.17.txt
@@ -0,0 +1 @@
+plugins/action/sonic.py action-plugin-docs #action plugin for base class
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/__init__.py
index e69de29bb..e69de29bb 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/mock.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/mock.py
new file mode 100644
index 000000000..031546093
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/mock.py
@@ -0,0 +1,23 @@
+"""
+Compatibility shim for mock imports in modules and module_utils.
+This can be removed once support for Python 2.7 is dropped.
+"""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+try:
+ from unittest.mock import ( # pylint: disable=unused-import
+ call,
+ patch,
+ mock_open,
+ MagicMock,
+ Mock,
+ )
+except ImportError:
+ from mock import (
+ call,
+ patch,
+ mock_open,
+ MagicMock,
+ Mock,
+ )
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/unittest.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/unittest.py
new file mode 100644
index 000000000..b41677417
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/compat/unittest.py
@@ -0,0 +1,29 @@
+# (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+#
+# This file is part of Ansible
+#
+# Ansible is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# Ansible is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+from unittest import *
+
+if not hasattr(TestCase, 'assertRaisesRegex'):
+ # added in Python 3.2
+ TestCase.assertRaisesRegex = TestCase.assertRaisesRegexp
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/__init__.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/conftest.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/conftest.py
new file mode 100644
index 000000000..a7d1e0475
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/conftest.py
@@ -0,0 +1,31 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+ mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
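
The fixture is meant to be used through indirect parametrization: pytest hands the parametrized dict to patch_ansible_module via request.param, and the fixture serializes it into basic._ANSIBLE_ARGS so that AnsibleModule inside the module under test parses those arguments instead of reading stdin. A hedged usage sketch (the argument dict is illustrative, not taken from an actual test in this patch):

    import pytest

    @pytest.mark.parametrize(
        'patch_ansible_module',
        [{'url': 'data/openconfig-system:system', 'method': 'GET'}],
        indirect=['patch_ansible_module'],
    )
    def test_args_are_injected(patch_ansible_module):
        # By this point basic._ANSIBLE_ARGS already holds the JSON-encoded
        # ANSIBLE_MODULE_ARGS, so instantiating the module under test here
        # would consume these arguments.
        ...
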
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/__init__.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/__init__.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/__init__.py
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_aaa.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_aaa.yaml
new file mode 100644
index 000000000..e7ca4429a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_aaa.yaml
@@ -0,0 +1,78 @@
+---
+merged_01:
+ module_args:
+ config:
+ authentication:
+ data:
+ local: true
+ fail_through: true
+ group: radius
+ existing_aaa_config:
+ - path: "data/openconfig-system:system/aaa"
+ response:
+ code: 200
+ - path: "data/sonic-system-aaa:sonic-system-aaa"
+ response:
+ code: 200
+ value:
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa"
+ method: "patch"
+ data:
+ openconfig-system:aaa:
+ authentication:
+ config:
+ authentication-method:
+ - local
+ - radius
+ failthrough: 'True'
+deleted_01:
+ module_args:
+ state: deleted
+ existing_aaa_config:
+ - path: "data/openconfig-system:system/aaa"
+ response:
+ code: 200
+ value:
+ openconfig-system:aaa:
+ authentication:
+ config:
+ authentication-method:
+ - radius
+ - local
+ failthrough: true
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/authentication/config/authentication-method"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/authentication/config/failthrough"
+ method: "delete"
+ data:
+deleted_02:
+ module_args:
+ config:
+ authentication:
+ data:
+ local: true
+ fail_through: true
+ group: radius
+ state: deleted
+ existing_aaa_config:
+ - path: "data/openconfig-system:system/aaa"
+ response:
+ code: 200
+ value:
+ openconfig-system:aaa:
+ authentication:
+ config:
+ authentication-method:
+ - radius
+ - local
+ failthrough: true
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/authentication/config/authentication-method"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/authentication/config/failthrough"
+ method: "delete"
+ data:
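
Each entry in this fixture pairs the module arguments with the canned GET responses the facts code should see (existing_aaa_config) and the REST requests the module is expected to emit (expected_config_requests). A minimal, hypothetical loader sketch; the unit tests that actually consume these files use their own helpers, which are not shown here:

    import os
    import yaml  # PyYAML, assumed available in the unit-test environment

    def load_fixture(name):
        path = os.path.join(os.path.dirname(__file__), 'fixtures', name)
        with open(path) as stream:
            return yaml.safe_load(stream)

    case = load_fixture('sonic_aaa.yaml')['merged_01']
    module_args = case['module_args']               # input to the sonic_aaa module
    stub_responses = case['existing_aaa_config']    # responses returned by mocked GETs
    expected = case['expected_config_requests']     # REST calls the module should issue
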
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_acl_interfaces.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_acl_interfaces.yaml
new file mode 100644
index 000000000..84741e219
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_acl_interfaces.yaml
@@ -0,0 +1,511 @@
+---
+merged_01:
+ module_args:
+ state: 'merged'
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - type: 'ipv4'
+ acls:
+ - name: 'ipv4-acl-1'
+ direction: 'in'
+ - name: 'ipv4-acl-2'
+ direction: 'out'
+ - name: 'Portchannel1.10'
+ access_groups:
+ - type: 'ipv6'
+ acls:
+ - name: 'ipv6-acl-1'
+ direction: 'out'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/interfaces'
+ response:
+ code: 200
+ value:
+ openconfig-acl:interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1'
+ method: 'post'
+ data:
+ openconfig-acl:config:
+ id: 'Eth1/1'
+ openconfig-acl:interface-ref:
+ config:
+ interface: 'Eth1/1'
+ openconfig-acl:ingress-acl-sets:
+ ingress-acl-set:
+ - config:
+ set-name: 'mac-acl-1'
+ type: 'ACL_L2'
+ set-name: 'mac-acl-1'
+ type: 'ACL_L2'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1'
+ method: 'post'
+ data:
+ openconfig-acl:config:
+ id: 'Eth1/1'
+ openconfig-acl:interface-ref:
+ config:
+ interface: 'Eth1/1'
+ openconfig-acl:egress-acl-sets:
+ egress-acl-set:
+ - config:
+ set-name: 'ipv4-acl-2'
+ type: 'ACL_IPV4'
+ set-name: 'ipv4-acl-2'
+ type: 'ACL_IPV4'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=PortChannel1.10'
+ method: 'post'
+ data:
+ openconfig-acl:config:
+ id: 'PortChannel1.10'
+ openconfig-acl:interface-ref:
+ config:
+ interface: 'PortChannel1'
+ subinterface: 10
+ openconfig-acl:egress-acl-sets:
+ egress-acl-set:
+ - config:
+ set-name: 'ipv6-acl-1'
+ type: 'ACL_IPV6'
+ set-name: 'ipv6-acl-1'
+ type: 'ACL_IPV6'
+
+merged_02:
+ module_args:
+ state: 'merged'
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - type: 'ipv4'
+ acls:
+ - name: 'ipv4-acl-1'
+ direction: 'in'
+ - name: 'ipv4-acl-2'
+ direction: 'out'
+ - name: 'Portchannel1.10'
+ access_groups:
+ - type: 'ipv6'
+ acls:
+ - name: 'ipv6-acl-1'
+ direction: 'out'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/interfaces'
+ response:
+ code: 200
+ value:
+ openconfig-acl:interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ - id: 'PortChannel1.10'
+ config:
+ id: 'PortChannel1.10'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv6-acl-1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ set-name: 'ipv6-acl-1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config_requests: []
+
+replaced_01:
+ module_args:
+ state: 'replaced'
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - type: 'ipv4'
+ acls:
+ - name: 'ipv4-acl-1'
+ direction: 'in'
+ - type: 'ipv6'
+ acls:
+ - name: 'ipv6-acl-2'
+ direction: 'out'
+ - name: 'Eth1/2'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/interfaces'
+ response:
+ code: 200
+ value:
+ openconfig-acl:interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1/egress-acl-sets/egress-acl-set=ipv4-acl-2,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f2/egress-acl-sets/egress-acl-set=ipv6-acl-2,ACL_IPV6'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f2/ingress-acl-sets/ingress-acl-set=mac-acl-1,ACL_L2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1'
+ method: 'post'
+ data:
+ openconfig-acl:config:
+ id: 'Eth1/1'
+ openconfig-acl:interface-ref:
+ config:
+ interface: 'Eth1/1'
+ openconfig-acl:egress-acl-sets:
+ egress-acl-set:
+ - config:
+ set-name: 'ipv6-acl-2'
+ type: 'ACL_IPV6'
+ set-name: 'ipv6-acl-2'
+ type: 'ACL_IPV6'
+
+overridden_01:
+ module_args:
+ state: 'overridden'
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - type: 'ipv4'
+ acls:
+ - name: 'ipv4-acl-1'
+ direction: 'in'
+ - type: 'ipv6'
+ acls:
+ - name: 'ipv6-acl-2'
+ direction: 'out'
+ - name: 'Eth1/3'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-2'
+ direction: 'out'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/interfaces'
+ response:
+ code: 200
+ value:
+ openconfig-acl:interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1/egress-acl-sets/egress-acl-set=ipv4-acl-2,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1'
+ method: 'post'
+ data:
+ openconfig-acl:config:
+ id: 'Eth1/1'
+ openconfig-acl:interface-ref:
+ config:
+ interface: 'Eth1/1'
+ openconfig-acl:egress-acl-sets:
+ egress-acl-set:
+ - config:
+ set-name: 'ipv6-acl-2'
+ type: 'ACL_IPV6'
+ set-name: 'ipv6-acl-2'
+ type: 'ACL_IPV6'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f3'
+ method: 'post'
+ data:
+ openconfig-acl:config:
+ id: 'Eth1/3'
+ openconfig-acl:interface-ref:
+ config:
+ interface: 'Eth1/3'
+ openconfig-acl:egress-acl-sets:
+ egress-acl-set:
+ - config:
+ set-name: 'mac-acl-2'
+ type: 'ACL_L2'
+ set-name: 'mac-acl-2'
+ type: 'ACL_L2'
+
+deleted_01:
+ module_args:
+ state: 'deleted'
+ config:
+ - name: 'Eth1/1'
+ access_groups:
+ - type: 'mac'
+ acls:
+ - name: 'mac-acl-1'
+ direction: 'in'
+ - type: 'ipv4'
+ acls:
+ - name: 'ipv4-acl-1'
+ direction: 'in'
+ - name: 'Eth1/2'
+ access_groups:
+ - type: 'ipv4'
+ - type: 'ipv6'
+ - name: 'Eth1/3'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/interfaces'
+ response:
+ code: 200
+ value:
+ openconfig-acl:interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1/ingress-acl-sets/ingress-acl-set=mac-acl-1,ACL_L2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1/ingress-acl-sets/ingress-acl-set=ipv4-acl-1,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f2/egress-acl-sets/egress-acl-set=ipv6-acl-2,ACL_IPV6'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f2/ingress-acl-sets/ingress-acl-set=ipv4-acl-1,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f3'
+ method: 'delete'
+
+deleted_02:
+ module_args:
+ config: []
+ state: 'deleted'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/interfaces'
+ response:
+ code: 200
+ value:
+ openconfig-acl:interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-2'
+ type: 'openconfig-acl:ACL_IPV4'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ set-name: 'mac-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ egress-acl-sets:
+ egress-acl-set:
+ - set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ set-name: 'ipv6-acl-2'
+ type: 'openconfig-acl:ACL_IPV6'
+ ingress-acl-sets:
+ ingress-acl-set:
+ - set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ set-name: 'ipv4-acl-1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f1'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/interfaces/interface=Eth1%2f2'
+ method: 'delete'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_api.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_api.yaml
new file mode 100644
index 000000000..f243dd5ab
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_api.yaml
@@ -0,0 +1,7 @@
+---
+merged_01:
+ module_args:
+ url: data/openconfig-network-instance:network-instances/network-instance=Vlan100/
+ method: "PUT"
+ body: {"openconfig-network-instance:network-instance": [{"name": "Vlan100", "config": {"name": "Vlan100"}}]}
+ status_code: 204
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bfd.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bfd.yaml
new file mode 100644
index 000000000..ed6c441b1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bfd.yaml
@@ -0,0 +1,598 @@
+---
+merged_01:
+ module_args:
+ config:
+ profiles:
+ - profile_name: 'p1'
+ enabled: True
+ transmit_interval: 120
+ receive_interval: 200
+ detect_multiplier: 2
+ passive_mode: True
+ min_ttl: 140
+ echo_interval: 150
+ echo_mode: True
+ single_hops:
+ - remote_address: '196.88.6.1'
+ vrf: 'default'
+ interface: 'Ethernet20'
+ local_address: '1.1.1.1'
+ enabled: True
+ transmit_interval: 50
+ receive_interval: 80
+ detect_multiplier: 4
+ passive_mode: True
+ echo_interval: 110
+ echo_mode: True
+ profile_name: 'p1'
+ multi_hops:
+ - remote_address: '192.40.1.3'
+ vrf: 'default'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 75
+ receive_interval: 100
+ detect_multiplier: 3
+ passive_mode: True
+ min_ttl: 125
+ profile_name: 'p1'
+ existing_bfd_config:
+ - path: "/data/openconfig-bfd:bfd"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-bfd:bfd"
+ method: "patch"
+ data:
+ openconfig-bfd:bfd:
+ openconfig-bfd-ext:bfd-profile:
+ profile:
+ - profile-name: 'p1'
+ config:
+ profile-name: 'p1'
+ enabled: True
+ desired-minimum-tx-interval: 120
+ required-minimum-receive: 200
+ detection-multiplier: 2
+ passive-mode: True
+ minimum-ttl: 140
+ desired-minimum-echo-receive: 150
+ echo-active: True
+ openconfig-bfd-ext:bfd-shop-sessions:
+ single-hop:
+ - remote-address: '196.88.6.1'
+ vrf: 'default'
+ interface: 'Ethernet20'
+ local-address: '1.1.1.1'
+ config:
+ remote-address: '196.88.6.1'
+ vrf: 'default'
+ interface: 'Ethernet20'
+ local-address: '1.1.1.1'
+ enabled: True
+ desired-minimum-tx-interval: 50
+ required-minimum-receive: 80
+ detection-multiplier: 4
+ passive-mode: True
+ desired-minimum-echo-receive: 110
+ echo-active: True
+ profile-name: 'p1'
+ openconfig-bfd-ext:bfd-mhop-sessions:
+ multi-hop:
+ - remote-address: '192.40.1.3'
+ vrf: 'default'
+ local-address: '3.3.3.3'
+ interface: 'null'
+ config:
+ remote-address: '192.40.1.3'
+ vrf: 'default'
+ local-address: '3.3.3.3'
+ enabled: True
+ desired-minimum-tx-interval: 75
+ required-minimum-receive: 100
+ detection-multiplier: 3
+ passive-mode: True
+ minimum-ttl: 125
+ profile-name: 'p1'
+ interface: 'null'
+
+replaced_01:
+ module_args:
+ config:
+ profiles:
+ - profile_name: 'p2'
+ enabled: False
+ single_hops:
+ - remote_address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local_address: '2.2.2.2'
+ echo_interval: 125
+ echo_mode: True
+ profile_name: 'p1'
+ multi_hops:
+ - remote_address: '198.72.1.4'
+ vrf: 'default'
+ local_address: '4.4.4.4'
+ enabled: True
+ transmit_interval: 71
+ receive_interval: 286
+ detect_multiplier: 9
+ state: replaced
+ existing_bfd_config:
+ - path: "/data/openconfig-bfd:bfd"
+ response:
+ code: 200
+ value:
+ openconfig-bfd:bfd:
+ openconfig-bfd-ext:bfd-profile:
+ profile:
+ - profile-name: 'p2'
+ config:
+ profile-name: 'p2'
+ enabled: True
+ desired-minimum-tx-interval: 135
+ required-minimum-receive: 225
+ detection-multiplier: 10
+ passive-mode: True
+ minimum-ttl: 250
+ desired-minimum-echo-receive: 250
+ echo-active: True
+ openconfig-bfd-ext:bfd-shop-sessions:
+ single-hop:
+ - remote-address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local-address: '2.2.2.2'
+ config:
+ remote-address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local-address: '2.2.2.2'
+ enabled: False
+ desired-minimum-tx-interval: 65
+ required-minimum-receive: 95
+ detection-multiplier: 7
+ passive-mode: False
+ desired-minimum-echo-receive: 145
+ echo-active: False
+ profile-name: 'p2'
+ openconfig-bfd-ext:bfd-mhop-sessions:
+ multi-hop:
+ - remote-address: '198.72.1.4'
+ vrf: 'default'
+ local-address: '4.4.4.4'
+ config:
+ remote-address: '198.72.1.4'
+ vrf: 'default'
+ local-address: '4.4.4.4'
+ enabled: False
+ desired-minimum-tx-interval: 70
+ required-minimum-receive: 285
+ detection-multiplier: 8
+ passive-mode: False
+ minimum-ttl: 155
+ profile-name: 'p2'
+ expected_config_requests:
+ - path: "/data/openconfig-bfd:bfd"
+ method: "patch"
+ data:
+ openconfig-bfd:bfd:
+ openconfig-bfd-ext:bfd-profile:
+ profile:
+ - profile-name: 'p2'
+ config:
+ profile-name: 'p2'
+ enabled: False
+ desired-minimum-tx-interval: 300
+ required-minimum-receive: 300
+ detection-multiplier: 3
+ passive-mode: False
+ minimum-ttl: 254
+ desired-minimum-echo-receive: 300
+ echo-active: False
+ openconfig-bfd-ext:bfd-shop-sessions:
+ single-hop:
+ - remote-address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local-address: '2.2.2.2'
+ config:
+ remote-address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local-address: '2.2.2.2'
+ enabled: True
+ desired-minimum-tx-interval: 300
+ required-minimum-receive: 300
+ detection-multiplier: 3
+ passive-mode: False
+ desired-minimum-echo-receive: 125
+ echo-active: True
+ profile-name: 'p1'
+ openconfig-bfd-ext:bfd-mhop-sessions:
+ multi-hop:
+ - remote-address: '198.72.1.4'
+ vrf: 'default'
+ local-address: '4.4.4.4'
+ interface: 'null'
+ config:
+ remote-address: '198.72.1.4'
+ vrf: 'default'
+ local-address: '4.4.4.4'
+ enabled: True
+ desired-minimum-tx-interval: 71
+ required-minimum-receive: 286
+ detection-multiplier: 9
+ passive-mode: False
+ minimum-ttl: 254
+ interface: 'null'
+overridden_01:
+ module_args:
+ config:
+ profiles:
+ - profile_name: 'p3'
+ enabled: True
+ transmit_interval: 110
+ receive_interval: 230
+ detect_multiplier: 10
+ passive_mode: True
+ min_ttl: 170
+ echo_interval: 140
+ echo_mode: True
+ - profile_name: 'p4'
+ single_hops:
+ - remote_address: '182.98.4.1'
+ vrf: 'default'
+ interface: 'Ethernet28'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 42
+ receive_interval: 84
+ detect_multiplier: 8
+ passive_mode: True
+ echo_interval: 115
+ echo_mode: True
+ profile_name: 'p3'
+ - remote_address: '183.98.3.2'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local_address: '1.2.3.4'
+ multi_hops:
+ - remote_address: '182.44.1.2'
+ vrf: 'default'
+ local_address: '2.2.2.2'
+ enabled: True
+ transmit_interval: 74
+ receive_interval: 101
+ detect_multiplier: 6
+ passive_mode: True
+ min_ttl: 127
+ profile_name: 'p3'
+ - remote_address: '162.45.5.1'
+ vrf: 'default'
+ local_address: '2.1.1.1'
+ state: overridden
+ existing_bfd_config:
+ - path: "/data/openconfig-bfd:bfd"
+ response:
+ code: 200
+ value:
+ openconfig-bfd:bfd:
+ openconfig-bfd-ext:bfd-profile:
+ profile:
+ - profile-name: 'p2'
+ config:
+ profile-name: 'p2'
+ enabled: True
+ desired-minimum-tx-interval: 135
+ required-minimum-receive: 225
+ detection-multiplier: 10
+ passive-mode: True
+ minimum-ttl: 250
+ desired-minimum-echo-receive: 250
+ echo-active: True
+ openconfig-bfd-ext:bfd-shop-sessions:
+ single-hop:
+ - remote-address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local-address: '2.2.2.2'
+ config:
+ remote-address: '194.56.2.1'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local-address: '2.2.2.2'
+ enabled: False
+ desired-minimum-tx-interval: 65
+ required-minimum-receive: 95
+ detection-multiplier: 7
+ passive-mode: False
+ desired-minimum-echo-receive: 145
+ echo-active: False
+ profile-name: 'p2'
+ openconfig-bfd-ext:bfd-mhop-sessions:
+ multi-hop:
+ - remote-address: '198.72.1.4'
+ vrf: 'default'
+ local-address: '4.4.4.4'
+ config:
+ remote-address: '198.72.1.4'
+ vrf: 'default'
+ local-address: '4.4.4.4'
+ enabled: False
+ desired-minimum-tx-interval: 70
+ required-minimum-receive: 285
+ detection-multiplier: 8
+ passive-mode: False
+ minimum-ttl: 155
+ profile-name: 'p2'
+ expected_config_requests:
+ - path: "/data/openconfig-bfd:bfd"
+ method: "patch"
+ data:
+ openconfig-bfd:bfd:
+ openconfig-bfd-ext:bfd-profile:
+ profile:
+ - profile-name: 'p3'
+ config:
+ profile-name: 'p3'
+ enabled: True
+ desired-minimum-tx-interval: 110
+ required-minimum-receive: 230
+ detection-multiplier: 10
+ passive-mode: True
+ minimum-ttl: 170
+ desired-minimum-echo-receive: 140
+ echo-active: True
+ openconfig-bfd-ext:bfd-shop-sessions:
+ single-hop:
+ - remote-address: '182.98.4.1'
+ vrf: 'default'
+ interface: 'Ethernet28'
+ local-address: '3.3.3.3'
+ config:
+ remote-address: '182.98.4.1'
+ vrf: 'default'
+ interface: 'Ethernet28'
+ local-address: '3.3.3.3'
+ enabled: True
+ desired-minimum-tx-interval: 42
+ required-minimum-receive: 84
+ detection-multiplier: 8
+ passive-mode: True
+ desired-minimum-echo-receive: 115
+ echo-active: True
+ profile-name: 'p3'
+ openconfig-bfd-ext:bfd-mhop-sessions:
+ multi-hop:
+ - remote-address: '162.45.5.1'
+ vrf: 'default'
+ local-address: '2.1.1.1'
+ interface: 'null'
+ config:
+ remote-address: '162.45.5.1'
+ vrf: 'default'
+ local-address: '2.1.1.1'
+ enabled: True
+ desired-minimum-tx-interval: 300
+ required-minimum-receive: 300
+ detection-multiplier: 3
+ passive-mode: False
+ minimum-ttl: 254
+ interface: 'null'
+
+deleted_01:
+ module_args:
+ config:
+ profiles:
+ - profile_name: 'p3'
+ enabled: True
+ transmit_interval: 110
+ receive_interval: 230
+ detect_multiplier: 10
+ passive_mode: True
+ min_ttl: 170
+ echo_interval: 140
+ echo_mode: True
+ - profile_name: 'p4'
+ single_hops:
+ - remote_address: '182.98.4.1'
+ vrf: 'default'
+ interface: 'Ethernet28'
+ local_address: '3.3.3.3'
+ enabled: True
+ transmit_interval: 42
+ receive_interval: 84
+ detect_multiplier: 8
+ passive_mode: True
+ echo_interval: 115
+ echo_mode: True
+ profile_name: 'p3'
+ - remote_address: '183.98.3.2'
+ vrf: 'default'
+ interface: 'Ethernet24'
+ local_address: '1.2.3.4'
+ multi_hops:
+ - remote_address: '182.44.1.2'
+ vrf: 'default'
+ local_address: '2.2.2.2'
+ enabled: True
+ transmit_interval: 74
+ receive_interval: 101
+ detect_multiplier: 6
+ passive_mode: True
+ min_ttl: 127
+ profile_name: 'p3'
+ - remote_address: '162.45.5.1'
+ vrf: 'default'
+ local_address: '2.1.1.1'
+ state: deleted
+ existing_bfd_config:
+ - path: "/data/openconfig-bfd:bfd"
+ response:
+ code: 200
+ value:
+ openconfig-bfd:bfd:
+ openconfig-bfd-ext:bfd-mhop-sessions:
+ multi-hop:
+ - config:
+ desired-minimum-tx-interval: 300
+ detection-multiplier: 3
+ enabled: true
+ interface: 'null'
+ local-address: 2.1.1.1
+ minimum-ttl: 254
+ passive-mode: false
+ remote-address: 162.45.5.1
+ required-minimum-receive: 300
+ vrf: default
+ interface: 'null'
+ local-address: 2.1.1.1
+ remote-address: 162.45.5.1
+ vrf: default
+ - config:
+ desired-minimum-tx-interval: 74
+ detection-multiplier: 6
+ enabled: true
+ interface: 'null'
+ local-address: 2.2.2.2
+ minimum-ttl: 127
+ passive-mode: true
+ profile-name: p3
+ remote-address: 182.44.1.2
+ required-minimum-receive: 101
+ vrf: default
+ interface: 'null'
+ local-address: 2.2.2.2
+ remote-address: 182.44.1.2
+ vrf: default
+ openconfig-bfd-ext:bfd-profile:
+ profile:
+ - config:
+ desired-minimum-echo-receive: 140
+ desired-minimum-tx-interval: 110
+ detection-multiplier: 10
+ echo-active: true
+ enabled: true
+ minimum-ttl: 170
+ passive-mode: true
+ profile-name: p3
+ required-minimum-receive: 230
+ profile-name: p3
+ - config:
+ desired-minimum-echo-receive: 300
+ desired-minimum-tx-interval: 300
+ detection-multiplier: 3
+ echo-active: false
+ enabled: true
+ minimum-ttl: 254
+ passive-mode: false
+ profile-name: p4
+ required-minimum-receive: 300
+ profile-name: p4
+ openconfig-bfd-ext:bfd-shop-sessions:
+ single-hop:
+ - config:
+ desired-minimum-echo-receive: 115
+ desired-minimum-tx-interval: 42
+ detection-multiplier: 8
+ echo-active: true
+ enabled: true
+ interface: Ethernet28
+ local-address: 3.3.3.3
+ passive-mode: true
+ profile-name: p3
+ remote-address: 182.98.4.1
+ required-minimum-receive: 84
+ vrf: default
+ interface: Ethernet28
+ local-address: 3.3.3.3
+ remote-address: 182.98.4.1
+ vrf: default
+ - config:
+ desired-minimum-echo-receive: 300
+ desired-minimum-tx-interval: 300
+ detection-multiplier: 3
+ echo-active: false
+ enabled: true
+ interface: Ethernet24
+ local-address: 1.2.3.4
+ passive-mode: false
+ remote-address: 183.98.3.2
+ required-minimum-receive: 300
+ vrf: default
+ interface: Ethernet24
+ local-address: 1.2.3.4
+ remote-address: 183.98.3.2
+ vrf: default
+ expected_config_requests:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/desired-minimum-tx-interval"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/required-minimum-receive"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/detection-multiplier"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/passive-mode"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/minimum-ttl"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/desired-minimum-echo-receive"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p3/config/echo-active"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-profile/profile=p4"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/desired-minimum-tx-interval"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/required-minimum-receive"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/detection-multiplier"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/passive-mode"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/desired-minimum-echo-receive"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/echo-active"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=182.98.4.1,Ethernet28,default,3.3.3.3/config/profile-name"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-shop-sessions/single-hop=183.98.3.2,Ethernet24,default,1.2.3.4"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=182.44.1.2,null,default,2.2.2.2/config/desired-minimum-tx-interval"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=182.44.1.2,null,default,2.2.2.2/config/required-minimum-receive"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=182.44.1.2,null,default,2.2.2.2/config/detection-multiplier"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=182.44.1.2,null,default,2.2.2.2/config/passive-mode"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=182.44.1.2,null,default,2.2.2.2/config/minimum-ttl"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=182.44.1.2,null,default,2.2.2.2/config/profile-name"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-bfd:bfd/openconfig-bfd-ext:bfd-mhop-sessions/multi-hop=162.45.5.1,null,default,2.1.1.1"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp.yaml
new file mode 100644
index 000000000..1c5d59fba
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp.yaml
@@ -0,0 +1,473 @@
+---
+merged_01:
+ module_args:
+ config:
+ - bgp_as: 4
+ router_id: 10.2.2.4
+ rt_delay: 10
+ log_neighbor_changes: False
+ timers:
+ holdtime: 20
+ keepalive_interval: 30
+ bestpath:
+ as_path:
+ confed: True
+ ignore: True
+ multipath_relax: False
+ multipath_relax_as_set: True
+ compare_routerid: True
+ med:
+ confed: True
+ missing_as_worst: True
+ always_compare_med: True
+ max_med:
+ on_startup:
+ timer: 667
+ med_val: 7878
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ method: "patch"
+ data:
+ openconfig-network-instance:config:
+ router-id: "10.2.2.4"
+ as: 4.0
+ route-map-process-delay: 10
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/hold-time"
+ method: "patch"
+ data:
+ hold-time: 20
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/keepalive-interval"
+ method: "patch"
+ data:
+ keepalive-interval: 30
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med"
+ method: "patch"
+ data:
+ max-med:
+ config:
+ max-med-val: 7878
+ time: 667
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options"
+ method: "patch"
+ data:
+ route-selection-options:
+ config:
+ external-compare-router-id: True
+ compare-confed-as-path: True
+ ignore-as-path-length: True
+ med-confed: True
+ med-missing-as-worst: True
+ always-compare-med: True
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/use-multiple-paths/ebgp/config"
+ method: "patch"
+ data:
+ openconfig-network-instance:config:
+ as-set: True
+deleted_01:
+ module_args:
+ config:
+ - bgp_as: 4
+ router_id: 10.2.2.4
+ rt_delay: 10
+ log_neighbor_changes: False
+ bestpath:
+ as_path:
+ confed: True
+ ignore: True
+ multipath_relax: False
+ multipath_relax_as_set: True
+ compare_routerid: True
+ med:
+ confed: True
+ missing_as_worst: True
+ always_compare_med: True
+ timers:
+ holdtime: 20
+ keepalive_interval: 30
+ max_med:
+ on_startup:
+ timer: 667
+ med_val: 7878
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ router-id: "10.2.2.4"
+ route-map-process-delay: 10
+ hold-time: 20
+ keepalive-interval: 30
+ logging-options:
+ config:
+ log-neighbor-state-changes: False
+ route-selection-options:
+ config:
+ always-compare-med: True
+ external-compare-router-id: True
+ ignore-as-path-length: True
+ compare-confed-as-path: True
+ med-confed: True
+ med-missing-as-worst: True
+ use-multiple-paths:
+ ebgp:
+ config:
+ allow-multiple-as: False
+ as-set: False
+ max-med:
+ config:
+ time: 667
+ max-med-val: 7878
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/hold-time"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/keepalive-interval"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/router-id"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/route-map-process-delay"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med/config/max-med-val"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med/config/time"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options"
+ method: "patch"
+ data:
+ route-selection-options:
+ config:
+ external-compare-router-id: False
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/always-compare-med"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/compare-confed-as-path"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/ignore-as-path-length"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/med-confed"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/med-missing-as-worst"
+ method: "delete"
+
+deleted_02:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ router-id: "10.2.2.4"
+ route-map-process-delay: 10
+ hold-time: 20
+ keepalive-interval: 30
+ logging-options:
+ config:
+ log-neighbor-state-changes: False
+ route-selection-options:
+ config:
+ always-compare-med: True
+ external-compare-router-id: True
+ ignore-as-path-length: True
+ compare-confed-as-path: True
+ med-confed: True
+ med-missing-as-worst: True
+ use-multiple-paths:
+ ebgp:
+ config:
+ allow-multiple-as: False
+ as-set: False
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp"
+ method: "delete"
+
+replaced_01:
+ module_args:
+ config:
+ - bgp_as: 5
+ vrf_name: 'VrfReg1'
+ router_id: 10.2.2.5
+ timers:
+ holdtime: 20
+ keepalive_interval: 30
+ - bgp_as: 4
+ router_id: 10.2.2.4
+ max_med:
+ on_startup:
+ timer: 776
+ med_val: 8787
+ state: replaced
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ router-id: "10.2.2.4"
+ route-map-process-delay: 10
+ hold-time: 20
+ keepalive-interval: 30
+ logging-options:
+ config:
+ log-neighbor-state-changes: False
+ route-selection-options:
+ config:
+ always-compare-med: True
+ external-compare-router-id: True
+ ignore-as-path-length: True
+ compare-confed-as-path: True
+ med-confed: True
+ med-missing-as-worst: True
+ use-multiple-paths:
+ ebgp:
+ config:
+ allow-multiple-as: False
+ as-set: False
+ max-med:
+ config:
+ time: 667
+ max-med-val: 7878
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/hold-time"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/keepalive-interval"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config/route-map-process-delay"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med/config/max-med-val"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med/config/time"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/logging-options/config/log-neighbor-state-changes"
+ method: "patch"
+ data:
+ log-neighbor-state-changes: True
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options"
+ method: "patch"
+ data:
+ route-selection-options:
+ config:
+ external-compare-router-id: False
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/always-compare-med"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/compare-confed-as-path"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/ignore-as-path-length"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/med-confed"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/med-missing-as-worst"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ method: "patch"
+ data:
+ openconfig-network-instance:config:
+ as: 4.0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med"
+ method: "patch"
+ data:
+ max-med:
+ config:
+ max-med-val: 8787
+ time: 776
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol/"
+ method: "patch"
+ data:
+ openconfig-network-instance:protocol:
+ - name: "bgp"
+ identifier: "openconfig-policy-types:BGP"
+ bgp:
+ global:
+ config:
+ as: 5.0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ method: "patch"
+ data:
+ openconfig-network-instance:config:
+ router-id: "10.2.2.5"
+ as: 5.0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config/hold-time"
+ method: "patch"
+ data:
+ hold-time: 20
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config/keepalive-interval"
+ method: "patch"
+ data:
+ keepalive-interval: 30
+
+overridden_01:
+ module_args:
+ config:
+ - bgp_as: 5
+ vrf_name: 'VrfReg2'
+ router_id: 10.2.2.6
+ rt_delay: 10
+ log_neighbor_changes: True
+ - bgp_as: 4
+ router_id: 10.2.2.5
+ rt_delay: 10
+ bestpath:
+ as_path:
+ confed: True
+ ignore: True
+ compare_routerid: True
+ med:
+ confed: True
+ missing_as_worst: True
+ max_med:
+ on_startup:
+ timer: 776
+ med_val: 8787
+ state: overridden
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ router-id: "10.2.2.4"
+ route-map-process-delay: 10
+ hold-time: 180
+ keepalive-interval: 60
+ logging-options:
+ config:
+ log-neighbor-state-changes: True
+ route-selection-options:
+ config:
+ always-compare-med: True
+ external-compare-router-id: True
+ ignore-as-path-length: True
+ compare-confed-as-path: True
+ med-confed: True
+ med-missing-as-worst: True
+ use-multiple-paths:
+ ebgp:
+ config:
+ allow-multiple-as: False
+ as-set: False
+ max-med:
+ config:
+ time: 667
+ max-med-val: 7878
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 5
+ router-id: "10.2.2.6"
+ hold-time: 180
+ keepalive-interval: 60
+ logging-options:
+ config:
+ log-neighbor-state-changes: True
+ route-selection-options:
+ config:
+ always-compare-med: False
+ external-compare-router-id: False
+ ignore-as-path-length: False
+ use-multiple-paths:
+ ebgp:
+ config:
+ allow-multiple-as: False
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg2/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - vrf_name: VrfReg2
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med/config/max-med-val"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med/config/time"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/route-selection-options/config/always-compare-med"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ method: "patch"
+ data:
+ openconfig-network-instance:config:
+ router-id: "10.2.2.5"
+ as: 4.0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/max-med"
+ method: "patch"
+ data:
+ max-med:
+ config:
+ max-med-val: 8787
+ time: 776
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg2/protocols/protocol/"
+ method: "patch"
+ data:
+ openconfig-network-instance:protocol:
+ - name: "bgp"
+ identifier: "openconfig-policy-types:BGP"
+ bgp:
+ global:
+ config:
+ as: 5.0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg2/protocols/protocol=BGP,bgp/bgp/global/config"
+ method: "patch"
+ data:
+ openconfig-network-instance:config:
+ router-id: "10.2.2.6"
+ route-map-process-delay: 10
+ as: 5.0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg2/protocols/protocol=BGP,bgp/bgp/global/logging-options/config/log-neighbor-state-changes"
+ method: "patch"
+ data:
+ log-neighbor-state-changes: True
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_af.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_af.yaml
new file mode 100644
index 000000000..22539357c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_af.yaml
@@ -0,0 +1,1007 @@
+---
+merged_01:
+ module_args:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: l2vpn
+ safi: evpn
+ advertise_pip: True
+ advertise_pip_ip: "3.3.3.3"
+ advertise_pip_peer_ip: "4.4.4.4"
+ advertise_svi_ip: True
+ advertise_all_vni: False
+ advertise_default_gw: False
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: aaa
+ - advertise_afi: ipv6
+ route_map: bbb
+ rd: "3.3.3.3:33"
+ rt_in:
+ - "22:22"
+ dampening: True
+ - afi: ipv4
+ safi: unicast
+ network:
+ - 2.2.2.2/16
+ - 192.168.10.1/32
+ dampening: True
+ - afi: ipv6
+ safi: unicast
+ dampening: True
+ max_path:
+ ebgp: 4
+ ibgp: 5
+ redistribute:
+ - metric: "21"
+ protocol: connected
+ route_map: bb
+ - metric: "27"
+ protocol: ospf
+ route_map: aa
+ - metric: "26"
+ protocol: static
+ route_map: bb
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 51
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ method: "patch"
+ data:
+ openconfig-network-instance:global:
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: 'openconfig-bgp-types:L2VPN_EVPN'
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ method: "patch"
+ data:
+ openconfig-network-instance:global:
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: 'openconfig-bgp-types:L2VPN_EVPN'
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-pip: True
+ advertise-pip-ip: 3.3.3.3
+ advertise-pip-peer-ip: 4.4.4.4
+ advertise-svi-ip: True
+ advertise-all-vni: False
+ advertise-default-gw: False
+ route-distinguisher: 3.3.3.3:33
+ import-rts:
+ - '22:22'
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise/route-advertise-list"
+ method: "patch"
+ data:
+ openconfig-bgp-evpn-ext:route-advertise-list:
+ - advertise-afi-safi: IPV4_UNICAST
+ config:
+ advertise-afi-safi: IPV4_UNICAST
+ route-map:
+ - aaa
+ - advertise-afi-safi: IPV6_UNICAST
+ config:
+ advertise-afi-safi: IPV6_UNICAST
+ route-map:
+ - bbb
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ method: "patch"
+ data:
+ openconfig-network-instance:global:
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: 'openconfig-bgp-types:IPV4_UNICAST'
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ method: "patch"
+ data:
+ openconfig-network-instance:global:
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: 'openconfig-bgp-types:IPV6_UNICAST'
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/network-config"
+ method: "patch"
+ data:
+ network-config:
+ network:
+ - config:
+ prefix: '2.2.2.2/16'
+ prefix: '2.2.2.2/16'
+ - config:
+ prefix: '192.168.10.1/32'
+ prefix: '192.168.10.1/32'
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/route-flap-damping"
+ method: "patch"
+ data:
+ route-flap-damping:
+ config:
+ enabled: True
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV6_UNICAST/use-multiple-paths"
+ method: "patch"
+ data:
+ openconfig-network-instance:use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 4
+ ibgp:
+ config:
+ maximum-paths: 5
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ method: "patch"
+ data:
+ openconfig-network-instance:table-connections:
+ table-connection:
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:DIRECTLY_CONNECTED
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 21.0
+ import-policy:
+ - bb
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:OSPF
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 27.0
+ import-policy:
+ - aa
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:STATIC
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 26.0
+ import-policy:
+ - bb
+merged_02:
+ module_args:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ dampening: false
+ max_path:
+ ebgp: 2
+ ibgp: 1
+ advertise_all_vni: false
+ redistribute:
+ - metric: "20"
+ protocol: connected
+ route_map: aa
+ - metric: "26"
+ protocol: ospf
+ route_map: bb
+ - metric: "25"
+ protocol: static
+ route_map: aa
+ - afi: ipv6
+ safi: unicast
+ max_path:
+ ebgp: 1
+ ibgp: 1
+ advertise_all_vni: true
+ redistribute:
+ - metric: "21"
+ protocol: connected
+ route_map: bb
+ - metric: "27"
+ protocol: ospf
+ route_map: aa
+ - metric: "28"
+ protocol: static
+ route_map: aa
+ - afi: l2vpn
+ safi: evpn
+ advertise_pip: True
+ advertise_pip_ip: "3.3.3.3"
+ advertise_pip_peer_ip: "4.4.4.4"
+ advertise_svi_ip: True
+ advertise_all_vni: True
+ advertise_default_gw: False
+ dampening: True
+ max_path:
+ ebgp: 4
+ ibgp: 5
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: aaNew
+ - advertise_afi: ipv6
+ route_map: bbNew
+ state: merged
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 51
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ route-flap-damping:
+ config:
+ enabled: false
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 1
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-all-vni: true
+ advertise-default-gw: true
+ advertise-pip: true
+ advertise-pip-ip: 3.3.3.3
+ advertise-pip-peer-ip: 4.4.4.4
+ advertise-svi-ip: true
+ openconfig-bgp-evpn-ext:route-advertise:
+ route-advertise-list:
+ - advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ route-map:
+ - aa
+ - advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ route-map:
+ - bb
+ openconfig-bgp-evpn-ext:vnis:
+ vni:
+ - config:
+ vni-number: 600
+ vni-number: 600
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ method: "patch"
+ data:
+ openconfig-network-instance:global:
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: 'openconfig-bgp-types:L2VPN_EVPN'
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-default-gw: False
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise/route-advertise-list"
+ method: "patch"
+ data:
+ openconfig-bgp-evpn-ext:route-advertise-list:
+ - advertise-afi-safi: IPV4_UNICAST
+ config:
+ advertise-afi-safi: IPV4_UNICAST
+ route-map:
+ - aaNew
+ - advertise-afi-safi: IPV6_UNICAST
+ config:
+ advertise-afi-safi: IPV6_UNICAST
+ route-map:
+ - bbNew
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ method: "patch"
+ data:
+ openconfig-network-instance:table-connections:
+ table-connection:
+ - address-family: openconfig-types:IPV4
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:DIRECTLY_CONNECTED
+ config:
+ address-family: openconfig-types:IPV4
+ metric: 20.0
+ import-policy:
+ - aa
+ - address-family: openconfig-types:IPV4
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:OSPF
+ config:
+ address-family: openconfig-types:IPV4
+ metric: 26.0
+ import-policy:
+ - bb
+ - address-family: openconfig-types:IPV4
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:STATIC
+ config:
+ address-family: openconfig-types:IPV4
+ metric: 25.0
+ import-policy:
+ - aa
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ method: "patch"
+ data:
+ openconfig-network-instance:table-connections:
+ table-connection:
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:DIRECTLY_CONNECTED
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 21.0
+ import-policy:
+ - bb
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:OSPF
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 27.0
+ import-policy:
+ - aa
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:STATIC
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 28.0
+ import-policy:
+ - aa
+merged_03:
+ module_args:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ dampening: false
+ max_path:
+ ebgp: 2
+ ibgp: 1
+ advertise_all_vni: false
+ - afi: ipv6
+ safi: unicast
+ max_path:
+ ebgp: 1
+ ibgp: 1
+ advertise_all_vni: true
+ - afi: l2vpn
+ safi: evpn
+ advertise_pip: True
+ advertise_pip_ip: "3.3.3.99"
+ advertise_pip_peer_ip: "4.4.4.4"
+ advertise_svi_ip: True
+ advertise_all_vni: True
+ advertise_default_gw: True
+ dampening: True
+ max_path:
+ ebgp: 4
+ ibgp: 5
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: aa
+ - advertise_afi: ipv6
+ route_map: bb
+ state: merged
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 51
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ route-flap-damping:
+ config:
+ enabled: false
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 1
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-all-vni: true
+ advertise-default-gw: true
+ advertise-pip: true
+ advertise-pip-ip: 3.3.3.3
+ advertise-pip-peer-ip: 4.4.4.4
+ advertise-svi-ip: true
+ openconfig-bgp-evpn-ext:route-advertise:
+ route-advertise-list:
+ - advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ route-map:
+ - aa
+ - advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ route-map:
+ - bb
+ openconfig-bgp-evpn-ext:vnis:
+ vni:
+ - config:
+ vni-number: 600
+ vni-number: 600
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ method: "patch"
+ data:
+ openconfig-network-instance:global:
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: 'openconfig-bgp-types:L2VPN_EVPN'
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-pip-ip: 3.3.3.99
+
+deleted_01:
+ module_args:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ dampening: true
+ max_path:
+ ebgp: 2
+ ibgp: 1
+ advertise_all_vni: false
+ redistribute:
+ - metric: "20"
+ protocol: connected
+ route_map: aa
+ - metric: "26"
+ protocol: ospf
+ route_map: bb
+ - metric: "25"
+ protocol: static
+ route_map: aa
+ - afi: ipv6
+ safi: unicast
+ max_path:
+ ebgp: 2
+ ibgp: 1
+ advertise_all_vni: true
+ redistribute:
+ - metric: "21"
+ protocol: connected
+ route_map: bb
+ - metric: "27"
+ protocol: ospf
+ route_map: aa
+ - metric: "28"
+ protocol: static
+ route_map: aa
+ - afi: l2vpn
+ safi: evpn
+ advertise_pip: True
+ advertise_pip_ip: "3.3.3.3"
+ advertise_pip_peer_ip: "4.4.4.4"
+ advertise_svi_ip: True
+ advertise_all_vni: True
+ advertise_default_gw: False
+ dampening: True
+ route_advertise_list:
+ - advertise_afi: ipv4
+ route_map: aa
+ - advertise_afi: ipv6
+ route_map: bb
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 51
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ route-flap-damping:
+ config:
+ enabled: true
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 2
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 2
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-all-vni: true
+ advertise-default-gw: true
+ advertise-pip: true
+ advertise-pip-ip: 3.3.3.3
+ advertise-pip-peer-ip: 4.4.4.4
+ advertise-svi-ip: true
+ openconfig-bgp-evpn-ext:route-advertise:
+ route-advertise-list:
+ - advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ route-map:
+ - aa
+ - advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ route-map:
+ - bb
+ openconfig-bgp-evpn-ext:vnis:
+ vni:
+ - config:
+ vni-number: 600
+ vni-number: 600
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/route-flap-damping/config/enabled"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/use-multiple-paths/ebgp/config/maximum-paths"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/use-multiple-paths/ibgp/config/maximum-paths"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV6_UNICAST/use-multiple-paths/ebgp/config/maximum-paths"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV6_UNICAST/use-multiple-paths/ibgp/config/maximum-paths"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-all-vni"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip-ip"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip-peer-ip"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-svi-ip"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise/route-advertise-list=IPV4_UNICAST/config/route-map=aa"
+ method: "delete"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise/route-advertise-list=IPV6_UNICAST/config/route-map=bb"
+ method: "delete"
+
+deleted_02:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 51
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ network-config:
+ network:
+ - config:
+ prefix: 22.22.22.22/16
+ prefix: 22.22.22.22/16
+ route-flap-damping:
+ config:
+ enabled: true
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 1
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ route-flap-damping:
+ config:
+ enabled: true
+ network-config:
+ network:
+ - config:
+ prefix: 22.22.22.22/16
+ prefix: 22.22.22.22/16
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-all-vni: true
+ advertise-default-gw: true
+ advertise-pip: true
+ advertise-pip-ip: 3.3.3.3
+ advertise-pip-peer-ip: 4.4.4.4
+ advertise-svi-ip: true
+ openconfig-bgp-evpn-ext:route-advertise:
+ route-advertise-list:
+ - advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ route-map:
+ - aa
+ - advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ route-map:
+ - bb
+ openconfig-bgp-evpn-ext:vnis:
+ vni:
+ - config:
+ vni-number: 600
+ vni-number: 600
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:table-connections:
+ table-connection:
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:DIRECTLY_CONNECTED
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 21.0
+ import-policy:
+ - bb
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:OSPF
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 27.0
+ import-policy:
+ - aa
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:STATIC
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 26.0
+ import-policy:
+ - bb
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/network-config/network=22.22.22.22%2f16"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/route-flap-damping/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/use-multiple-paths/ebgp/config/maximum-paths"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-all-vni"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-default-gw"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip-ip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip-peer-ip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-svi-ip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni=600"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/network-config/network=22.22.22.22%2f16"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/route-flap-damping/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=openconfig-bgp-types:L2VPN_EVPN"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections/table-connection=openconfig-policy-types:DIRECTLY_CONNECTED,openconfig-policy-types:BGP,openconfig-types:IPV6"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections/table-connection=openconfig-policy-types:OSPF,openconfig-policy-types:BGP,openconfig-types:IPV6"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections/table-connection=openconfig-policy-types:STATIC,openconfig-policy-types:BGP,openconfig-types:IPV6"
+ method: "delete"
+ data:
+
+deleted_03:
+ module_args:
+ config:
+ - bgp_as: 51
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ - afi: ipv6
+ safi: unicast
+ max_path:
+ ebgp: 2
+ ibgp: 1
+ advertise_all_vni: true
+ redistribute:
+ - metric: "17"
+ protocol: connected
+ route_map: bb
+ - metric: "18"
+ protocol: ospf
+ route_map: aa
+ - metric: "19"
+ protocol: static
+ route_map: aa
+ - afi: l2vpn
+ safi: evpn
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 51
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ route-flap-damping:
+ config:
+ enabled: true
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ use-multiple-paths:
+ ebgp:
+ config:
+ maximum-paths: 2
+ ibgp:
+ config:
+ maximum-paths: 1
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ route-flap-damping:
+ config:
+ enabled: true
+ l2vpn-evpn:
+ openconfig-bgp-evpn-ext:config:
+ advertise-all-vni: true
+ advertise-default-gw: true
+ advertise-pip: true
+ advertise-pip-ip: 3.3.3.3
+ advertise-pip-peer-ip: 4.4.4.4
+ advertise-svi-ip: true
+ openconfig-bgp-evpn-ext:route-advertise:
+ route-advertise-list:
+ - advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV4_UNICAST
+ route-map:
+ - aa
+ - advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ advertise-afi-safi: openconfig-bgp-types:IPV6_UNICAST
+ route-map:
+ - bb
+ openconfig-bgp-evpn-ext:vnis:
+ vni:
+ - config:
+ vni-number: 600
+ vni-number: 600
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:table-connections:
+ table-connection:
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:DIRECTLY_CONNECTED
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 21.0
+ import-policy:
+ - bb
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:OSPF
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 27.0
+ import-policy:
+ - aa
+ - address-family: openconfig-types:IPV6
+ dst-protocol: openconfig-policy-types:BGP
+ src-protocol: openconfig-policy-types:STATIC
+ config:
+ address-family: openconfig-types:IPV6
+ metric: 26.0
+ import-policy:
+ - bb
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/route-flap-damping/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV4_UNICAST/use-multiple-paths/ebgp/config/maximum-paths"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=IPV6_UNICAST/use-multiple-paths/ebgp/config/maximum-paths"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-all-vni"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-default-gw"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip-ip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-pip-peer-ip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:config/advertise-svi-ip"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise/route-advertise-list=IPV4_UNICAST/config/route-map=aa"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:route-advertise/route-advertise-list=IPV6_UNICAST/config/route-map=bb"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/l2vpn-evpn/openconfig-bgp-evpn-ext:vnis/vni=600"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=L2VPN_EVPN/route-flap-damping/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/afi-safis/afi-safi=openconfig-bgp-types:L2VPN_EVPN"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections/table-connection=openconfig-policy-types:DIRECTLY_CONNECTED,openconfig-policy-types:BGP,openconfig-types:IPV6"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections/table-connection=openconfig-policy-types:OSPF,openconfig-policy-types:BGP,openconfig-types:IPV6"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/table-connections/table-connection=openconfig-policy-types:STATIC,openconfig-policy-types:BGP,openconfig-types:IPV6"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_as_paths.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_as_paths.yaml
new file mode 100644
index 000000000..4f8f14589
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_as_paths.yaml
@@ -0,0 +1,494 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: test
+ members:
+ - 909.*
+ permit: true
+ - name: test_1
+ members:
+ - 908.*
+ - name: test_2
+ members:
+ - 907.*
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_2'
+ config:
+ as-path-set-name: 'test_2'
+ as-path-set-member:
+ - 800.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test'
+ config:
+ as-path-set-name: 'test'
+ as-path-set-member:
+ - 909.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_1'
+ config:
+ as-path-set-name: 'test_1'
+ as-path-set-member:
+ - 908.*
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_2'
+ config:
+ as-path-set-name: 'test_2'
+ as-path-set-member:
+ - 907.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+
+deleted_01:
+ module_args:
+ config:
+ - name: test
+ members:
+ - 808.*
+ permit: true
+ - name: test_1
+ members:
+ - 807.*
+ permit: true
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test'
+ config:
+ as-path-set-name: 'test'
+ as-path-set-member:
+ - 808.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - as-path-set-name: 'test_1'
+ config:
+ as-path-set-name: 'test_1'
+ as-path-set-member:
+ - 806.*
+ - 807.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_1/config/as-path-set-member=807.%2A"
+ method: "delete"
+
+deleted_02:
+ module_args:
+ config:
+ - name: test
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test'
+ config:
+ as-path-set-name: 'test'
+ as-path-set-member:
+ - 808.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test"
+ method: "delete"
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - name: test
+ - name: test_1
+ members:
+ - "301.301"
+ permit: False
+ - name: test_2
+ members:
+ - "111*"
+ - "120"
+ - "^800"
+ - "25$"
+ permit: True
+ - name: test_3
+ members:
+ - "900.*"
+ - "910.*"
+ permit: True
+ - name: test_4
+ members:
+ - "200"
+ - "210"
+ - "220"
+ - name: test_5
+ members:
+ - "300"
+ - name: test_6
+ members:
+ - "800.*"
+ permit: True
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test'
+ config:
+ as-path-set-name: 'test'
+ as-path-set-member:
+ - "11"
+ - "22"
+ - "44"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - as-path-set-name: 'test_1'
+ config:
+ as-path-set-name: 'test_1'
+ as-path-set-member:
+ - "100.*"
+ - "200.*"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_2'
+ config:
+ as-path-set-name: 'test_2'
+ as-path-set-member:
+ - "110"
+ - "120"
+ - "^800"
+ - "25$"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - as-path-set-name: 'test_3'
+ config:
+ as-path-set-name: 'test_3'
+ as-path-set-member:
+ - "900.*"
+ - "910.*"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_4'
+ config:
+ as-path-set-name: 'test_4'
+ as-path-set-member:
+ - "200"
+ - "210"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_5'
+ config:
+ as-path-set-name: 'test_5'
+ as-path-set-member:
+ - "300"
+ - "310"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_def'
+ config:
+ as-path-set-name: 'test_def'
+ as-path-set-member:
+ - "50.*"
+ - "60.*"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_1"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_2/config/as-path-set-member=110"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_3"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_5/config/as-path-set-member=310"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_1'
+ config:
+ as-path-set-name: 'test_1'
+ as-path-set-member:
+ - "301.301"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_2'
+ config:
+ as-path-set-name: 'test_2'
+ as-path-set-member:
+ - "111*"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_3'
+ config:
+ as-path-set-name: 'test_3'
+ as-path-set-member:
+ - "900.*"
+ - "910.*"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_4'
+ config:
+ as-path-set-name: 'test_4'
+ as-path-set-member:
+ - "220"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_6'
+ config:
+ as-path-set-name: 'test_6'
+ as-path-set-member:
+ - "800.*"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ - name: test
+ - name: test_1
+ members:
+ - "301.301"
+ permit: False
+ - name: test_2
+ members:
+ - "111*"
+ - "120"
+ - "^800"
+ - "25$"
+ permit: True
+ - name: test_3
+ members:
+ - "900.*"
+ - "910.*"
+ permit: True
+ - name: test_4
+ members:
+ - "200"
+ - "210"
+ - "220"
+ - name: test_5
+ members:
+ - "300"
+ - name: test_6
+ members:
+ - "800.*"
+ permit: True
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 4
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test'
+ config:
+ as-path-set-name: 'test'
+ as-path-set-member:
+ - "11"
+ - "22"
+ - "44"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - as-path-set-name: 'test_1'
+ config:
+ as-path-set-name: 'test_1'
+ as-path-set-member:
+ - "100.*"
+ - "200.*"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_2'
+ config:
+ as-path-set-name: 'test_2'
+ as-path-set-member:
+ - "110"
+ - "120"
+ - "^800"
+ - "25$"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - as-path-set-name: 'test_3'
+ config:
+ as-path-set-name: 'test_3'
+ as-path-set-member:
+ - "900.*"
+ - "910.*"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_4'
+ config:
+ as-path-set-name: 'test_4'
+ as-path-set-member:
+ - "200"
+ - "210"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_5'
+ config:
+ as-path-set-name: 'test_5'
+ as-path-set-member:
+ - "300"
+ - "310"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - as-path-set-name: 'test_def'
+ config:
+ as-path-set-name: 'test_def'
+ as-path-set-member:
+ - "50.*"
+ - "60.*"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_1"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_2/config/as-path-set-member=110"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_3"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_5/config/as-path-set-member=310"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets/as-path-set=test_def"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_1'
+ config:
+ as-path-set-name: 'test_1'
+ as-path-set-member:
+ - "301.301"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_2'
+ config:
+ as-path-set-name: 'test_2'
+ as-path-set-member:
+ - "111*"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_3'
+ config:
+ as-path-set-name: 'test_3'
+ as-path-set-member:
+ - "900.*"
+ - "910.*"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_4'
+ config:
+ as-path-set-name: 'test_4'
+ as-path-set-member:
+ - "220"
+ openconfig-bgp-policy-ext:action: 'DENY'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/as-path-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:as-path-sets:
+ as-path-set:
+ - as-path-set-name: 'test_6'
+ config:
+ as-path-set-name: 'test_6'
+ as-path-set-member:
+ - "800.*"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_communities.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_communities.yaml
new file mode 100644
index 000000000..5caca21b4
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_communities.yaml
@@ -0,0 +1,390 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: test
+ members:
+ regex:
+ - 808.*
+ match: ALL
+ permit: True
+ type: expanded
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:global:
+ config:
+ as: 5
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test2'
+ config:
+ community-set-name: 'test2'
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - REGEX:808.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+merged_02:
+ module_args:
+ config:
+ - name: test
+ type: standard
+ permit: True
+ local_as: False
+ no_export: True
+ no_peer: True
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - NO_ADVERTISE
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - NO_ADVERTISE
+ - NOPEER
+ - NO_EXPORT
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+deleted_01:
+ module_args:
+ config:
+ - name: test
+ type: expanded
+ members:
+ regex:
+ - 808.*
+ permit: true
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - REGEX:808.*
+ - REGEX:919.*
+ - REGEX:930.*
+ - REGEX:772.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test/config/community-member=REGEX%3A808.%2A"
+ method: "delete"
+deleted_02:
+ module_args:
+ config:
+ - name: test
+ type: expanded
+ match: ALL
+ permit: True
+ members:
+ regex:
+ - name: test2
+ type: standard
+ match: ANY
+ permit: False
+ local_as: True
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - REGEX:808.*
+ - REGEX:919.*
+ - REGEX:700.*
+ - REGEX:888.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - community-set-name: 'test2'
+ config:
+ community-set-name: 'test2'
+ community-member:
+ - NO_ADVERTISE
+ - NOPEER
+ - NO_EXPORT_SUBCONFED
+ - NO_EXPORT
+ openconfig-bgp-policy-ext:action: 'DENY'
+ match-set-options: 'ANY'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test2/config/community-member=NO_EXPORT_SUBCONFED"
+ method: "delete"
+deleted_03:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - NOPEER
+ - NO_EXPORT
+ - NO_ADVERTISE
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ method: "delete"
+
+replaced_01:
+ module_args:
+ config:
+ - name: test
+ members:
+ regex:
+ - 808.*
+ match: ALL
+ permit: True
+ type: expanded
+ state: replaced
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - NO_ADVERTISE
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+ - community-set-name: 'test2'
+ config:
+ community-set-name: 'test2'
+ community-member:
+ - REGEX:808.*
+ - REGEX:919.*
+ - REGEX:700.*
+ - REGEX:888.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - REGEX:808.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+
+replaced_02:
+ module_args:
+ config:
+ - name: test2
+ members:
+ regex:
+ - 808.*
+ match: ALL
+ permit: False
+ type: expanded
+ state: replaced
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - NO_ADVERTISE
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+ - community-set-name: 'test2'
+ config:
+ community-set-name: 'test2'
+ community-member:
+ - REGEX:808.*
+ - REGEX:919.*
+ - REGEX:700.*
+ - REGEX:888.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test2"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test2'
+ config:
+ community-set-name: 'test2'
+ community-member:
+ - REGEX:808.*
+ openconfig-bgp-policy-ext:action: 'DENY'
+ match-set-options: 'ALL'
+
+overridden_01:
+ module_args:
+ config:
+ - name: test
+ type: standard
+ permit: True
+ local_as: True
+ no_export: True
+ no_peer: True
+ - name: test1
+ no_advertise: True
+ permit: true
+ type: standard
+ state: overridden
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test1'
+ config:
+ community-set-name: 'test1'
+ community-member:
+ - NO_ADVERTISE
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+ - community-set-name: 'test3'
+ config:
+ community-set-name: 'test3'
+ community-member:
+ - NO_ADVERTISE
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - REGEX:808.*
+ - REGEX:919.*
+ - REGEX:700.*
+ - REGEX:888.*
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets/community-set=test3"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:community-sets:
+ community-set:
+ - community-set-name: 'test'
+ config:
+ community-set-name: 'test'
+ community-member:
+ - NO_EXPORT_SUBCONFED
+ - NO_EXPORT
+ - NOPEER
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_ext_communities.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_ext_communities.yaml
new file mode 100644
index 000000000..d860c4900
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_ext_communities.yaml
@@ -0,0 +1,499 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: test1
+ members:
+ regex:
+ - 808
+ match: all
+ permit: True
+ type: expanded
+ state: merged
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - REGEX:808
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+merged_02:
+ module_args:
+ config:
+ - name: test1
+ members:
+ route_origin:
+ - "500:500"
+ type: standard
+ permit: true
+ match: all
+ - name: test_ext
+ members:
+ regex:
+ - 800
+ match: any
+ permit: True
+ type: expanded
+ state: merged
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - "route-origin:200:200"
+ - "route-target:400:400"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - ext-community-set-name: 'test_ext'
+ config:
+ ext-community-set-name: 'test_ext'
+ ext-community-member:
+ - "REGEX:808"
+ - "REGEX:608"
+ - "REGEX:908"
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - route-origin:500:500
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test_ext'
+ config:
+ ext-community-set-name: 'test_ext'
+ ext-community-member:
+ - REGEX:800
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ANY'
+merged_03:
+ module_args:
+ config:
+ - name: test1
+ members:
+ route_target:
+ - "2.2.2.2:201"
+ match: all
+ permit: True
+ type: standard
+ state: merged
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - route-target:2.2.2.2:201
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+deleted_01:
+ module_args:
+ config:
+ - name: test1
+ type: expanded
+ match: all
+ members:
+ regex:
+ - 808
+ permit: true
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - REGEX:808
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test1"
+ method: "delete"
+deleted_02:
+ module_args:
+ config:
+ - name: test2
+ type: expanded
+ members:
+ regex:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test2'
+ config:
+ ext-community-set-name: 'test2'
+ ext-community-member:
+ - REGEX:808
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test2"
+ method: "delete"
+deleted_03:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test3'
+ config:
+ ext-community-set-name: 'test3'
+ ext-community-member:
+ - REGEX:710
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "delete"
+deleted_04:
+ module_args:
+ config:
+ - name: test4
+ members:
+ route_origin:
+ - 600:600
+ permit: true
+ match: all
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test4'
+ config:
+ ext-community-set-name: 'test4'
+ ext-community-member:
+ - route-origin:600:600
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test4"
+ method: "delete"
+deleted_05:
+ module_args:
+ config:
+ - name: test5
+ members:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test5'
+ config:
+ ext-community-set-name: 'test5'
+ ext-community-member:
+ - route-origin:4403:301
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test5"
+ method: "delete"
+deleted_06:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test6'
+ config:
+ ext-community-set-name: 'test6'
+ ext-community-member:
+ - route-origin:3303:201
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "delete"
+deleted_07:
+ module_args:
+ config:
+ - name: test7
+ members:
+ route_target:
+ - 1.1.1.1:33
+ permit: true
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test7'
+ config:
+ ext-community-set-name: 'test7'
+ ext-community-member:
+ - route-target:1.1.1.1:33
+ - route-target:2.2.2.2:33
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test7/config/ext-community-member=route-target%3A1.1.1.1%3A33"
+ method: "delete"
+deleted_08:
+ module_args:
+ config:
+ - name: test8
+ members:
+ route_target:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test8'
+ config:
+ ext-community-set-name: 'test8'
+ ext-community-member:
+ - route-target:2.2.2.2:33
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test8"
+ method: "delete"
+deleted_09:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test9'
+ config:
+ ext-community-set-name: 'test9'
+ ext-community-member:
+ - route-target:30.30.30.1:12
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "delete"
+
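+# 'replaced' and 'overridden' state test cases: matching sets are deleted and re-created via patch; 'overridden' also removes sets absent from the requested config.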
+replaced_01:
+ module_args:
+ config:
+ - name: replace_test1
+ members:
+ regex:
+ - 919
+ match: all
+ permit: True
+ type: expanded
+ - name: replace_test3
+ members:
+ route_origin:
+ - "101:101"
+ match: any
+ permit: False
+ type: standard
+ state: replaced
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'replace_test2'
+ config:
+ ext-community-set-name: 'replace_test2'
+ ext-community-member:
+ - REGEX:808
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - ext-community-set-name: 'replace_test1'
+ config:
+ ext-community-set-name: 'replace_test1'
+ ext-community-member:
+ - route-target:120.1.1.1:32
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - ext-community-set-name: 'replace_test3'
+ config:
+ ext-community-set-name: 'replace_test3'
+ ext-community-member:
+ - route-target:808:808
+ - route-origin:101:101
+ - route-origin:201:201
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=replace_test1"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=replace_test3"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'replace_test1'
+ config:
+ ext-community-set-name: 'replace_test1'
+ ext-community-member:
+ - REGEX:919
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'replace_test3'
+ config:
+ ext-community-set-name: 'replace_test3'
+ ext-community-member:
+ - route-origin:101:101
+ openconfig-bgp-policy-ext:action: 'DENY'
+ match-set-options: 'ANY'
+
+overridden_01:
+ module_args:
+ config:
+ - name: test1
+ members:
+ regex:
+ - 919
+ match: all
+ permit: True
+ type: expanded
+ - name: test4
+ members:
+ route_origin:
+ - "101:101"
+ match: any
+ permit: False
+ type: standard
+ state: overridden
+ existing_bgp_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ response:
+ code: 200
+ value:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test2'
+ config:
+ ext-community-set-name: 'test2'
+ ext-community-member:
+ - REGEX:808
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - route-target:2.2.2.2:11
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - ext-community-set-name: 'test3'
+ config:
+ ext-community-set-name: 'test3'
+ ext-community-member:
+ - route-target:808:301
+ - route-origin:101:101
+ - route-origin:201:201
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test1"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test2"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets/ext-community-set=test3"
+ method: "delete"
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test1'
+ config:
+ ext-community-set-name: 'test1'
+ ext-community-member:
+ - REGEX:919
+ openconfig-bgp-policy-ext:action: 'PERMIT'
+ match-set-options: 'ALL'
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/openconfig-bgp-policy:bgp-defined-sets/ext-community-sets"
+ method: "patch"
+ data:
+ openconfig-bgp-policy:ext-community-sets:
+ ext-community-set:
+ - ext-community-set-name: 'test4'
+ config:
+ ext-community-set-name: 'test4'
+ ext-community-member:
+ - route-origin:101:101
+ openconfig-bgp-policy-ext:action: 'DENY'
+ match-set-options: 'ANY'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors.yaml
new file mode 100644
index 000000000..0ba93b7e0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors.yaml
@@ -0,0 +1,1114 @@
+---
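+# Unit test fixtures for the sonic_bgp_neighbors resource module. Each case defines module_args, the mocked GET responses (existing_bgp_config), and the REST requests the module is expected to send (expected_config_requests).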
+merged_01:
+ module_args:
+ config:
+ - bgp_as: 51
+ neighbors:
+ - neighbor: Eth1/2
+ auth_pwd:
+ pwd: 'pw123'
+ encrypted: false
+ dont_negotiate_capability: true
+ ebgp_multihop:
+ enabled: true
+ multihop_ttl: 1
+ enforce_first_as: true
+ enforce_multihop: true
+ local_address: 'Ethernet4'
+ local_as:
+ as: 2
+ no_prepend: true
+ replace_as: true
+ nbr_description: "description 1"
+ override_capability: true
+ passive: true
+ port: 3
+ shutdown_msg: 'msg1'
+ solo: true
+ - neighbor: 1.1.1.1
+ disable_connected_check: true
+ ttl_security: 5
+ - bgp_as: 51
+ vrf_name: VrfReg1
+ peer_group:
+ - name: SPINE
+ bfd:
+ check_failure: true
+ enabled: true
+ profile: 'profile 1'
+ capability:
+ dynamic: true
+ extended_nexthop: true
+ auth_pwd:
+ pwd: 'U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M='
+ encrypted: true
+ dont_negotiate_capability: true
+ ebgp_multihop:
+ enabled: true
+ multihop_ttl: 1
+ enforce_first_as: true
+ enforce_multihop: true
+ local_address: 'Ethernet4'
+ local_as:
+ as: 2
+ no_prepend: true
+ replace_as: true
+ pg_description: 'description 1'
+ override_capability: true
+ passive: true
+ solo: true
+ remote_as:
+ peer_as: 4
+ - name: SPINE1
+ disable_connected_check: true
+ shutdown_msg: "msg1"
+ strict_capability_match: true
+ timers:
+ keepalive: 30
+ holdtime: 15
+ connect_retry: 25
+ ttl_security: 5
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ activate: true
+ allowas_in:
+ origin: true
+ - afi: ipv6
+ safi: unicast
+ activate: true
+ allowas_in:
+ value: 5
+ neighbors:
+ - neighbor: Eth1/3
+ remote_as:
+ peer_type: internal
+ peer_group: SPINE
+ advertisement_interval: 15
+ timers:
+ keepalive: 30
+ holdtime: 15
+ connect_retry: 25
+ bfd:
+ check_failure: true
+ enabled: true
+ profile: 'profile 1'
+ capability:
+ dynamic: true
+ extended_nexthop: true
+ auth_pwd:
+ pwd: 'U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg='
+ encrypted: true
+ nbr_description: 'description 2'
+ strict_capability_match: true
+ v6only: true
+ - neighbor: 192.168.1.4
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ method: "patch"
+ data:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg=
+ encrypted: True
+ neighbor-address: Eth1/3
+ enable-bfd:
+ config:
+ enabled: True
+ check-control-plane-failure: True
+ bfd-profile: 'profile 1'
+ timers:
+ config:
+ hold-time: 15
+ keepalive-interval: 30
+ connect-retry: 25
+ minimum-advertisement-interval: 15
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: Eth1/3
+ peer-group: SPINE
+ description: 'description 2'
+ strict-capability-match: True
+ openconfig-bgp-ext:v6only: True
+ capability-dynamic: True
+ capability-extended-nexthop: True
+ peer-type: INTERNAL
+ - neighbor-address: 192.168.1.4
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 192.168.1.4
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ method: "patch"
+ data:
+ openconfig-network-instance:peer-groups:
+ peer-group:
+ - peer-group-name: SPINE
+ auth-password:
+ config:
+ password: U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M=
+ encrypted: True
+ enable-bfd:
+ config:
+ enabled: True
+ check-control-plane-failure: True
+ bfd-profile: 'profile 1'
+ ebgp-multihop:
+ config:
+ enabled: True
+ multihop-ttl: 1
+ transport:
+ config:
+ local-address: Ethernet4
+ passive-mode: True
+ config:
+ peer-group-name: SPINE
+ description: 'description 1'
+ dont-negotiate-capability: True
+ enforce-first-as: True
+ enforce-multihop: True
+ override-capability: True
+ solo-peer: True
+ local-as: 2
+ local-as-no-prepend: True
+ local-as-replace-as: True
+ capability-dynamic: True
+ capability-extended-nexthop: True
+ peer-as: 4
+ - peer-group-name: SPINE1
+ timers:
+ config:
+ hold-time: 15
+ keepalive-interval: 30
+ connect-retry: 25
+ transport:
+ config:
+ passive-mode: False
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ enabled: True
+ allow-own-as:
+ config:
+ origin: True
+ enabled: True
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ enabled: True
+ allow-own-as:
+ config:
+ as-count: 5
+ enabled: True
+ config:
+ peer-group-name: SPINE1
+ disable-ebgp-connected-route-check: True
+ shutdown-message: msg1
+ strict-capability-match: True
+ ttl-security-hops: 5
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ method: "patch"
+ data:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: pw123
+ encrypted: False
+ neighbor-address: Eth1/2
+ ebgp-multihop:
+ config:
+ enabled: True
+ multihop-ttl: 1
+ transport:
+ config:
+ local-address: Ethernet4
+ passive-mode: True
+ config:
+ neighbor-address: Eth1/2
+ description: 'description 1'
+ dont-negotiate-capability: True
+ enforce-first-as: True
+ enforce-multihop: True
+ override-capability: True
+ peer-port: 3
+ shutdown-message: msg1
+ solo-peer: True
+ local-as: 2
+ local-as-no-prepend: True
+ local-as-replace-as: True
+ - neighbor-address: 1.1.1.1
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 1.1.1.1
+ disable-ebgp-connected-route-check: True
+ ttl-security-hops: 5
+
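+# merged_02: merge onto non-empty neighbor and peer-group state; superseded peer-as/peer-type leaves are removed with delete requests.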
+merged_02:
+ module_args:
+ config:
+ - bgp_as: 51
+ neighbors:
+ - neighbor: Eth1/2
+ auth_pwd:
+ pwd: 'pw123'
+ encrypted: false
+ dont_negotiate_capability: true
+ ebgp_multihop:
+ enabled: true
+ multihop_ttl: 1
+ enforce_first_as: true
+ enforce_multihop: true
+ local_address: 'Ethernet4'
+ local_as:
+ as: 2
+ no_prepend: true
+ replace_as: true
+ nbr_description: "description 1"
+ override_capability: true
+ passive: true
+ port: 3
+ shutdown_msg: 'msg1'
+ solo: true
+ - neighbor: 1.1.1.1
+ disable_connected_check: true
+ ttl_security: 5
+ - bgp_as: 51
+ vrf_name: VrfReg1
+ peer_group:
+ - name: SPINE
+ bfd:
+ check_failure: true
+ enabled: true
+ profile: 'profile 1'
+ capability:
+ dynamic: true
+ extended_nexthop: true
+ auth_pwd:
+ pwd: 'U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M='
+ encrypted: true
+ dont_negotiate_capability: true
+ ebgp_multihop:
+ enabled: true
+ multihop_ttl: 1
+ enforce_first_as: true
+ enforce_multihop: true
+ local_address: 'Ethernet4'
+ local_as:
+ as: 2
+ no_prepend: true
+ replace_as: true
+ pg_description: 'description 1'
+ override_capability: true
+ passive: true
+ solo: true
+ remote_as:
+ peer_as: 4
+ - name: SPINE1
+ disable_connected_check: true
+ shutdown_msg: "msg1"
+ strict_capability_match: true
+ timers:
+ keepalive: 30
+ holdtime: 15
+ connect_retry: 25
+ ttl_security: 5
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ activate: true
+ allowas_in:
+ origin: true
+ - afi: ipv6
+ safi: unicast
+ activate: true
+ allowas_in:
+ value: 5
+ - name: SPINE5
+ remote_as:
+ peer_type: internal
+ neighbors:
+ - neighbor: Eth1/3
+ remote_as:
+ peer_type: internal
+ peer_group: SPINE
+ advertisement_interval: 15
+ timers:
+ keepalive: 30
+ holdtime: 15
+ connect_retry: 25
+ bfd:
+ check_failure: true
+ enabled: true
+ profile: 'profile 1'
+ capability:
+ dynamic: true
+ extended_nexthop: true
+ auth_pwd:
+ pwd: 'U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg='
+ encrypted: true
+ nbr_description: 'description 2'
+ strict_capability_match: true
+ v6only: true
+ - neighbor: Eth1/4
+ remote_as:
+ peer_as: 700
+ peer_group: SPINE
+ - neighbor: 192.168.1.4
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - neighbor-address: Eth1/3
+ config:
+ neighbor-address: Eth1/3
+ peer-group: SPINE
+ local-as: 51
+ peer-as: 65399
+ - neighbor-address: Eth1/4
+ config:
+ neighbor-address: Eth1/4
+ peer-group: SPINE
+ local-as: 51
+ peer-type: INTERNAL
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:peer-groups:
+ peer-group:
+ - config:
+ peer-group-name: SPINE
+ peer-type: INTERNAL
+ timers:
+ config:
+ connect-retry: 30
+ minimum-advertisement-interval: 0
+ - config:
+ peer-group-name: SPINE5
+ peer-as: 55
+ timers:
+ config:
+ connect-retry: 40
+ minimum-advertisement-interval: 50
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ method: "patch"
+ data:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg=
+ encrypted: True
+ neighbor-address: Eth1/3
+ enable-bfd:
+ config:
+ enabled: True
+ check-control-plane-failure: True
+ bfd-profile: 'profile 1'
+ timers:
+ config:
+ hold-time: 15
+ keepalive-interval: 30
+ connect-retry: 25
+ minimum-advertisement-interval: 15
+ config:
+ neighbor-address: Eth1/3
+ description: 'description 2'
+ strict-capability-match: True
+ openconfig-bgp-ext:v6only: True
+ capability-dynamic: True
+ capability-extended-nexthop: True
+ peer-type: INTERNAL
+ - neighbor-address: Eth1/4
+ config:
+ neighbor-address: Eth1/4
+ peer-as: 700
+ - neighbor-address: 192.168.1.4
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 192.168.1.4
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f3/config/peer-as"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f4/config/peer-type"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ method: "patch"
+ data:
+ openconfig-network-instance:peer-groups:
+ peer-group:
+ - peer-group-name: SPINE
+ auth-password:
+ config:
+ password: U2FsdGVkX1/4sRsZ624wbAJfDmagPLq2LsGDOcW/47M=
+ encrypted: True
+ enable-bfd:
+ config:
+ enabled: True
+ check-control-plane-failure: True
+ bfd-profile: 'profile 1'
+ ebgp-multihop:
+ config:
+ enabled: True
+ multihop-ttl: 1
+ transport:
+ config:
+ local-address: Ethernet4
+ passive-mode: True
+ config:
+ peer-group-name: SPINE
+ description: 'description 1'
+ dont-negotiate-capability: True
+ enforce-first-as: True
+ enforce-multihop: True
+ override-capability: True
+ solo-peer: True
+ local-as: 2
+ local-as-no-prepend: True
+ local-as-replace-as: True
+ capability-dynamic: True
+ capability-extended-nexthop: True
+ peer-as: 4
+ - peer-group-name: SPINE1
+ timers:
+ config:
+ hold-time: 15
+ keepalive-interval: 30
+ connect-retry: 25
+ transport:
+ config:
+ passive-mode: False
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ enabled: True
+ allow-own-as:
+ config:
+ origin: True
+ enabled: True
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ enabled: True
+ allow-own-as:
+ config:
+ as-count: 5
+ enabled: True
+ config:
+ peer-group-name: SPINE1
+ disable-ebgp-connected-route-check: True
+ shutdown-message: msg1
+ strict-capability-match: True
+ ttl-security-hops: 5
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINE/config/peer-type"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINE5/config/peer-as"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ method: "patch"
+ data:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: pw123
+ encrypted: False
+ neighbor-address: Eth1/2
+ ebgp-multihop:
+ config:
+ enabled: True
+ multihop-ttl: 1
+ transport:
+ config:
+ local-address: Ethernet4
+ passive-mode: True
+ config:
+ neighbor-address: Eth1/2
+ description: 'description 1'
+ dont-negotiate-capability: True
+ enforce-first-as: True
+ enforce-multihop: True
+ override-capability: True
+ peer-port: 3
+ shutdown-message: msg1
+ solo-peer: True
+ local-as: 2
+ local-as-no-prepend: True
+ local-as-replace-as: True
+ - neighbor-address: 1.1.1.1
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 1.1.1.1
+ disable-ebgp-connected-route-check: True
+ ttl-security-hops: 5
+
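+# 'deleted' state test cases: deleted_01 clears all neighbors, deleted_02 deletes specific neighbor and peer-group attributes.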
+deleted_01:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg=
+ encrypted: True
+ neighbor-address: Eth1/3
+ enable-bfd:
+ config:
+ enabled: True
+ check-control-plane-failure: True
+ bfd-profile: 'profile 1'
+ timers:
+ config:
+ hold-time: 15
+ keepalive-interval: 30
+ connect-retry: 25
+ minimum-advertisement-interval: 15
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: Eth1/3
+ peer-group: SPINE
+ description: 'description 2'
+ strict-capability-match: True
+ openconfig-bgp-ext:v6only: True
+ capability-dynamic: True
+ capability-extended-nexthop: True
+ peer-as: 10
+ - neighbor-address: 192.168.1.4
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 192.168.1.4
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: pw123
+ encrypted: False
+ neighbor-address: Eth1/2
+ ebgp-multihop:
+ config:
+ enabled: True
+ multihop-ttl: 1
+ transport:
+ config:
+ local-address: Ethernet4
+ passive-mode: True
+ config:
+ neighbor-address: Eth1/2
+ description: 'description 1'
+ dont-negotiate-capability: True
+ enforce-first-as: True
+ enforce-multihop: True
+ override-capability: True
+ peer-port: 3
+ shutdown-message: msg1
+ solo-peer: True
+ local-as: 2
+ local-as-no-prepend: True
+ local-as-replace-as: True
+ - neighbor-address: 1.1.1.1
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 1.1.1.1
+ disable-ebgp-connected-route-check: True
+ ttl-security-hops: 5
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=192.168.1.4/"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f3/"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ config:
+ - bgp_as: 51
+ vrf_name: VrfReg1
+ neighbors:
+ - neighbor: 192.168.1.4
+ - neighbor: Eth1/3
+ peer_group:
+ - name: SPINETEST1
+ capability:
+ dynamic: true
+ extended_nexthop: true
+ remote_as:
+ peer_as: 65399
+ advertisement_interval: 15
+ timers:
+ keepalive: 77
+ holdtime: 78
+ connect_retry: 11
+ bfd:
+ enabled: true
+ check_failure: true
+ profile: 'kvsk_bfd_profile'
+ ebgp_multihop:
+ enabled: true
+ multihop_ttl: 22
+ auth_pwd:
+ encrypted: true
+ pwd: 'U2FsdGVkX1+LHXncDf0uAxQrs4CN7H5yDKT5sht6Ga4='
+ enforce_first_as: true
+ enforce_multihop: true
+ pg_description: 'pg_kvsk_description'
+ disable_connected_check: true
+ dont_negotiate_capability: true
+ local_as:
+ as: 65299
+ no_prepend: true
+ replace_as: true
+ override_capability: true
+ shutdown_msg: pg_kvsk_shutdown_msg
+ passive: true
+ local_address: 5.5.5.5
+ solo: true
+ address_family:
+ afis:
+ - afi: ipv4
+ safi: unicast
+ activate: true
+ allowas_in:
+ value: 8
+ - afi: ipv6
+ safi: unicast
+ activate: true
+ allowas_in:
+ origin: true
+ prefix_limit:
+ max_prefixes: 20
+ prevent_teardown: true
+ warning_threshold: 40
+ restart_timer: 60
+ - afi: l2vpn
+ safi: evpn
+ prefix_list_in: p1
+ prefix_list_out: p2
+ - bgp_as: 51
+ neighbors:
+ - neighbor: 1.1.1.1
+ - neighbor: Eth1/2
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:peer-groups:
+ peer-group:
+ - config:
+ capability-dynamic: true
+ capability-extended-nexthop: true
+ enabled: true
+ peer-group-name: SPINETEST1
+ description: "pg_kvsk_description"
+ disable-ebgp-connected-route-check: true
+ dont-negotiate-capability: true
+ enforce-first-as: true
+ enforce-multihop: true
+ local-as: 65299
+ shutdown-message: pg_kvsk_shutdown_msg
+ local-as-no-prepend: true
+ local-as-replace-as: true
+ override-capability: true
+ peer-as: 65399
+ solo-peer: true
+ peer-group-name: SPINETEST1
+ enable-bfd:
+ config:
+ enabled: true
+ check-control-plane-failure: true
+ bfd-profile: 'kvsk_bfd_profile'
+ ebgp-multihop:
+ config:
+ enabled: true
+ multihop-ttl: 22
+ auth-password:
+ config:
+ encrypted: true
+ password: 'U2FsdGVkX1+LHXncDf0uAxQrs4CN7H5yDKT5sht6Ga4='
+ advertisement-interval: 15
+ timers:
+ config:
+ keepalive-interval: 77
+ hold-time: 78
+ connect-retry: 11
+ transport:
+ config:
+ passive-mode: true
+ local-address: 5.5.5.5
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ allow-own-as:
+ config:
+ as-count: 8
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ ipv6-unicast:
+ prefix-limit:
+ config:
+ max-prefixes: 20
+ prevent-teardown: true
+ warning-threshold-pct: 40
+ restart-time: 60
+ allow-own-as:
+ config:
+ origin: True
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ enabled: true
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ prefix-list:
+ config:
+ import-policy: p1
+ export-policy: p2
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: U2FsdGVkX199MZ7YOPkOR9O6wEZmtGSgiDfnlcN9hBg=
+ encrypted: True
+ neighbor-address: Eth1/3
+ enable-bfd:
+ config:
+ enabled: True
+ check-control-plane-failure: True
+ bfd-profile: 'profile 1'
+ timers:
+ config:
+ hold-time: 15
+ keepalive-interval: 30
+ connect-retry: 25
+ minimum-advertisement-interval: 15
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: Eth1/3
+ peer-group: SPINETEST1
+ description: 'description 2'
+ strict-capability-match: True
+ openconfig-bgp-ext:v6only: True
+ capability-dynamic: True
+ capability-extended-nexthop: True
+ peer-as: 10
+ - neighbor-address: 192.168.1.4
+ config:
+ neighbor-address: 192.168.1.4
+ capability-extended-nexthop: True
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - auth-password:
+ config:
+ password: pw123
+ encrypted: False
+ neighbor-address: Eth1/2
+ ebgp-multihop:
+ config:
+ enabled: True
+ multihop-ttl: 1
+ transport:
+ config:
+ local-address: Ethernet4
+ passive-mode: False
+ config:
+ neighbor-address: Eth1/2
+ description: 'description 1'
+ dont-negotiate-capability: True
+ enforce-first-as: True
+ enforce-multihop: True
+ override-capability: True
+ peer-port: 3
+ shutdown-message: msg1
+ solo-peer: True
+ local-as: 2
+ local-as-no-prepend: True
+ local-as-replace-as: True
+ capability-extended-nexthop: True
+ - neighbor-address: 1.1.1.1
+ transport:
+ config:
+ passive-mode: False
+ config:
+ neighbor-address: 1.1.1.1
+ disable-ebgp-connected-route-check: True
+ ttl-security-hops: 5
+ capability-extended-nexthop: True
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=192.168.1.4/"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f3/"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/allow-own-as/config/as-count"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST/allow-own-as/config/origin"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST/ipv6-unicast/prefix-limit/config/max-prefixes"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST/ipv6-unicast/prefix-limit/config/prevent-teardown"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST/ipv6-unicast/prefix-limit/config/warning-threshold-pct"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:L2VPN_EVPN/prefix-list/config/export-policy"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/afi-safis/afi-safi=openconfig-bgp-types:L2VPN_EVPN/prefix-list/config/import-policy"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/auth-password/config/encrypted"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/auth-password/config/password"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/capability-dynamic"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/capability-extended-nexthop"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/description"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/disable-ebgp-connected-route-check"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/dont-negotiate-capability"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/enforce-first-as"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/enforce-multihop"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/local-as"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/local-as-no-prepend"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/local-as-replace-as"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/override-capability"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/peer-as"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/shutdown-message"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/config/solo-peer"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/ebgp-multihop/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/ebgp-multihop/config/multihop-ttl"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/enable-bfd/config/bfd-profile"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/enable-bfd/config/check-control-plane-failure"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/enable-bfd/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/timers/config/connect-retry"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/timers/config/hold-time"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/timers/config/keepalive-interval"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/transport/config/local-address"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups/peer-group=SPINETEST1/transport/config/passive-mode"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors_af.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors_af.yaml
new file mode 100644
index 000000000..2b2dd7be4
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_bgp_neighbors_af.yaml
@@ -0,0 +1,471 @@
+---
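+# Unit test fixtures for the sonic_bgp_neighbors_af resource module (per-neighbor address-family configuration).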
+merged_01:
+ module_args:
+ config:
+ - bgp_as: 51
+ neighbors:
+ - neighbor: Eth1/2
+ address_family:
+ - afi: ipv4
+ safi: unicast
+ allowas_in:
+ origin: true
+ ip_afi:
+ default_policy_name: rmap_reg1
+ send_default_route: true
+ prefix_limit:
+ max_prefixes: 1
+ prevent_teardown: true
+ warning_threshold: 99
+ restart_timer: 88
+ prefix_list_in: p1
+ prefix_list_out: p2
+ route_map:
+ - direction: in
+ name: neigh_af_rmap1
+ - direction: out
+ name: neigh_af_rmap2
+ route_reflector_client: true
+ route_server_client: true
+ - bgp_as: 51
+ vrf_name: VrfReg1
+ neighbors:
+ - neighbor: 1.1.1.1
+ address_family:
+ - afi: ipv6
+ safi: unicast
+ allowas_in:
+ value: 55
+ ip_afi:
+ default_policy_name: rmap_reg2
+ send_default_route: true
+ prefix_limit:
+ max_prefixes: 1
+ prevent_teardown: true
+ warning_threshold: 44
+ prefix_list_in: p3
+ prefix_list_out: p4
+ - neighbor: 2.2.2.2
+ address_family:
+ - afi: l2vpn
+ safi: evpn
+ allowas_in:
+ value: 22
+ prefix_list_in: p5
+ prefix_list_out: p6
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/peer-groups"
+ response:
+ code: 200
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/afi-safis"
+ method: "patch"
+ data:
+ openconfig-network-instance:afi-safis:
+ afi-safi:
+ - afi-safi-name: IPV6_UNICAST
+ config:
+ afi-safi-name: IPV6_UNICAST
+ prefix-list:
+ config:
+ import-policy: p3
+ export-policy: p4
+ ipv6-unicast:
+ config:
+ default-policy-name: rmap_reg2
+ send-default-route: True
+ prefix-limit:
+ config:
+ max-prefixes: 1
+ prevent-teardown: True
+ warning-threshold-pct: 44
+ allow-own-as:
+ config:
+ as-count: 55
+ enabled: true
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=2.2.2.2/afi-safis"
+ method: "patch"
+ data:
+ openconfig-network-instance:afi-safis:
+ afi-safi:
+ - afi-safi-name: L2VPN_EVPN
+ config:
+ afi-safi-name: L2VPN_EVPN
+ prefix-list:
+ config:
+ import-policy: p5
+ export-policy: p6
+ allow-own-as:
+ config:
+ as-count: 22
+ enabled: true
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis"
+ method: "patch"
+ data:
+ openconfig-network-instance:afi-safis:
+ afi-safi:
+ - afi-safi-name: IPV4_UNICAST
+ config:
+ afi-safi-name: IPV4_UNICAST
+ route-reflector-client: true
+ route-server-client: true
+ apply-policy:
+ config:
+ import-policy:
+ - neigh_af_rmap1
+ export-policy:
+ - neigh_af_rmap2
+ prefix-list:
+ config:
+ import-policy: p1
+ export-policy: p2
+ ipv4-unicast:
+ config:
+ default-policy-name: rmap_reg1
+ send-default-route: True
+ prefix-limit:
+ config:
+ max-prefixes: 1
+ prevent-teardown: True
+ warning-threshold-pct: 99
+ restart-timer: 88
+ allow-own-as:
+ config:
+ origin: true
+ enabled: true
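+# 'deleted' state test cases: deleted_01 removes all address families, deleted_02 and deleted_03 remove selected attributes and address families.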
+deleted_01:
+ module_args:
+ config:
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - neighbor-address: 1.1.1.1
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/afi-safis/afi-safi=openconfig-bgp-types:L2VPN_EVPN"
+ method: "delete"
+ data:
+deleted_02:
+ module_args:
+ config:
+ - bgp_as: 51
+ neighbors:
+ - neighbor: Eth1/2
+ address_family:
+ - afi: ipv4
+ safi: unicast
+ allowas_in:
+ origin: true
+ ip_afi:
+ default_policy_name: rmap_reg1
+ send_default_route: true
+ prefix_limit:
+ max_prefixes: 1
+ prevent_teardown: true
+ warning_threshold: 99
+ restart_timer: 88
+ prefix_list_in: p1
+ prefix_list_out: p2
+ route_map:
+ - direction: in
+ name: neigh_af_rmap1
+ - direction: out
+ name: neigh_af_rmap2
+ route_reflector_client: true
+ route_server_client: true
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - neighbor-address: Eth1/2
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ route-reflector-client: true
+ route-server-client: true
+ apply-policy:
+ config:
+ import-policy:
+ - neigh_af_rmap1
+ export-policy:
+ - neigh_af_rmap2
+ prefix-list:
+ config:
+ import-policy: p1
+ export-policy: p2
+ ipv4-unicast:
+ config:
+ default-policy-name: rmap_reg1
+ send-default-route: True
+ prefix-limit:
+ config:
+ max-prefixes: 1
+ prevent-teardown: True
+ warning-threshold-pct: 99
+ restart-timer: 88
+ allow-own-as:
+ config:
+ origin: true
+ enabled: true
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - neighbor-address: 1.1.1.1
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ - neighbor-address: 2.2.2.2
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=IPV4_UNICAST/apply-policy/config/export-policy"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=IPV4_UNICAST/apply-policy/config/import-policy"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/allow-own-as/config/enabled"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/allow-own-as/config/origin"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/config/route-reflector-client"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/config/route-server-client"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/ipv4-unicast/config/default-policy-name"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/ipv4-unicast/config/send-default-route"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/ipv4-unicast/prefix-limit/config/max-prefixes"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/ipv4-unicast/prefix-limit/config/prevent-teardown"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/ipv4-unicast/prefix-limit/config/restart-timer"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/ipv4-unicast/prefix-limit/config/warning-threshold-pct"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/prefix-list/config/export-policy"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=Eth1%2f2/afi-safis/afi-safi=openconfig-bgp-types:IPV4_UNICAST/prefix-list/config/import-policy"
+ method: "delete"
+ data:
+deleted_03:
+ module_args:
+ config:
+ - bgp_as: 51
+ vrf_name: VrfReg1
+ neighbors:
+ - neighbor: 1.1.1.1
+ address_family:
+ - afi: ipv6
+ safi: unicast
+ - neighbor: 2.2.2.2
+ address_family:
+ - afi: l2vpn
+ safi: evpn
+ state: deleted
+ existing_bgp_config:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/global/config"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:config:
+ as: 51
+ router-id: "10.2.2.4"
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - neighbor-address: Eth1/2
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV4_UNICAST
+ route-reflector-client: true
+ route-server-client: true
+ apply-policy:
+ config:
+ import-policy:
+ - neigh_af_rmap1
+ export-policy:
+ - neigh_af_rmap2
+ prefix-list:
+ config:
+ import-policy: p1
+ export-policy: p2
+ ipv4-unicast:
+ config:
+ default-policy-name: rmap_reg1
+ send-default-route: True
+ prefix-limit:
+ config:
+ max-prefixes: 1
+ prevent-teardown: True
+ warning-threshold-pct: 99
+ restart-timer: 88
+ allow-own-as:
+ config:
+ origin: true
+ enabled: true
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:neighbors:
+ neighbor:
+ - neighbor-address: 1.1.1.1
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ config:
+ afi-safi-name: openconfig-bgp-types:IPV6_UNICAST
+ - neighbor-address: 2.2.2.2
+ afi-safis:
+ afi-safi:
+ - afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ config:
+ afi-safi-name: openconfig-bgp-types:L2VPN_EVPN
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=1.1.1.1/afi-safis/afi-safi=openconfig-bgp-types:IPV6_UNICAST"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=BGP,bgp/bgp/neighbors/neighbor=2.2.2.2/afi-safis/afi-safi=openconfig-bgp-types:L2VPN_EVPN"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_command.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_command.yaml
new file mode 100644
index 000000000..5241da840
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_command.yaml
@@ -0,0 +1,12 @@
+---
+merged_01:
+ module_args:
+ commands:
+ - show version
+ retries: 5
+ interval: 3
+ wait_for:
+ - result[0] contains Version
+ match: any
+ expected_command_requests:
+ - show version
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_config.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_config.yaml
new file mode 100644
index 000000000..66b30dced
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_config.yaml
@@ -0,0 +1,21 @@
+---
+merged_01:
+ module_args:
+ commands: ['snmp-server community public group ro']
+ save: yes
+ expected_commands_to_device:
+ - snmp-server community public group ro
+ - write memory
+
+merged_02:
+ module_args:
+ lines:
+ - seq 2 permit udp any any
+ - seq 3 deny icmp any any
+ parents: ['ip access-list test']
+ before: ['no ip access-list test']
+ expected_commands_to_device:
+ - no ip access-list test
+ - ip access-list test
+ - seq 2 permit udp any any
+ - seq 3 deny icmp any any
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_copp.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_copp.yaml
new file mode 100644
index 000000000..c0bd0557c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_copp.yaml
@@ -0,0 +1,203 @@
+---
+merged_01:
+ module_args:
+ config:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 1
+ trap_action: 'DROP'
+ queue: 1
+ cir: '45'
+ cbs: '45'
+ - copp_name: 'copp-2'
+ trap_priority: 2
+ trap_action: 'FORWARD'
+ queue: 2
+ cir: '90'
+ cbs: '90'
+ existing_copp_config:
+ - path: "/data/openconfig-copp-ext:copp"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups"
+ method: "patch"
+ data:
+ openconfig-copp-ext:copp-groups:
+ copp-group:
+ - name: 'copp-1'
+ config:
+ name: 'copp-1'
+ trap-priority: 1
+ trap-action: 'DROP'
+ queue: 1
+ cir: '45'
+ cbs: '45'
+ - name: 'copp-2'
+ config:
+ name: 'copp-2'
+ trap-priority: 2
+ trap-action: 'FORWARD'
+ queue: 2
+ cir: '90'
+ cbs: '90'
+
+replaced_01:
+ module_args:
+ config:
+ copp_groups:
+ - copp_name: 'copp-2'
+ trap_action: 'FORWARD'
+ cir: '60'
+ cbs: '60'
+ - copp_name: 'copp-3'
+ trap_priority: 3
+ trap_action: 'DROP'
+ queue: 3
+ cir: '70'
+ cbs: '70'
+ - copp_name: 'copp-4'
+ trap_priority: 5
+ trap_action: 'DROP'
+ queue: 5
+ cir: '75'
+ cbs: '75'
+ state: replaced
+ existing_copp_config:
+ - path: "/data/openconfig-copp-ext:copp"
+ response:
+ code: 200
+ value:
+ openconfig-copp-ext:copp:
+ copp-groups:
+ copp-group:
+ - name: 'copp-1'
+ config:
+ name: 'copp-1'
+ trap-priority: 1
+ trap-action: 'DROP'
+ queue: 1
+ cir: '45'
+ cbs: '45'
+ expected_config_requests:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups"
+ method: "patch"
+ data:
+ openconfig-copp-ext:copp-groups:
+ copp-group:
+ - name: 'copp-2'
+ config:
+ name: 'copp-2'
+ trap-action: 'FORWARD'
+ cir: '60'
+ cbs: '60'
+ - name: 'copp-3'
+ config:
+ name: 'copp-3'
+ trap-priority: 3
+ trap-action: 'DROP'
+ queue: 3
+ cir: '70'
+ cbs: '70'
+ - name: 'copp-4'
+ config:
+ name: 'copp-4'
+ trap-priority: 5
+ trap-action: 'DROP'
+ queue: 5
+ cir: '75'
+ cbs: '75'
+
+overridden_01:
+ module_args:
+ config:
+ copp_groups:
+ - copp_name: 'copp-5'
+ trap_priority: 1
+ trap_action: 'FORWARD'
+ queue: 1
+ cir: '15'
+ cbs: '15'
+ state: overridden
+ existing_copp_config:
+ - path: "/data/openconfig-copp-ext:copp"
+ response:
+ code: 200
+ value:
+ openconfig-copp-ext:copp:
+ copp-groups:
+ copp-group:
+ - name: 'copp-1'
+ config:
+ name: 'copp-1'
+ trap-priority: 1
+ trap-action: 'DROP'
+ queue: 1
+ cir: '45'
+ cbs: '45'
+ expected_config_requests:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups"
+ method: "patch"
+ data:
+ openconfig-copp-ext:copp-groups:
+ copp-group:
+ - name: 'copp-5'
+ config:
+ name: 'copp-5'
+ trap-priority: 1
+ trap-action: 'FORWARD'
+ queue: 1
+ cir: '15'
+ cbs: '15'
+
+deleted_01:
+ module_args:
+ config:
+ copp_groups:
+ - copp_name: 'copp-1'
+ trap_priority: 8
+ queue: 8
+ cir: '20'
+ cbs: '20'
+ - copp_name: 'copp-2'
+ state: deleted
+ existing_copp_config:
+ - path: "/data/openconfig-copp-ext:copp"
+ response:
+ code: 200
+ value:
+ openconfig-copp-ext:copp:
+ copp-groups:
+ copp-group:
+ - name: 'copp-1'
+ config:
+ name: 'copp-1'
+ trap-priority: 8
+ trap-action: 'DROP'
+ queue: 8
+ cir: '20'
+ cbs: '20'
+ - name: 'copp-2'
+ config:
+ name: 'copp-2'
+ trap-priority: 2
+ trap-action: 'FORWARD'
+ queue: 2
+ cir: '60'
+ cbs: '60'
+ expected_config_requests:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups/copp-group=copp-1/config/trap-priority"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups/copp-group=copp-1/config/queue"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups/copp-group=copp-1/config/cir"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups/copp-group=copp-1/config/cbs"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-copp-ext:copp/copp-groups/copp-group=copp-2"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_relay.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_relay.yaml
new file mode 100644
index 000000000..e07bf9da7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_relay.yaml
@@ -0,0 +1,917 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: 'Eth1/5'
+ ipv4:
+ server_addresses:
+ - address: 100.1.1.2
+ - address: 100.1.1.3
+ source_interface: "Vlan 101"
+ vrf_name: "VrfReg1"
+ vrf_select: true
+ link_select: true
+ policy_action: "replace"
+ circuit_id: "%h:%p"
+ ipv6:
+ server_addresses:
+ - address: 100::2
+ - address: 100::3
+ source_interface: "Vlan 101"
+ vrf_name: "VrfReg2"
+ vrf_select: true
+ - name: 'Eth1/31'
+ ipv4:
+ max_hop_count: 8
+ - name: 'Eth1/32'
+ ipv6:
+ max_hop_count: 8
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/31'
+ config:
+ id: 'Eth1/31'
+ helper-address:
+ - '131.1.1.2'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/32'
+ config:
+ id: 'Eth1/32'
+ helper-address:
+ - '131::2'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/config/openconfig-relay-agent-ext:src-intf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:src-intf: "Vlan101"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/config/openconfig-relay-agent-ext:vrf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf: "VrfReg1"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/agent-information-option/config/openconfig-relay-agent-ext:vrf-select"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf-select: "ENABLE"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/agent-information-option/config/openconfig-relay-agent-ext:link-select"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:link-select: "ENABLE"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/config/openconfig-relay-agent-ext:policy-action"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:policy-action: "REPLACE"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f5/agent-information-option/config/circuit-id"
+ method: "patch"
+ data:
+ openconfig-relay-agent:circuit-id: "%h:%p"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f5/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '100::2'
+ - '100::3'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f5/config/openconfig-relay-agent-ext:src-intf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:src-intf: "Vlan101"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f5/config/openconfig-relay-agent-ext:vrf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf: "VrfReg2"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f5/options/config/openconfig-relay-agent-ext:vrf-select"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf-select: "ENABLE"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f31/config/openconfig-relay-agent-ext:max-hop-count"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:max-hop-count: 8
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f32/config/openconfig-relay-agent-ext:max-hop-count"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:max-hop-count: 8
+merged_02:
+ module_args:
+ config:
+ - name: 'Eth1/32'
+ ipv4:
+ server_addresses:
+ - address: '132.1.1.2'
+ ipv6:
+ server_addresses:
+ - address: '132::2'
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/32'
+ config:
+ id: 'Eth1/32'
+ helper-address:
+ - '132.1.1.2'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/32'
+ config:
+ id: 'Eth1/32'
+ helper-address:
+ - '132::2'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests: []
+deleted_01:
+ module_args:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '100.1.1.2'
+ vrf_select: true
+ source_interface: 'Vlan100'
+ link_select: true
+ policy_action: 'replace'
+ circuit_id: '%i'
+ - name: 'Eth1/2'
+ ipv6:
+ server_addresses:
+ - address: '101::2'
+ vrf_select: true
+ source_interface: 'Vlan100'
+ - name: 'Eth1/3'
+ ipv4:
+ max_hop_count: 12
+ ipv6:
+ max_hop_count: 12
+ state: deleted
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101.1.1.2'
+ - '101.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102.1.1.2'
+ - '102.1.1.3'
+ openconfig-relay-agent-ext:max-hop-count: 12
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100::2'
+ - '100::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101::2'
+ - '101::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102::2'
+ - '102::3'
+ openconfig-relay-agent-ext:max-hop-count: 12
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/helper-address=100.1.1.2"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/agent-information-option/config/openconfig-relay-agent-ext:vrf-select"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/openconfig-relay-agent-ext:src-intf"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/agent-information-option/config/openconfig-relay-agent-ext:link-select"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/openconfig-relay-agent-ext:policy-action"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/agent-information-option/config/circuit-id"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f2/config/helper-address=101::2"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f2/options/config/openconfig-relay-agent-ext:vrf-select"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f2/config/openconfig-relay-agent-ext:src-intf"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f3/config/openconfig-relay-agent-ext:max-hop-count"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/openconfig-relay-agent-ext:max-hop-count"
+ method: "delete"
+deleted_02:
+ module_args:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address:
+ ipv6:
+ server_addresses:
+ - address:
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '101.1.1.2'
+ - address: '101.1.1.3'
+ ipv6:
+ server_addresses:
+ - address: '101::2'
+ - address: '101::3'
+ - name: 'Eth1/3'
+ state: deleted
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101.1.1.2'
+ - '101.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102.1.1.2'
+ - '102.1.1.3'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100::2'
+ - '100::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101::2'
+ - '101::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102::2'
+ - '102::3'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f2/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f3/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/helper-address"
+ method: "delete"
+deleted_03:
+ module_args:
+ config:
+ state: deleted
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/32'
+ config:
+ id: 'Eth1/32'
+ helper-address:
+ - '132.1.1.2'
+ - '132.1.1.3'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/32'
+ config:
+ id: 'Eth1/32'
+ helper-address:
+ - '132::2'
+ - '132::3'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f32/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f32/config/helper-address"
+ method: "delete"
+deleted_04:
+ module_args:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '100.1.1.2'
+ vrf_select: true
+ max_hop_count: 8
+ source_interface: 'Vlan100'
+ link_select: true
+ policy_action: 'replace'
+ ipv6:
+ server_addresses:
+ - address: '100::2'
+ source_interface: 'Vlan100'
+ state: deleted
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value: {}
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value: {}
+ config_requests: []
+replaced_01:
+ module_args:
+ config:
+ - name: 'Eth1/1'
+ ipv4:
+ server_addresses:
+ - address: '100.1.1.2'
+ - address: '100.1.1.3'
+ source_interface: 'Vlan100'
+ policy_action: 'append'
+ ipv6:
+ server_addresses:
+ - address: '100::2'
+ - address: '100::3'
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '101.1.1.2'
+ - address: '101.1.1.4'
+ - address: '101.1.1.6'
+ vrf_name: 'VrfReg2'
+ vrf_select: false
+ max_hop_count: 10
+ - name: 'Eth1/3'
+ ipv6:
+ server_addresses:
+ - address: '102::2'
+ - address: '102::4'
+ - address: '102::6'
+ vrf_name: 'VrfReg2'
+ vrf_select: false
+ state: replaced
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101.1.1.2'
+ - '101.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102.1.1.2'
+ - '102.1.1.3'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - id: 'Eth1/4'
+ config:
+ id: 'Eth1/4'
+ helper-address:
+ - '103.1.1.2'
+ - '103.1.1.3'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100::2'
+ - '100::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101::2'
+ - '101::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102::2'
+ - '102::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ config_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/helper-address=101.1.1.3"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/openconfig-relay-agent-ext:src-intf"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/agent-information-option/config/openconfig-relay-agent-ext:link-select"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/agent-information-option/config/circuit-id"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f2/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f3/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/helper-address=102::3"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/openconfig-relay-agent-ext:src-intf"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/openconfig-relay-agent-ext:max-hop-count"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/openconfig-relay-agent-ext:src-intf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:src-intf: "Vlan100"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/openconfig-relay-agent-ext:policy-action"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:policy-action: "APPEND"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '100::2'
+ - '100::3'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '101.1.1.4'
+ - '101.1.1.6'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/openconfig-relay-agent-ext:vrf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf: "VrfReg2"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/agent-information-option/config/openconfig-relay-agent-ext:vrf-select"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf-select: "DISABLE"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/openconfig-relay-agent-ext:max-hop-count"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:max-hop-count: 10
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '102::4'
+ - '102::6'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/openconfig-relay-agent-ext:vrf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf: "VrfReg2"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/options/config/openconfig-relay-agent-ext:vrf-select"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf-select: "DISABLE"
+replaced_02:
+ module_args:
+ config:
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '101.1.1.2'
+ - address: '101.1.1.4'
+ vrf_name: 'VrfReg1'
+ max_hop_count: 12
+ - name: 'Eth1/3'
+ ipv6:
+ server_addresses:
+ - address: '102::2'
+ - address: '102::4'
+ vrf_name: 'VrfReg1'
+ state: replaced
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101.1.1.2'
+ - '101.1.1.4'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 12
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100::2'
+ - '100::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102::2'
+ - '102::4'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests: []
+overridden_01:
+ module_args:
+ config:
+ - name: 'Eth1/2'
+ ipv4:
+ server_addresses:
+ - address: '110.1.1.2'
+ - address: '110.1.1.3'
+ vrf_name: 'VrfReg1'
+ - name: 'Eth1/3'
+ ipv6:
+ server_addresses:
+ - address: '120::2'
+ - address: '120::3'
+ vrf_name: 'VrfReg1'
+ state: overridden
+ facts_get_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcp:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100.1.1.2'
+ - '100.1.1.3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ openconfig-relay-agent-ext:policy-action: 'REPLACE'
+ agent-information-option:
+ config:
+ circuit-id: '%i'
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ openconfig-relay-agent-ext:link-select: 'ENABLE'
+ - id: 'Eth1/2'
+ config:
+ id: 'Eth1/2'
+ helper-address:
+ - '101.1.1.2'
+ - '101.1.1.4'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 12
+ openconfig-relay-agent-ext:policy-action: 'DISCARD'
+ agent-information-option:
+ config:
+ circuit-id: '%p'
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ openconfig-relay-agent-ext:link-select: 'DISABLE'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6"
+ response:
+ code: 200
+ value:
+ openconfig-relay-agent:dhcpv6:
+ interfaces:
+ interface:
+ - id: 'Eth1/1'
+ config:
+ id: 'Eth1/1'
+ helper-address:
+ - '100::2'
+ - '100::3'
+ openconfig-relay-agent-ext:src-intf: 'Vlan100'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 8
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'ENABLE'
+ - id: 'Eth1/3'
+ config:
+ id: 'Eth1/3'
+ helper-address:
+ - '102::2'
+ - '102::4'
+ openconfig-relay-agent-ext:vrf: 'VrfReg1'
+ openconfig-relay-agent-ext:max-hop-count: 10
+ options:
+ config:
+ openconfig-relay-agent-ext:vrf-select: 'DISABLE'
+ config_requests:
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f1/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/helper-address"
+ method: "delete"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '110.1.1.2'
+ - '110.1.1.3'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcp/interfaces/interface=Eth1%2f2/config/openconfig-relay-agent-ext:vrf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf: "VrfReg1"
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/helper-address"
+ method: "patch"
+ data:
+ openconfig-relay-agent:helper-address:
+ - '120::2'
+ - '120::3'
+ - path: "data/openconfig-relay-agent:relay-agent/dhcpv6/interfaces/interface=Eth1%2f3/config/openconfig-relay-agent-ext:vrf"
+ method: "patch"
+ data:
+ openconfig-relay-agent-ext:vrf: "VrfReg1"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_snooping.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_snooping.yaml
new file mode 100644
index 000000000..3fedc7eea
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_dhcp_snooping.yaml
@@ -0,0 +1,1128 @@
+merged_01:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ enabled: true
+ verify_mac: true
+ vlans: ['1', '2', '3', '5']
+ trusted:
+ - intf_name: 'Ethernet8'
+ state: merged
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv4-admin-enable'
+ method: 'patch'
+ data:
+ openconfig-dhcp-snooping:dhcpv4-admin-enable: true
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv4-verify-mac-address'
+ method: 'patch'
+ data:
+ openconfig-dhcp-snooping:dhcpv4-verify-mac-address: true
+ - path: 'data/openconfig-interfaces:interfaces/interface=Ethernet8/dhcpv4-snooping-trust/config/dhcpv4-snooping-trust'
+ method: 'patch'
+ data:
+ openconfig-interfaces:dhcpv4-snooping-trust: 'ENABLE'
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan1/dhcpv4_snooping_enable'
+ method: 'patch'
+ data:
+ sonic-vlan:dhcpv4_snooping_enable: 'enable'
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan2/dhcpv4_snooping_enable'
+ method: 'patch'
+ data:
+ sonic-vlan:dhcpv4_snooping_enable: 'enable'
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan3/dhcpv4_snooping_enable'
+ method: 'patch'
+ data:
+ sonic-vlan:dhcpv4_snooping_enable: 'enable'
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan5/dhcpv4_snooping_enable'
+ method: 'patch'
+ data:
+ sonic-vlan:dhcpv4_snooping_enable: 'enable'
+
+merged_02:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ enabled: true
+ vlans:
+ - '4'
+ trusted:
+ - intf_name: 'Ethernet2'
+ - intf_name: PortChannel1
+ state: merged
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-admin-enable'
+ method: 'patch'
+ data:
+ openconfig-dhcp-snooping:dhcpv6-admin-enable: true
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan4/dhcpv6_snooping_enable'
+ method: 'patch'
+ data:
+ sonic-vlan:dhcpv6_snooping_enable: 'enable'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'patch'
+ data:
+ openconfig-interfaces:dhcpv6-snooping-trust: 'ENABLE'
+ - path: 'data/openconfig-interfaces:interfaces/interface=Ethernet2/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'patch'
+ data:
+ openconfig-interfaces:dhcpv6-snooping-trust: 'ENABLE'
+
+merged_03:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '1'
+ - mac_addr: 'aa:f7:67:fc:f4:9a'
+ ip_addr: '156.33.90.167'
+ intf_name: 'PortChannel1'
+ vlan_id: '2'
+ state: merged
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry
+ method: patch
+ data:
+ openconfig-dhcp-snooping:entry:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ config:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ vlan: "Vlan1"
+ interface: Ethernet4
+ ip: '192.0.2.146'
+ - mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ config:
+ mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ vlan: "Vlan2"
+ interface: PortChannel1
+ ip: '156.33.90.167'
+
+merged_04_blank:
+ module_args:
+ config:
+ afis:
+ - afi: ipv4
+ - afi: ipv6
+ state: merged
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests: []
+
+deleted_01:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ vlans:
+ - '3'
+ - '5'
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: true
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: ["1", "2", "3", "5"]
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: ["Ethernet8"]
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan3/dhcpv4_snooping_enable'
+ method: 'delete'
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan5/dhcpv4_snooping_enable'
+ method: 'delete'
+
+deleted_02_clear_vlans:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ vlans: []
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan4/dhcpv6_snooping_enable'
+ method: 'delete'
+
+deleted_02_2_select_vlans:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ vlans:
+ - "2"
+ - "6"
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4", "2"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan2/dhcpv6_snooping_enable'
+ method: 'delete'
+
+deleted_03:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-admin-enable
+ method: patch
+ data:
+ openconfig-dhcp-snooping:dhcpv6-admin-enable: false
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan4/dhcpv6_snooping_enable'
+ method: 'delete'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel2/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel3/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'delete'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel4/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'delete'
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv6-verify-mac-address: true
+
+deleted_04_clear_bindings:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings: []
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ - mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ state:
+ mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ vlan: "2"
+ intf: PortChannel1
+ ipaddress: '156.33.90.167'
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:26,ipv4
+ method: delete
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=aa:f7:67:fc:f4:9a,ipv4
+ method: delete
+ data: Null
+
+deleted_05_select_bindings:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '1'
+ - mac_addr: 'a6:83:f5:92:c2:69'
+ ip_addr: '46.9.247.7'
+ intf_name: 'Ethernet6'
+ vlan_id: '7'
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ - mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ state:
+ mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ vlan: "2"
+ intf: PortChannel1
+ ipaddress: '156.33.90.167'
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:26,ipv4
+ method: delete
+
+deleted_06_clear_trusted:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ trusted: []
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel2/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel3/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'delete'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel4/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'delete'
+
+deleted_07_select_trusted:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ trusted:
+ - intf_name: PortChannel1
+ - intf_name: PortChannel6
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+
+deleted_08_booleans:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ enabled: true
+ verify_mac: false
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-admin-enable
+ method: patch
+ data:
+ openconfig-dhcp-snooping:dhcpv6-admin-enable: false
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv6-verify-mac-address: true
+
+deleted_09_empty:
+ module_args:
+ config:
+ afis: []
+ state: deleted
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: false
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["4"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["PortChannel1", "PortChannel2", "PortChannel3", "PortChannel4"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-admin-enable
+ method: patch
+ data:
+ openconfig-dhcp-snooping:dhcpv6-admin-enable: false
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv6-verify-mac-address: true
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv4-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv4-verify-mac-address: true
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan4/dhcpv6_snooping_enable'
+ method: 'delete'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel2/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel3/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'delete'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel4/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'delete'
+
+overridden_01:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ enabled: false
+ verify_mac: false
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '3'
+ - afi: 'ipv6'
+ enabled: false
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:27'
+ ip_addr: '2002::2'
+ intf_name: 'Ethernet1'
+ vlan_id: '3'
+ state: overridden
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: true
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ - mac: '28:21:28:15:c1:1b'
+ iptype: ipv4
+ state:
+ mac: '28:21:28:15:c1:1b'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet2
+ ipaddress: "141.202.222.118"
+ - mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ state:
+ mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ vlan: "2"
+ intf: PortChannel1
+ ipaddress: '156.33.90.167'
+ - mac: '00:b0:d0:63:c2:27'
+ iptype: ipv6
+ state:
+ mac: '00:b0:d0:63:c2:27'
+ iptype: ipv6
+ vlan: "1"
+ intf: Ethernet3
+ ipaddress: "2002::2"
+ expected_config_requests:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:26,ipv4'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=28:21:28:15:c1:1b,ipv4'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=aa:f7:67:fc:f4:9a,ipv4'
+ method: "delete"
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry
+ method: patch
+ data:
+ openconfig-dhcp-snooping:entry:
+ - mac: 00:b0:d0:63:c2:26
+ iptype: ipv4
+ config:
+ mac: 00:b0:d0:63:c2:26
+ iptype: ipv4
+ vlan: "Vlan3"
+ interface: Ethernet4
+ ip: "192.0.2.146"
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv4-admin-enable
+ method: patch
+ data:
+ openconfig-dhcp-snooping:dhcpv4-admin-enable: false
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv4-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv4-verify-mac-address: false
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:27,ipv6'
+ method: "delete"
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry
+ method: patch
+ data:
+ openconfig-dhcp-snooping:entry:
+ - mac: '00:b0:d0:63:c2:27'
+ iptype: ipv6
+ config:
+ mac: '00:b0:d0:63:c2:27'
+ iptype: ipv6
+ vlan: "Vlan3"
+ interface: Ethernet1
+ ip: "2002::2"
+
+replaced_01:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '3'
+ enabled: true
+ verify_mac: true
+ - afi: 'ipv6'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:23'
+ ip_addr: '1640:0:0::83'
+ intf_name: 'Ethernet2'
+ vlan_id: '1'
+ state: replaced
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: true
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ - mac: '28:21:28:15:c1:1b'
+ iptype: ipv4
+ state:
+ mac: '28:21:28:15:c1:1b'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet2
+ ipaddress: "141.202.222.118"
+ - mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ state:
+ mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ vlan: "2"
+ intf: PortChannel1
+ ipaddress: '156.33.90.167'
+ expected_config_requests:
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv4-admin-enable
+ method: patch
+ data:
+ openconfig-dhcp-snooping:dhcpv4-admin-enable: true
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:26,ipv4'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=28:21:28:15:c1:1b,ipv4'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=aa:f7:67:fc:f4:9a,ipv4'
+ method: "delete"
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:entry:
+ - mac: "00:b0:d0:63:c2:26"
+ iptype: "ipv4"
+ config:
+ mac: "00:b0:d0:63:c2:26"
+ iptype: "ipv4"
+ vlan: "Vlan3"
+ interface: "Ethernet4"
+ ip: "192.0.2.146"
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:entry:
+ - mac: "00:b0:d0:63:c2:23"
+ iptype: "ipv6"
+ config:
+ mac: "00:b0:d0:63:c2:23"
+ iptype: "ipv6"
+ vlan: "Vlan1"
+ interface: "Ethernet2"
+ ip: "1640:0:0::83"
+
+replaced_02:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv4'
+ source_bindings:
+ - mac_addr: '00:b0:d0:63:c2:26'
+ ip_addr: '192.0.2.146'
+ intf_name: 'Ethernet4'
+ vlan_id: '3'
+ state: replaced
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: []
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ - mac: '28:21:28:15:c1:1b'
+ iptype: ipv4
+ state:
+ mac: '28:21:28:15:c1:1b'
+ iptype: ipv4
+ vlan: "1"
+ intf: Ethernet2
+ ipaddress: "141.202.222.118"
+ - mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ state:
+ mac: 'aa:f7:67:fc:f4:9a'
+ iptype: ipv4
+ vlan: "2"
+ intf: PortChannel1
+ ipaddress: '156.33.90.167'
+ expected_config_requests:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:26,ipv4'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=28:21:28:15:c1:1b,ipv4'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=aa:f7:67:fc:f4:9a,ipv4'
+ method: 'delete'
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:entry:
+ - mac: "00:b0:d0:63:c2:26"
+ iptype: "ipv4"
+ config:
+ mac: "00:b0:d0:63:c2:26"
+ iptype: "ipv4"
+ vlan: "Vlan3"
+ interface: "Ethernet4"
+ ip: "192.0.2.146"
+
+replaced_03_vlan_replace:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ vlans:
+ - "3"
+ - "2"
+ state: replaced
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["3", "1"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: []
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv6
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv6
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ expected_config_requests:
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan1/dhcpv6_snooping_enable'
+ method: 'delete'
+ - path: 'data/sonic-vlan:sonic-vlan/VLAN/VLAN_LIST=Vlan2/dhcpv6_snooping_enable'
+ method: 'patch'
+ data:
+ sonic-vlan:dhcpv6_snooping_enable: 'enable'
+
+replaced_04_trusted:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ trusted:
+ - intf_name: Ethernet1
+ - intf_name: PortChannel1
+ verify_mac: true
+ state: replaced
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["3"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["Ethernet54"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv6
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv6
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ expected_config_requests:
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv6-verify-mac-address: true
+ - path: 'data/openconfig-interfaces:interfaces/interface=Ethernet54/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: delete
+ - path: 'data/openconfig-interfaces:interfaces/interface=Ethernet1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'patch'
+ data:
+ openconfig-interfaces:dhcpv6-snooping-trust: 'ENABLE'
+ - path: 'data/openconfig-interfaces:interfaces/interface=PortChannel1/dhcpv6-snooping-trust/config/dhcpv6-snooping-trust'
+ method: 'patch'
+ data:
+ openconfig-interfaces:dhcpv6-snooping-trust: 'ENABLE'
+
+replaced_05_verify:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ verify_mac: true
+ state: replaced
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: false
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["3"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["Ethernet54"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list: []
+ expected_config_requests:
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv6-verify-mac-address: true
+
+replaced_06_empty_bindings:
+ module_args:
+ config:
+ afis:
+ - afi: 'ipv6'
+ source_bindings: []
+ verify_mac: false
+ state: replaced
+ existing_config:
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping'
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping:
+ config:
+ dhcpv4-admin-enable: false
+ dhcpv6-admin-enable: false
+ dhcpv4-verify-mac-address: true
+ dhcpv6-verify-mac-address: true
+ state:
+ dhcpv4-snooping-vlan: []
+ dhcpv6-snooping-vlan: ["3"]
+ dhcpv4-trusted-intf: []
+ dhcpv6-trusted-intf: ["Ethernet54"]
+ - path: data/openconfig-dhcp-snooping:dhcp-snooping-binding
+ response:
+ code: 200
+ value:
+ openconfig-dhcp-snooping:dhcp-snooping-binding:
+ dhcp-snooping-binding-entry-list:
+ dhcp-snooping-binding-list:
+ - mac: '00:b0:d0:63:c2:26'
+ iptype: ipv6
+ state:
+ mac: '00:b0:d0:63:c2:26'
+ iptype: ipv6
+ vlan: "1"
+ intf: Ethernet4
+ ipaddress: "192.0.2.146"
+ - mac: '28:21:28:15:c1:1b'
+ iptype: ipv6
+ state:
+ mac: '28:21:28:15:c1:1b'
+ iptype: ipv6
+ vlan: "1"
+ intf: Ethernet2
+ ipaddress: "141.202.222.118"
+ expected_config_requests:
+ - path: "data/openconfig-dhcp-snooping:dhcp-snooping/config/dhcpv6-verify-mac-address"
+ method: "patch"
+ data:
+ openconfig-dhcp-snooping:dhcpv6-verify-mac-address: false
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=00:b0:d0:63:c2:26,ipv6'
+ method: 'delete'
+ - path: 'data/openconfig-dhcp-snooping:dhcp-snooping-static-binding/entry=28:21:28:15:c1:1b,ipv6'
+ method: 'delete'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_facts.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_facts.yaml
new file mode 100644
index 000000000..87dd809ec
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_facts.yaml
@@ -0,0 +1,5 @@
+---
+merged_01:
+ module_args:
+ gather_network_resources:
+ - "vlans"
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_interfaces.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_interfaces.yaml
new file mode 100644
index 000000000..da213b099
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_interfaces.yaml
@@ -0,0 +1,339 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: Eth1/1
+ description: "Test Desc for eth1/1"
+ enabled: false
+ mtu: 1600
+ - name: Loopback1
+ description: "Test Desc for Loopback1"
+ enabled: false
+ existing_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 2000
+ description: ''
+ enabled: true
+ openconfig-if-ethernet:ethernet:
+ config:
+ openconfig-if-ethernet-ext2:advertised-speed: ''
+ auto-negotiate: false
+ openconfig-if-ethernet-ext2:port-fec: "openconfig-platform-types:FEC_DISABLED"
+ port-speed: "openconfig-if-ethernet:SPEED_25GB"
+ - path: "data/openconfig-port-group:port-groups"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-groups:
+ port-group:
+ - state:
+ member-if-end: "Eth1/4"
+ member-if-start: "Eth1/1"
+
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Loopback1
+ config:
+ name: Loopback1
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ description: 'Test Desc for eth1/1'
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ mtu: 1600
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ enabled: false
+deleted_01:
+ module_args:
+ state: deleted
+ existing_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ enabled: false
+ description: 'Test Desc for eth1/1'
+ mtu: 8888
+ openconfig-if-ethernet:ethernet:
+ config:
+ openconfig-if-ethernet-ext2:advertised-speed: ''
+ auto-negotiate: true
+ openconfig-if-ethernet-ext2:port-fec: "openconfig-platform-types:FEC_DISABLED"
+ port-speed: "openconfig-if-ethernet:SPEED_25GB"
+ - name: 'Loopback123'
+ config:
+ enabled: false
+ description: 'Test Desc for Loopback123'
+ - path: "data/openconfig-port-group:port-groups"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-groups:
+ port-group:
+ - state:
+ member-if-end: "Eth1/4"
+ member-if-start: "Eth1/2"
+ - path: "data/sonic-port:sonic-port/PORT/PORT_LIST=Eth1%2F1/valid_speeds"
+ response:
+ code: 200
+ value:
+ sonic-port:valid_speeds: "100000, 40000"
+
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config/description"
+ method: "delete"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config/mtu"
+ method: "delete"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ auto-negotiate: false
+ - path: "data/openconfig-interfaces:interfaces/interface=Loopback123"
+ method: "delete"
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: 'Eth1/1'
+ fec: FEC_DISABLED
+ auto_negotiate: true
+ speed: SPEED_100GB
+ advertised_speed:
+ - 100000
+ - name: 'Loopback123'
+ existing_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 6767
+ openconfig-if-ethernet:ethernet:
+ config:
+ auto-negotiate: true
+ port-speed: openconfig-if-ethernet:SPEED_40GB
+ openconfig-if-ethernet-ext2:port-fec: FEC_FC
+ openconfig-if-ethernet-ext2:advertised-speed: '100000,40000'
+ - name: 'Loopback123'
+ config:
+ enabled: false
+ description: 'Test Desc for Loopback123'
+ - path: "data/openconfig-port-group:port-groups"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-groups:
+ port-group:
+ - state:
+ member-if-end: "Eth1/4"
+ member-if-start: "Eth1/2"
+ - path: "data/sonic-port:sonic-port/PORT/PORT_LIST=Eth1%2F1/valid_speeds"
+ response:
+ code: 200
+ value:
+ sonic-port:valid_speeds: "100000, 40000"
+
+
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ auto-negotiate: false
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ openconfig-if-ethernet-ext2:port-fec: "FEC_DISABLED"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ openconfig-if-ethernet-ext2:advertised-speed: '40000'
+ - path: "data/openconfig-interfaces:interfaces/interface=Loopback123"
+ method: "delete"
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - name: Eth1/1
+ mtu: 1600
+ speed: "SPEED_40GB"
+ existing_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ description: 'test descr for eth1/1'
+ mtu: 2000
+ enabled: false
+ openconfig-if-ethernet:ethernet:
+ config:
+ openconfig-if-ethernet-ext2:advertised-speed: ''
+ auto-negotiate: true
+ openconfig-if-ethernet-ext2:port-fec: "openconfig-platform-types:FEC_DISABLED"
+ port-speed: "openconfig-if-ethernet:SPEED_25GB"
+ - name: 'Eth1/2'
+ config:
+ mtu: 6767
+ openconfig-if-ethernet:ethernet:
+ config:
+ auto-negotiate: true
+ port-speed: openconfig-if-ethernet:SPEED_40GB
+ openconfig-if-ethernet-ext2:port-fec: FEC_FC
+ openconfig-if-ethernet-ext2:advertised-speed: '100000,40000'
+ - name: 'Loopback123'
+ config:
+ enabled: false
+ description: 'Test Desc for Loopback123'
+ - path: "data/openconfig-port-group:port-groups"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-groups:
+ port-group:
+ - state:
+ member-if-end: "Eth1/4"
+ member-if-start: "Eth1/3"
+ - path: "data/sonic-port:sonic-port/PORT/PORT_LIST=Eth1%2F1/valid_speeds"
+ response:
+ code: 200
+ value:
+ sonic-port:valid_speeds: "100000, 40000"
+ - path: "data/sonic-port:sonic-port/PORT/PORT_LIST=Eth1%2F2/valid_speeds"
+ response:
+ code: 200
+ value:
+ sonic-port:valid_speeds: "100000, 40000"
+
+
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ mtu: 1600
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config/description"
+ method: "delete"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ auto-negotiate: false
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ port-speed: "openconfig-if-ethernet:SPEED_40GB"
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ - name: Eth1/1
+ mtu: 1600
+ speed: "SPEED_40GB"
+ existing_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 2000
+ openconfig-if-ethernet:ethernet:
+ config:
+ openconfig-if-ethernet-ext2:advertised-speed: ''
+ auto-negotiate: true
+ openconfig-if-ethernet-ext2:port-fec: "openconfig-platform-types:FEC_DISABLED"
+ port-speed: "openconfig-if-ethernet:SPEED_25GB"
+ - name: 'Eth1/2'
+ config:
+ mtu: 2000
+ openconfig-if-ethernet:ethernet:
+ config:
+ openconfig-if-ethernet-ext2:advertised-speed: ''
+ auto-negotiate: true
+ openconfig-if-ethernet-ext2:port-fec: "openconfig-platform-types:FEC_DISABLED"
+ port-speed: "openconfig-if-ethernet:SPEED_25GB"
+ - path: "data/openconfig-port-group:port-groups"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-groups:
+ port-group:
+ - state:
+ member-if-end: "Eth1/4"
+ member-if-start: "Eth1/3"
+ - path: "data/sonic-port:sonic-port/PORT/PORT_LIST=Eth1%2F1/valid_speeds"
+ response:
+ code: 200
+ value:
+ sonic-port:valid_speeds: "100000, 40000"
+ - path: "data/sonic-port:sonic-port/PORT/PORT_LIST=Eth1%2F2/valid_speeds"
+ response:
+ code: 200
+ value:
+ sonic-port:valid_speeds: "100000, 40000"
+
+
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ mtu: 1600
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/config/enabled"
+ method: "delete"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ auto-negotiate: false
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F1/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ port-speed: "openconfig-if-ethernet:SPEED_40GB"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F2/config/enabled"
+ method: "delete"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F2/config/mtu"
+ method: "delete"
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F2/openconfig-if-ethernet:ethernet/config"
+ method: "patch"
+ data:
+ openconfig-if-ethernet:config:
+ auto-negotiate: false
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ip_neighbor.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ip_neighbor.yaml
new file mode 100644
index 000000000..5c0ba555a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ip_neighbor.yaml
@@ -0,0 +1,146 @@
+---
+merged_01:
+ module_args:
+ config:
+ ipv4_arp_timeout: 1200
+ ipv4_drop_neighbor_aging_time: 600
+ ipv6_drop_neighbor_aging_time: 600
+ ipv6_nd_cache_expiry: 1200
+ num_local_neigh: 1000
+ existing_ip_neighbor_config:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ response:
+ code: 200
+ value:
+ openconfig-neighbor:config:
+ ipv4-arp-timeout: 1800
+ ipv6-nd-cache-expiry: 1800
+ name: Values
+ expected_config_requests:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ method: "patch"
+ data:
+ config:
+ ipv4-arp-timeout: 1200
+ ipv4-drop-neighbor-aging-time: 600
+ ipv6-drop-neighbor-aging-time: 600
+ ipv6-nd-cache-expiry: 1200
+ num-local-neigh: 1000
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_ip_neighbor_config:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ response:
+ code: 200
+ value:
+ openconfig-neighbor:config:
+ ipv4-arp-timeout: 1200
+ ipv6-nd-cache-expiry: 1200
+ ipv4-drop-neighbor-aging-time: 600
+ ipv6-drop-neighbor-aging-time: 600
+ num-local-neigh: 1000
+ name: Values
+ expected_config_requests:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ ipv4_arp_timeout: 1200
+ ipv4_drop_neighbor_aging_time: 600
+ ipv6_drop_neighbor_aging_time: 600
+ ipv6_nd_cache_expiry: 1200
+ num_local_neigh: 1000
+ existing_ip_neighbor_config:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ response:
+ code: 200
+ value:
+ openconfig-neighbor:config:
+ ipv4-arp-timeout: 1200
+ ipv6-nd-cache-expiry: 1200
+ ipv4-drop-neighbor-aging-time: 600
+ ipv6-drop-neighbor-aging-time: 600
+ num-local-neigh: 1000
+ name: Values
+ expected_config_requests:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config/ipv4-arp-timeout"
+ method: "delete"
+ data:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config/ipv4-drop-neighbor-aging-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config/ipv6-drop-neighbor-aging-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config/ipv6-nd-cache-expiry"
+ method: "delete"
+ data:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config/num-local-neigh"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ ipv4_arp_timeout: 1201
+ ipv4_drop_neighbor_aging_time: 601
+ ipv6_nd_cache_expiry: 1201
+ num_local_neigh: 1001
+ existing_ip_neighbor_config:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ response:
+ code: 200
+ value:
+ openconfig-neighbor:config:
+ ipv4-arp-timeout: 1200
+ ipv6-nd-cache-expiry: 1200
+ ipv4-drop-neighbor-aging-time: 600
+ ipv6-drop-neighbor-aging-time: 600
+ num-local-neigh: 1000
+ name: Values
+ expected_config_requests:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ method: "patch"
+ data:
+ config:
+ ipv4-arp-timeout: 1201
+ ipv4-drop-neighbor-aging-time: 601
+ ipv6-nd-cache-expiry: 1201
+ num-local-neigh: 1001
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ ipv4_drop_neighbor_aging_time: 602
+ ipv6_drop_neighbor_aging_time: 602
+ num_local_neigh: 1002
+ existing_ip_neighbor_config:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ response:
+ code: 200
+ value:
+ openconfig-neighbor:config:
+ ipv4-arp-timeout: 1200
+ ipv6-nd-cache-expiry: 1200
+ ipv4-drop-neighbor-aging-time: 600
+ ipv6-drop-neighbor-aging-time: 600
+ num-local-neigh: 1000
+ name: Values
+ expected_config_requests:
+ - path: "data/openconfig-neighbor:neighbor-globals/neighbor-global=Values/config"
+ method: "patch"
+ data:
+ config:
+ ipv4-arp-timeout: 180
+ ipv4-drop-neighbor-aging-time: 602
+ ipv6-drop-neighbor-aging-time: 602
+ ipv6-nd-cache-expiry: 180
+ num-local-neigh: 1002
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_acls.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_acls.yaml
new file mode 100644
index 000000000..a02aabcf0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_acls.yaml
@@ -0,0 +1,962 @@
+---
+merged_01:
+ module_args:
+ state: 'merged'
+ config:
+ - name: 'acl1'
+ remark: 'L2 ACL 1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '11:11:11:11:11:11'
+ destination:
+ host: '11:11:11:22:22:22'
+ remark: 'Rule1'
+ - sequence_num: 2
+ action: 'transit'
+ source:
+ address: '00:00:10:00:00:00'
+ address_mask: '00:00:ff:ff:00:00'
+ destination:
+ any: true
+ vlan_id: 100
+ - sequence_num: 3
+ action: 'transit'
+ source:
+ any: true
+ destination:
+ address: '00:00:00:00:10:00'
+ address_mask: '00:00:00:00:ff:ff'
+ ethertype:
+ value: '0x800'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value: {}
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'acl1'
+ type: 'ACL_L2'
+ config:
+ name: 'acl1'
+ type: 'ACL_L2'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/config/description'
+ method: 'patch'
+ data:
+ description: 'L2 ACL 1'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ description: 'Rule1'
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ source-mac: '11:11:11:11:11:11'
+ destination-mac: '11:11:11:22:22:22'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'TRANSIT'
+ l2:
+ config:
+ source-mac: '00:00:10:00:00:00'
+ source-mac-mask: '00:00:ff:ff:00:00'
+ vlanid: 100
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 3
+ config:
+ sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'TRANSIT'
+ l2:
+ config:
+ destination-mac: '00:00:00:00:10:00'
+ destination-mac-mask: '00:00:00:00:ff:ff'
+ ethertype: 'ETHERTYPE_IPV4'
+
+merged_02:
+ module_args:
+ state: 'merged'
+ config:
+ - name: 'acl1'
+ remark: 'Remark_ACL1'
+ rules:
+ - sequence_num: 4
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ arp: true
+ - sequence_num: 5
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+ vlan_tag_format:
+ multi_tagged: true
+ remark: 'VLAN_multi_tagged'
+ - name: 'acl2'
+ remark: 'Remark_ACL2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv6: true
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ dei: 1
+ - sequence_num: 3
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ pcp:
+ value: 4
+ mask: 6
+ - sequence_num: 4
+ action: 'do-not-nat'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv4: true
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - config:
+ description: 'L2 ACL 1'
+ name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ description: 'Rule1'
+ sequence-id: 1
+ l2:
+ config:
+ destination-mac: '11:11:11:22:22:22'
+ source-mac: '11:11:11:11:11:11'
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ config:
+ sequence-id: 2
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ source-mac: '00:00:10:00:00:00'
+ source-mac-mask: '00:00:ff:ff:00:00'
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ config:
+ sequence-id: 3
+ l2:
+ config:
+ destination-mac: '00:00:00:00:10:00'
+ destination-mac-mask: '00:00:00:00:ff:ff'
+ ethertype: 2114
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/config/description'
+ method: 'patch'
+ data:
+ description: 'Remark_ACL1'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 4
+ actions:
+ config:
+ forwarding-action: 'DISCARD'
+ config:
+ sequence-id: 4
+ l2:
+ config:
+ ethertype: 'ETHERTYPE_ARP'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 5
+ actions:
+ config:
+ forwarding-action: 'DISCARD'
+ config:
+ description: 'VLAN_multi_tagged'
+ sequence-id: 5
+ l2:
+ config:
+ vlan-tag-format: 'openconfig-acl-ext:MULTI_TAGGED'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'acl2'
+ type: 'ACL_L2'
+ config:
+ name: 'acl2'
+ type: 'ACL_L2'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/config/description'
+ method: 'patch'
+ data:
+ description: 'Remark_ACL2'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ l2:
+ config:
+ ethertype: 'ETHERTYPE_IPV6'
+ vlanid: 200
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 2
+ l2:
+ config:
+ dei: 1
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 3
+ l2:
+ config:
+ pcp: 4
+ pcp-mask: 6
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 4
+ actions:
+ config:
+ forwarding-action: 'DO_NOT_NAT'
+ config:
+ sequence-id: 4
+ l2:
+ config:
+ ethertype: 'ETHERTYPE_IPV4'
+
+replaced_01:
+ module_args:
+ state: 'replaced'
+ config:
+ - name: 'acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ host: '11:11:11:22:22:22'
+ - sequence_num: 2
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ ipv4: true
+ vlan_id: 100
+ - name: 'acl3'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '22:22:22:22:22:22'
+ destination:
+ any: true
+ pcp:
+ traffic_type: 'ca'
+ - sequence_num: 2
+ action: 'deny'
+ source:
+ any: true
+ destination:
+ any: true
+ remark: 'Deny_All'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ description: 'Remark_ACL1'
+ name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ description: 'Rule1'
+ sequence-id: 1
+ l2:
+ config:
+ destination-mac: '11:11:11:22:22:22'
+ source-mac: '11:11:11:11:11:11'
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ config:
+ sequence-id: 2
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ source-mac: '00:00:10:00:00:00'
+ source-mac-mask: '00:00:ff:ff:00:00'
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ config:
+ sequence-id: 3
+ l2:
+ config:
+ destination-mac: '00:00:00:00:10:00'
+ destination-mac-mask: '00:00:00:00:ff:ff'
+ ethertype: 2114
+ - name: 'acl2'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ description: 'Remark_ACL2'
+ name: 'acl2'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 1
+ l2:
+ config:
+ ethertype: 'openconfig-packet-match-types:ETHERTYPE_IPV6'
+ openconfig-acl-ext:vlanid: 200
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 2
+ l2:
+ config:
+ openconfig-acl-ext:dei: 1
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 3
+ l2:
+ config:
+ openconfig-acl-ext:pcp: 4
+ openconfig-acl-ext:pcp-mask: 6
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/config/description'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries/acl-entry=1'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries/acl-entry=2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries/acl-entry=3'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ destination-mac: '11:11:11:22:22:22'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ l2:
+ config:
+ ethertype: 'ETHERTYPE_IPV4'
+ vlanid: 100
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'acl3'
+ type: 'ACL_L2'
+ config:
+ name: 'acl3'
+ type: 'ACL_L2'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl3,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ pcp: 3
+ source-mac: '22:22:22:22:22:22'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl3,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ config:
+ description: 'Deny_All'
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ l2:
+ config: {}
+
+overridden_01:
+ module_args:
+ state: 'overridden'
+ config:
+ - name: 'acl1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '33:33:33:33:33:33'
+ destination:
+ host: '44:44:44:44:44:44'
+ - name: 'test-acl'
+ remark: 'test_mac_acl'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ host: '22:22:22:22:22:22'
+ destination:
+ any: true
+ vlan_id: 20
+ - sequence_num: 2
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ value: '0x88cc'
+ remark: 'LLDP'
+ - sequence_num: 3
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ address: '00:00:10:00:00:00'
+ address_mask: '00:00:ff:ff:00:00'
+ pcp:
+ value: 4
+ mask: 6
+ - name: 'test-acl-1'
+ remark: 'test_mac_acl_1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ arp: true
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'discard'
+ source:
+ any: true
+ destination:
+ any: true
+ ethertype:
+ value: 0x8035
+ pcp:
+ value: 5
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ description: 'Remark_ACL1'
+ name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ description: 'Rule1'
+ sequence-id: 1
+ l2:
+ config:
+ destination-mac: '11:11:11:22:22:22'
+ source-mac: '11:11:11:11:11:11'
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ config:
+ sequence-id: 2
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ source-mac: '00:00:10:00:00:00'
+ source-mac-mask: '00:00:ff:ff:00:00'
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ config:
+ sequence-id: 3
+ l2:
+ config:
+ destination-mac: '00:00:00:00:10:00'
+ destination-mac-mask: '00:00:00:00:ff:ff'
+ ethertype: 2114
+ - name: 'acl2'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ name: 'acl2'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 1
+ l2:
+ config:
+ destination-mac: '11:11:11:22:22:22'
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:DROP'
+ config:
+ sequence-id: 2
+ l2:
+ config:
+ ethertype: 'openconfig-packet-match-types:ETHERTYPE_IPV4'
+ openconfig-acl-ext:vlanid: 100
+ - name: 'acl3'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ name: 'acl3'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 1
+ l2:
+ config:
+ openconfig-acl-ext:pcp: 3
+ source-mac: '22:22:22:22:22:22'
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:DROP'
+ config:
+ description: 'Deny_All'
+ sequence-id: 2
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl2,ACL_L2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl3,ACL_L2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/config/description'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries/acl-entry=2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries/acl-entry=3'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries/acl-entry=1'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl-1,ACL_L2/config/description'
+ method: 'patch'
+ data:
+ description: 'test_mac_acl_1'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ destination-mac: '44:44:44:44:44:44'
+ source-mac: '33:33:33:33:33:33'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'test-acl'
+ type: 'ACL_L2'
+ config:
+ name: 'test-acl'
+ type: 'ACL_L2'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl,ACL_L2/config/description'
+ method: 'patch'
+ data:
+ description: 'test_mac_acl'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ source-mac: '22:22:22:22:22:22'
+ vlanid: 20
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ config:
+ description: 'LLDP'
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ ethertype: 'ETHERTYPE_LLDP'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 3
+ config:
+ sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ destination-mac: '00:00:10:00:00:00'
+ destination-mac-mask: '00:00:ff:ff:00:00'
+ pcp: 4
+ pcp-mask: 6
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'test-acl-1'
+ type: 'ACL_L2'
+ config:
+ name: 'test-acl-1'
+ type: 'ACL_L2'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl-1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ l2:
+ config:
+ ethertype: 'ETHERTYPE_ARP'
+ vlanid: 200
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl-1,ACL_L2/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'DISCARD'
+ l2:
+ config:
+ ethertype: 32821
+ pcp: 5
+
+deleted_01:
+ module_args:
+ state: 'deleted'
+ config:
+ - name: 'acl1'
+ - name: 'test-acl'
+ rules:
+ - sequence_num: 3
+ - name: 'test-acl-1'
+ remark: 'test_mac_acl_1'
+ rules:
+ - sequence_num: 2
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ name: 'acl1'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ l2:
+ config:
+ destination-mac: '44:44:44:44:44:44'
+ source-mac: '33:33:33:33:33:33'
+ - name: 'test-acl'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ description: 'test_mac_acl'
+ name: 'test-acl'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 20
+ source-mac: '22:22:22:22:22:22'
+ - sequence-id: 2
+ config:
+ description: 'LLDP'
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ l2:
+ config:
+ ethertype: 'openconfig-packet-match-types:ETHERTYPE_LLDP'
+ - sequence-id: 3
+ config:
+ sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ l2:
+ config:
+ destination-mac: '00:00:10:00:00:00'
+ destination-mac-mask: '00:00:ff:ff:00:00'
+ openconfig-acl-ext:pcp: 4
+ openconfig-acl-ext:pcp-mask: 6
+ - name: 'test-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ description: 'test_mac_acl_1'
+ name: 'test-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ l2:
+ config:
+ ethertype: 'openconfig-packet-match-types:ETHERTYPE_ARP'
+ openconfig-acl-ext:vlanid: 200
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:DISCARD'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=acl1,ACL_L2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl,ACL_L2/acl-entries/acl-entry=3'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl-1,ACL_L2/config/description'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl-1,ACL_L2/acl-entries/acl-entry=2'
+ method: 'delete'
+
+
+deleted_02:
+ module_args:
+ state: 'deleted'
+ config: []
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'test-acl'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ description: 'test_mac_acl'
+ name: 'test-acl'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 1
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 20
+ destination-mac: '11:11:11:11:11:11'
+ destination-mac-mask: 'ff:ff:ff:ff:ff:ff'
+ source-mac: '22:22:22:22:22:22'
+ source-mac-mask: 'ff:ff:ff:ff:ff:ff'
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ description: 'LLDP'
+ sequence-id: 2
+ l2:
+ config:
+ ethertype: 'openconfig-packet-match-types:ETHERTYPE_LLDP'
+ - name: 'test-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ config:
+ name: 'test-acl-1'
+ type: 'openconfig-acl:ACL_L2'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ config:
+ sequence-id: 1
+ l2:
+ config:
+ openconfig-acl-ext:vlan-tag-format: 'openconfig-acl-ext:MULTI_TAGGED'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl,ACL_L2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=test-acl-1,ACL_L2'
+ method: 'delete'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_interfaces.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_interfaces.yaml
new file mode 100644
index 000000000..fc7d17067
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l2_interfaces.yaml
@@ -0,0 +1,187 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: Eth1/1
+ access:
+ vlan: 10
+ trunk:
+ allowed_vlans:
+ - vlan: '11'
+ - vlan: '12'
+ - vlan: '13'
+ - vlan: '14'
+ - vlan: '16'
+ - vlan: '21-30'
+ - name: Mgmt1/1/1
+ access:
+ vlan: 100
+ - name: PortChannel200
+ access:
+ vlan: 200
+ - name: Eth1/12
+ existing_l2_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 2000
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
+ method: "patch"
+ data:
+ openconfig-vlan:config:
+ access-vlan: 10
+ trunk-vlans:
+ - 11
+ - 12
+ - 13
+ - 14
+ - 16
+ - '21..30'
+ - path: "data/openconfig-interfaces:interfaces/interface=PortChannel200/openconfig-if-aggregate:aggregation/openconfig-vlan:switched-vlan/config"
+ method: "patch"
+ data:
+ openconfig-vlan:config:
+ access-vlan: 200
+merged_02:
+ module_args:
+ config:
+ - name: Eth1/1
+ access:
+ vlan: 10
+ trunk:
+ allowed_vlans:
+ - vlan: 14-20
+ - name: Eth1/2
+ access:
+ vlan: 30
+ trunk:
+ allowed_vlans:
+ - vlan: 21
+ existing_l2_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Eth1/1
+ openconfig-if-ethernet:ethernet:
+ openconfig-vlan:switched-vlan:
+ config:
+ access-vlan: 10
+ interface-mode: TRUNK
+ trunk-vlans:
+ - "11..15"
+ - name: Eth1/2
+ openconfig-if-ethernet:ethernet:
+ openconfig-vlan:switched-vlan:
+ config:
+ access-vlan: 20
+ interface-mode: TRUNK
+ trunk-vlans:
+ - "21..25"
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
+ method: "patch"
+ data:
+ openconfig-vlan:config:
+ trunk-vlans:
+ - '16..20'
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
+ method: "patch"
+ data:
+ openconfig-vlan:config:
+ access-vlan: 30
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_l2_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Eth1/1
+ openconfig-if-ethernet:ethernet:
+ openconfig-vlan:switched-vlan:
+ config:
+ access-vlan: 10
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: Eth1/1
+ access:
+ vlan: 10
+ trunk:
+ allowed_vlans:
+ - vlan: "11-13"
+ - vlan: 16
+ - name: Eth1/2
+ access:
+ vlan:
+ trunk:
+ allowed_vlans:
+ - name: Eth1/3
+ existing_l2_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Eth1/1
+ openconfig-if-ethernet:ethernet:
+ openconfig-vlan:switched-vlan:
+ config:
+ access-vlan: 10
+ interface-mode: TRUNK
+ trunk-vlans:
+ - "11..20"
+ - name: Eth1/2
+ openconfig-if-ethernet:ethernet:
+ openconfig-vlan:switched-vlan:
+ config:
+ access-vlan: 12
+ interface-mode: TRUNK
+ trunk-vlans:
+ - "21..30"
+ - 51
+ - name: Eth1/3
+ openconfig-if-ethernet:ethernet:
+ openconfig-vlan:switched-vlan:
+ config:
+ access-vlan: 13
+ interface-mode: TRUNK
+ trunk-vlans:
+ - "31..40"
+ - 61
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config/access-vlan"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config/trunk-vlans=11..13%2C16"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config/access-vlan"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config/trunk-vlans=21..30%2C51"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f3/openconfig-if-ethernet:ethernet/openconfig-vlan:switched-vlan/config"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_acls.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_acls.yaml
new file mode 100644
index 000000000..314c803c1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_acls.yaml
@@ -0,0 +1,1290 @@
+---
+merged_01:
+ module_args:
+ state: 'merged'
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl1'
+ remark: 'IPv4 ACL 1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ host: '192.168.1.2'
+ destination:
+ host: '192.168.2.2'
+ remark: 'Host-Rule'
+ - sequence_num: 2
+ action: 'transit'
+ protocol:
+ name: 'icmp'
+ source:
+ prefix: '192.168.0.0/16'
+ destination:
+ any: true
+ protocol_options:
+ icmp:
+ type: 8
+ vlan_id: 100
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ remark: 'IPv6 ACL 1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ipv6'
+ source:
+ prefix: '192::/64'
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ port_number:
+ gt: 1024
+ destination:
+ host: '192::2'
+ port_number:
+ eq: 80
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value: {}
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'ip-acl1'
+ type: 'ACL_IPV4'
+ config:
+ name: 'ip-acl1'
+ type: 'ACL_IPV4'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4/config/description'
+ method: 'patch'
+ data:
+ description: 'IPv4 ACL 1'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ description: 'Host-Rule'
+ sequence-id: 1
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ source-address: '192.168.1.2/32'
+ transport:
+ config: {}
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'TRANSIT'
+ config:
+ sequence-id: 2
+ ipv4:
+ config:
+ protocol: 'IP_ICMP'
+ source-address: '192.168.0.0/16'
+ l2:
+ config:
+ vlanid: 100
+ transport:
+ config:
+ icmp-type: 8
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'ipv6-acl1'
+ type: 'ACL_IPV6'
+ config:
+ name: 'ipv6-acl1'
+ type: 'ACL_IPV6'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/config/description'
+ method: 'patch'
+ data:
+ description: 'IPv6 ACL 1'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv6:
+ config:
+ source-address: '192::/64'
+ transport:
+ config: {}
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 2
+ ipv6:
+ config:
+ destination-address: '192::2/128'
+ protocol: 'IP_TCP'
+ transport:
+ config:
+ destination-port: 80
+ source-port: '1024..65535'
+
+merged_02:
+ module_args:
+ state: 'merged'
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl1'
+ remark: 'IPv4 ACL 1'
+ rules:
+ - sequence_num: 3
+ action: 'deny'
+ protocol:
+ number: 17
+ source:
+ host: '192.168.1.2'
+ destination:
+ prefix: '192.168.1.0/24'
+ port_number:
+ lt: 1024
+ remark: "Drop UDP"
+ - name: 'ip-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ ack: true
+ syn: true
+ fin: true
+ - sequence_num: 2
+ action: 'permit'
+ protocol:
+ number: 2
+ source:
+ any: true
+ destination:
+ any: true
+ dscp:
+ voice_admit: true
+ - sequence_num: 3
+ action: 'discard'
+ protocol:
+ name: 'icmp'
+ source:
+ any: true
+ destination:
+ any: true
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ remark: 'Updated IPv6 ACL 1'
+ rules:
+ - sequence_num: 3
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ destination:
+ host: '100::1'
+ port_number:
+ range:
+ begin: 1024
+ end: 2048
+ - name: 'ipv6-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'icmpv6'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ icmpv6:
+ type: 128
+ code: 0
+ vlan_id: 200
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'ipv6'
+ source:
+ host: '100::1'
+ destination:
+ any: true
+ vlan_id: 200
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ description: 'IPv4 ACL 1'
+ name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ description: 'Host-Rule'
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ source-address: '192.168.1.2/32'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_ICMP'
+ source-address: '192.168.0.0/16'
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ transport:
+ config:
+ openconfig-acl-ext:icmp-type: 8
+ - name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ description: 'IPv6 ACL 1'
+ name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ source-address: '192::/64'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ destination-address: '192::2/128'
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ destination-port: 80
+ source-port: '1024..65535'
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ config:
+ description: 'Drop UDP'
+ sequence-id: 3
+ ipv4:
+ config:
+ destination-address: '192.168.1.0/24'
+ protocol: 'IP_UDP'
+ source-address: '192.168.1.2/32'
+ transport:
+ config:
+ destination-port: '0..1024'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'ip-acl2'
+ type: 'ACL_IPV4'
+ config:
+ name: 'ip-acl2'
+ type: 'ACL_IPV4'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv4:
+ config:
+ protocol: 'IP_TCP'
+ transport:
+ config:
+ tcp-flags:
+ - 'TCP_ACK'
+ - 'TCP_SYN'
+ - 'TCP_FIN'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 2
+ ipv4:
+ config:
+ dscp: 44
+ protocol: 'IP_IGMP'
+ transport:
+ config: {}
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'DISCARD'
+ config:
+ sequence-id: 3
+ ipv4:
+ config:
+ protocol: 'IP_ICMP'
+ transport:
+ config: {}
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/config/description'
+ method: 'patch'
+ data:
+ description: 'Updated IPv6 ACL 1'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 3
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ config:
+ sequence-id: 3
+ ipv6:
+ config:
+ destination-address: '100::1/128'
+ protocol: 'IP_UDP'
+ transport:
+ config:
+ destination-port: '1024..2048'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'ipv6-acl2'
+ type: 'ACL_IPV6'
+ config:
+ name: 'ipv6-acl2'
+ type: 'ACL_IPV6'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl2,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv6:
+ config:
+ protocol: 58
+ l2:
+ config:
+ vlanid: 200
+ transport:
+ config:
+ icmp-code: 0
+ icmp-type: 128
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl2,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ config:
+ sequence-id: 2
+ ipv6:
+ config:
+ source-address: '100::1/128'
+ l2:
+ config:
+ vlanid: 200
+ transport:
+ config: {}
+
+replaced_01:
+ module_args:
+ state: 'replaced'
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ established: true
+ remark: 'TCP established'
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'icmp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ icmp:
+ code: 128
+ - name: 'ip-acl3'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ host: '192.168.2.2'
+ dscp:
+ value: 4
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ host: '100::2'
+ port_number:
+ gt: 0
+ destination:
+ any: true
+ - sequence_num: 2
+ action: 'deny'
+ protocol:
+ name: 'udp'
+ source:
+ any: true
+ port_number:
+ range:
+ begin: 0
+ end: 2048
+ destination:
+ any: true
+ port_number:
+ range:
+ begin: 8000
+ end: 65535
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ description: 'IPv4 ACL 1'
+ name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ description: 'Host-Rule'
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ source-address: '192.168.1.2/32'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_ICMP'
+ source-address: '192.168.0.0/16'
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ transport:
+ config:
+ openconfig-acl-ext:icmp-type: 8
+ - name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ tcp-flags:
+ - 'openconfig-packet-match-types:TCP_FIN'
+ - 'openconfig-packet-match-types:TCP_SYN'
+ - 'openconfig-packet-match-types:TCP_ACK'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ dscp: 44
+ protocol: 'openconfig-packet-match-types:IP_IGMP'
+ - name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ description: 'IPv6 ACL 1'
+ name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ source-address: '192::/64'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ destination-address: '192::2/128'
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ destination-port: 80
+ source-port: '1024..65535'
+ - name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ protocol: 58
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries/acl-entry=1'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries/acl-entry=2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/config/description'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries/acl-entry=1'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries/acl-entry=2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ description: 'TCP established'
+ sequence-id: 1
+ ipv4:
+ config:
+ protocol: 'IP_TCP'
+ transport:
+ config:
+ tcp-session-established: true
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ config:
+ sequence-id: 2
+ ipv4:
+ config:
+ protocol: 'IP_ICMP'
+ transport:
+ config:
+ icmp-code: 128
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'ip-acl3'
+ type: 'ACL_IPV4'
+ config:
+ name: 'ip-acl3'
+ type: 'ACL_IPV4'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl3,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ dscp: 4
+ transport:
+ config: {}
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv6:
+ config:
+ protocol: 'IP_TCP'
+ source-address: '100::2/128'
+ transport:
+ config:
+ source-port: '0..65535'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'DROP'
+ config:
+ sequence-id: 2
+ ipv6:
+ config:
+ protocol: 'IP_UDP'
+ transport:
+ config:
+ destination-port: '8000..65535'
+ source-port: '0..2048'
+
+overridden_01:
+ module_args:
+ state: 'overridden'
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl2'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ any: true
+ destination:
+ any: true
+ protocol_options:
+ tcp:
+ established: true
+ remark: 'TCP established'
+ - name: 'ip-acl3'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'ip'
+ source:
+ any: true
+ destination:
+ host: '192.168.2.2'
+ dscp:
+ value: 10
+ - address_family: 'ipv6'
+ acls:
+ - name: 'ipv6-acl1'
+ rules:
+ - sequence_num: 1
+ action: 'permit'
+ protocol:
+ name: 'tcp'
+ source:
+ host: '100::2'
+ port_number:
+ gt: 0
+ destination:
+ any: true
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ description: 'IPv4 ACL 1'
+ name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ description: 'Host-Rule'
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ source-address: '192.168.1.2/32'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_ICMP'
+ source-address: '192.168.0.0/16'
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ transport:
+ config:
+ openconfig-acl-ext:icmp-type: 8
+ - name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ description: 'TCP established'
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ openconfig-acl-ext:tcp-session-established: true
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ dscp: 44
+ protocol: 'openconfig-packet-match-types:IP_IGMP'
+ - name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ description: 'IPv6 ACL 1'
+ name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ source-address: '192::/64'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ destination-address: '192::2/128'
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ destination-port: 80
+ source-port: '1024..65535'
+ - name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ protocol: 58
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl2,ACL_IPV6'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4/acl-entries/acl-entry=2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/config/description'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries/acl-entry=1'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries/acl-entry=2'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set'
+ method: 'patch'
+ data:
+ acl-set:
+ - name: 'ip-acl3'
+ type: 'ACL_IPV4'
+ config:
+ name: 'ip-acl3'
+ type: 'ACL_IPV4'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl3,ACL_IPV4/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ dscp: 10
+ transport:
+ config: {}
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6/acl-entries'
+ method: 'post'
+ data:
+ openconfig-acl:acl-entry:
+ - sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'ACCEPT'
+ config:
+ sequence-id: 1
+ ipv6:
+ config:
+ protocol: 'IP_TCP'
+ source-address: '100::2/128'
+ transport:
+ config:
+ source-port: '0..65535'
+
+deleted_01:
+ module_args:
+ state: 'deleted'
+ config:
+ - address_family: 'ipv4'
+ acls:
+ - name: 'ip-acl1'
+ remark: 'IPv4 ACL 1'
+ - name: 'ip-acl2'
+ - name: 'ip-acl3'
+ rules:
+ - sequence_num: 1
+ - address_family: 'ipv6'
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ description: 'IPv4 ACL 1'
+ name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ description: 'Host-Rule'
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ source-address: '192.168.1.2/32'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_ICMP'
+ source-address: '192.168.0.0/16'
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ transport:
+ config:
+ openconfig-acl-ext:icmp-type: 8
+ - name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ description: 'TCP established'
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ openconfig-acl-ext:tcp-session-established: true
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ dscp: 44
+ protocol: 'openconfig-packet-match-types:IP_IGMP'
+          - name: 'ip-acl3'
+            type: 'openconfig-acl:ACL_IPV4'
+            config:
+              name: 'ip-acl3'
+              type: 'openconfig-acl:ACL_IPV4'
+            acl-entries:
+              acl-entry:
+                - sequence-id: 1
+                  config:
+                    sequence-id: 1
+                  actions:
+                    config:
+                      forwarding-action: 'openconfig-acl:ACCEPT'
+                  ipv4:
+                    config:
+                      destination-address: '192.168.2.2/32'
+                      dscp: 4
+                - sequence-id: 2
+                  config:
+                    sequence-id: 2
+                  actions:
+                    config:
+                      forwarding-action: 'openconfig-acl-ext:DISCARD'
+                  ipv4:
+                    config:
+                      protocol: 'openconfig-packet-match-types:IP_TCP'
+                  transport:
+                    config:
+                      tcp-flags:
+                        - 'openconfig-packet-match-types:TCP_PSH'
+                        - 'openconfig-packet-match-types:TCP_URG'
+ - name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ description: 'IPv6 ACL 1'
+ name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ source-address: '192::/64'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ destination-address: '192::2/128'
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ destination-port: 80
+ source-port: '1024..65535'
+ - name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ protocol: 58
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl2,ACL_IPV6'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4/config/description'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl3,ACL_IPV4/acl-entries/acl-entry=1'
+ method: 'delete'
+
+deleted_02:
+ module_args:
+ state: 'deleted'
+ config: []
+ facts_get_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets'
+ response:
+ code: 200
+ value:
+ openconfig-acl:acl-sets:
+ acl-set:
+ - name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ description: 'IPv4 ACL 1'
+ name: 'ip-acl1'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ description: 'Host-Rule'
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ destination-address: '192.168.2.2/32'
+ source-address: '192.168.1.2/32'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl-ext:TRANSIT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_ICMP'
+ source-address: '192.168.0.0/16'
+ l2:
+ config:
+ openconfig-acl-ext:vlanid: 100
+ transport:
+ config:
+ openconfig-acl-ext:icmp-type: 8
+ - name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ config:
+ name: 'ip-acl2'
+ type: 'openconfig-acl:ACL_IPV4'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ description: 'TCP established'
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv4:
+ config:
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ openconfig-acl-ext:tcp-session-established: true
+ - name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ description: 'IPv6 ACL 1'
+ name: 'ipv6-acl1'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ source-address: '192::/64'
+ - sequence-id: 2
+ config:
+ sequence-id: 2
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ destination-address: '192::2/128'
+ protocol: 'openconfig-packet-match-types:IP_TCP'
+ transport:
+ config:
+ destination-port: 80
+ source-port: '1024..65535'
+ - name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ config:
+ name: 'ipv6-acl2'
+ type: 'openconfig-acl:ACL_IPV6'
+ acl-entries:
+ acl-entry:
+ - sequence-id: 1
+ config:
+ sequence-id: 1
+ actions:
+ config:
+ forwarding-action: 'openconfig-acl:ACCEPT'
+ ipv6:
+ config:
+ protocol: 58
+ config_requests:
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl1,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ip-acl2,ACL_IPV4'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl1,ACL_IPV6'
+ method: 'delete'
+ - path: 'data/openconfig-acl:acl/acl-sets/acl-set=ipv6-acl2,ACL_IPV6'
+ method: 'delete'
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_interfaces.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_interfaces.yaml
new file mode 100644
index 000000000..fea3cea89
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_l3_interfaces.yaml
@@ -0,0 +1,586 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: Eth1/1
+ ipv4:
+ addresses:
+ - address: 83.1.1.1/16
+ - address: 84.1.1.1/16
+ secondary: True
+ ipv6:
+ enabled: true
+ addresses:
+ - address: 83::1/16
+ - address: 84::1/16
+ - name: Vlan11
+ ipv4:
+ addresses:
+ - address: 73.1.1.1/16
+ - address: 74.1.1.1/16
+ secondary: True
+ ipv6:
+ enabled: true
+ addresses:
+ - address: 73::1/16
+ - name: Vlan12
+ ipv4:
+ anycast_addresses:
+ - 11.12.13.14/12
+ existing_l3_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interface:
+ - name: Eth1/1
+ config:
+ mtu: 2000
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 83.1.1.1
+ openconfig-if-ip:config:
+ ip: 83.1.1.1
+ prefix-length: 16.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 84.1.1.1
+ openconfig-if-ip:config:
+ ip: 84.1.1.1
+ prefix-length: 16.0
+ secondary: True
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 83::1
+ openconfig-if-ip:config:
+ ip: 83::1
+ prefix-length: 16.0
+ - ip: 84::1
+ openconfig-if-ip:config:
+ ip: 84::1
+ prefix-length: 16.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/config"
+ method: "patch"
+ data:
+ config:
+ enabled: True
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan11/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 73.1.1.1
+ openconfig-if-ip:config:
+ ip: 73.1.1.1
+ prefix-length: 16.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan11/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 74.1.1.1
+ openconfig-if-ip:config:
+ ip: 74.1.1.1
+ prefix-length: 16.0
+ secondary: True
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan11/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv6/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 73::1
+ openconfig-if-ip:config:
+ ip: 73::1
+ prefix-length: 16.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan11/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv6/config"
+ method: "patch"
+ data:
+ config:
+ enabled: True
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan12/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway"
+ method: "patch"
+ data:
+ openconfig-interfaces-ext:static-anycast-gateway:
+ - 11.12.13.14/12
+deleted_01:
+ module_args:
+ state: deleted
+ existing_l3_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interface:
+ - name: Eth1/1
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 73.1.1.1
+ prefix-length: 8
+ secondary: False
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 73::1
+ prefix-length: 64
+ enabled: True
+ - name: Vlan99
+ openconfig-vlan:routed-vlan:
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 99.99.99.99
+ prefix-length: 8
+ secondary: False
+ - name: Vlan88
+ openconfig-vlan:routed-vlan:
+ openconfig-if-ip:ipv4:
+ openconfig-interfaces-ext:sag-ipv4:
+ config:
+ static-anycast-gateway: 11.12.13.14/12
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan88/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway=11.12.13.14%2f12"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan99/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: Eth1/1
+ ipv4:
+ addresses:
+ - address: 84.1.1.1/8
+ secondary: True
+ ipv6:
+ addresses:
+ - address: 84::1/64
+ - name: Eth1/2
+ - name: Vlan99
+ ipv4:
+ addresses:
+ - address: 74.1.1.1/8
+ secondary: True
+ ipv6:
+ addresses:
+ - address: 73::1/64
+ - name: Vlan88
+ existing_l3_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interface:
+ - name: Eth1/1
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 83.1.1.1
+ prefix-length: 8
+ secondary: False
+ - config:
+ ip: 84.1.1.1
+ prefix-length: 8
+ secondary: True
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 83::1
+ prefix-length: 64
+ - config:
+ ip: 84::1
+ prefix-length: 64
+ config:
+ enabled: True
+ - name: Eth1/2
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 93.1.1.2
+ prefix-length: 8
+ secondary: False
+ - config:
+ ip: 94.1.1.2
+ prefix-length: 8
+ secondary: True
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 93::2
+ prefix-length: 64
+ - config:
+ ip: 94::2
+ prefix-length: 64
+ config:
+ enabled: True
+ - name: Vlan99
+ openconfig-vlan:routed-vlan:
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 73.1.1.1
+ prefix-length: 8
+ secondary: False
+ - config:
+ ip: 74.1.1.1
+ prefix-length: 8
+ secondary: True
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 73::1
+ prefix-length: 64
+ config:
+ enabled: True
+ - name: Vlan88
+ openconfig-vlan:routed-vlan:
+ openconfig-if-ip:ipv4:
+ openconfig-interfaces-ext:sag-ipv4:
+ config:
+ static-anycast-gateway: 11.12.13.14/12
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses/address=84.1.1.1/config/secondary"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses/address=84::1"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/config/enabled"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan88/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway=11.12.13.14%2f12"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan99/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/addresses/address=74.1.1.1/config/secondary"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan99/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv6/addresses/address=73::1"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - name: Vlan13
+ ipv4:
+ anycast_addresses:
+ - 11.12.13.14/12
+ ipv6:
+ enabled: true
+ - name: Eth1/1
+ ipv4:
+ addresses:
+ - address: 31.31.31.1/24
+ - address: 32.32.32.1/24
+ secondary: True
+ ipv6:
+ addresses:
+ - address: 31::1/64
+ - address: 32::1/64
+ existing_l3_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interface:
+ - name: Eth1/1
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 73.1.1.1
+ prefix-length: 8
+ secondary: False
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 73::1
+ prefix-length: 64
+ enabled: True
+ - name: Eth1/2
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 93.1.1.2
+ prefix-length: 8
+ secondary: False
+ - config:
+ ip: 94.1.1.2
+ prefix-length: 8
+ secondary: True
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 93::2
+ prefix-length: 64
+ - config:
+ ip: 94::2
+ prefix-length: 64
+ config:
+ enabled: True
+ - name: Vlan99
+ openconfig-vlan:routed-vlan:
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 99.99.99.99
+ prefix-length: 8
+ secondary: False
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 31.31.31.1
+ openconfig-if-ip:config:
+ ip: 31.31.31.1
+ prefix-length: 24.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 32.32.32.1
+ openconfig-if-ip:config:
+ ip: 32.32.32.1
+ prefix-length: 24.0
+ secondary: True
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 31::1
+ openconfig-if-ip:config:
+ ip: 31::1
+ prefix-length: 64.0
+ - ip: 32::1
+ openconfig-if-ip:config:
+ ip: 32::1
+ prefix-length: 64.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan13/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway"
+ method: "patch"
+ data:
+ openconfig-interfaces-ext:static-anycast-gateway:
+ - 11.12.13.14/12
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan13/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv6/config"
+ method: "patch"
+ data:
+ config:
+ enabled: True
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ - name: Vlan13
+ ipv4:
+ anycast_addresses:
+ - 11.12.13.14/12
+ ipv6:
+ enabled: true
+ - name: Eth1/1
+ ipv4:
+ addresses:
+ - address: 31.31.31.1/24
+ - address: 32.32.32.1/24
+ secondary: True
+ ipv6:
+ addresses:
+ - address: 31::1/64
+ - address: 32::1/64
+ existing_l3_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interface:
+ - name: Eth1/1
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 73.1.1.1
+ prefix-length: 8
+ secondary: False
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 73::1
+ prefix-length: 64
+ enabled: True
+ - name: Eth1/2
+ subinterfaces:
+ subinterface:
+ - index: 0
+ config:
+ index: 0
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 93.1.1.2
+ prefix-length: 8
+ secondary: False
+ - config:
+ ip: 94.1.1.2
+ prefix-length: 8
+ secondary: True
+ openconfig-if-ip:ipv6:
+ addresses:
+ address:
+ - config:
+ ip: 93::2
+ prefix-length: 64
+ - config:
+ ip: 94::2
+ prefix-length: 64
+ config:
+ enabled: True
+ - name: Vlan99
+ openconfig-vlan:routed-vlan:
+ openconfig-if-ip:ipv4:
+ addresses:
+ address:
+ - config:
+ ip: 99.99.99.99
+ prefix-length: 8
+ secondary: False
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f2/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/config/enabled"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan99/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/addresses"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 31.31.31.1
+ openconfig-if-ip:config:
+ ip: 31.31.31.1
+ prefix-length: 24.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv4/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 32.32.32.1
+ openconfig-if-ip:config:
+ ip: 32.32.32.1
+ prefix-length: 24.0
+ secondary: True
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f1/subinterfaces/subinterface=0/openconfig-if-ip:ipv6/addresses"
+ method: "patch"
+ data:
+ openconfig-if-ip:addresses:
+ address:
+ - ip: 31::1
+ openconfig-if-ip:config:
+ ip: 31::1
+ prefix-length: 64.0
+ - ip: 32::1
+ openconfig-if-ip:config:
+ ip: 32::1
+ prefix-length: 64.0
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan13/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv4/openconfig-interfaces-ext:sag-ipv4/config/static-anycast-gateway"
+ method: "patch"
+ data:
+ openconfig-interfaces-ext:static-anycast-gateway:
+ - 11.12.13.14/12
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan13/openconfig-vlan:routed-vlan/openconfig-if-ip:ipv6/config"
+ method: "patch"
+ data:
+ config:
+ enabled: True
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_lag_interfaces.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_lag_interfaces.yaml
new file mode 100644
index 000000000..8ebc1623a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_lag_interfaces.yaml
@@ -0,0 +1,204 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: PortChannel10
+ members:
+ interfaces:
+ - member: Eth1/11
+ - member: Eth1/12
+ - name: PortChannel20
+ members:
+ interfaces:
+ - member: Eth1/21
+ - member: Eth1/22
+ - name: PortChannel30
+ existing_lag_interfaces_config:
+ - path: "data/openconfig-interfaces:interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 2000
+ - path: "data/sonic-portchannel:sonic-portchannel"
+ response:
+ code: 200
+ - path: "data/sonic-portchannel:sonic-portchannel"
+ response:
+ code: 200
+ value:
+ sonic-portchannel:sonic-portchannel:
+ PORTCHANNEL:
+ PORTCHANNEL_LIST:
+ - name: PortChannel10
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: PortChannel20
+ config:
+ name: PortChannel20
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: PortChannel30
+ config:
+ name: PortChannel30
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F11/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "patch"
+ data:
+ openconfig-if-aggregate:aggregate-id: PortChannel10
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F12/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "patch"
+ data:
+ openconfig-if-aggregate:aggregate-id: PortChannel10
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F21/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "patch"
+ data:
+ openconfig-if-aggregate:aggregate-id: PortChannel20
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2F22/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "patch"
+ data:
+ openconfig-if-aggregate:aggregate-id: PortChannel20
+deleted_01:
+ module_args:
+ state: deleted
+ existing_lag_interfaces_config:
+ - path: "data/sonic-portchannel:sonic-portchannel"
+ response:
+ code: 200
+ value:
+ sonic-portchannel:sonic-portchannel:
+ PORTCHANNEL:
+ PORTCHANNEL_LIST:
+ - name: PortChannel10
+ PORTCHANNEL_MEMBER:
+ PORTCHANNEL_MEMBER_LIST:
+ - ifname: Eth1/11
+ name: PortChannel10
+ - ifname: Eth1/12
+ name: PortChannel10
+ expected_config_requests:
+ - path: "data/sonic-portchannel:sonic-portchannel/PORTCHANNEL/PORTCHANNEL_LIST"
+ method: "delete"
+ data:
+ - path: "data/sonic-portchannel:sonic-portchannel/PORTCHANNEL_MEMBER/PORTCHANNEL_MEMBER_LIST"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: PortChannel10
+ members:
+ interfaces:
+ - member: Eth1/12
+ - member: Eth1/14
+ - name: PortChannel20
+ members:
+ interfaces:
+ - member: Eth1/23
+ - name: PortChannel30
+ members:
+ interfaces:
+ - member: Eth1/31
+ - name: PortChannel40
+ existing_lag_interfaces_config:
+ - path: "data/sonic-portchannel:sonic-portchannel"
+ response:
+ code: 200
+ value:
+ sonic-portchannel:sonic-portchannel:
+ PORTCHANNEL:
+ PORTCHANNEL_LIST:
+ - name: PortChannel10
+ - name: PortChannel20
+ - name: PortChannel30
+ - name: PortChannel40
+ PORTCHANNEL_MEMBER:
+ PORTCHANNEL_MEMBER_LIST:
+ - ifname: Eth1/11
+ name: PortChannel10
+ - ifname: Eth1/12
+ name: PortChannel10
+ - ifname: Eth1/13
+ name: PortChannel10
+ - ifname: Eth1/14
+ name: PortChannel10
+ - ifname: Eth1/21
+ name: PortChannel20
+ - ifname: Eth1/22
+ name: PortChannel20
+ - ifname: Eth1/23
+ name: PortChannel20
+ - ifname: Eth1/24
+ name: PortChannel20
+ - ifname: Eth1/31
+ name: PortChannel30
+ - ifname: Eth1/41
+ name: PortChannel40
+ - ifname: Eth1/42
+ name: PortChannel40
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f12/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f14/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f23/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=Eth1%2f31/openconfig-if-ethernet:ethernet/config/openconfig-if-aggregate:aggregate-id"
+ method: "delete"
+ data:
+ - path: "data/openconfig-interfaces:interfaces/interface=PortChannel40"
+ method: "delete"
+ data:
+
+deleted_03:
+ module_args:
+ state: deleted
+ config:
+ existing_lag_interfaces_config:
+ - path: "data/sonic-portchannel:sonic-portchannel"
+ response:
+ code: 200
+ value:
+ sonic-portchannel:sonic-portchannel:
+ PORTCHANNEL:
+ PORTCHANNEL_LIST:
+ - name: PortChannel10
+ - name: PortChannel20
+ PORTCHANNEL_MEMBER:
+ PORTCHANNEL_MEMBER_LIST:
+ - ifname: Eth1/11
+ name: PortChannel10
+ - ifname: Eth1/12
+ name: PortChannel10
+ - ifname: Eth1/13
+ name: PortChannel10
+ - ifname: Eth1/14
+ name: PortChannel10
+ - ifname: Eth1/21
+ name: PortChannel20
+ - ifname: Eth1/22
+ name: PortChannel20
+ - ifname: Eth1/23
+ name: PortChannel20
+ - ifname: Eth1/24
+ name: PortChannel20
+ expected_config_requests:
+ - path: "data/sonic-portchannel:sonic-portchannel/PORTCHANNEL/PORTCHANNEL_LIST"
+ method: "delete"
+ data:
+ - path: "data/sonic-portchannel:sonic-portchannel/PORTCHANNEL_MEMBER/PORTCHANNEL_MEMBER_LIST"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_logging.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_logging.yaml
new file mode 100644
index 000000000..61e996878
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_logging.yaml
@@ -0,0 +1,180 @@
+---
+merged_01:
+ module_args:
+ config:
+ remote_servers:
+ - host: 10.11.0.2
+ remote_port: 5
+ source_interface: Eth1/24
+ message_type: event
+ - host: log1.dell.com
+ remote_port: 6
+ source_interface: Eth1/28
+ existing_logging_config:
+ - path: "data/openconfig-system:system/logging"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/logging/remote-servers"
+ method: "patch"
+ data:
+ openconfig-system:remote-servers:
+ remote-server:
+ - host: 10.11.0.2
+ config:
+ host: 10.11.0.2
+ source-interface: Eth1/24
+ message-type: event
+ remote-port: 5
+ - host: log1.dell.com
+ config:
+ host: log1.dell.com
+ source-interface: Eth1/28
+ remote-port: 6
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_logging_config:
+ - path: "data/openconfig-system:system/logging"
+ response:
+ code: 200
+ value:
+ openconfig-system:logging:
+ remote-servers:
+ remote-server:
+ - host: 10.11.0.2
+ config:
+ host: 10.11.0.2
+ source-interface: Eth1/24
+ message-type: event
+ remote-port: 5
+ - host: log1.dell.com
+ config:
+ host: log1.dell.com
+ source-interface: Eth1/28
+ remote-port: 6
+ expected_config_requests:
+ - path: "data/openconfig-system:system/logging/remote-servers"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ remote_servers:
+ - host: 10.11.0.2
+ existing_logging_config:
+ - path: "data/openconfig-system:system/logging"
+ response:
+ code: 200
+ value:
+ openconfig-system:logging:
+ remote-servers:
+ remote-server:
+ - host: 10.11.0.2
+ config:
+ host: 10.11.0.2
+ source-interface: Eth1/24
+ message-type: event
+ remote-port: 5
+ - host: log1.dell.com
+ config:
+ host: log1.dell.com
+ source-interface: Eth1/28
+ remote-port: 6
+ expected_config_requests:
+ - path: "data/openconfig-system:system/logging/remote-servers/remote-server=10.11.0.2"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ remote_servers:
+ - host: 10.11.0.2
+ remote_port: 9
+ source_interface: Eth1/25
+ message_type: log
+ existing_logging_config:
+ - path: "data/openconfig-system:system/logging"
+ response:
+ code: 200
+ value:
+ openconfig-system:logging:
+ remote-servers:
+ remote-server:
+ - host: 10.11.0.2
+ config:
+ host: 10.11.0.2
+ source-interface: Eth1/24
+ message-type: event
+ remote-port: 5
+ - host: log1.dell.com
+ config:
+ host: log1.dell.com
+ source-interface: Eth1/28
+ remote-port: 6
+ expected_config_requests:
+ - path: "data/openconfig-system:system/logging/remote-servers"
+ method: "patch"
+ data:
+ openconfig-system:remote-servers:
+ remote-server:
+ - host: 10.11.0.2
+ config:
+ host: 10.11.0.2
+ source-interface: Eth1/25
+ message-type: log
+ remote-port: 9
+ vrf-name:
+ - path: "data/openconfig-system:system/logging/remote-servers/remote-server=10.11.0.2"
+ method: "delete"
+ data:
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ remote_servers:
+ - host: 10.11.0.10
+ remote_port: 10
+ source_interface: Eth1/26
+ message_type: log
+ existing_logging_config:
+ - path: "data/openconfig-system:system/logging"
+ response:
+ code: 200
+ value:
+ openconfig-system:logging:
+ remote-servers:
+ remote-server:
+ - host: 10.11.0.2
+ config:
+ host: 10.11.0.2
+ source-interface: Eth1/24
+ message-type: event
+ remote-port: 5
+ - host: log1.dell.com
+ config:
+ host: log1.dell.com
+ source-interface: Eth1/28
+ remote-port: 6
+ expected_config_requests:
+ - path: "data/openconfig-system:system/logging/remote-servers"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/logging/remote-servers"
+ method: "patch"
+ data:
+ openconfig-system:remote-servers:
+ remote-server:
+ - host: 10.11.0.10
+ config:
+ host: 10.11.0.10
+ source-interface: Eth1/26
+ message-type: log
+ remote-port: 10
+ vrf-name:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mac.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mac.yaml
new file mode 100644
index 000000000..cd869a1a5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mac.yaml
@@ -0,0 +1,373 @@
+---
+merged_01:
+ module_args:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 50
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:33:33:33:33:33'
+ vlan_id: 2
+ interface: 'Ethernet24'
+ - mac_address: '00:00:4e:00:24:af'
+ vlan_id: 3
+ interface: 'Ethernet28'
+ existing_mac_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/config/mac-aging-time"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening/config"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/mac-table/entries"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb"
+ method: "patch"
+ data:
+ openconfig-network-instance:fdb:
+ config:
+ mac-aging-time: 50
+ mac-table:
+ entries:
+ entry:
+ - mac-address: '00:00:5e:00:53:af'
+ vlan: 1
+ config:
+ mac-address: '00:00:5e:00:53:af'
+ vlan: 1
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet20'
+ subinterface: 0
+ - mac-address: '00:33:33:33:33:33'
+ vlan: 2
+ config:
+ mac-address: '00:33:33:33:33:33'
+ vlan: 2
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet24'
+ subinterface: 0
+ - mac-address: '00:00:4e:00:24:af'
+ vlan: 3
+ config:
+ mac-address: '00:00:4e:00:24:af'
+ vlan: 3
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet28'
+ subinterface: 0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening"
+ method: "patch"
+ data:
+ openconfig-mac-dampening:mac-dampening:
+ config:
+ interval: 20
+ threshold: 30
+
+replaced_01:
+ module_args:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 45
+ dampening_interval: 30
+ dampening_threshold: 60
+ mac_table_entries:
+ - mac_address: '00:00:5e:00:53:af'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:44:44:44:44:44'
+ vlan_id: 2
+ interface: 'Ethernet28'
+ existing_mac_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/config/mac-aging-time"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:mac-aging-time: 50
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening/config"
+ response:
+ code: 200
+ value:
+ openconfig-mac-dampening:config:
+ threshold: 30
+ interval: 20
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/mac-table/entries"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:entries:
+ entry:
+ - mac-address: 00:00:4e:00:24:af
+ vlan: 3
+ config:
+ mac-address: 00:00:4e:00:24:af
+ vlan: 3
+ interface:
+ interface-ref:
+ config:
+ interface: Ethernet28
+ - mac-address: 00:00:5e:00:53:af
+ vlan: 1
+ config:
+ mac-address: 00:00:5e:00:53:af
+ vlan: 1
+ interface:
+ interface-ref:
+ config:
+ interface: Ethernet24
+ - mac-address: '00:33:33:33:33:33'
+ vlan: 2
+ config:
+ mac-address: '00:33:33:33:33:33'
+ vlan: 2
+ interface:
+ interface-ref:
+ config:
+ interface: Ethernet20
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb"
+ method: "patch"
+ data:
+ openconfig-network-instance:fdb:
+ config:
+ mac-aging-time: 45
+ mac-table:
+ entries:
+ entry:
+ - mac-address: '00:00:5e:00:53:af'
+ vlan: 1
+ config:
+ mac-address: '00:00:5e:00:53:af'
+ vlan: 1
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet20'
+ subinterface: 0
+ - mac-address: '00:44:44:44:44:44'
+ vlan: 2
+ config:
+ mac-address: '00:44:44:44:44:44'
+ vlan: 2
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet28'
+ subinterface: 0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening"
+ method: "patch"
+ data:
+ openconfig-mac-dampening:mac-dampening:
+ config:
+ interval: 30
+ threshold: 60
+
+overridden_01:
+ module_args:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 10
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:11:11:11:11:11'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:22:22:22:22:22'
+ vlan_id: 2
+ interface: 'Ethernet24'
+ - mac_address: '00:00:33:33:33:33'
+ vlan_id: 3
+ interface: 'Ethernet28'
+ state: overridden
+ existing_mac_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/config/mac-aging-time"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:mac-aging-time: 50
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening/config"
+ response:
+ code: 200
+ value:
+ openconfig-mac-dampening:config:
+ threshold: 30
+ interval: 20
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/mac-table/entries"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:entries:
+ entry:
+ - mac-address: '00:00:5e:00:53:af'
+ vlan: 1
+ config:
+ mac-address: '00:00:5e:00:53:af'
+ vlan: 1
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet20'
+ subinterface: 0
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb"
+ method: "patch"
+ data:
+ openconfig-network-instance:fdb:
+ config:
+ mac-aging-time: 10
+ mac-table:
+ entries:
+ entry:
+ - mac-address: '00:11:11:11:11:11'
+ vlan: 1
+ config:
+ mac-address: '00:11:11:11:11:11'
+ vlan: 1
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet20'
+ subinterface: 0
+ - mac-address: '00:22:22:22:22:22'
+ vlan: 2
+ config:
+ mac-address: '00:22:22:22:22:22'
+ vlan: 2
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet24'
+ subinterface: 0
+ - mac-address: '00:00:33:33:33:33'
+ vlan: 3
+ config:
+ mac-address: '00:00:33:33:33:33'
+ vlan: 3
+ interface:
+ interface-ref:
+ config:
+ interface: 'Ethernet28'
+ subinterface: 0
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening"
+ method: "patch"
+ data:
+ openconfig-mac-dampening:mac-dampening:
+ config:
+ interval: 20
+ threshold: 30
+
+deleted_01:
+ module_args:
+ config:
+ - vrf_name: 'default'
+ mac:
+ aging_time: 10
+ dampening_interval: 20
+ dampening_threshold: 30
+ mac_table_entries:
+ - mac_address: '00:11:11:11:11:11'
+ vlan_id: 1
+ interface: 'Ethernet20'
+ - mac_address: '00:00:33:33:33:33'
+ vlan_id: 3
+ state: deleted
+ existing_mac_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/config/mac-aging-time"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:mac-aging-time: 10
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening/config"
+ response:
+ code: 200
+ value:
+ openconfig-mac-dampening:config:
+ threshold: 30
+ interval: 20
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/mac-table/entries"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:entries:
+ entry:
+ - mac-address: '00:00:33:33:33:33'
+ vlan: 3
+ config:
+ mac-address: '00:00:33:33:33:33'
+ vlan: 3
+ interface:
+ interface-ref:
+ config:
+ interface: Ethernet28
+ - mac-address: '00:11:11:11:11:11'
+ vlan: 1
+ config:
+ mac-address: '00:11:11:11:11:11'
+ vlan: 1
+ interface:
+ interface-ref:
+ config:
+ interface: Ethernet20
+ - mac-address: '00:22:22:22:22:22'
+ vlan: 2
+ config:
+ mac-address: '00:22:22:22:22:22'
+ vlan: 2
+ interface:
+ interface-ref:
+ config:
+ interface: Ethernet24
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/config/mac-aging-time"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening/config/interval"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/openconfig-mac-dampening:mac-dampening/config/threshold"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/mac-table/entries/entry=00:11:11:11:11:11,1/interface"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/fdb/mac-table/entries/entry=00:00:33:33:33:33,3"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mclag.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mclag.yaml
new file mode 100644
index 000000000..b12fa0704
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_mclag.yaml
@@ -0,0 +1,987 @@
+---
+merged_01:
+ module_args:
+ config:
+ domain_id: 1
+ keepalive: 1
+ peer_address: 1.1.1.1
+ peer_link: 'Portchannel1'
+ source_address: 2.2.2.2
+ session_timeout: 3
+ system_mac: '00:00:00:11:11:11'
+ unique_ip:
+ vlans:
+ - vlan: Vlan4
+ - vlan: Vlan201-205
+ peer_gateway:
+ vlans:
+ - vlan: Vlan4
+ - vlan: Vlan201-205
+ members:
+ portchannels:
+ - lag: PortChannel10
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-mclag:interface:
+ - name: PortChannel10
+ config:
+ name: PortChannel10
+ mclag-domain-id: 1
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain"
+ method: "patch"
+ data:
+ openconfig-mclag:mclag-domain:
+ - domain-id: 1
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ openconfig-mclag:mclag-system-mac: 00:00:00:11:11:11
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ peer-gateway-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ unique-ip-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+
+merged_02:
+ module_args:
+ config:
+ domain_id: 1
+ unique_ip:
+ vlans:
+ - vlan: Vlan204-208
+ peer_gateway:
+ vlans:
+ - vlan: Vlan204-208
+ state: merged
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 1
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ vlan-ifs:
+ vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ peer-gateway-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ vlan-interfaces:
+ vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ unique-ip-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-if:
+ - name: Vlan206
+ config:
+ name: Vlan206
+ peer-gateway-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ peer-gateway-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ peer-gateway-enable: ENABLE
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-interface:
+ - name: Vlan206
+ config:
+ name: Vlan206
+ unique-ip-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ unique-ip-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ unique-ip-enable: ENABLE
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 1
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ - path: "data/openconfig-mclag:mclag/interfaces/interface"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:interface:
+ - name: PortChannel10
+ config:
+ name: PortChannel10
+ mclag-domain-id: 1
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/mclag-domains"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ domain_id: 8
+ keepalive: 9
+ peer_address: 1.1.1.1
+ peer_link: 'Portchannel1'
+ source_address: 2.2.2.2
+ session_timeout: 33
+ system_mac: '00:00:00:11:11:11'
+ unique_ip:
+ vlans:
+ - vlan: Vlan4
+ - vlan: Vlan201
+ - vlan: Vlan203-204
+ peer_gateway:
+ vlans:
+ - vlan: Vlan4
+ - vlan: Vlan201
+ - vlan: Vlan203-204
+ members:
+ portchannels:
+ - lag: PortChannel99
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 8
+ config:
+ session-timeout: 33
+ keepalive-interval: 9
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ interfaces:
+ interface:
+ - config:
+ name: PortChannel88
+ mclag-domain-id: 8
+ mclag-id: 88
+ name: PortChannel88
+ - config:
+ name: PortChannel99
+ mclag-domain-id: 8
+ mclag-id: 99
+ name: PortChannel99
+ vlan-ifs:
+ vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ peer-gateway-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ vlan-interfaces:
+ vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ unique-ip-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/interfaces/interface=PortChannel99"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=8/config/keepalive-interval"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=8/config/mclag-system-mac"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=8/config/peer-address"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=8/config/peer-link"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=8/config/session-timeout"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=8/config/source-address"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if=Vlan4"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if=Vlan201"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if=Vlan203"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if=Vlan204"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan4"
+ method: "delete"
+ data:
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan201"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan203"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan204"
+ method: "delete"
+
+replaced_01:
+ module_args:
+ config:
+ domain_id: 1
+ unique_ip:
+ vlans:
+ - vlan: Vlan204-208
+ peer_gateway:
+ vlans:
+ members:
+ portchannels:
+ - lag: portchannel99
+ - lag: portchannel100
+ state: replaced
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 1
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ delay-restore: 300
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ mclag-gateway-macs:
+ mclag-gateway-mac:
+ - gateway-mac: 00:00:00:14:14:14
+ config:
+ gateway-mac: 00:00:00:14:14:14
+ interfaces:
+ interface:
+ - name: PortChannel88
+ config:
+ name: PortChannel88
+ mclag-domain-id: 1
+ mclag-id: 88
+ - name: PortChannel99
+ config:
+ name: PortChannel99
+ mclag-domain-id: 1
+ mclag-id: 99
+ vlan-ifs:
+ vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ peer-gateway-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ vlan-interfaces:
+ vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ unique-ip-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/interfaces/interface=PortChannel88"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan4"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan201"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan202"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan203"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-mclag:interface:
+ - name: PortChannel100
+ config:
+ name: PortChannel100
+ mclag-domain-id: 1
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-interface:
+ - name: Vlan206
+ config:
+ name: Vlan206
+ unique-ip-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ unique-ip-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ unique-ip-enable: ENABLE
+
+replaced_02:
+ module_args:
+ config:
+ domain_id: 10
+ session_timeout: 60
+ system_mac: 00:00:00:02:02:02
+ unique_ip:
+ vlans:
+ - vlan: Vlan204-208
+ peer_gateway:
+ vlans:
+ - vlan: Vlan204-208
+ members:
+ portchannels:
+ - lag: portchannel99
+ - lag: portchannel100
+ state: replaced
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 1
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ delay-restore: 300
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ mclag-gateway-macs:
+ mclag-gateway-mac:
+ - gateway-mac: 00:00:00:14:14:14
+ config:
+ gateway-mac: 00:00:00:14:14:14
+ interfaces:
+ interface:
+ - name: PortChannel88
+ config:
+ name: PortChannel88
+ mclag-domain-id: 1
+ mclag-id: 88
+ - name: PortChannel99
+ config:
+ name: PortChannel99
+ mclag-domain-id: 1
+ mclag-id: 99
+ vlan-ifs:
+ vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ peer-gateway-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ vlan-interfaces:
+ vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ unique-ip-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain"
+ method: "patch"
+ data:
+ openconfig-mclag:mclag-domain:
+ - domain-id: 10
+ config:
+ session-timeout: 60
+ openconfig-mclag:mclag-system-mac: 00:00:00:02:02:02
+ - path: "data/openconfig-mclag:mclag/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-mclag:interface:
+ - name: PortChannel99
+ config:
+ name: PortChannel99
+ mclag-domain-id: 10
+ - name: PortChannel100
+ config:
+ name: PortChannel100
+ mclag-domain-id: 10
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-if:
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ - name: Vlan206
+ config:
+ name: Vlan206
+ peer-gateway-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ peer-gateway-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ peer-gateway-enable: ENABLE
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-interface:
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ - name: Vlan206
+ config:
+ name: Vlan206
+ unique-ip-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ unique-ip-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ unique-ip-enable: ENABLE
+
+replaced_03:
+ module_args:
+ config:
+ domain_id: 1
+ gateway_mac: 00:00:00:12:12:12
+ unique_ip:
+ vlans:
+ - vlan: Vlan204-208
+ state: replaced
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 1
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ delay-restore: 300
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ mclag-gateway-macs:
+ mclag-gateway-mac:
+ - gateway-mac: 00:00:00:14:14:14
+ config:
+ gateway-mac: 00:00:00:14:14:14
+ interfaces:
+ interface:
+ - name: PortChannel88
+ config:
+ name: PortChannel88
+ mclag-domain-id: 1
+ mclag-id: 88
+ - name: PortChannel99
+ config:
+ name: PortChannel99
+ mclag-domain-id: 1
+ mclag-id: 99
+ vlan-ifs:
+ vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ peer-gateway-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ peer-gateway-enable: ENABLE
+ vlan-interfaces:
+ vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan201
+ config:
+ name: Vlan201
+ unique-ip-enable: ENABLE
+ - name: Vlan202
+ config:
+ name: Vlan202
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/interfaces/interface=PortChannel88"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/interfaces/interface=PortChannel99"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan4"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan201"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan202"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface=Vlan203"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=1/config/mclag-system-mac"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=1/config/peer-address"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=1/config/peer-link"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=1/config/session-timeout"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=1/config/source-address"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac"
+ method: "patch"
+ data:
+ openconfig-mclag:mclag-gateway-mac:
+ - gateway-mac: 00:00:00:12:12:12
+ config:
+ gateway-mac: 00:00:00:12:12:12
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-interface:
+ - name: Vlan206
+ config:
+ name: Vlan206
+ unique-ip-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ unique-ip-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ unique-ip-enable: ENABLE
+
+overridden_01:
+ module_args:
+ config:
+ domain_id: 10
+ peer_address: 1.1.1.1
+ peer_link: 'Portchannel1'
+ source_address: 2.2.2.2
+ keepalive: 1
+ session_timeout: 60
+ system_mac: 00:00:00:02:02:02
+ unique_ip:
+ vlans:
+ - vlan: Vlan205-208
+ members:
+ portchannels:
+ - lag: portchannel99
+ - lag: portchannel100
+ state: overridden
+ existing_mclag_config:
+ - path: "data/openconfig-mclag:mclag"
+ response:
+ code: 200
+ value:
+ openconfig-mclag:mclag:
+ mclag-domains:
+ mclag-domain:
+ - domain-id: 10
+ config:
+ session-timeout: 3
+ keepalive-interval: 1
+ delay-restore: 360
+ source-address: 2.2.2.2
+ peer-address: 1.1.1.1
+ peer-link: PortChannel1
+ mclag-system-mac: 00:00:00:11:11:11
+ mclag-gateway-macs:
+ mclag-gateway-mac:
+ - gateway-mac: 00:00:00:14:14:14
+ config:
+ gateway-mac: 00:00:00:14:14:14
+ interfaces:
+ interface:
+ - name: PortChannel88
+ config:
+ name: PortChannel88
+ mclag-domain-id: 10
+ mclag-id: 88
+ - name: PortChannel99
+ config:
+ name: PortChannel99
+ mclag-domain-id: 10
+ mclag-id: 99
+ vlan-ifs:
+ vlan-if:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ peer-gateway-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ peer-gateway-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ peer-gateway-enable: ENABLE
+ vlan-interfaces:
+ vlan-interface:
+ - name: Vlan4
+ config:
+ name: Vlan4
+ unique-ip-enable: ENABLE
+ - name: Vlan203
+ config:
+ name: Vlan203
+ unique-ip-enable: ENABLE
+ - name: Vlan204
+ config:
+ name: Vlan204
+ unique-ip-enable: ENABLE
+ expected_config_requests:
+ - path: "data/openconfig-mclag:mclag/mclag-gateway-macs/mclag-gateway-mac"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-ifs/vlan-if"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/interfaces/interface=PortChannel88"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain=10/config/delay-restore"
+ method: "delete"
+ - path: "data/openconfig-mclag:mclag/mclag-domains/mclag-domain"
+ method: "patch"
+ data:
+ openconfig-mclag:mclag-domain:
+ - domain-id: 10
+ config:
+ session-timeout: 60
+ openconfig-mclag:mclag-system-mac: 00:00:00:02:02:02
+ - path: "data/openconfig-mclag:mclag/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-mclag:interface:
+ - name: PortChannel100
+ config:
+ name: PortChannel100
+ mclag-domain-id: 10
+ - path: "data/openconfig-mclag:mclag/vlan-interfaces/vlan-interface"
+ method: "patch"
+ data:
+ openconfig-mclag:vlan-interface:
+ - name: Vlan205
+ config:
+ name: Vlan205
+ unique-ip-enable: ENABLE
+ - name: Vlan206
+ config:
+ name: Vlan206
+ unique-ip-enable: ENABLE
+ - name: Vlan207
+ config:
+ name: Vlan207
+ unique-ip-enable: ENABLE
+ - name: Vlan208
+ config:
+ name: Vlan208
+ unique-ip-enable: ENABLE
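Note on the sonic_mclag fixture above: each case's module_args mirrors the shape of a playbook task, and expected_config_requests lists the REST calls the module is expected to issue against the MCLAG OpenConfig endpoints. A minimal task sketch built from a subset of merged_01's arguments (the collection-qualified module name is an assumption inferred from the fixture filename):

    - name: Configure MCLAG domain 1 (merged)
      dellemc.enterprise_sonic.sonic_mclag:
        config:
          domain_id: 1
          peer_address: 1.1.1.1
          peer_link: 'Portchannel1'
          source_address: 2.2.2.2
          system_mac: '00:00:00:11:11:11'
          members:
            portchannels:
              - lag: PortChannel10
        state: merged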
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ntp.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ntp.yaml
new file mode 100644
index 000000000..b1b7dc9d0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_ntp.yaml
@@ -0,0 +1,486 @@
+---
+merged_01:
+ module_args:
+ config:
+ servers:
+ - address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ key_id: 19
+ - address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: true
+ source_interfaces:
+ - Eth1/8
+ - Eth1/16
+ trusted_keys:
+ - 88
+ - 99
+ ntp_keys:
+ - key_id: 10
+ key_type: NTP_AUTH_MD5
+ key_value: "abcd"
+ - key_id: 20
+ key_type: NTP_AUTH_SHA2_256
+ key_value: "efgh"
+ enable_ntp_auth: True
+ existing_ntp_config:
+ - path: "data/openconfig-system:system/ntp"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/ntp/config/enable-ntp-auth"
+ method: "patch"
+ data:
+ openconfig-system:enable-ntp-auth: True
+ - path: "data/openconfig-system:system/ntp/config/source-interface"
+ method: "patch"
+ data:
+ openconfig-system:source-interface:
+ - Eth1/8
+ - Eth1/16
+ - path: "data/openconfig-system:system/ntp/config/trusted-key"
+ method: "patch"
+ data:
+ openconfig-system:trusted-key:
+ - 88
+ - 99
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "patch"
+ data:
+ openconfig-system:ntp-keys:
+ ntp-key:
+ - key-id: 10
+ config:
+ encrypted:
+ key-id: 10
+ key-type: NTP_AUTH_MD5
+ key-value: "abcd"
+ - key-id: 20
+ config:
+ encrypted:
+ key-id: 20
+ key-type: NTP_AUTH_SHA2_256
+ key-value: "efgh"
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "patch"
+ data:
+ openconfig-system:servers:
+ server:
+ - address: 10.11.0.2
+ config:
+ address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ key-id: 19
+ - address: dell.org
+ config:
+ address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: True
+merged_02:
+ module_args:
+ config:
+ servers:
+ - address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ - address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: true
+ source_interfaces:
+ - Eth1/8
+ - Eth1/16
+ ntp_keys:
+ - key_id: 10
+ - key_id: 20
+ existing_ntp_config:
+ - path: "data/openconfig-system:system/ntp"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/ntp/config/source-interface"
+ method: "patch"
+ data:
+ openconfig-system:source-interface:
+ - Eth1/8
+ - Eth1/16
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "patch"
+ data:
+ openconfig-system:ntp-keys:
+ ntp-key:
+ - key-id: 10
+ config:
+ encrypted:
+ key-id: 10
+ key-type:
+ key-value:
+ - key-id: 20
+ config:
+ encrypted:
+ key-id: 20
+ key-type:
+ key-value:
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "patch"
+ data:
+ openconfig-system:servers:
+ server:
+ - address: 10.11.0.2
+ config:
+ address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ - address: dell.org
+ config:
+ address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: True
+deleted_01:
+ module_args:
+ state: deleted
+ existing_ntp_config:
+ - path: "data/openconfig-system:system/ntp"
+ response:
+ code: 200
+ value:
+ openconfig-system:ntp:
+ servers:
+ server:
+ - address: 10.11.0.2
+ config:
+ address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ - address: dell.org
+ config:
+ address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: True
+ config:
+ source-interface:
+ - Eth1/8
+ - Eth1/16
+ enable-ntp-auth: True
+ trusted-key:
+ - 77
+ - 88
+ network-instance: mgmt
+ ntp-keys:
+ ntp-key:
+ - key-id: 11
+ config:
+ key-id: 11
+ key-type: NTP_AUTH_MD5
+ key-value: "abcd"
+ - key-id: 12
+ config:
+ key-id: 12
+ key-type: NTP_AUTH_SHA2_256
+ key-value: "efgh"
+
+ expected_config_requests:
+ - path: "data/openconfig-system:system/ntp"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ servers:
+ - address: 10.11.0.2
+ source_interfaces:
+ - Eth1/8
+ ntp_keys:
+ - key_id: 11
+ trusted_keys:
+ - 88
+ vrf: mgmt
+ existing_ntp_config:
+ - path: "data/openconfig-system:system/ntp"
+ response:
+ code: 200
+ value:
+ openconfig-system:ntp:
+ servers:
+ server:
+ - address: 10.11.0.2
+ config:
+ address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ - address: dell.org
+ config:
+ address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: True
+ config:
+ source-interface:
+ - Eth1/8
+ - Eth1/16
+ enable-ntp-auth: True
+ trusted-key:
+ - 77
+ - 88
+ network-instance: mgmt
+ ntp-keys:
+ ntp-key:
+ - key-id: 11
+ config:
+ key-id: 11
+ key-type: NTP_AUTH_MD5
+ key-value: "abcd"
+ - key-id: 12
+ config:
+ key-id: 12
+ key-type: NTP_AUTH_SHA2_256
+ key-value: "efgh"
+
+ expected_config_requests:
+ - path: "data/openconfig-system:system/ntp/config/network-instance"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/config/source-interface=Eth1%2f8"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/config/trusted-key=88"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/ntp-keys/ntp-key=11"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/servers/server=10.11.0.2"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ servers:
+ - address: 10.11.0.3
+ minpoll: 6
+ maxpoll: 13
+ key_id: 20
+ source_interfaces:
+ - Eth1/9
+ enable_ntp_auth: False
+ trusted_keys:
+ ntp_keys:
+ - key_id: 30
+ key_type: NTP_AUTH_MD5
+ key_value: "ntpkey30Value"
+ vrf: mgmt
+ existing_ntp_config:
+ - path: "data/openconfig-system:system/ntp"
+ response:
+ code: 200
+ value:
+ openconfig-system:ntp:
+ servers:
+ server:
+ - address: 10.11.0.2
+ config:
+ address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ - address: dell.org
+ config:
+ address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: True
+ config:
+ source-interface:
+ - Eth1/8
+ - Eth1/16
+ enable-ntp-auth: True
+ trusted-key:
+ - 77
+ - 88
+ network-instance: mgmt
+ ntp-keys:
+ ntp-key:
+ - key-id: 11
+ config:
+ key-id: 11
+ key-type: NTP_AUTH_MD5
+ key-value: "abcd"
+ - key-id: 12
+ config:
+ key-id: 12
+ key-type: NTP_AUTH_SHA2_256
+ key-value: "efgh"
+
+ expected_config_requests:
+ - path: "data/openconfig-system:system/ntp"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/config/enable-ntp-auth"
+ method: "patch"
+ data:
+ openconfig-system:enable-ntp-auth: False
+ - path: "data/openconfig-system:system/ntp/config/network-instance"
+ method: "patch"
+ data:
+ openconfig-system:network-instance: mgmt
+ - path: "data/openconfig-system:system/ntp/config/source-interface"
+ method: "patch"
+ data:
+ openconfig-system:source-interface:
+ - Eth1/9
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "patch"
+ data:
+ openconfig-system:ntp-keys:
+ ntp-key:
+ - key-id: 30
+ config:
+ encrypted:
+ key-id: 30
+ key-type: NTP_AUTH_MD5
+ key-value: "ntpkey30Value"
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "patch"
+ data:
+ openconfig-system:servers:
+ server:
+ - address: 10.11.0.3
+ config:
+ address: 10.11.0.3
+ minpoll: 6
+ maxpoll: 13
+ prefer: False
+ key-id: 20
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ servers:
+ - address: 10.11.0.3
+ minpoll: 6
+ maxpoll: 13
+ key_id: 20
+ source_interfaces:
+ - Eth1/9
+ enable_ntp_auth: False
+ trusted_keys:
+ - 66
+ ntp_keys:
+ - key_id: 30
+ key_type: NTP_AUTH_MD5
+ key_value: "ntpkey30Value"
+ vrf: mgmt
+ existing_ntp_config:
+ - path: "data/openconfig-system:system/ntp"
+ response:
+ code: 200
+ value:
+ openconfig-system:ntp:
+ servers:
+ server:
+ - address: 10.11.0.2
+ config:
+ address: 10.11.0.2
+ minpoll: 5
+ maxpoll: 12
+ - address: dell.org
+ config:
+ address: dell.org
+ minpoll: 7
+ maxpoll: 10
+ prefer: True
+ config:
+ source-interface:
+ - Eth1/8
+ - Eth1/16
+ enable-ntp-auth: True
+ trusted-key:
+ - 77
+ - 88
+ network-instance: mgmt
+ ntp-keys:
+ ntp-key:
+ - key-id: 11
+ config:
+ key-id: 11
+ key-type: NTP_AUTH_MD5
+ key-value: "abcd"
+ - key-id: 12
+ config:
+ key-id: 12
+ key-type: NTP_AUTH_SHA2_256
+ key-value: "efgh"
+
+ expected_config_requests:
+ - path: "data/openconfig-system:system/ntp"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/config/enable-ntp-auth"
+ method: "patch"
+ data:
+ openconfig-system:enable-ntp-auth: False
+ - path: "data/openconfig-system:system/ntp/config/network-instance"
+ method: "patch"
+ data:
+ openconfig-system:network-instance: mgmt
+ - path: "data/openconfig-system:system/ntp/config/source-interface"
+ method: "patch"
+ data:
+ openconfig-system:source-interface:
+ - Eth1/9
+ - path: "data/openconfig-system:system/ntp/config/trusted-key"
+ method: "patch"
+ data:
+ openconfig-system:trusted-key:
+ - 66
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/ntp-keys"
+ method: "patch"
+ data:
+ openconfig-system:ntp-keys:
+ ntp-key:
+ - key-id: 30
+ config:
+ encrypted:
+ key-id: 30
+ key-type: NTP_AUTH_MD5
+ key-value: "ntpkey30Value"
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/ntp/servers"
+ method: "patch"
+ data:
+ openconfig-system:servers:
+ server:
+ - address: 10.11.0.3
+ config:
+ address: 10.11.0.3
+ minpoll: 6
+ maxpoll: 13
+ prefer: False
+ key-id: 20
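Note on the sonic_ntp fixture above: deleted_02 exercises selective deletion, where each listed server, source interface, key, and the VRF maps to an individual REST delete in expected_config_requests. A rough task equivalent, taken from that case's module_args (module name again assumed from the fixture filename):

    - name: Remove selected NTP settings
      dellemc.enterprise_sonic.sonic_ntp:
        config:
          servers:
            - address: 10.11.0.2
          source_interfaces:
            - Eth1/8
          ntp_keys:
            - key_id: 11
          trusted_keys:
            - 88
          vrf: mgmt
        state: deleted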
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_pki.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_pki.yaml
new file mode 100644
index 000000000..8606b8b4f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_pki.yaml
@@ -0,0 +1,187 @@
+---
+merged_01:
+ module_args:
+ config:
+ security_profiles:
+ - profile_name: rest
+ ocsp_responder_list:
+ - http://example.com/ocspa
+ - http://example.com/ocspb
+ certificate_name: host
+ trust_store: default-ts
+ trust_stores:
+ - name: default-ts
+ ca_name:
+ - CA2
+ state: merged
+
+ existing_pki_config:
+ - path: "data/openconfig-pki:pki/"
+ response:
+ code: 200
+ value: {}
+ expected_config_requests:
+ - path: "data/openconfig-pki:pki/security-profiles/security-profile"
+ method: "patch"
+ data:
+ openconfig-pki:security-profile:
+ - profile-name: rest
+ config:
+ ocsp-responder-list:
+ - http://example.com/ocspa
+ - http://example.com/ocspb
+ certificate-name: host
+ trust-store: default-ts
+ - path: "data/openconfig-pki:pki/trust-stores/trust-store"
+ method: "patch"
+ data:
+ openconfig-pki:trust-store:
+ - name: default-ts
+ config:
+ name: default-ts
+ ca-name:
+ - CA2
+
+deleted_01:
+ module_args:
+ config:
+ security_profiles:
+ - profile_name: rest
+ trust_store: default-ts
+ state: deleted
+ existing_pki_config:
+ - path: "data/openconfig-pki:pki/"
+ response:
+ code: 200
+ value:
+ openconfig-pki:pki:
+ security-profiles:
+ security-profile:
+ - profile-name: rest
+ config:
+ ocsp-responder-list:
+ - http://example.com/ocspa
+ - http://example.com/ocspb
+ certificate-name: host
+ trust-store: default-ts
+ profile-name: rest
+ trust-stores:
+ trust-store:
+ - name: default-ts
+ config:
+ name: default-ts
+ ca-name:
+ - CA2
+ expected_config_requests:
+ - path: "data/openconfig-pki:pki/security-profiles/security-profile=rest/config/trust-store"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ config:
+ security_profiles:
+ - profile_name: rest
+ ocsp_responder_list:
+ - http://example.com/ocsp
+ key_usage_check: True
+ state: replaced
+
+ existing_pki_config:
+ - path: "data/openconfig-pki:pki/"
+ response:
+ code: 200
+ value:
+ openconfig-pki:pki:
+ security-profiles:
+ security-profile:
+ - profile-name: rest
+ config:
+ ocsp-responder-list:
+ - http://example.com/ocspa
+ - http://example.com/ocspb
+ certificate-name: host
+ trust-store: default-ts
+ profile-name: rest
+ trust-stores:
+ trust-store:
+ - name: default-ts
+ config:
+ name: default-ts
+ ca-name:
+ - CA2
+ expected_config_requests:
+ - path: "data/openconfig-pki:pki/security-profiles/security-profile=rest"
+ method: "put"
+ data:
+ openconfig-pki:security-profile:
+ - profile-name: rest
+ config:
+ profile-name: rest
+ ocsp-responder-list:
+ - http://example.com/ocsp
+ key-usage-check: True
+
+overridden_01:
+ module_args:
+ config:
+ security_profiles:
+ - profile_name: telemetry
+ ocsp_responder_list:
+ - http://example.com/ocsp
+ certificate_name: host2
+ trust_stores:
+ - name: telem-ts
+ ca_name:
+ - CA
+ state: overridden
+
+ existing_pki_config:
+ - path: "data/openconfig-pki:pki/"
+ response:
+ code: 200
+ value:
+ openconfig-pki:pki:
+ security-profiles:
+ security-profile:
+ - profile-name: rest
+ config:
+ ocsp-responder-list:
+ - http://example.com/ocspa
+ - http://example.com/ocspb
+ certificate-name: host
+ trust-store: default-ts
+ profile-name: rest
+ trust-stores:
+ trust-store:
+ - name: default-ts
+ config:
+ name: default-ts
+ ca-name:
+ - CA2
+ expected_config_requests:
+ - path: "data/openconfig-pki:pki/security-profiles/security-profile=rest"
+ method: "delete"
+ data:
+ - path: "data/openconfig-pki:pki/trust-stores/trust-store=default-ts"
+ method: "delete"
+ data:
+ - path: "data/openconfig-pki:pki/security-profiles/security-profile=telemetry"
+ method: "put"
+ data:
+ openconfig-pki:security-profile:
+ - profile-name: telemetry
+ config:
+ profile-name: telemetry
+ ocsp-responder-list:
+ - http://example.com/ocsp
+ certificate-name: host2
+ - path: "data/openconfig-pki:pki/trust-stores/trust-store=telem-ts"
+ method: "put"
+ data:
+ openconfig-pki:trust-store:
+ - name: telem-ts
+ config:
+ name: telem-ts
+ ca-name:
+ - CA
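Note on the sonic_pki fixture above: overridden_01 exercises full override semantics, deleting the existing "rest" profile and "default-ts" trust store before writing the new "telemetry" profile and "telem-ts" store with put requests. A task sketch drawn from that case's module_args (module name assumed from the fixture filename):

    - name: Override PKI security profiles and trust stores
      dellemc.enterprise_sonic.sonic_pki:
        config:
          security_profiles:
            - profile_name: telemetry
              ocsp_responder_list:
                - http://example.com/ocsp
              certificate_name: host2
          trust_stores:
            - name: telem-ts
              ca_name:
                - CA
        state: overridden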
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_breakout.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_breakout.yaml
new file mode 100644
index 000000000..81c86886a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_breakout.yaml
@@ -0,0 +1,252 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: 1/10
+ mode: 1x100G
+ existing_port_breakout_config:
+ - path: "data/sonic-port-breakout:sonic-port-breakout/BREAKOUT_CFG/BREAKOUT_CFG_LIST"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-platform:components"
+ method: "patch"
+ data:
+ openconfig-platform:components:
+ component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ num-breakouts: 1
+ breakout-speed: SPEED_100GB
+deleted_01:
+ module_args:
+ state: deleted
+ existing_port_breakout_config:
+ - path: "data/sonic-port-breakout:sonic-port-breakout/BREAKOUT_CFG/BREAKOUT_CFG_LIST"
+ response:
+ code: 200
+ value:
+ sonic-port-breakout:BREAKOUT_CFG_LIST:
+ - port: 1/10
+ brkout_mode: 1x100G
+ - path: "data/openconfig-platform:components/component=1%2f10"
+ response:
+ code: 200
+ value:
+ openconfig-platform:component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ breakout-speed: openconfig-if-ethernet:SPEED_100GB
+ num-breakouts: 1
+ expected_config_requests:
+ - path: "data/openconfig-platform:components/component=1%2f10/port/openconfig-platform-port:breakout-mode"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: 1/10
+ mode: 1x100G
+ existing_port_breakout_config:
+ - path: "data/sonic-port-breakout:sonic-port-breakout/BREAKOUT_CFG/BREAKOUT_CFG_LIST"
+ response:
+ code: 200
+ value:
+ sonic-port-breakout:BREAKOUT_CFG_LIST:
+ - port: 1/10
+ brkout_mode: 1x100G
+ - path: "data/openconfig-platform:components/component=1%2f10"
+ response:
+ code: 200
+ value:
+ openconfig-platform:component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ breakout-speed: openconfig-if-ethernet:SPEED_100GB
+ num-breakouts: 1
+ expected_config_requests:
+ - path: "data/openconfig-platform:components/component=1%2f10/port/openconfig-platform-port:breakout-mode"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ config:
+ - name: 1/10
+ mode: 1x100G
+ - name: 1/12
+ mode: 4x25G
+ state: replaced
+ existing_port_breakout_config:
+ - path: "data/sonic-port-breakout:sonic-port-breakout/BREAKOUT_CFG/BREAKOUT_CFG_LIST"
+ response:
+ code: 200
+ value:
+ sonic-port-breakout:BREAKOUT_CFG_LIST:
+ - port: 1/10
+ brkout_mode: 4x10G
+ - port: 1/11
+ brkout_mode: 1x100G
+ - path: "data/openconfig-platform:components/component=1%2f10"
+ response:
+ code: 200
+ value:
+ openconfig-platform:component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ breakout-speed: openconfig-if-ethernet:SPEED_10GB
+ num-breakouts: 4
+ - path: "data/openconfig-platform:components/component=1%2f11"
+ response:
+ code: 200
+ value:
+ openconfig-platform:component:
+ - name: 1/11
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ breakout-speed: openconfig-if-ethernet:SPEED_100GB
+ num-breakouts: 1
+ expected_config_requests:
+ - path: "data/openconfig-platform:components"
+ method: "patch"
+ data:
+ openconfig-platform:components:
+ component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ num-breakouts: 1
+ breakout-speed: SPEED_100GB
+ - path: "data/openconfig-platform:components"
+ method: "patch"
+ data:
+ openconfig-platform:components:
+ component:
+ - name: 1/12
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ num-breakouts: 4
+ breakout-speed: SPEED_25GB
+
+overridden_01:
+ module_args:
+ config:
+ - name: 1/10
+ mode: 1x100G
+ - name: 1/12
+ mode: 4x25G
+ state: overridden
+ existing_port_breakout_config:
+ - path: "data/sonic-port-breakout:sonic-port-breakout/BREAKOUT_CFG/BREAKOUT_CFG_LIST"
+ response:
+ code: 200
+ value:
+ sonic-port-breakout:BREAKOUT_CFG_LIST:
+ - port: 1/10
+ brkout_mode: 4x10G
+ - port: 1/11
+ brkout_mode: 1x100G
+ - path: "data/openconfig-platform:components/component=1%2f10"
+ response:
+ code: 200
+ value:
+ openconfig-platform:component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ breakout-speed: openconfig-if-ethernet:SPEED_10GB
+ num-breakouts: 4
+ - path: "data/openconfig-platform:components/component=1%2f11"
+ response:
+ code: 200
+ value:
+ openconfig-platform:component:
+ - name: 1/11
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ breakout-speed: openconfig-if-ethernet:SPEED_100GB
+ num-breakouts: 1
+ expected_config_requests:
+ - path: "data/openconfig-platform:components/component=1%2f11/port/openconfig-platform-port:breakout-mode"
+ method: "delete"
+ - path: "data/openconfig-platform:components"
+ method: "patch"
+ data:
+ openconfig-platform:components:
+ component:
+ - name: 1/10
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ num-breakouts: 1
+ breakout-speed: SPEED_100GB
+ - path: "data/openconfig-platform:components"
+ method: "patch"
+ data:
+ openconfig-platform:components:
+ component:
+ - name: 1/12
+ port:
+ openconfig-platform-port:breakout-mode:
+ groups:
+ group:
+ - index: 1
+ config:
+ index: 1
+ num-breakouts: 4
+ breakout-speed: SPEED_25GB
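Note on the sonic_port_breakout fixture above: replaced_01 changes the breakout mode of port 1/10 from 4x10G to 1x100G and sets 1/12 to 4x25G, each expressed as a separate patch of openconfig-platform:components. The corresponding task, drawn from that case's module_args (module name assumed from the fixture filename):

    - name: Replace port breakout modes
      dellemc.enterprise_sonic.sonic_port_breakout:
        config:
          - name: 1/10
            mode: 1x100G
          - name: 1/12
            mode: 4x25G
        state: replaced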
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_group.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_group.yaml
new file mode 100644
index 000000000..e86a7ec4b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_port_group.yaml
@@ -0,0 +1,115 @@
+---
+merged_01:
+ module_args:
+ config:
+ - id: 1
+ speed: SPEED_10GB
+ - id: 9
+ speed: SPEED_10GB
+ existing_port_group_config:
+ - path: "data/openconfig-port-group:port-groups/port-group"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-group:
+ - id: 1
+ config:
+ speed: SPEED_100GB
+ - id: 9
+ config:
+ speed: SPEED_100GB
+ expected_config_requests:
+ - path: "data/openconfig-port-group:port-groups/port-group"
+ method: "patch"
+ data:
+ openconfig-port-group:port-group:
+ - id: '1'
+ config:
+ id: '1'
+ speed: openconfig-if-ethernet:SPEED_10GB
+ - id: '9'
+ config:
+ id: '9'
+ speed: openconfig-if-ethernet:SPEED_10GB
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_port_group_config:
+ - path: "data/openconfig-port-group:port-groups/port-group"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-group:
+ - id: 1
+ config:
+ speed: SPEED_100GB
+ - id: 9
+ config:
+ speed: SPEED_100GB
+ expected_config_requests:
+ - path: "data/openconfig-port-group:port-groups/port-group=1/config/speed"
+ method: "delete"
+ data:
+ - path: "data/openconfig-port-group:port-groups/port-group=9/config/speed"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - id: 1
+ speed: SPEED_100GB
+ existing_port_group_config:
+ - path: "data/openconfig-port-group:port-groups/port-group"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-group:
+ - id: 1
+ config:
+ speed: SPEED_100GB
+ - id: 9
+ config:
+ speed: SPEED_100GB
+ expected_config_requests:
+ - path: "data/openconfig-port-group:port-groups/port-group=1/config/speed"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - id: 1
+ speed: SPEED_10GB
+ - id: 5
+ speed: SPEED_40GB
+ - id: 9
+ speed: SPEED_100GB
+ existing_port_group_config:
+ - path: "data/openconfig-port-group:port-groups/port-group"
+ response:
+ code: 200
+ value:
+ openconfig-port-group:port-group:
+ - id: 1
+ config:
+ speed: SPEED_100GB
+ - id: 9
+ config:
+ speed: SPEED_100GB
+ expected_config_requests:
+ - path: "data/openconfig-port-group:port-groups/port-group"
+ method: "patch"
+ data:
+ openconfig-port-group:port-group:
+ - id: '1'
+ config:
+ id: '1'
+ speed: openconfig-if-ethernet:SPEED_10GB
+ - id: '5'
+ config:
+ id: '5'
+ speed: openconfig-if-ethernet:SPEED_40GB
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_prefix_lists.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_prefix_lists.yaml
new file mode 100644
index 000000000..beb7428d5
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_prefix_lists.yaml
@@ -0,0 +1,153 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: pfx1
+ afi: "ipv4"
+ prefixes:
+ - sequence: 10
+ prefix: "1.2.3.4/24"
+ action: "permit"
+ ge: 26
+ le: 30
+ - name: pfx6
+ afi: "ipv6"
+ prefixes:
+ - sequence: 25
+ action: "deny"
+ prefix: "40::300/124"
+ existing_prefix_lists_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 2000
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set"
+ method: "patch"
+ data:
+ openconfig-routing-policy:prefix-set:
+ - name: pfx1
+ config:
+ name: pfx1
+ mode: IPV4
+ openconfig-routing-policy-ext:extended-prefixes:
+ extended-prefix:
+ - ip-prefix: 1.2.3.4/24
+ sequence-number: 10
+ masklength-range: 26..30
+ config:
+ sequence-number: 10
+ ip-prefix: 1.2.3.4/24
+ masklength-range: 26..30
+ openconfig-routing-policy-ext:action: PERMIT
+ - name: pfx6
+ config:
+ name: pfx6
+ mode: IPV6
+ openconfig-routing-policy-ext:extended-prefixes:
+ extended-prefix:
+ - ip-prefix: 40::300/124
+ sequence-number: 25
+ masklength-range: exact
+ config:
+ sequence-number: 25
+ ip-prefix: 40::300/124
+ masklength-range: exact
+ openconfig-routing-policy-ext:action: DENY
+deleted_01:
+ module_args:
+ state: deleted
+ existing_prefix_lists_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets"
+ response:
+ code: 200
+ value:
+ openconfig-routing-policy:prefix-sets:
+ prefix-set:
+ - config:
+ mode: IPV4
+ name: pfx1
+ openconfig-routing-policy-ext:extended-prefixes:
+ extended-prefix:
+ - config:
+ action: PERMIT
+ ip-prefix: 1.2.3.4/8
+ masklength-range: 8..16
+ sequence-number: 10
+ ip-prefix: 1.2.3.4/8
+ masklength-range: 8..16
+ sequence-number: 10
+ name: pfx1
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: pfx1
+ afi: "ipv4"
+ prefixes:
+ - sequence: 10
+ prefix: "1.2.3.4/8"
+ action: "permit"
+ ge: 9
+ le: 16
+ - name: pfx2
+ afi: "ipv6"
+ prefixes:
+ - sequence: 11
+ action: "permit"
+ prefix: "11::22/124"
+ existing_prefix_lists_config:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets"
+ response:
+ code: 200
+ value:
+ openconfig-routing-policy:prefix-sets:
+ prefix-set:
+ - config:
+ mode: IPV4
+ name: pfx1
+ openconfig-routing-policy-ext:extended-prefixes:
+ extended-prefix:
+ - config:
+ action: PERMIT
+ ip-prefix: 1.2.3.4/8
+ masklength-range: 9..16
+ sequence-number: 10
+ ip-prefix: 1.2.3.4/8
+ masklength-range: 9..16
+ sequence-number: 10
+ name: pfx1
+ - config:
+ mode: IPV6
+ name: pfx2
+ openconfig-routing-policy-ext:extended-prefixes:
+ extended-prefix:
+ - config:
+ action: PERMIT
+ ip-prefix: 11::22/124
+ masklength-range: exact
+ sequence-number: 11
+ ip-prefix: 11::22/124
+ masklength-range: exact
+ sequence-number: 11
+ name: pfx2
+ expected_config_requests:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set=pfx1/openconfig-routing-policy-ext:extended-prefixes/extended-prefix=10,1.2.3.4%2F8,9..16"
+ method: "delete"
+ data:
+ - path: "data/openconfig-routing-policy:routing-policy/defined-sets/prefix-sets/prefix-set=pfx2/openconfig-routing-policy-ext:extended-prefixes/extended-prefix=11,11::22%2F124,exact"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_radius_server.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_radius_server.yaml
new file mode 100644
index 000000000..7e5a6e606
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_radius_server.yaml
@@ -0,0 +1,207 @@
+---
+merged_01:
+ module_args:
+ config:
+ auth_type: chap
+ key: chap
+ nas_ip: 1.2.3.4
+ statistics: true
+ timeout: 10
+ retransmit: 3
+ servers:
+ host:
+ - name: localhost
+ auth_type: mschapv2
+ key: local
+ priority: 2
+ port: 52
+ retransmit: 2
+ timeout: 20
+ source_interface: Eth1/2
+ vrf: mgmt
+ existing_radius_server_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: 'Eth1/1'
+ config:
+ mtu: 2000
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config"
+ response:
+ code: 200
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config"
+ response:
+ code: 200
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ auth-type: 'chap'
+ secret-key: 'chap'
+ timeout: 10
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config"
+ method: "patch"
+ data:
+ openconfig-aaa-radius-ext:config:
+ nas-ip-address: 1.2.3.4
+ retransmit-attempts: 3
+ statistics: True
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers"
+ method: "patch"
+ data:
+ openconfig-system:servers:
+ server:
+ - address: localhost
+ config:
+ address: localhost
+ auth-type: mschapv2
+ priority: 2
+ vrf: mgmt
+ timeout: 20
+ radius:
+ config:
+ auth-port: 52
+ secret-key: local
+ retransmit-attempts: 2
+ openconfig-aaa-radius-ext:source-interface: Eth1/2
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_radius_server_config:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ auth-type: 'chap'
+ secret-key: 'chap'
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config"
+ response:
+ code: 200
+ value:
+ openconfig-aaa-radius-ext:config:
+ nas-ip-address: 1.2.3.4
+ retransmit-attempts: 3
+ statistics: True
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers"
+ response:
+ code: 200
+ value:
+ openconfig-system:servers:
+ server:
+ - address: localhost
+ config:
+ address: localhost
+ auth-type: mschapv2
+ priority: 2
+ vrf: mgmt
+ timeout: 20
+ radius:
+ config:
+ auth-port: 52
+ secret-key: local
+ retransmit-attempts: 2
+ openconfig-aaa-radius-ext:source-interface: Eth1/2
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config/auth-type"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config/secret-key"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/nas-ip-address"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/retransmit-attempts"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/statistics"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers/server=localhost"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ auth_type: chap
+ key: chap
+ nas_ip: 1.2.3.4
+ statistics: true
+ timeout: 10
+ retransmit: 3
+ servers:
+ host:
+ - name: localhost
+ auth_type: mschapv2
+ key: local
+ priority: 2
+ port: 52
+ retransmit: 2
+ timeout: 20
+ source_interface: Eth1/2
+ vrf: mgmt
+ existing_radius_server_config:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ auth-type: 'chap'
+ secret-key: 'chap'
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config"
+ response:
+ code: 200
+ value:
+ openconfig-aaa-radius-ext:config:
+ nas-ip-address: 1.2.3.4
+ retransmit-attempts: 3
+ statistics: True
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers"
+ response:
+ code: 200
+ value:
+ openconfig-system:servers:
+ server:
+ - address: localhost
+ config:
+ address: localhost
+ auth-type: mschapv2
+ priority: 2
+ vrf: mgmt
+ timeout: 20
+ radius:
+ config:
+ auth-port: 52
+ secret-key: local
+ retransmit-attempts: 2
+ openconfig-aaa-radius-ext:source-interface: Eth1/2
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config/auth-type"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/config/secret-key"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/nas-ip-address"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/retransmit-attempts"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/openconfig-aaa-radius-ext:radius/config/statistics"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=RADIUS/servers/server=localhost"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_static_routes.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_static_routes.yaml
new file mode 100644
index 000000000..c83b622fa
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_static_routes.yaml
@@ -0,0 +1,340 @@
+---
+merged_01:
+ module_args:
+ config:
+ - vrf_name: 'default'
+ static_list:
+ - prefix: '2.0.0.0/8'
+ next_hops:
+ - index:
+ interface: 'Ethernet4'
+ metric: 1
+ tag: 2
+ track: 3
+ - index:
+ next_hop: '3.0.0.0'
+ metric: 2
+ tag: 4
+ track: 8
+ existing_static_routes_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "patch"
+ data:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: Ethernet4
+ config:
+ index: Ethernet4
+ next-hop:
+ metric: 1
+ track: 3
+ tag: 2
+ interface-ref:
+ config:
+ interface: Ethernet4
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "patch"
+ data:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ metric: 2
+ track: 8
+ tag: 4
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_static_routes_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ metric: 2
+ track: 8
+ tag: 4
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - vrf_name: 'default'
+ static_list:
+ - prefix: '2.0.0.0/8'
+ next_hops:
+ - index:
+ next_hop: '3.0.0.0'
+ metric: 2
+ tag: 4
+ track: 8
+ existing_static_routes_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ metric: 2
+ track: 8
+ tag: 4
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=VrfReg1/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes/static=2.0.0.0%2F8/next-hops/next-hop=3.0.0.0/config/metric"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes/static=2.0.0.0%2F8/next-hops/next-hop=3.0.0.0/config/tag"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes/static=2.0.0.0%2F8/next-hops/next-hop=3.0.0.0/config/track"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - vrf_name: 'default'
+ static_list:
+ - prefix: '5.0.0.0/8'
+ next_hops:
+ - index:
+ next_hop: '6.0.0.0'
+ metric: 4
+ tag: 5
+ track: 6
+ existing_static_routes_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ metric: 8
+ track: 10
+ tag: 12
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "patch"
+ data:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 5.0.0.0/8
+ config:
+ prefix: 5.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 6.0.0.0
+ config:
+ index: 6.0.0.0
+ next-hop: 6.0.0.0
+ blackhole: false
+ metric: 4
+ track: 6
+ tag: 5
+
+replaced_02:
+ module_args:
+ state: replaced
+ config:
+ - vrf_name: 'default'
+ static_list:
+ - prefix: '2.0.0.0/8'
+ next_hops:
+ - index:
+ next_hop: '3.0.0.0'
+ metric: 4
+ tag: 5
+ track: 6
+ existing_static_routes_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ metric: 8
+ track: 10
+ tag: 12
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "patch"
+ data:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ blackhole: false
+ metric: 4
+ track: 6
+ tag: 5
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ - vrf_name: 'default'
+ static_list:
+ - prefix: '5.0.0.0/8'
+ next_hops:
+ - index:
+ next_hop: '6.0.0.0'
+ metric: 4
+ tag: 5
+ track: 6
+ existing_static_routes_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 2.0.0.0/8
+ config:
+ prefix: 2.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 3.0.0.0
+ config:
+ index: 3.0.0.0
+ next-hop: 3.0.0.0
+ metric: 8
+ track: 10
+ tag: 12
+ expected_config_requests:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "delete"
+ data:
+ - path: "/data/openconfig-network-instance:network-instances/network-instance=default/protocols/protocol=STATIC,static/static-routes"
+ method: "patch"
+ data:
+ openconfig-network-instance:static-routes:
+ static:
+ - prefix: 5.0.0.0/8
+ config:
+ prefix: 5.0.0.0/8
+ next-hops:
+ next-hop:
+ - index: 6.0.0.0
+ config:
+ index: 6.0.0.0
+ next-hop: 6.0.0.0
+ blackhole: false
+ metric: 4
+ track: 6
+ tag: 5
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_stp.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_stp.yaml
new file mode 100644
index 000000000..4aa7c6974
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_stp.yaml
@@ -0,0 +1,991 @@
+---
+merged_01:
+ module_args:
+ config:
+ global:
+ enabled_protocol: mst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 15
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: Ethernet20
+ edge_port: true
+ link_type: shared
+ guard: loop
+ bpdu_guard: true
+ bpdu_filter: true
+ portfast: true
+ uplink_fast: true
+ shutdown: true
+ cost: 20
+ port_priority: 30
+ stp_enable: true
+ mstp:
+ mst_name: mst1
+ revision: 1
+ max_hop: 3
+ hello_time: 6
+ max_age: 9
+ fwd_delay: 12
+ mst_instances:
+ - mst_id: 1
+ bridge_priority: 2048
+ vlans:
+ - 1
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 60
+ port_priority: 65
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/global"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:global:
+ config:
+ enabled-protocol:
+ - 'MSTP'
+ loop-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:disabled-vlans:
+ - '4..6'
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+ - path: "data/openconfig-spanning-tree:stp/interfaces"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ edge-port: EDGE_ENABLE
+ link-type: SHARED
+ guard: LOOP
+ bpdu-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:uplink-fast: True
+ openconfig-spanning-tree-ext:bpdu-guard-port-shutdown: True
+ openconfig-spanning-tree-ext:cost: 20
+ openconfig-spanning-tree-ext:port-priority: 30
+ openconfig-spanning-tree-ext:spanning-tree-enable: True
+ - path: "data/openconfig-spanning-tree:stp/mstp"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:mstp:
+ config:
+ name: mst1
+ revision: 1
+ max-hop: 3
+ hello-time: 6
+ max-age: 9
+ forwarding-delay: 12
+ mst-instances:
+ mst-instance:
+ - mst-id: 1
+ config:
+ mst-id: 1
+ bridge-priority: 2048
+ vlan:
+ - 1
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 60
+ port-priority: 65
+merged_02:
+ module_args:
+ config:
+ global:
+ enabled_protocol: pvst
+ bpdu_filter: true
+ root_guard_timeout: 25
+ portfast: true
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ pvst:
+ - vlan_id: 1
+ hello_time: 4
+ max_age: 6
+ fwd_delay: 8
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 10
+ port_priority: 50
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/global"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:global:
+ config:
+ enabled-protocol: ['PVST']
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:rootguard-timeout: 25
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:forwarding-delay: 20
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst"
+ method: "patch"
+ data:
+ openconfig-spanning-tree-ext:pvst:
+ vlans:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 4
+ max-age: 6
+ forwarding-delay: 8
+ bridge-priority: 4096
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 10
+ port-priority: 50
+merged_03:
+ module_args:
+ config:
+ global:
+ enabled_protocol: rapid_pvst
+ bpdu_filter: true
+ root_guard_timeout: 25
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ rapid_pvst:
+ - vlan_id: 1
+ hello_time: 4
+ max_age: 6
+ fwd_delay: 8
+ bridge_priority: 4096
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 10
+ port_priority: 50
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/global"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:global:
+ config:
+ enabled-protocol: ['RAPID_PVST']
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:rootguard-timeout: 25
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:forwarding-delay: 20
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:rapid-pvst:
+ vlan:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 4
+ max-age: 6
+ forwarding-delay: 8
+ bridge-priority: 4096
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 10
+ port-priority: 50
+replaced_01:
+ module_args:
+ config:
+ mstp:
+ mst_name: mst1
+ revision: 1
+ max_hop: 3
+ hello_time: 6
+ max_age: 9
+ fwd_delay: 12
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ mstp:
+ config:
+ name: mst2
+ revision: 2
+ max-hop: 4
+ hello-time: 7
+ max-age: 10
+ forwarding-delay: 13
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/mstp/config"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/mstp/mst-instances"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/mstp"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:mstp:
+ config:
+ name: mst1
+ revision: 1
+ max-hop: 3
+ hello-time: 6
+ max-age: 9
+ forwarding-delay: 12
+replaced_02:
+ module_args:
+ config:
+ mstp:
+ mst_instances:
+ - mst_id: 1
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 60
+ port_priority: 65
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ mstp:
+ mst-instances:
+ mst-instance:
+ - mst-id: 1
+ config:
+ mst-id: 1
+ vlan:
+ - 1
+ bridge-priority: 2048
+ interfaces:
+ interface:
+                        - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 10
+ port-priority: 50
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/mstp/mst-instances/mst-instance=1/interfaces/interface=Ethernet20"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/mstp"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:mstp:
+ mst-instances:
+ mst-instance:
+ - mst-id: 1
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 60
+ port-priority: 65
+replaced_03:
+ module_args:
+ config:
+ mstp:
+ mst_instances:
+ - mst_id: 1
+ bridge_priority: 1024
+ vlans:
+ - 2-3
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ mstp:
+ mst-instances:
+ mst-instance:
+ - mst-id: 1
+ config:
+ mst-id: 1
+ vlan:
+ - 1
+ bridge-priority: 2048
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/mstp/mst-instances/mst-instance=1"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/mstp"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:mstp:
+ mst-instances:
+ mst-instance:
+ - mst-id: 1
+ config:
+ mst-id: 1
+ bridge-priority: 1024
+ vlan:
+ - '2..3'
+replaced_04:
+ module_args:
+ config:
+ pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ - vlan_id: 2
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 2
+ port_priority: 60
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ openconfig-spanning-tree-ext:pvst:
+ vlans:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 6
+ max-age: 7
+ forwarding-delay: 8
+ bridge-priority: 4096
+ interfaces:
+ interface:
+ - name: Ethernet24
+ config:
+ name: Ethernet24
+ cost: 40
+ port-priority: 45
+ - vlan-id: 2
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 1
+ port-priority: 55
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/hello-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/max-age"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/forwarding-delay"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/bridge-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/interfaces/interface=Ethernet24"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=2/interfaces/interface=Ethernet20"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst"
+ method: "patch"
+ data:
+ openconfig-spanning-tree-ext:pvst:
+ vlans:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 7
+ max-age: 8
+ forwarding-delay: 9
+ bridge-priority: 8192
+ - vlan-id: 2
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 2
+ port-priority: 60
+replaced_05:
+ module_args:
+ config:
+ rapid_pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ - vlan_id: 2
+ interfaces:
+ - intf_name: Ethernet20
+ cost: 2
+ port_priority: 60
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ rapid-pvst:
+ vlan:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 6
+ max-age: 7
+ forwarding-delay: 8
+ bridge-priority: 4096
+ interfaces:
+ interface:
+ - name: Ethernet24
+ config:
+ name: Ethernet24
+ cost: 40
+ port-priority: 45
+ - vlan-id: 2
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 1
+ port-priority: 55
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/hello-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/max-age"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/forwarding-delay"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/bridge-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/interfaces/interface=Ethernet24"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=2/interfaces/interface=Ethernet20"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:rapid-pvst:
+ vlan:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 7
+ max-age: 8
+ forwarding-delay: 9
+ bridge-priority: 8192
+ - vlan-id: 2
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 2
+ port-priority: 60
+replaced_06:
+ module_args:
+ config:
+ global:
+ enabled_protocol: mst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 15
+ bridge_priority: 4096
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ global:
+ config:
+ enabled-protocol:
+ - openconfig-spanning-tree-ext:PVST
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:rootguard-timeout: 25
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:forwarding-delay: 20
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+ expected_config_requests:
+ - path: "/data/openconfig-spanning-tree:stp"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:global:
+ config:
+ enabled-protocol:
+ - 'MSTP'
+ loop-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:disabled-vlans:
+ - '4..6'
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+replaced_07:
+ module_args:
+ config:
+ interfaces:
+ - intf_name: Ethernet20
+ edge_port: true
+ link_type: shared
+ guard: loop
+ bpdu_guard: true
+ bpdu_filter: true
+ portfast: true
+ uplink_fast: true
+ shutdown: true
+ cost: 20
+ port_priority: 30
+ state: replaced
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ edge-port: openconfig-spanning-tree-types:EDGE_DISABLE
+ link-type: P2P
+ guard: ROOT
+ bpdu-guard: false
+ bpdu-filter: false
+ openconfig-spanning-tree-ext:portfast: false
+ openconfig-spanning-tree-ext:uplink-fast: false
+ openconfig-spanning-tree-ext:bpdu-guard-port-shutdown: false
+ openconfig-spanning-tree-ext:cost: 15
+ openconfig-spanning-tree-ext:port-priority: 25
+ openconfig-spanning-tree-ext:spanning-tree-enable: false
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ edge-port: EDGE_ENABLE
+ link-type: SHARED
+ guard: LOOP
+ bpdu-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:uplink-fast: True
+ openconfig-spanning-tree-ext:bpdu-guard-port-shutdown: True
+ openconfig-spanning-tree-ext:cost: 20
+ openconfig-spanning-tree-ext:port-priority: 30
+ openconfig-spanning-tree-ext:spanning-tree-enable: True
+overridden_01:
+ module_args:
+ config:
+ global:
+ enabled_protocol: mst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 15
+ bridge_priority: 4096
+ state: overridden
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ global:
+ config:
+ enabled-protocol:
+ - openconfig-spanning-tree-ext:PVST
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:rootguard-timeout: 25
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:forwarding-delay: 20
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:global:
+ config:
+ enabled-protocol:
+ - 'MSTP'
+ loop-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:disabled-vlans:
+ - '4..6'
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+deleted_01:
+ module_args:
+ config:
+ global:
+ enabled_protocol: pvst
+ loop_guard: true
+ bpdu_filter: true
+ disabled_vlans:
+ - 4-6
+ root_guard_timeout: 25
+ portfast: true
+ hello_time: 5
+ max_age: 10
+ fwd_delay: 20
+ bridge_priority: 4096
+ state: deleted
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ global:
+ config:
+ enabled-protocol:
+ - openconfig-spanning-tree-ext:PVST
+ loop-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:disabled-vlans:
+ - 4-6
+ openconfig-spanning-tree-ext:rootguard-timeout: 25
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:hello-time: 5
+ openconfig-spanning-tree-ext:max-age: 10
+ openconfig-spanning-tree-ext:forwarding-delay: 20
+ openconfig-spanning-tree-ext:bridge-priority: 4096
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/global/config/enabled-protocol"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/loop-guard"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/bpdu-filter"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:disabled-vlans=4..6"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:rootguard-timeout"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:portfast"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:hello-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:max-age"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:forwarding-delay"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/global/config/openconfig-spanning-tree-ext:bridge-priority"
+ method: "delete"
+ data:
+deleted_02:
+ module_args:
+ config:
+ interfaces:
+ - intf_name: Ethernet20
+ edge_port: true
+ link_type: point-to-point
+ guard: root
+ bpdu_guard: true
+ bpdu_filter: true
+ portfast: true
+ uplink_fast: true
+ shutdown: true
+ cost: 15
+ port_priority: 25
+ stp_enable: false
+ - intf_name: Ethernet24
+ state: deleted
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ edge-port: openconfig-spanning-tree-types:EDGE_ENABLE
+ link-type: P2P
+ guard: ROOT
+ bpdu-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:uplink-fast: True
+ openconfig-spanning-tree-ext:bpdu-guard-port-shutdown: True
+ openconfig-spanning-tree-ext:cost: 15
+ openconfig-spanning-tree-ext:port-priority: 25
+ openconfig-spanning-tree-ext:spanning-tree-enable: False
+ - name: Ethernet24
+ config:
+ name: Ethernet24
+ edge-port: openconfig-spanning-tree-types:EDGE_ENABLE
+ link-type: P2P
+ guard: ROOT
+ bpdu-guard: True
+ bpdu-filter: True
+ openconfig-spanning-tree-ext:portfast: True
+ openconfig-spanning-tree-ext:uplink-fast: True
+ openconfig-spanning-tree-ext:bpdu-guard-port-shutdown: True
+ openconfig-spanning-tree-ext:cost: 15
+ openconfig-spanning-tree-ext:port-priority: 25
+ openconfig-spanning-tree-ext:spanning-tree-enable: True
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/edge-port"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/link-type"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/guard"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/bpdu-guard"
+ method: "patch"
+ data:
+ openconfig-spanning-tree:bpdu-guard: False
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/bpdu-filter"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/openconfig-spanning-tree-ext:portfast"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/openconfig-spanning-tree-ext:uplink-fast"
+ method: "patch"
+ data:
+ openconfig-spanning-tree-ext:uplink-fast: False
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/openconfig-spanning-tree-ext:bpdu-guard-port-shutdown"
+ method: "patch"
+ data:
+ openconfig-spanning-tree-ext:bpdu-guard-port-shutdown: False
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/openconfig-spanning-tree-ext:cost"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/openconfig-spanning-tree-ext:port-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet20/config/openconfig-spanning-tree-ext:spanning-tree-enable"
+ method: "patch"
+ data:
+ openconfig-spanning-tree-ext:spanning-tree-enable: True
+ - path: "data/openconfig-spanning-tree:stp/interfaces/interface=Ethernet24"
+ method: "delete"
+ data:
+deleted_03:
+ module_args:
+ config:
+ pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ interfaces:
+ - intf_name: Ethernet20
+ - intf_name: Ethernet24
+ cost: 3
+ port_priority: 50
+ state: deleted
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ openconfig-spanning-tree-ext:pvst:
+ vlans:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 7
+ max-age: 8
+ forwarding-delay: 9
+ bridge-priority: 8192
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 2
+ port-priority: 60
+ - name: Ethernet24
+ config:
+ name: Ethernet24
+ cost: 3
+ port-priority: 50
+ - vlan-id: 2
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 2
+ port-priority: 60
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/hello-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/max-age"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/forwarding-delay"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/config/bridge-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/interfaces/interface=Ethernet24/config/cost"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/interfaces/interface=Ethernet24/config/port-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/openconfig-spanning-tree-ext:pvst/vlans=1/interfaces/interface=Ethernet20"
+ method: "delete"
+ data:
+deleted_04:
+ module_args:
+ config:
+ rapid_pvst:
+ - vlan_id: 1
+ hello_time: 7
+ max_age: 8
+ fwd_delay: 9
+ bridge_priority: 8192
+ interfaces:
+ - intf_name: Ethernet20
+ - intf_name: Ethernet24
+ cost: 3
+ port_priority: 50
+ state: deleted
+ existing_stp_config:
+ - path: "/data/openconfig-spanning-tree:stp"
+ response:
+ code: 200
+ value:
+ openconfig-spanning-tree:stp:
+ rapid-pvst:
+ vlan:
+ - vlan-id: 1
+ config:
+ vlan-id: 1
+ hello-time: 7
+ max-age: 8
+ forwarding-delay: 9
+ bridge-priority: 8192
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 2
+ port-priority: 60
+ - name: Ethernet24
+ config:
+ name: Ethernet24
+ cost: 3
+ port-priority: 50
+ - vlan-id: 2
+ interfaces:
+ interface:
+ - name: Ethernet20
+ config:
+ name: Ethernet20
+ cost: 2
+ port-priority: 60
+ expected_config_requests:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/hello-time"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/max-age"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/forwarding-delay"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/config/bridge-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/interfaces/interface=Ethernet24/config/cost"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/interfaces/interface=Ethernet24/config/port-priority"
+ method: "delete"
+ data:
+ - path: "data/openconfig-spanning-tree:stp/rapid-pvst/vlan=1/interfaces/interface=Ethernet20"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_system.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_system.yaml
new file mode 100644
index 000000000..64b705d06
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_system.yaml
@@ -0,0 +1,232 @@
+---
+merged_01:
+ module_args:
+ config:
+ hostname: SONIC_Test1
+ interface_naming: standard
+ anycast_address:
+ ipv6: true
+ ipv4: true
+ mac_address: aa:bb:cc:dd:ee:ff
+ existing_system_config:
+ - path: "data/openconfig-system:system/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ hostname: abcd_host
+ anycast_address:
+ IPv4: true
+ mac_address: 11:22:33:44:55:66
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: native
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/config"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ hostname: SONIC_Test1
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode"
+ method: "patch"
+ data:
+ sonic-device-metadata:intf_naming_mode: standard
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ method: "patch"
+ data:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: enable
+ gwmac: aa:bb:cc:dd:ee:ff
+ table_distinguisher: IP
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_system_config:
+ - path: "data/openconfig-system:system/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ hostname: SONIC_Test1
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: standard
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ response:
+ code: 200
+ value:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: enable
+ gwmac: aa:bb:cc:dd:ee:ff
+ table_distinguisher: IP
+ expected_config_requests:
+ - path: "data/openconfig-system:system/config/"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ hostname: sonic
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode"
+ method: "delete"
+ data:
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST=IP/gwmac"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ hostname: SONIC_Test1
+ interface_naming: standard
+ anycast_address:
+ ipv6: true
+ ipv4: true
+ mac_address: aa:bb:cc:dd:ee:ff
+
+ existing_system_config:
+ - path: "data/openconfig-system:system/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ hostname: SONIC_Test1
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: standard
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ response:
+ code: 200
+ value:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: enable
+ gwmac: aa:bb:cc:dd:ee:ff
+ table_distinguisher: IP
+ expected_config_requests:
+ - path: "data/openconfig-system:system/config/"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ hostname: sonic
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode"
+ method: "delete"
+ data:
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST=IP/gwmac"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ hostname: SONIC_Test11
+ interface_naming: native
+ anycast_address:
+ ipv6: False
+ ipv4: true
+ mac_address: 11:22:33:44:55:66
+ existing_system_config:
+ - path: "data/openconfig-system:system/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ hostname: abcd_host
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: standard
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ response:
+ code: 200
+ value:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: enable
+ gwmac: aa:bb:cc:dd:ee:ff
+ table_distinguisher: IP
+ expected_config_requests:
+ - path: "data/openconfig-system:system/config"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ hostname: SONIC_Test11
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode"
+ method: "patch"
+ data:
+ sonic-device-metadata:intf_naming_mode: native
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ method: "patch"
+ data:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: disable
+ gwmac: 11:22:33:44:55:66
+ table_distinguisher: IP
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ hostname: SONIC_Test11
+ interface_naming: native
+ anycast_address:
+ ipv6: False
+ ipv4: true
+ mac_address: 11:22:33:44:55:66
+ existing_system_config:
+ - path: "data/openconfig-system:system/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ hostname: abcd_host
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: standard
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ response:
+ code: 200
+ value:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: enable
+ gwmac: aa:bb:cc:dd:ee:ff
+ table_distinguisher: IP
+ expected_config_requests:
+ - path: "data/openconfig-system:system/config"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ hostname: SONIC_Test11
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost/intf_naming_mode"
+ method: "patch"
+ data:
+ sonic-device-metadata:intf_naming_mode: native
+ - path: "data/sonic-sag:sonic-sag/SAG_GLOBAL/SAG_GLOBAL_LIST/"
+ method: "patch"
+ data:
+ sonic-sag:SAG_GLOBAL_LIST:
+ - IPv4: enable
+ IPv6: disable
+ gwmac: 11:22:33:44:55:66
+ table_distinguisher: IP
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_tacacs_server.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_tacacs_server.yaml
new file mode 100644
index 000000000..e789252c7
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_tacacs_server.yaml
@@ -0,0 +1,172 @@
+---
+merged_01:
+ module_args:
+ config:
+ auth_type: pap
+ key: papTest
+ source_interface: Eth1/2
+ timeout: 10
+ servers:
+ host:
+ - name: 1.2.3.4
+ auth_type: pap
+ key: 1234
+ port: 99
+ priority: 88
+ timeout: 77
+ vrf: VrfReg1
+ existing_tacacs_server_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config"
+ response:
+ code: 200
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config"
+ method: "patch"
+ data:
+ openconfig-system:config:
+ auth-type: pap
+ secret-key: papTest
+ source-interface: Eth1/2
+ timeout: 10
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers"
+ method: "patch"
+ data:
+ openconfig-system:servers:
+ server:
+ - address: 1.2.3.4
+ config:
+ address: 1.2.3.4
+ auth-type: pap
+ priority: 88
+ vrf: VrfReg1
+ timeout: 77
+ tacacs:
+ config:
+ port: 99
+ secret-key: '1234'
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_tacacs_server_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ auth-type: pap
+ secret-key: papTest
+ source-interface: Eth1/2
+ timeout: 10
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers"
+ response:
+ code: 200
+ value:
+ openconfig-system:servers:
+ server:
+ - address: 1.2.3.4
+ config:
+ address: 1.2.3.4
+ auth-type: pap
+ priority: 88
+ vrf: VrfReg1
+ timeout: 77
+ tacacs:
+ config:
+ port: 99
+ secret-key: '1234'
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/secret-key"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/source-interface"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/timeout"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers/server=1.2.3.4"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ auth_type: pap
+ key: papTest
+ source_interface: Eth1/2
+ timeout: 10
+ servers:
+ host:
+ - name: 1.2.3.4
+ auth_type: pap
+ key: 1234
+ port: 99
+ priority: 88
+ timeout: 77
+ vrf: VrfReg1
+ existing_tacacs_server_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - vrf_name: VrfReg1
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config"
+ response:
+ code: 200
+ value:
+ openconfig-system:config:
+ auth-type: pap
+ secret-key: papTest
+ source-interface: Eth1/2
+ timeout: 10
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers"
+ response:
+ code: 200
+ value:
+ openconfig-system:servers:
+ server:
+ - address: 1.2.3.4
+ config:
+ address: 1.2.3.4
+ auth-type: pap
+ priority: 88
+ vrf: VrfReg1
+ timeout: 77
+ tacacs:
+ config:
+ port: 99
+ secret-key: '1234'
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/secret-key"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/source-interface"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/config/timeout"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/server-groups/server-group=TACACS/servers/server=1.2.3.4"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_users.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_users.yaml
new file mode 100644
index 000000000..afeca94c0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_users.yaml
@@ -0,0 +1,93 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: sysadmin
+ role: admin
+ password: admin
+ update_password: always
+ - name: sysoperator
+ role: operator
+ password: operator
+ update_password: always
+ existing_users_config:
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: native
+ - path: "data/openconfig-system:system/aaa/authentication/users"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/authentication/users/user=sysadmin"
+ method: "patch"
+ data:
+ openconfig-system:user:
+ - username: sysadmin
+ config:
+ username: sysadmin
+ role: admin
+ password: admin
+ password-hashed: ''
+ - path: "data/openconfig-system:system/aaa/authentication/users/user=sysoperator"
+ method: "patch"
+ data:
+ openconfig-system:user:
+ - username: sysoperator
+ config:
+ username: sysoperator
+ role: operator
+ password: operator
+ password-hashed: ''
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_users_config:
+ - path: "data/openconfig-system:system/aaa/authentication/users"
+ response:
+ code: 200
+ value:
+ openconfig-system:users:
+ user:
+ - username: sysadmin
+ config:
+ role: admin
+ - username: sysoperator
+ config:
+ role: operator
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/authentication/users/user=sysadmin"
+ method: "delete"
+ data:
+ - path: "data/openconfig-system:system/aaa/authentication/users/user=sysoperator"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: sysadmin
+ role: admin
+ password: admin
+ update_password: always
+ existing_users_config:
+ - path: "data/openconfig-system:system/aaa/authentication/users"
+ response:
+ code: 200
+ value:
+ openconfig-system:users:
+ user:
+ - username: sysadmin
+ config:
+ role: admin
+ - username: sysoperator
+ config:
+ role: operator
+ expected_config_requests:
+ - path: "data/openconfig-system:system/aaa/authentication/users/user=sysadmin"
+ method: "delete"
+ data:
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vlans.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vlans.yaml
new file mode 100644
index 000000000..19e183475
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vlans.yaml
@@ -0,0 +1,190 @@
+---
+merged_01:
+ module_args:
+ config:
+ - vlan_id: 10
+ description: "Internal"
+ existing_vlans_config:
+ - path: "data/sonic-device-metadata:sonic-device-metadata/DEVICE_METADATA/DEVICE_METADATA_LIST=localhost"
+ response:
+ code: 200
+ value:
+ sonic-device-metadata:DEVICE_METADATA_LIST:
+ - intf_naming_mode: native
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan10
+ config:
+ name: Vlan10
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan10/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ name: Vlan10
+ description: Internal
+
+merged_02:
+ module_args:
+ config:
+ - vlan_id: 10
+ description: "Decr2"
+ - vlan_id: 20
+ existing_vlans_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - config:
+ name: Vlan10
+ description: Decr1
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan10
+ config:
+ name: Vlan10
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan20
+ config:
+ name: Vlan20
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan10/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ name: Vlan10
+ description: Decr2
+
+deleted_01_vlan_descr:
+ module_args:
+ state: deleted
+ config:
+ - vlan_id: 10
+ description: "Internal"
+ existing_vlans_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - config:
+ name: Vlan10
+ description: Internal
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan10/config/description"
+ method: "delete"
+ data:
+
+deleted_02_vlan:
+ module_args:
+ state: deleted
+ existing_vlans_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - config:
+ name: Vlan10
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan10"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - vlan_id: 10
+ description: "Desc2"
+ - vlan_id: 30
+ existing_vlans_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - config:
+ name: Vlan10
+ description: Decr1
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan10
+ config:
+ name: Vlan10
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan30
+ config:
+ name: Vlan30
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan10/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ name: Vlan10
+ description: Desc2
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ - vlan_id: 10
+ description: "Decr3"
+ - vlan_id: 40
+ existing_vlans_config:
+ - path: "data/openconfig-interfaces:interfaces"
+ response:
+ code: 200
+ value:
+ openconfig-interfaces:interfaces:
+ interface:
+ - config:
+ name: Vlan10
+ description: Decr1
+ expected_config_requests:
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan10
+ config:
+ name: Vlan10
+ - path: "data/openconfig-interfaces:interfaces"
+ method: "patch"
+ data:
+ openconfig-interfaces:interfaces:
+ interface:
+ - name: Vlan40
+ config:
+ name: Vlan40
+ - path: "data/openconfig-interfaces:interfaces/interface=Vlan10/config"
+ method: "patch"
+ data:
+ openconfig-interfaces:config:
+ name: Vlan10
+ description: Decr3
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vrfs.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vrfs.yaml
new file mode 100644
index 000000000..300d0858b
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vrfs.yaml
@@ -0,0 +1,249 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: Vrfcheck4
+ members:
+ interfaces:
+ - name: Eth1/5
+ - name: Eth1/6
+ - name: Vrfcheck3
+ members:
+ interfaces:
+ - name: Eth1/3
+ - name: Eth1/4
+ existing_vrfs_config:
+ - path: "data/openconfig-network-instance:network-instances"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: mgmt
+ - name: VrfCheck1
+ expected_config_requests:
+ - path: "data/openconfig-network-instance:network-instances"
+ method: "patch"
+ data:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: Vrfcheck4
+ config:
+ name: Vrfcheck4
+ enabled: True
+ type: L3VRF
+ - path: "data/openconfig-network-instance:network-instances"
+ method: "patch"
+ data:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: Vrfcheck3
+ config:
+ name: Vrfcheck3
+ enabled: True
+ type: L3VRF
+ - path: "data/openconfig-network-instance:network-instances/network-instance=Vrfcheck3/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-network-instance:interface:
+ - id: Eth1/3
+ config:
+ id: Eth1/3
+ - id: Eth1/4
+ config:
+ id: Eth1/4
+ - path: "data/openconfig-network-instance:network-instances/network-instance=Vrfcheck4/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-network-instance:interface:
+ - id: Eth1/5
+ config:
+ id: Eth1/5
+ - id: Eth1/6
+ config:
+ id: Eth1/6
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_vrfs_config:
+ - path: "data/openconfig-network-instance:network-instances"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: VrfCheck1
+ interfaces:
+ interface:
+ - id: Eth1/1
+ - id: Eth1/2
+ expected_config_requests:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck1"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: VrfCheck6
+ members:
+ interfaces:
+ - name: Eth1/1
+ - name: mgmt
+ members:
+ interfaces:
+ - name: Eth1/4
+ existing_vrfs_config:
+ - path: "data/openconfig-network-instance:network-instances"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: VrfCheck6
+ interfaces:
+ interface:
+ - id: Eth1/1
+ - id: Eth1/2
+ - name: mgmt
+ interfaces:
+ interface:
+ - id: Eth1/3
+ - id: Eth1/4
+ expected_config_requests:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck6/interfaces/interface=Eth1%2f1"
+ method: "delete"
+ data:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=mgmt/interfaces/interface=Eth1%2f4"
+ method: "delete"
+ data:
+
+replaced_01:
+ module_args:
+ state: replaced
+ config:
+ - name: VrfCheck6
+ members:
+ interfaces:
+ - name: Eth1/6
+ - name: VrfCheck7
+ members:
+ interfaces:
+ - name: Eth1/4
+ existing_vrfs_config:
+ - path: "data/openconfig-network-instance:network-instances"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: VrfCheck6
+ interfaces:
+ interface:
+ - id: Eth1/1
+ - id: Eth1/2
+ - name: VrfCheck7
+ interfaces:
+ interface:
+ - id: Eth1/3
+ - id: Eth1/4
+ expected_config_requests:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck6/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-network-instance:interface:
+ - id: Eth1/6
+ config:
+ id: Eth1/6
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck6/interfaces/interface=Eth1%2f1"
+ method: "delete"
+ data:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck6/interfaces/interface=Eth1%2f2"
+ method: "delete"
+ data:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck7/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-network-instance:interface:
+ - id: Eth1/4
+ config:
+ id: Eth1/4
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck7/interfaces/interface=Eth1%2f3"
+ method: "delete"
+ data:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck7/interfaces/interface=Eth1%2f4"
+ method: "delete"
+ data:
+
+overridden_01:
+ module_args:
+ state: overridden
+ config:
+ - name: VrfCheck6
+ members:
+ interfaces:
+ - name: Eth1/6
+ - name: VrfCheck7
+ members:
+ interfaces:
+ - name: Eth1/4
+ existing_vrfs_config:
+ - path: "data/openconfig-network-instance:network-instances"
+ response:
+ code: 200
+ value:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: VrfCheck11
+ interfaces:
+ interface:
+ - id: Eth1/1
+ - id: Eth1/2
+ - name: VrfCheck12
+ interfaces:
+ interface:
+ - id: Eth1/3
+ - id: Eth1/4
+ expected_config_requests:
+ - path: "data/openconfig-network-instance:network-instances"
+ method: "patch"
+ data:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: VrfCheck6
+ config:
+ name: VrfCheck6
+ enabled: True
+ type: L3VRF
+ - path: "data/openconfig-network-instance:network-instances"
+ method: "patch"
+ data:
+ openconfig-network-instance:network-instances:
+ network-instance:
+ - name: VrfCheck7
+ config:
+ name: VrfCheck7
+ enabled: True
+ type: L3VRF
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck11"
+ method: "delete"
+ data:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck12"
+ method: "delete"
+ data:
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck6/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-network-instance:interface:
+ - id: Eth1/6
+ config:
+ id: Eth1/6
+ - path: "data/openconfig-network-instance:network-instances/network-instance=VrfCheck7/interfaces/interface"
+ method: "patch"
+ data:
+ openconfig-network-instance:interface:
+ - id: Eth1/4
+ config:
+ id: Eth1/4
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vxlans.yaml b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vxlans.yaml
new file mode 100644
index 000000000..f60bd2c5a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/fixtures/sonic_vxlans.yaml
@@ -0,0 +1,418 @@
+---
+merged_01:
+ module_args:
+ config:
+ - name: vteptest1
+ source_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ evpn_nvo: nvo1
+ vlan_map:
+ - vni: 101
+ vlan: 11
+ - vni: 102
+ vlan: 12
+ vrf_map:
+ - vni: 101
+ vrf: Vrfcheck1
+ - vni: 102
+ vrf: Vrfcheck2
+ existing_vxlans_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: default
+ - path: "data/sonic-vxlan:sonic-vxlan"
+ response:
+ code: 200
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ response:
+ code: 200
+ expected_config_requests:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck1/vni"
+ method: "patch"
+ data:
+ sonic-vrf:vni: 101
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck2/vni"
+ method: "patch"
+ data:
+ sonic-vrf:vni: 102
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ method: "patch"
+ data:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan11
+ vlan: Vlan11
+ vni: 101
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_102_Vlan12
+ vlan: Vlan12
+ vni: 102
+
+deleted_01:
+ module_args:
+ state: deleted
+ existing_vxlans_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: Vrfcheck1
+ vni: 101
+ - vrf_name: Vrfcheck2
+ vni: 102
+ - vrf_name: default
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:sonic-vxlan:
+ VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan11
+ vni: 101
+ vlan: Vlan11
+ - name: vteptest1
+ mapname: map_102_Vlan12
+ vni: 102
+ vlan: Vlan12
+ expected_config_requests:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck1/vni"
+ method: "delete"
+ data:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck2/vni"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST=vteptest1"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST=vteptest1/primary_ip"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST=vteptest1/src_ip"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST=nvo1"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP/VXLAN_TUNNEL_MAP_LIST=vteptest1,map_101_Vlan11"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP/VXLAN_TUNNEL_MAP_LIST=vteptest1,map_102_Vlan12"
+ method: "delete"
+ data:
+
+deleted_02:
+ module_args:
+ state: deleted
+ config:
+ - name: vteptest1
+ source_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ evpn_nvo: nvo1
+ vlan_map:
+ - vni: 101
+ vlan: 11
+ - vni: 102
+ vlan: 12
+ vrf_map:
+ - vni: 101
+ vrf: Vrfcheck1
+ - vni: 102
+ vrf: Vrfcheck2
+ existing_vxlans_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: Vrfcheck1
+ vni: 101
+ - vrf_name: Vrfcheck2
+ vni: 102
+ - vrf_name: default
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:sonic-vxlan:
+ VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan11
+ vni: 101
+ vlan: Vlan11
+ - name: vteptest1
+ mapname: map_102_Vlan12
+ vni: 102
+ vlan: Vlan12
+ expected_config_requests:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck1/vni"
+ method: "delete"
+ data:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck2/vni"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST=nvo1"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST=vteptest1/primary_ip"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL/VXLAN_TUNNEL_LIST=vteptest1/src_ip"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP/VXLAN_TUNNEL_MAP_LIST=vteptest1,map_101_Vlan11"
+ method: "delete"
+ data:
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP/VXLAN_TUNNEL_MAP_LIST=vteptest1,map_102_Vlan12"
+ method: "delete"
+ data:
+
+replaced_02:
+ module_args:
+ state: replaced
+ config:
+ - name: vteptest1
+ source_ip: 1.1.1.9
+ primary_ip: 2.2.2.9
+ evpn_nvo: nvo1
+ vlan_map:
+ - vni: 101
+ vlan: 21
+ - vni: 102
+ vlan: 22
+ vrf_map:
+ - vni: 101
+ vrf: Vrfcheck3
+ - vni: 102
+ vrf: Vrfcheck4
+ existing_vxlans_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: Vrfcheck1
+ vni: 101
+ - vrf_name: Vrfcheck2
+ vni: 102
+ - vrf_name: default
+ - vrf_name: Vrfcheck3
+ - vrf_name: Vrfcheck4
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:sonic-vxlan:
+ VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan11
+ vni: 101
+ vlan: Vlan11
+ - name: vteptest1
+ mapname: map_102_Vlan12
+ vni: 102
+ vlan: Vlan12
+ expected_config_requests:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck3/vni"
+ method: "patch"
+ data:
+ sonic-vrf:vni: 101
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck4/vni"
+ method: "patch"
+ data:
+ sonic-vrf:vni: 102
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ method: "patch"
+ data:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.9
+ primary_ip: 2.2.2.9
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan21
+ vlan: Vlan21
+ vni: 101
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_102_Vlan22
+ vlan: Vlan22
+ vni: 102
+
+overridden_02:
+ module_args:
+ state: overridden
+ config:
+ - name: vteptest1
+ source_ip: 1.1.1.9
+ primary_ip: 2.2.2.9
+ evpn_nvo: nvo1
+ vlan_map:
+ - vni: 101
+ vlan: 21
+ - vni: 102
+ vlan: 22
+ vrf_map:
+ - vni: 101
+ vrf: Vrfcheck3
+ - vni: 102
+ vrf: Vrfcheck4
+ existing_vxlans_config:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vrf:VRF_LIST:
+ - vrf_name: Vrfcheck1
+ vni: 101
+ - vrf_name: Vrfcheck2
+ vni: 102
+ - vrf_name: default
+ - vrf_name: Vrfcheck3
+ - vrf_name: Vrfcheck4
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan"
+ response:
+ code: 200
+ value:
+ sonic-vxlan:sonic-vxlan:
+ VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.1
+ primary_ip: 2.2.2.2
+ VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan11
+ vni: 101
+ vlan: Vlan11
+ - name: vteptest1
+ mapname: map_102_Vlan12
+ vni: 102
+ vlan: Vlan12
+ expected_config_requests:
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck3/vni"
+ method: "patch"
+ data:
+ sonic-vrf:vni: 101
+ - path: "data/sonic-vrf:sonic-vrf/VRF/VRF_LIST=Vrfcheck4/vni"
+ method: "patch"
+ data:
+ sonic-vrf:vni: 102
+ - path: "data/sonic-vxlan:sonic-vxlan/EVPN_NVO/EVPN_NVO_LIST"
+ method: "patch"
+ data:
+ sonic-vxlan:EVPN_NVO_LIST:
+ - name: nvo1
+ source_vtep: vteptest1
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL:
+ VXLAN_TUNNEL_LIST:
+ - name: vteptest1
+ src_ip: 1.1.1.9
+ primary_ip: 2.2.2.9
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_101_Vlan21
+ vlan: Vlan21
+ vni: 101
+ - path: "data/sonic-vxlan:sonic-vxlan/VXLAN_TUNNEL_MAP"
+ method: "patch"
+ data:
+ sonic-vxlan:VXLAN_TUNNEL_MAP:
+ VXLAN_TUNNEL_MAP_LIST:
+ - name: vteptest1
+ mapname: map_102_Vlan22
+ vlan: Vlan22
+ vni: 102
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/sonic_module.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/sonic_module.py
new file mode 100644
index 000000000..a4c411371
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/sonic_module.py
@@ -0,0 +1,149 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import os
+import yaml
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ AnsibleExitJson,
+ AnsibleFailJson,
+ ModuleTestCase,
+)
+
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ update_url
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils import (
+ get_diff
+)
+
+
+class TestSonicModule(ModuleTestCase):
+ """Enterprise SONiC ansible module base unit test class"""
+
+ def setUp(self):
+ super(TestSonicModule, self).setUp()
+
+ self.config_requests_valid = []
+ self.config_requests_sent = []
+
+ self._config_requests_dict = {}
+ self._facts_requests_dict = {}
+
+ @staticmethod
+ def load_fixtures(file_name, content="yaml"):
+ """Load data from specified fixture file and format"""
+ fixture_path = os.path.join(os.path.dirname(__file__), "fixtures")
+ file_path = os.path.join(fixture_path, file_name)
+
+ file_stream = open(file_path, "r")
+ if content == "yaml":
+ data = yaml.full_load(file_stream)
+ else:
+ data = file_stream.read()
+ file_stream.close()
+
+ return data
+
+ def initialize_facts_get_requests(self, facts_get_requests):
+ for request in facts_get_requests:
+ self._facts_requests_dict[request['path']] = request['response']
+
+ def initialize_config_requests(self, config_requests):
+ for request in config_requests:
+ valid_request = request.copy()
+ path = valid_request['path']
+ method = valid_request['method'].lower()
+ data = valid_request.get('data', {})
+ if valid_request.get('response'):
+ response = valid_request.pop('response')
+ else:
+ response = {}
+
+ self.config_requests_valid.append(valid_request)
+ if self._config_requests_dict.get(path) is None:
+ self._config_requests_dict[path] = {}
+
+ config_request_dict = self._config_requests_dict[path]
+ if config_request_dict.get(method) is None:
+ config_request_dict[method] = []
+
+ config_request_dict[method].append([
+ data,
+ {'code': response.get('code', 200), 'value': response.get('value', {})}
+ ])
+
+ def facts_side_effect(self, module, commands):
+ """Side effect function for 'facts' GET requests mock"""
+ responses = []
+ for command in commands:
+ response = []
+ path = update_url(command['path'])
+ method = command['method'].lower()
+
+ if method == 'get':
+ if self._facts_requests_dict.get(path):
+ response.append(self._facts_requests_dict[path]['code'])
+ response.append(self._facts_requests_dict[path].get('value', {}))
+ else:
+ self.module.fail_json(msg="Non GET REST API request made in get facts {0}".format(command))
+
+ responses.append(response)
+
+ return responses
+
+ def config_side_effect(self, module, commands):
+ """Side effect function for 'config' requests mock"""
+ responses = []
+ for command in commands:
+ response = []
+ path = update_url(command['path'])
+ method = command['method'].lower()
+ data = command['data']
+
+ self.config_requests_sent.append({'path': path, 'method': method, 'data': data})
+ entries = self._config_requests_dict.get(path, {}).get(method, [])
+ for entry in entries:
+ if data == entry[0]:
+ response.append(entry[1]['code'])
+ response.append(entry[1]['value'])
+ break
+
+ responses.append(response)
+
+ return responses
+
+ def execute_module(self, failed=False, changed=False):
+ if failed:
+ result = self.failed()
+ else:
+ result = self.changed(changed)
+
+ return result
+
+ def failed(self):
+ with self.assertRaises(AnsibleFailJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertTrue(result["failed"], result)
+ return result
+
+ def changed(self, changed=False):
+ with self.assertRaises(AnsibleExitJson) as exc:
+ self.module.main()
+
+ result = exc.exception.args[0]
+ self.assertEqual(result["changed"], changed, result)
+ return result
+
+ def validate_config_requests(self, requests_sorted=False):
+ """Check if both list of requests sent and expected are same"""
+ if not requests_sorted:
+ # Sort by 'path' (primary) followed by 'method' (secondary)
+ self.config_requests_valid.sort(key=lambda request: (request['path'], request['method']))
+ self.config_requests_sent.sort(key=lambda request: (request['path'], request['method']))
+
+ self.assertEqual(len(self.config_requests_valid), len(self.config_requests_sent))
+ for valid_request, sent_request in zip(self.config_requests_valid, self.config_requests_sent):
+ self.assertEqual(get_diff(valid_request, sent_request, [{'path': "", 'method': "", 'data': {}}]), {})
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_aaa.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_aaa.py
new file mode 100644
index 000000000..de747c2c8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_aaa.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_aaa,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicAaaModule(TestSonicModule):
+ module = sonic_aaa
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.aaa.aaa.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.aaa.aaa.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_aaa.yaml')
+
+ def setUp(self):
+ super(TestSonicAaaModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicAaaModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_aaa_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_aaa_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_aaa_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_aaa_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_aaa_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_aaa_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_acl_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_acl_interfaces.py
new file mode 100644
index 000000000..0bddd6801
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_acl_interfaces.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_acl_interfaces,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicAclInterfacesModule(TestSonicModule):
+ module = sonic_acl_interfaces
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.acl_interfaces.acl_interfaces.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.acl_interfaces.acl_interfaces.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_acl_interfaces.yaml')
+
+ def setUp(self):
+ super(TestSonicAclInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicAclInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_acl_interfaces_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_acl_interfaces_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['config_requests'])
+
+ result = self.execute_module(changed=False)
+ self.validate_config_requests()
+
+ def test_sonic_acl_interfaces_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_acl_interfaces_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_acl_interfaces_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_acl_interfaces_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_api.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_api.py
new file mode 100644
index 000000000..aa3e05e2f
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_api.py
@@ -0,0 +1,38 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_api,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_api
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.modules.sonic_api.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_api.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.edit_config = self.mock_edit_config.start()
+ self.edit_config.return_value = [(204, '')]
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_edit_config.stop()
+
+ def test_sonic_api_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ result = self.execute_module(changed=True)
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bfd.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bfd.py
new file mode 100644
index 000000000..f30c90c79
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bfd.py
@@ -0,0 +1,86 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bfd,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBfdModule(TestSonicModule):
+ module = sonic_bfd
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bfd.bfd.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bfd.bfd.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.mock_send_requests = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bfd.bfd.send_requests"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bfd.yaml')
+
+ def setUp(self):
+ super(TestSonicBfdModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'native'
+ self.send_requests = self.mock_send_requests.start()
+ self.send_requests.return_value = None
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicBfdModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_send_requests.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_bfd_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bfd_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bfd_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_bfd_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bfd_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_bfd_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bfd_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bfd_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp.py
new file mode 100644
index 000000000..ad46f73f9
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp.py
@@ -0,0 +1,75 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpModule(TestSonicModule):
+ module = sonic_bgp
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp.bgp.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpModule, self).setUp()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicBgpModule, self).tearDown()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_bgp_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_af.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_af.py
new file mode 100644
index 000000000..fe50c303d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_af.py
@@ -0,0 +1,82 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp_af,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpModule(TestSonicModule):
+ module = sonic_bgp_af
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_af.bgp_af.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp_af.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpModule, self).setUp()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicBgpModule, self).tearDown()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_bgp_af_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_af_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_af_merged_03(self):
+ set_module_args(self.fixture_data['merged_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_03']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_af_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_af_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_af_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_as_paths.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_as_paths.py
new file mode 100644
index 000000000..edcb1a440
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_as_paths.py
@@ -0,0 +1,83 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp_as_paths,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpAsPathsModule(TestSonicModule):
+ module = sonic_bgp_as_paths
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_as_paths.bgp_as_paths.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_as_paths.bgp_as_paths.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp_as_paths.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpAsPathsModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicBgpAsPathsModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_bgp_as_paths_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_as_paths_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_as_paths_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_as_paths_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_as_paths_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_communities.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_communities.py
new file mode 100644
index 000000000..64451a890
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_communities.py
@@ -0,0 +1,104 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp_communities,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpCommunitiesModule(TestSonicModule):
+ module = sonic_bgp_communities
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_communities.bgp_communities.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_communities.bgp_communities.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp_communities.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpCommunitiesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicBgpCommunitiesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_bgp_communities_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_replaced_02(self):
+ set_module_args(self.fixture_data['replaced_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_communities_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_ext_communities.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_ext_communities.py
new file mode 100644
index 000000000..983554455
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_ext_communities.py
@@ -0,0 +1,146 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp_ext_communities,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpExtCommunitiesModule(TestSonicModule):
+ module = sonic_bgp_ext_communities
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.bgp_ext_communities.bgp_ext_communities.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_ext_communities.bgp_ext_communities.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp_ext_communities.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpExtCommunitiesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicBgpExtCommunitiesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_bgp_ext_communities_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_merged_03(self):
+ set_module_args(self.fixture_data['merged_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_03']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_04(self):
+ set_module_args(self.fixture_data['deleted_04']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_04']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_04']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_05(self):
+ set_module_args(self.fixture_data['deleted_05']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_05']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_05']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_06(self):
+ set_module_args(self.fixture_data['deleted_06']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_06']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_06']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_07(self):
+ set_module_args(self.fixture_data['deleted_07']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_07']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_07']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_08(self):
+ set_module_args(self.fixture_data['deleted_08']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_08']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_08']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_deleted_09(self):
+ set_module_args(self.fixture_data['deleted_09']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_09']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_09']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_ext_communities_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors.py
new file mode 100644
index 000000000..113e9f5c8
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors.py
@@ -0,0 +1,75 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp_neighbors,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpModule(TestSonicModule):
+ module = sonic_bgp_neighbors
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_neighbors.bgp_neighbors.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp_neighbors.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpModule, self).setUp()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicBgpModule, self).tearDown()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_bgp_neighbors_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ # Merge test for when the neighbor and peer-group are already present in the existing config
+ def test_sonic_bgp_neighbors_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_neighbors_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_neighbors_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors_af.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors_af.py
new file mode 100644
index 000000000..574cbf114
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_bgp_neighbors_af.py
@@ -0,0 +1,74 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_bgp_neighbors_af,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicBgpModule(TestSonicModule):
+ module = sonic_bgp_neighbors_af
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.bgp_neighbors_af.bgp_neighbors_af.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_bgp_neighbors_af.yaml')
+
+ def setUp(self):
+ super(TestSonicBgpModule, self).setUp()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicBgpModule, self).tearDown()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_bgp_neighbors_af_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_neighbors_af_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_neighbors_af_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_bgp_neighbors_af_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['existing_bgp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_command.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_command.py
new file mode 100644
index 000000000..2a317ff2c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_command.py
@@ -0,0 +1,59 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_command,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_command
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_run_commands = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.modules.sonic_command.run_commands"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_command.yaml')
+
+ def run_commands_side_effect(self, module, commands):
+ """Side effect function for run commands mock"""
+
+ for cmd in commands:
+ self.config_commands_sent.append(cmd['command'])
+        # The unit tests for sonic_command check that the requested commands are
+        # sent to the device and that the response from the device is validated
+        # against the expected values.
+        # Simulate a dummy return value for the "show version" command under test.
+ return ['Software Version : dell_sonic_4.x_share.770-0beb2c821\n']
+
+ def validate_config_commands(self):
+ """Check if both list of requests sent and expected are same"""
+
+ self.assertEqual(len(self.config_commands_valid), len(self.config_commands_sent))
+ for valid_command, sent_command in zip(self.config_commands_valid, self.config_commands_sent):
+ self.assertEqual(valid_command, sent_command)
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.config_commands_sent = []
+ self.config_commands_valid = []
+ self.run_commands = self.mock_run_commands.start()
+ self.run_commands.side_effect = self.run_commands_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_run_commands.stop()
+
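+    # Illustrative sketch (not the actual fixture contents) of the layout that
+    # sonic_command.yaml is expected to provide for each test case:
+    #
+    #   merged_01:
+    #     module_args:
+    #       commands:
+    #         - show version
+    #     expected_command_requests:
+    #       - show version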
+ def test_sonic_commands_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.config_commands_valid = self.fixture_data['merged_01']['expected_command_requests']
+ result = self.execute_module(changed=False)
+ self.validate_config_commands()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_config.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_config.py
new file mode 100644
index 000000000..1bbd00766
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_config.py
@@ -0,0 +1,78 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_config,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_config
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_get_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.modules.sonic_config.get_config"
+ )
+ cls.mock_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.modules.sonic_config.edit_config"
+ )
+ cls.mock_run_commands = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.modules.sonic_config.run_commands"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_config.yaml')
+
+ def edit_config_side_effect(self, module, commands):
+ """Side effect function for 'config' requests mock"""
+
+ self.config_commands_sent.extend(commands)
+
+ def run_commands_side_effect(self, module, commands):
+ """Side effect function for run_commands mock"""
+
+ for cmd in commands:
+ self.config_commands_sent.append(cmd['command'])
+
+ def validate_config_commands(self):
+ """Check if both list of requests sent and expected are same"""
+
+ self.assertEqual(len(self.config_commands_valid), len(self.config_commands_sent))
+ for valid_command, sent_command in zip(self.config_commands_valid, self.config_commands_sent):
+ self.assertEqual(valid_command, sent_command)
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.config_commands_sent = []
+ self.config_commands_valid = []
+ self.get_config = self.mock_get_config.start()
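+        # get_config is stubbed to return a canned running configuration; the
+        # module compares the candidate lines supplied in module_args against
+        # this text to decide which commands to push to the (mocked) device.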
+ self.get_config.return_value = "show running-configuration\nip load-share hash ipv4 ipv4-dst-ip"
+ self.edit_config = self.mock_edit_config.start()
+ self.edit_config.side_effect = self.edit_config_side_effect
+ self.run_commands = self.mock_run_commands.start()
+ self.run_commands.side_effect = self.run_commands_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_get_config.stop()
+ self.mock_edit_config.stop()
+ self.mock_run_commands.stop()
+
+ def test_sonic_config_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.config_commands_valid = self.fixture_data['merged_01']['expected_commands_to_device']
+ result = self.execute_module(changed=True)
+ self.validate_config_commands()
+
+ def test_sonic_config_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.config_commands_valid = self.fixture_data['merged_02']['expected_commands_to_device']
+ result = self.execute_module(changed=True)
+ self.validate_config_commands()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_copp.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_copp.py
new file mode 100644
index 000000000..7e7c1be43
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_copp.py
@@ -0,0 +1,86 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_copp,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicCoppModule(TestSonicModule):
+ module = sonic_copp
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.copp.copp.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.copp.copp.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.mock_send_requests = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.copp.copp.send_requests"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_copp.yaml')
+
+ def setUp(self):
+ super(TestSonicCoppModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'native'
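+        # send_requests is patched to return None so that requests issued by the
+        # copp config handler outside of edit_config never reach a real device.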
+ self.send_requests = self.mock_send_requests.start()
+ self.send_requests.return_value = None
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicCoppModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_send_requests.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_copp_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_copp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_copp_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_copp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_copp_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_copp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_copp_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_copp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_relay.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_relay.py
new file mode 100644
index 000000000..227e08993
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_relay.py
@@ -0,0 +1,120 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_dhcp_relay,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicDhcpRelayModule(TestSonicModule):
+ module = sonic_dhcp_relay
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.dhcp_relay.dhcp_relay.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.dhcp_relay.dhcp_relay.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_dhcp_relay.yaml')
+
+ def setUp(self):
+ super(TestSonicDhcpRelayModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicDhcpRelayModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
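+    # The fixtures for this module use the keys 'facts_get_requests' (canned
+    # responses for the facts GET requests) and 'config_requests' (the REST
+    # requests the module is expected to generate for the given module_args).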
+ def test_sonic_dhcp_relay_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['config_requests'])
+
+ result = self.execute_module(changed=False)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_deleted_04(self):
+ set_module_args(self.fixture_data['deleted_04']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_04']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_04']['config_requests'])
+
+ result = self.execute_module(changed=False)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_replaced_02(self):
+ set_module_args(self.fixture_data['replaced_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['replaced_02']['config_requests'])
+
+ result = self.execute_module(changed=False)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_relay_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_snooping.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_snooping.py
new file mode 100644
index 000000000..b1d2f4a03
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_dhcp_snooping.py
@@ -0,0 +1,213 @@
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_dhcp_snooping,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicDhcpSnoopingModule(TestSonicModule):
+ module = sonic_dhcp_snooping
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.dhcp_snooping.dhcp_snooping.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.dhcp_snooping.dhcp_snooping.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.edit_config"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_dhcp_snooping.yaml')
+
+ def setUp(self):
+ super(TestSonicDhcpSnoopingModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicDhcpSnoopingModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+
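+    # Fixture keys for this module carry a descriptive suffix (for example,
+    # 'merged_04_blank' or 'deleted_02_clear_vlans') so that the scenario covered
+    # by each test case is visible directly in sonic_dhcp_snooping.yaml.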
+ def test_sonic_dhcp_snooping_merged_01(self):
+ test_name = "merged_01"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_merged_02(self):
+ test_name = "merged_02"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_merged_03(self):
+ test_name = "merged_03"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_merged_04(self):
+ test_name = "merged_04_blank"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=False)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_01(self):
+ test_name = "deleted_01"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_02(self):
+ test_name = "deleted_02_clear_vlans"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_02_2(self):
+ test_name = "deleted_02_2_select_vlans"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_03(self):
+ test_name = "deleted_03"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_04(self):
+ test_name = "deleted_04_clear_bindings"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_05(self):
+ test_name = "deleted_05_select_bindings"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_06(self):
+ test_name = "deleted_06_clear_trusted"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_07(self):
+ test_name = "deleted_07_select_trusted"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_08(self):
+ test_name = "deleted_08_booleans"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_deleted_09(self):
+ test_name = "deleted_09_empty"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_overridden_01(self):
+ test_name = "overridden_01"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_replaced_01(self):
+ test_name = "replaced_01"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_replaced_02(self):
+ test_name = "replaced_02"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_replaced_03(self):
+ test_name = "replaced_03_vlan_replace"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_replaced_04(self):
+ test_name = "replaced_04_trusted"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_replaced_05(self):
+ test_name = "replaced_05_verify"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_dhcp_snooping_replaced_06(self):
+ test_name = "replaced_06_empty_bindings"
+ set_module_args(self.fixture_data[test_name]['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data[test_name]['existing_config'])
+ self.initialize_config_requests(self.fixture_data[test_name]['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_facts.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_facts.py
new file mode 100644
index 000000000..fdbe55905
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_facts.py
@@ -0,0 +1,45 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_facts,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_facts
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_get_network_resources_facts = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.facts.FactsBase.get_network_resources_facts"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_facts.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.get_network_resources_facts = self.mock_get_network_resources_facts.start()
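+        # get_network_resources_facts is stubbed with a canned facts payload for
+        # the 'bgp' resource so the facts module runs without a device; the
+        # second tuple element (presumably the warnings list) is left empty.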
+        self.get_network_resources_facts.return_value = ({
+            'ansible_network_resources': {
+                'bgp': [{
+                    'bgp_as': '24', 'router_id': '10.1.1.1', 'log_neighbor_changes': True, 'vrf_name': 'default',
+                    'timers': {'holdtime': 180, 'keepalive_interval': 60},
+                    'bestpath': {
+                        'as_path': {'confed': False, 'ignore': False, 'multipath_relax': True, 'multipath_relax_as_set': False},
+                        'med': {'confed': False, 'missing_as_worst': False, 'always_compare_med': False},
+                        'compare_routerid': False
+                    },
+                    'max_med': None
+                }]
+            },
+            'ansible_net_gather_network_resources': ['bgp'],
+            'ansible_net_gather_subset': []
+        }, [])
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_get_network_resources_facts.stop()
+
+ def test_sonic_facts_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ result = self.execute_module(changed=False)
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_interfaces.py
new file mode 100644
index 000000000..fadc8d0de
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_interfaces.py
@@ -0,0 +1,90 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_interfaces,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_interfaces
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.interfaces.interfaces.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.interfaces.interfaces.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.interfaces_util.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_interfaces.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
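+        # The interfaces_util helper issues its own edit_config requests, so it
+        # is wired to the same canned facts responses as the fact-gathering code.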
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_interfaces_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_interfaces_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_interfaces_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_interfaces_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_interfaces_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ip_neighbor.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ip_neighbor.py
new file mode 100644
index 000000000..a8e6a40cd
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ip_neighbor.py
@@ -0,0 +1,87 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_ip_neighbor,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_ip_neighbor
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.ip_neighbor.ip_neighbor.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.ip_neighbor.ip_neighbor.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_ip_neighbor.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_ip_neighbor_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_ip_neighbor_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ip_neighbor_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_ip_neighbor_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ip_neighbor_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_ip_neighbor_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ip_neighbor_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_ip_neighbor_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ip_neighbor_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_ip_neighbor_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_acls.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_acls.py
new file mode 100644
index 000000000..2afad3e54
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_acls.py
@@ -0,0 +1,105 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_l2_acls,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicL2AclsModule(TestSonicModule):
+ module = sonic_l2_acls
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l2_acls.l2_acls.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l2_acls.l2_acls.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_l2_acls.yaml')
+
+ def setUp(self):
+ super(TestSonicL2AclsModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicL2AclsModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def validate_config_requests(self):
+ # Sort by payload content for requests with same path
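+        # The module can generate several requests for the same ACL path in a
+        # nondeterministic order, so both the expected and the generated request
+        # lists are sorted by path, method and ACL name before the base-class
+        # comparison runs with requests_sorted=True.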
+ acl_path = 'data/openconfig-acl:acl/acl-sets/acl-set'
+ self.config_requests_valid.sort(key=lambda request: (request['path'], request['method'],
+ request['data']['acl-set'][0]['name'] if request['path'] == acl_path else ''))
+ self.config_requests_sent.sort(key=lambda request: (request['path'], request['method'],
+ request['data']['acl-set'][0]['name'] if request['path'] == acl_path else ''))
+ super(TestSonicL2AclsModule, self).validate_config_requests(requests_sorted=True)
+
+ def test_sonic_l2_acls_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_acls_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_acls_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_acls_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_acls_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_acls_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_interfaces.py
new file mode 100644
index 000000000..d8475bb0d
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l2_interfaces.py
@@ -0,0 +1,76 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_l2_interfaces,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_l2_interfaces
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l2_interfaces.l2_interfaces.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l2_interfaces.l2_interfaces.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_l2_interfaces.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_l2_interfaces_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_l2_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_interfaces_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_l2_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_interfaces_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_l2_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l2_interfaces_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_l2_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_acls.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_acls.py
new file mode 100644
index 000000000..4ad2b6477
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_acls.py
@@ -0,0 +1,105 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_l3_acls,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicL3AclsModule(TestSonicModule):
+ module = sonic_l3_acls
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l3_acls.l3_acls.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l3_acls.l3_acls.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_l3_acls.yaml')
+
+ def setUp(self):
+ super(TestSonicL3AclsModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicL3AclsModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def validate_config_requests(self):
+ # Sort by payload content for requests with same path
+ acl_path = 'data/openconfig-acl:acl/acl-sets/acl-set'
+ self.config_requests_valid.sort(key=lambda request: (request['path'], request['method'],
+ request['data']['acl-set'][0]['name'] if request['path'] == acl_path else ''))
+ self.config_requests_sent.sort(key=lambda request: (request['path'], request['method'],
+ request['data']['acl-set'][0]['name'] if request['path'] == acl_path else ''))
+ super(TestSonicL3AclsModule, self).validate_config_requests(requests_sorted=True)
+
+ def test_sonic_l3_acls_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_acls_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_acls_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_acls_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_acls_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_acls_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['facts_get_requests'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['config_requests'])
+
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_interfaces.py
new file mode 100644
index 000000000..7f57382a0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_l3_interfaces.py
@@ -0,0 +1,83 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_l3_interfaces,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_l3_interfaces
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.l3_interfaces.l3_interfaces.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.l3_interfaces.l3_interfaces.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_l3_interfaces.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_l3_interfaces_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_l3_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_interfaces_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_l3_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_interfaces_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_l3_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_interfaces_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_l3_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_l3_interfaces_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_l3_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_lag_interfaces.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_lag_interfaces.py
new file mode 100644
index 000000000..2e3baff92
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_lag_interfaces.py
@@ -0,0 +1,76 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_lag_interfaces,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_lag_interfaces
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.lag_interfaces.lag_interfaces.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.lag_interfaces.lag_interfaces.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_lag_interfaces.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_lag_interfaces_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_lag_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_lag_interfaces_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_lag_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_lag_interfaces_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_lag_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_lag_interfaces_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['existing_lag_interfaces_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_logging.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_logging.py
new file mode 100644
index 000000000..87c2f03e4
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_logging.py
@@ -0,0 +1,87 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_logging,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_logging
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.logging.logging.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.logging.logging.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_logging.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.config_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_logging_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_logging_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_logging_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_logging_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_logging_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_logging_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_logging_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_logging_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_logging_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_logging_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mac.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mac.py
new file mode 100644
index 000000000..1fb06e4a3
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mac.py
@@ -0,0 +1,86 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_mac,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicMacModule(TestSonicModule):
+ module = sonic_mac
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.mac.mac.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.mac.mac.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.mock_send_requests = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.mac.mac.send_requests"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_mac.yaml')
+
+ def setUp(self):
+ super(TestSonicMacModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'native'
+ self.send_requests = self.mock_send_requests.start()
+ self.send_requests.return_value = None
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicMacModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_send_requests.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_mac_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_mac_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mac_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_mac_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mac_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_mac_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mac_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_mac_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mclag.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mclag.py
new file mode 100644
index 000000000..743d5c758
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_mclag.py
@@ -0,0 +1,104 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_mclag,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicMclagModule(TestSonicModule):
+ module = sonic_mclag
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.mclag.mclag.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.mclag.mclag.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_mclag.yaml')
+
+ def setUp(self):
+ super(TestSonicMclagModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicMclagModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_mclag_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_replaced_02(self):
+ set_module_args(self.fixture_data['replaced_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_02']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_replaced_03(self):
+ set_module_args(self.fixture_data['replaced_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_03']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_mclag_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_mclag_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ntp.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ntp.py
new file mode 100644
index 000000000..4337fedbd
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_ntp.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_ntp,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_ntp
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.ntp.ntp.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.ntp.ntp.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_ntp.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.utils_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_ntp_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_ntp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ntp_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_ntp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ntp_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_ntp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ntp_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_ntp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ntp_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_ntp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_ntp_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_ntp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_pki.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_pki.py
new file mode 100644
index 000000000..9eab58e03
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_pki.py
@@ -0,0 +1,85 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_pki,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_pki
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.pki.pki.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.pki.pki.edit_config"
+ )
+
+ cls.fixture_data = cls.load_fixtures("sonic_pki.yaml")
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+
+ def test_sonic_pki_merged_01(self):
+ set_module_args(self.fixture_data["merged_01"]["module_args"])
+ self.initialize_facts_get_requests(
+ self.fixture_data["merged_01"]["existing_pki_config"]
+ )
+ self.initialize_config_requests(
+ self.fixture_data["merged_01"]["expected_config_requests"]
+ )
+ self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_pki_deleted_01(self):
+ set_module_args(self.fixture_data["deleted_01"]["module_args"])
+ self.initialize_facts_get_requests(
+ self.fixture_data["deleted_01"]["existing_pki_config"]
+ )
+ self.initialize_config_requests(
+ self.fixture_data["deleted_01"]["expected_config_requests"]
+ )
+ self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_pki_replaced_01(self):
+ set_module_args(self.fixture_data["replaced_01"]["module_args"])
+ self.initialize_facts_get_requests(
+ self.fixture_data["replaced_01"]["existing_pki_config"]
+ )
+ self.initialize_config_requests(
+ self.fixture_data["replaced_01"]["expected_config_requests"]
+ )
+ self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_pki_overridden_01(self):
+ set_module_args(self.fixture_data["overridden_01"]["module_args"])
+ self.initialize_facts_get_requests(
+ self.fixture_data["overridden_01"]["existing_pki_config"]
+ )
+ self.initialize_config_requests(
+ self.fixture_data["overridden_01"]["expected_config_requests"]
+ )
+ self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_breakout.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_breakout.py
new file mode 100644
index 000000000..8f093bf8c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_breakout.py
@@ -0,0 +1,89 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_port_breakout,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicPortBreakoutModule(TestSonicModule):
+ module = sonic_port_breakout
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.port_breakout.port_breakout.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.port_breakout.port_breakout.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_port_breakout.yaml')
+
+ def setUp(self):
+ super(TestSonicPortBreakoutModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.utils_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicPortBreakoutModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_utils_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_port_breakout_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_port_breakout_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_breakout_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_port_breakout_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_breakout_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_port_breakout_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_breakout_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_port_breakout_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_breakout_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_port_breakout_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_group.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_group.py
new file mode 100644
index 000000000..89a5280af
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_port_group.py
@@ -0,0 +1,105 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_port_group,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.sonic import (
+ update_url
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_port_group
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.port_group.port_group.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.port_group.port_group.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_port_group.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+    # The port_group module calls edit_config for two purposes: to fetch the default
+    # port-group speeds (a GET request) and to send configuration requests. The
+    # config_side_effect method inherited from sonic_module is therefore overridden
+    # here to handle both GET and modification requests.
+ def config_side_effect(self, module, commands):
+ """Side effect function for 'config' requests mock"""
+ responses = []
+ for command in commands:
+ response = []
+ path = update_url(command['path'])
+ method = command['method'].lower()
+ if method == 'get':
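+                # GET requests (issued here to read the default port-group speeds) are
+                # answered from the facts fixture data keyed by request path.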
+ if self._facts_requests_dict.get(path):
+ response.append(self._facts_requests_dict[path]['code'])
+ response.append(self._facts_requests_dict[path].get('value', {}))
+ else:
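+                # Non-GET (modification) requests are recorded, and their mocked
+                # responses are looked up from the expected config requests loaded
+                # from the fixture.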
+ data = command['data']
+
+ self.config_requests_sent.append({'path': path, 'method': method, 'data': data})
+ entries = self._config_requests_dict.get(path, {}).get(method, [])
+ for entry in entries:
+ if data == entry[0]:
+ response.append(entry[1]['code'])
+ response.append(entry[1]['value'])
+ break
+
+ responses.append(response)
+
+ return responses
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_port_group_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_port_group_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_group_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_port_group_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_group_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_port_group_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_port_group_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_port_group_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_prefix_lists.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_prefix_lists.py
new file mode 100644
index 000000000..426239237
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_prefix_lists.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_prefix_lists,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_prefix_lists
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.prefix_lists.prefix_lists.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.prefix_lists.prefix_lists.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_prefix_lists.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_prefix_lists_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_prefix_lists_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_prefix_lists_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_prefix_lists_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_prefix_lists_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_prefix_lists_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_radius_server.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_radius_server.py
new file mode 100644
index 000000000..35a22085a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_radius_server.py
@@ -0,0 +1,69 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_radius_server,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_radius_server
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.radius_server.radius_server.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.radius_server.radius_server.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_radius_server.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+
+ def test_sonic_radius_server_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_radius_server_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_radius_server_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_radius_server_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_radius_server_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_radius_server_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_static_routes.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_static_routes.py
new file mode 100644
index 000000000..21881e6e0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_static_routes.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_static_routes,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_static_routes
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.static_routes.static_routes.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.static_routes.static_routes.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_static_routes.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_static_routes_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_static_routes_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_static_routes_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_static_routes_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_static_routes_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_static_routes_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+    def test_sonic_static_routes_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_static_routes_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+    def test_sonic_static_routes_replaced_02(self):
+ set_module_args(self.fixture_data['replaced_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_02']['existing_static_routes_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_static_routes_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_static_routes_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_stp.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_stp.py
new file mode 100644
index 000000000..f4860bb2c
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_stp.py
@@ -0,0 +1,155 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_stp,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_stp
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.stp.stp.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.stp.stp.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_stp.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'native'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.config_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_stp_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_merged_03(self):
+ set_module_args(self.fixture_data['merged_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_03']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['merged_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_replaced_02(self):
+ set_module_args(self.fixture_data['replaced_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_02']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_replaced_03(self):
+ set_module_args(self.fixture_data['replaced_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_03']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_replaced_04(self):
+ set_module_args(self.fixture_data['replaced_04']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_04']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_04']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+
+ def test_sonic_stp_replaced_05(self):
+ set_module_args(self.fixture_data['replaced_05']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_05']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_05']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_replaced_06(self):
+ set_module_args(self.fixture_data['replaced_06']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_06']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_06']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+
+ def test_sonic_stp_replaced_07(self):
+ set_module_args(self.fixture_data['replaced_07']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_07']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_07']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_deleted_03(self):
+ set_module_args(self.fixture_data['deleted_03']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_03']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_03']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_stp_deleted_04(self):
+ set_module_args(self.fixture_data['deleted_04']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_04']['existing_stp_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_04']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_system.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_system.py
new file mode 100644
index 000000000..f567461f0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_system.py
@@ -0,0 +1,93 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_system,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_system
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.system.system.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.system.system.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.mock_send_requests = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.system.system.send_requests"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_system.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.send_requests = self.mock_send_requests.start()
+ self.send_requests.return_value = None
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_send_requests.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_system_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_system_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_system_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_system_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_system_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_system_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_system_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_system_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_system_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_system_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_tacacs_server.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_tacacs_server.py
new file mode 100644
index 000000000..d7297e10a
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_tacacs_server.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_tacacs_server,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_tacacs_server
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.tacacs_server.tacacs_server.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.tacacs_server.tacacs_server.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_tacacs_server.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_tacacs_server_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_tacacs_server_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_tacacs_server_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_tacacs_server_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_tacacs_server_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_tacacs_server_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_users.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_users.py
new file mode 100644
index 000000000..a5f2e7358
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_users.py
@@ -0,0 +1,73 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_users,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_users
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.users.users.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.users.users.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_users.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_users_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_users_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_users_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_users_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_users_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_users_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vlans.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vlans.py
new file mode 100644
index 000000000..0dbc81159
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vlans.py
@@ -0,0 +1,94 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_vlans,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_vlans
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vlans.vlans.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vlans.vlans.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_vlans.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_vlans_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_vlans_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vlans_merged_02(self):
+ set_module_args(self.fixture_data['merged_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_02']['existing_vlans_config'])
+ self.initialize_config_requests(self.fixture_data['merged_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vlans_deleted_01_vlan_descr(self):
+ set_module_args(self.fixture_data['deleted_01_vlan_descr']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01_vlan_descr']['existing_vlans_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01_vlan_descr']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vlans_deleted_02_vlan(self):
+ set_module_args(self.fixture_data['deleted_02_vlan']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02_vlan']['existing_vlans_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02_vlan']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vlans_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_vlans_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vlans_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_vlans_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vrfs.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vrfs.py
new file mode 100644
index 000000000..0fb6bf4e1
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vrfs.py
@@ -0,0 +1,87 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_vrfs,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_vrfs
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vrfs.vrfs.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vrfs.vrfs.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_vrfs.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.config_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_vrfs_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_vrfs_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vrfs_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_vrfs_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vrfs_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_vrfs_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vrfs_replaced_01(self):
+ set_module_args(self.fixture_data['replaced_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_01']['existing_vrfs_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vrfs_overridden_01(self):
+ set_module_args(self.fixture_data['overridden_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_01']['existing_vrfs_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vxlans.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vxlans.py
new file mode 100644
index 000000000..29a125ce0
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/network/sonic/test_sonic_vxlans.py
@@ -0,0 +1,96 @@
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import (
+ patch,
+)
+from ansible_collections.dellemc.enterprise_sonic.plugins.modules import (
+ sonic_vxlans,
+)
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
+ set_module_args,
+)
+from .sonic_module import TestSonicModule
+
+
+class TestSonicInterfacesModule(TestSonicModule):
+ module = sonic_vxlans
+
+ @classmethod
+ def setUpClass(cls):
+ cls.mock_facts_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.facts.vxlans.vxlans.edit_config"
+ )
+ cls.mock_config_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vxlans.vxlans.edit_config"
+ )
+ cls.mock_utils_edit_config = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.bgp_utils.edit_config"
+ )
+ cls.mock_get_interface_naming_mode = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.utils.utils.get_device_interface_naming_mode"
+ )
+ cls.mock_send_requests = patch(
+ "ansible_collections.dellemc.enterprise_sonic.plugins.module_utils.network.sonic.config.vxlans.vxlans.send_requests"
+ )
+ cls.fixture_data = cls.load_fixtures('sonic_vxlans.yaml')
+
+ def setUp(self):
+ super(TestSonicInterfacesModule, self).setUp()
+ self.facts_edit_config = self.mock_facts_edit_config.start()
+ self.config_edit_config = self.mock_config_edit_config.start()
+ self.facts_edit_config.side_effect = self.facts_side_effect
+ self.config_edit_config.side_effect = self.config_side_effect
+ self.get_interface_naming_mode = self.mock_get_interface_naming_mode.start()
+ self.get_interface_naming_mode.return_value = 'standard'
+ self.send_requests = self.mock_send_requests.start()
+ self.send_requests.return_value = None
+ self.utils_edit_config = self.mock_utils_edit_config.start()
+ self.utils_edit_config.side_effect = self.facts_side_effect
+
+ def tearDown(self):
+ super(TestSonicInterfacesModule, self).tearDown()
+ self.mock_facts_edit_config.stop()
+ self.mock_config_edit_config.stop()
+ self.mock_get_interface_naming_mode.stop()
+ self.mock_send_requests.stop()
+ self.mock_utils_edit_config.stop()
+
+ def test_sonic_vxlans_merged_01(self):
+ set_module_args(self.fixture_data['merged_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['merged_01']['existing_vxlans_config'])
+ self.initialize_config_requests(self.fixture_data['merged_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vxlans_deleted_01(self):
+ set_module_args(self.fixture_data['deleted_01']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_01']['existing_vxlans_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_01']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vxlans_deleted_02(self):
+ set_module_args(self.fixture_data['deleted_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['deleted_02']['existing_vxlans_config'])
+ self.initialize_config_requests(self.fixture_data['deleted_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+    # When the replaced state is executed, the existing config is deleted first and the new config is then patched.
+    # In these unit tests, sonic_module.py sorts the requests before comparison, so the order of the actual requests sent
+    # to the device can differ from the order used when comparing against the expected results; the on-device order is unaffected.
+ def test_sonic_vxlans_replaced_02(self):
+ set_module_args(self.fixture_data['replaced_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['replaced_02']['existing_vxlans_config'])
+ self.initialize_config_requests(self.fixture_data['replaced_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
+
+ def test_sonic_vxlans_overridden_02(self):
+ set_module_args(self.fixture_data['overridden_02']['module_args'])
+ self.initialize_facts_get_requests(self.fixture_data['overridden_02']['existing_vxlans_config'])
+ self.initialize_config_requests(self.fixture_data['overridden_02']['expected_config_requests'])
+ result = self.execute_module(changed=True)
+ self.validate_config_requests()
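The comment in test_sonic_vxlans.py above refers to requests being sorted before comparison; that helper lives in sonic_module.py and is not shown in this diff. A minimal sketch of such an order-insensitive comparison, assuming each request is a dict with 'path', 'method', and 'data' keys (sort_requests and requests_match are placeholder names, not the collection's actual API):

import json


def sort_requests(requests):
    # Produce a deterministic ordering so two request lists can be compared
    # regardless of the order in which they were generated.
    return sorted(
        requests,
        key=lambda req: (
            req.get('path', ''),
            req.get('method', ''),
            json.dumps(req.get('data', {}), sort_keys=True),
        ),
    )


def requests_match(actual, expected):
    # Compare the two request lists after sorting both sides.
    return sort_requests(actual) == sort_requests(expected)

Only the comparison in the unit test is order-insensitive; the device still receives the requests in the order the module generated them.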
diff --git a/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/utils.py b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/utils.py
new file mode 100644
index 000000000..0157649ce
--- /dev/null
+++ b/ansible_collections/dellemc/enterprise_sonic/tests/unit/modules/utils.py
@@ -0,0 +1,51 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+from ansible.module_utils import basic
+from ansible.module_utils._text import to_bytes
+
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat import unittest
+from ansible_collections.dellemc.enterprise_sonic.tests.unit.compat.mock import patch
+
+
+def set_module_args(args):
+ if '_ansible_remote_tmp' not in args:
+ args['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in args:
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
+def exit_json(*args, **kwargs):
+ if 'changed' not in kwargs:
+ kwargs['changed'] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
+class ModuleTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+ self.mock_module.start()
+ self.mock_sleep = patch('time.sleep')
+ self.mock_sleep.start()
+ set_module_args({})
+ self.addCleanup(self.mock_module.stop)
+ self.addCleanup(self.mock_sleep.stop)
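The helpers in utils.py above follow the common Ansible unit-test pattern: set_module_args() serializes the arguments that AnsibleModule will parse, and ModuleTestCase patches exit_json/fail_json so a module run raises AnsibleExitJson or AnsibleFailJson instead of terminating the process. A minimal, self-contained sketch of how a test exercises that pattern (it drives a bare AnsibleModule directly rather than one of the collection's modules):

from ansible.module_utils import basic

from ansible_collections.dellemc.enterprise_sonic.tests.unit.modules.utils import (
    AnsibleExitJson,
    ModuleTestCase,
    set_module_args,
)


class TestExitJsonPattern(ModuleTestCase):

    def test_exit_json_is_captured(self):
        # set_module_args() stores the arguments in basic._ANSIBLE_ARGS for AnsibleModule to read.
        set_module_args({})
        module = basic.AnsibleModule(argument_spec={})
        # ModuleTestCase patched exit_json, so the call raises AnsibleExitJson
        # instead of exiting; the result dict can then be inspected.
        with self.assertRaises(AnsibleExitJson) as context:
            module.exit_json(msg='ok')
        result = context.exception.args[0]
        self.assertFalse(result['changed'])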
diff --git a/ansible_collections/dellemc/openmanage/.ansible-lint b/ansible_collections/dellemc/openmanage/.ansible-lint
new file mode 100644
index 000000000..f615bf255
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/.ansible-lint
@@ -0,0 +1,2 @@
+exclude_paths:
+ - .github/
diff --git a/ansible_collections/dellemc/openmanage/.ansible-lint-ignore b/ansible_collections/dellemc/openmanage/.ansible-lint-ignore
new file mode 100644
index 000000000..78ed06cb2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/.ansible-lint-ignore
@@ -0,0 +1,122 @@
+roles/redfish_storage_volume/molecule/RAID0/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/initialization/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/defaults/main.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/RAID6/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/RAID60/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/RAID1/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/RAID5/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/RAID10/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/RAID50/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/__delete_virtual_drive.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/__idrac_reset.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/apply_time_default/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/apply_time_immediate/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/converge.yml var-naming[no-role-prefix]
+roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/converge.yml var-naming[no-role-prefix]
+
+
+roles/redfish_firmware/molecule/negative/converge.yml var-naming[no-role-prefix]
+roles/redfish_firmware/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/redfish_firmware/defaults/main.yml var-naming[no-role-prefix]
+
+
+roles/idrac_storage_controller/defaults/main.yml var-naming[no-role-prefix]
+
+roles/idrac_server_powerstate/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/forceoff/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/forcerestart/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/gracefulrestart/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/gracefulshutdown/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/nmi/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/on/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/powercycle/converge.yml var-naming[no-role-prefix]
+roles/idrac_server_powerstate/molecule/pushpowerbutton/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_reset/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_os_deployment/defaults/main/rhel.yml var-naming[no-role-prefix]
+roles/idrac_os_deployment/defaults/main/esxi.yml var-naming[no-role-prefix]
+roles/idrac_os_deployment/defaults/main/main.yml var-naming[no-role-prefix]
+
+roles/idrac_job_queue/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_job_queue/molecule/delete_job/converge.yml var-naming[no-role-prefix]
+roles/idrac_job_queue/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_job_queue/molecule/clear_job_queue/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_import_server_config_profile/molecule/resources/tests/prepare.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/nfs_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/import_multiple_target/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/import_buffer_xml/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/import_buffer_json/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/https_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/http_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/molecule/cifs_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_import_server_config_profile/defaults/main.yml var-naming[no-role-prefix]
+
+roles/idrac_gather_facts/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_gather_facts/molecule/negative/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_firmware/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/cifs_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/ftp_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/http_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/https_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/httpsproxy_share/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/negative_scenarios/converge.yml var-naming[no-role-prefix]
+roles/idrac_firmware/molecule/nfs_share/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_export_server_config_profile/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_export_server_config_profile/defaults/main.yml var-naming[no-role-prefix]
+
+roles/idrac_certificate/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/CA/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/CSC/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/CTC/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/generateCSR/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/HTTPS/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/reset/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/CustomCertificate/converge.yml var-naming[no-role-prefix]
+roles/idrac_certificate/molecule/SSLKEY/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_boot/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/converge.yml var-naming[no-role-prefix]
+roles/idrac_boot/molecule/default/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_bios/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_with_maintenance_window/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_with_maintenance_window/prepare.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/prepare.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_on_reset/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_on_reset/prepare.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/prepare.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_immediate/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/update_attributes_immediate/prepare.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/reset_bios/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/clear_pending_attributes/converge.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/clear_pending_attributes/prepare.yml var-naming[no-role-prefix]
+roles/idrac_bios/molecule/default/converge.yml var-naming[no-role-prefix]
+
+roles/idrac_attributes/defaults/main.yml var-naming[no-role-prefix]
+roles/idrac_attributes/molecule/default/converge.yml var-naming[no-role-prefix]
+roles/idrac_attributes/molecule/idrac_attr/converge.yml var-naming[no-role-prefix]
+roles/idrac_attributes/molecule/lifecycle_controller_attr/converge.yml var-naming[no-role-prefix]
+roles/idrac_attributes/molecule/system_attr/converge.yml var-naming[no-role-prefix]
diff --git a/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml
index 64a1aed8c..33251a189 100644
--- a/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml
+++ b/ansible_collections/dellemc/openmanage/.github/workflows/ansible-test.yml
@@ -15,18 +15,18 @@ jobs:
strategy:
fail-fast: false
matrix:
- ansible-version: [stable-2.10, stable-2.11, stable-2.12, stable-2.13]
+ ansible-version: [stable-2.14, stable-2.15, stable-2.16, devel]
steps:
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- - name: Set up Python 3.9
- uses: actions/setup-python@v1
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v4
with:
- python-version: 3.9
+ python-version: 3.11
- name: Install ansible (${{ matrix.ansible-version }})
- run: pip install pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
- name: Build a collection tarball
run: ansible-galaxy collection build --output-path "${GITHUB_WORKSPACE}/.cache/collection-tarballs"
@@ -43,50 +43,31 @@ jobs:
# https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html
unit:
- name: Unit Tests
+ name: Unit Tests (Ⓐ${{ matrix.ansible }} with ${{ matrix.python }})
needs: [build]
- runs-on: ubuntu-latest
strategy:
- fail-fast: false
matrix:
- python-version: [3.8, 3.9, '3.10']
- ansible-version: [stable-2.11, stable-2.12, stable-2.13]
+ python: ['3.9', '3.10', '3.11']
+ ansible:
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ - devel
exclude:
- - ansible-version: stable-2.11
- python-version: '3.10'
+ - ansible: stable-2.16
+ python: '3.9'
+ - ansible: devel
+ python: '3.9'
+ runs-on: ubuntu-latest
steps:
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
+ - name: Perform unit testing with ansible-test
+ uses: ansible-community/ansible-test-gh-action@release/v1
with:
- python-version: ${{ matrix.python-version }}
-
- - name: Install ansible (${{ matrix.ansible-version }}) version
- run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
-
- - name: Download migrated collection artifacts
- uses: actions/download-artifact@v1
- with:
- name: collection
- path: .cache/collection-tarballs
-
- - name: Setup Unit test Pre-requisites
- run: |
- ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
- git clone https://github.com/ansible/ansible.git
- cp -rf ansible/test/units/compat /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/unit/
- cp -rf ansible/test/units/modules/utils.py /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/
- sed -i 's/units/ansible_collections.dellemc.openmanage.tests.unit/' /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/utils.py
- if [ -f /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/requirements.txt ]; then pip install -r /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage/tests/requirements.txt; fi
-
-
- - name: Run Unit tests using ansible-test
- run: ansible-test units -v --color --python ${{ matrix.python-version }} --coverage
- working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage
-
- #- name: Generate coverage report
- # run: ansible-test coverage xml -v --group-by command --group-by version
- # working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage
+ testing-type: units
+ coverage: always
+ ansible-core-version: ${{ matrix.ansible }}
+ target-python-version: ${{ matrix.python }}
###
# Sanity tests (REQUIRED)
@@ -94,37 +75,76 @@ jobs:
# https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html
sanity:
- name: Sanity Tests
+ name: Sanity (Ⓐ${{ matrix.ansible }} with ${{ matrix.python }})
+ needs: [build]
+ strategy:
+ matrix:
+ python: ['3.9', '3.10', '3.11']
+ ansible:
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ - devel
+ exclude:
+ - ansible: stable-2.16
+ python: '3.9'
+ - ansible: devel
+ python: '3.9'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Perform sanity testing
+ uses: ansible-community/ansible-test-gh-action@release/v1
+ with:
+ ansible-core-version: ${{ matrix.ansible }}
+ target-python-version: ${{ matrix.python }}
+ testing-type: sanity
+ pull-request-change-detection: true
+ coverage: never
+
+ lint:
+ name: Ansible lint
runs-on: ubuntu-latest
needs: [build]
strategy:
fail-fast: false
matrix:
- ansible-version: [stable-2.11, stable-2.12, stable-2.13, devel]
-
+ python-version: ['3.9', '3.10', '3.11']
+ ansible-version: [stable-2.14, stable-2.15, stable-2.16, devel]
+ exclude:
+ - ansible-version: stable-2.16
+ python-version: '3.9'
+ - ansible-version: devel
+ python-version: '3.9'
steps:
- - name: Set up Python 3.9
- uses: actions/setup-python@v1
+ # Important: This sets up your GITHUB_WORKSPACE environment variable
+ - name: Checkout the source code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0 # needed for progressive mode to work
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
with:
- # it is just required to run that once as "ansible-test sanity" in the docker image
- # will run on all python versions it supports.
- python-version: 3.9
+ python-version: ${{ matrix.python-version }}
- name: Install ansible (${{ matrix.ansible-version }}) version
run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+ - name: Install ansible lint
+ run: pip install ansible-lint pytest --disable-pip-version-check
+
+ - name: Install ansible posix
+ run: ansible-galaxy collection install ansible.posix
+
- name: Download migrated collection artifacts
uses: actions/download-artifact@v1
with:
name: collection
path: .cache/collection-tarballs
- - name: Setup Sanity test Pre-requisites
+ - name: Install collection build
run: ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
- # run ansible-test sanity inside of Docker.
- # The docker container has all the pinned dependencies that are required
- # and all python versions ansible supports.
- - name: Run sanity tests
- run: ansible-test sanity --docker -v --color
- working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/openmanage
+ - name: Run Ansible lint
+ run: ansible-lint --show-relpath
+ working-directory: /home/runner/work/dellemc-openmanage-ansible-modules/dellemc-openmanage-ansible-modules
diff --git a/ansible_collections/dellemc/openmanage/.gitignore b/ansible_collections/dellemc/openmanage/.gitignore
index 545a00002..8b54a6b49 100644
--- a/ansible_collections/dellemc/openmanage/.gitignore
+++ b/ansible_collections/dellemc/openmanage/.gitignore
@@ -7,3 +7,7 @@ __pycache__/
.idea/
*.bak
rstdocgen.sh
+node_modules
+demo
+.vscode
+.coverage
diff --git a/ansible_collections/dellemc/openmanage/CHANGELOG.rst b/ansible_collections/dellemc/openmanage/CHANGELOG.rst
index 9e9af81fa..bb78b5c2d 100644
--- a/ansible_collections/dellemc/openmanage/CHANGELOG.rst
+++ b/ansible_collections/dellemc/openmanage/CHANGELOG.rst
@@ -1,10 +1,550 @@
-=================================================
-Dell EMC OpenManage Ansible Modules Release Notes
-=================================================
+=============================================
+Dell OpenManage Ansible Modules Release Notes
+=============================================
.. contents:: Topics
+v8.7.0
+======
+
+Release Summary
+---------------
+
+- Module to manage iDRAC licenses.
+- idrac_gather_facts role is enhanced to add storage controller details in the role output and provide support for secure boot.
+
+Major Changes
+-------------
+
+- idrac_gather_facts - This role is enhanced to support secure boot.
+- idrac_license - The module is introduced to configure iDRAC licenses.
+
+Minor Changes
+-------------
+
+- For idrac_gather_facts role, added storage controller details in the role output.
+
+Bugfixes
+--------
+
+- Fixed the issue with deploying a new configuration on a quick deploy slot when IPv6 is disabled. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/533)
+
+Known Issues
+------------
+
+- idrac_firmware - Issue(279282) - This module does not support firmware update using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+- idrac_network_attributes - Issue(279049) - If unsupported values are provided for the parameter ``ome_network_attributes``, then this module does not provide a correct error message.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_diagnostics - Issue(279193) - Export of SupportAssist collection logs to the share location fails on OME version 4.0.0.
+- ome_smart_fabric_uplink - Issue(186024) - Although the module is supported by OpenManage Enterprise Modular, it does not allow the creation of multiple uplinks of the same name. If an uplink is created with the same name as an existing uplink, then the existing uplink is modified.
+
+New Modules
+-----------
+
+- dellemc.openmanage.idrac_license - This module allows to import, export, and delete licenses on iDRAC.
+
+v8.6.0
+======
+
+Release Summary
+---------------
+
+- Added support for environment variables as a fallback for credentials in all iDRAC, OME, and Redfish modules.
+- Enhanced the idrac_certificates module and the idrac_certificate role to support `CUSTOMCERTIFICATE` and to import an `HTTPS` certificate with the SSL key.
+
+Major Changes
+-------------
+
+- All OME modules are enhanced to support the environment variables `OME_USERNAME` and `OME_PASSWORD` as a fallback for credentials.
+- All iDRAC and Redfish modules are enhanced to support the environment variables `IDRAC_USERNAME` and `IDRAC_PASSWORD` as a fallback for credentials.
+- idrac_certificates - The module is enhanced to support the import and export of `CUSTOMCERTIFICATE`.
+
+Minor Changes
+-------------
+
+- For the idrac_certificate role, added support for importing an `HTTPS` certificate with the SSL key.
+- For the idrac_certificates module, the following enhancements are made: added support for importing an `HTTPS` certificate with the SSL key, and made `email_address` an optional parameter.
+
+Bugfixes
+--------
+
+- Fixed the issue where the environment variable `NO_PROXY` was previously ignored. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/554)
+- For the idrac_certificates module, `email_address` is now an optional parameter. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/582).
+
+Known Issues
+------------
+
+- idrac_firmware - Issue(279282) - This module does not support firmware update using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+- idrac_network_attributes - Issue(279049) - If unsupported values are provided for the parameter ``ome_network_attributes``, then this module does not provide a correct error message.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_device_quick_deploy - Issue(275231) - This module does not deploy a new configuration to a slot that has disabled IPv6.
+- ome_diagnostics - Issue(279193) - Export of SupportAssist collection logs to the share location fails on OME version 4.0.0.
+- ome_smart_fabric_uplink - Issue(186024) - Although the module is supported by OpenManage Enterprise Modular, it does not allow the creation of multiple uplinks of the same name. If an uplink is created with the same name as an existing uplink, then the existing uplink is modified.
+
+v8.5.0
+======
+
+Release Summary
+---------------
+
+- Ansible lint issues are fixed for the collections.
+- redfish_storage_volume module is enhanced to support reboot options and job tracking operation.
+
+Minor Changes
+-------------
+
+- Ansible lint issues are fixed for the collections.
+- Module ``redfish_storage_volume`` is enhanced to support reboot options and job tracking operation.
+
+Bugfixes
+--------
+
+- ome_inventory - The plugin returns 50 results when a group is specified. No results are shown when a group is not specified. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/575).
+
+Known Issues
+------------
+
+- idrac_firmware - Issue(279282) - This module does not support firmware update using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+- idrac_network_attributes - Issue(279049) - If unsupported values are provided for the parameter ``ome_network_attributes``, then this module does not provide a correct error message.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_device_quick_deploy - Issue(275231) - This module does not deploy a new configuration to a slot that has disabled IPv6.
+- ome_diagnostics - Issue(279193) - Export of SupportAssist collection logs to the share location fails on OME version 4.0.0.
+- ome_smart_fabric_uplink - Issue(186024) - Although the module is supported by OpenManage Enterprise Modular, it does not allow the creation of multiple uplinks of the same name. If an uplink is created with the same name as an existing uplink, then the existing uplink is modified.
+
+v8.4.0
+======
+
+Release Summary
+---------------
+
+Module to manage iDRAC network attributes.
+
+Bugfixes
+--------
+
+- idrac_firmware - Issue(276335) - This module fails on Python 3.11.x with an NFS share. Use a different Python version or share type.
+- idrac_server_config_profile - The import for Server Configuration Profile (SCP) operation fails to handle the absence of a file and incorrectly reports success instead of the expected failure. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/544).
+
+Known Issues
+------------
+
+- ca_path missing - Issue(275740) - The roles idrac_attributes, redfish_storage_volume, and idrac_server_powerstate have a missing parameter ca_path.
+- idrac_firmware - Issue(279282) - This module does not support firmware update using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+- idrac_network_attributes - Issue(279049) - If unsupported values are provided for the parameter ``ome_network_attributes``, then this module does not provide a correct error message.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_device_quick_deploy - Issue(275231) - This module does not deploy a new configuration to a slot that has disabled IPv6.
+- ome_smart_fabric_uplink - Issue(186024) - Although the module is supported by OpenManage Enterprise Modular, it does not allow the creation of multiple uplinks of the same name. If an uplink is created with the same name as an existing uplink, the existing uplink is modified.
+
+New Modules
+-----------
+
+- dellemc.openmanage.idrac_network_attributes - This module allows you to configure the port and partition network attributes on the network interface cards.
+
+v8.3.0
+======
+
+Release Summary
+---------------
+
+- Module to manage OME alert policies.
+- Support for RAID6 and RAID60 for module ``redfish_storage_volume``.
+- Support for reboot type options for module ``ome_firmware``.
+
+Minor Changes
+-------------
+
+- Module ``ome_firmware`` is enhanced to support reboot type options.
+- Module ``redfish_storage_volume`` is enhanced to support RAID6 and RAID60.
+
+Bugfixes
+--------
+
+- ome_device_quick_deploy - If the blade is not present, then the module can assign a static IP to the slot (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/532).
+
+Known Issues
+------------
+
+- ca_path missing - Issue(275740) - The roles idrac_attributes, redfish_storage_volume, and idrac_server_powerstate have a missing parameter ca_path.
+- idrac_firmware - Issue(276335) - This module fails on Python 3.11.x with NFS shares. Use a different Python version or share type.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_device_quick_deploy - Issue(275231) - This module does not deploy a new configuration to a slot that has disabled IPv6.
+- ome_smart_fabric_uplink - Issue(186024) - Although the module is supported by OpenManage Enterprise Modular, it does not allow the creation of multiple uplinks of the same name. If an uplink is created with the same name as an existing uplink, the existing uplink is modified.
+
+New Modules
+-----------
+
+- dellemc.openmanage.ome_alert_policies - Manage OME alert policies.
+
+v8.2.0
+======
+
+Release Summary
+---------------
+
+- The redfish_firmware and ome_firmware_catalog modules are enhanced to support IPv6 addresses.
+- Module to support firmware rollback of server components.
+- Support for retrieving alert policies, actions, categories and message id information of alert policies for OME and OME Modular.
+- ome_diagnostics module is enhanced to update changed flag status in response.
+
+Minor Changes
+-------------
+
+- Module ``ome_diagnostics`` is enhanced to update changed flag status in response.
+- Module ``ome_firmware_catalog`` is enhanced to support IPv6 address.
+- Module ``redfish_firmware`` is enhanced to support IPv6 address.
+
+Bugfixes
+--------
+
+- Updated the documentation on how to use the collection with Ansible. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/393).
+
+Known Issues
+------------
+
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - Although the module is supported by OpenManage Enterprise Modular, it does not allow the creation of multiple uplinks of the same name. If an uplink is created with the same name as an existing uplink, the existing uplink is modified.
+
+New Modules
+-----------
+
+- dellemc.openmanage.ome_alert_policies_action_info - Get information on actions of alert policies.
+- dellemc.openmanage.ome_alert_policies_category_info - Retrieves information of all OME alert policy categories.
+- dellemc.openmanage.ome_alert_policies_info - Retrieves information of one or more OME alert policies.
+- dellemc.openmanage.ome_alert_policies_message_id_info - Get message ID information of alert policies.
+- dellemc.openmanage.redfish_firmware_rollback - To perform a component firmware rollback using component name.
+
+v8.1.0
+======
+
+Release Summary
+---------------
+
+- Support for subject alternative names while generating certificate signing requests on OME.
+- Create a user on iDRAC using custom privileges.
+- Create a firmware baseline on OME with the filter option of no reboot required.
+- Retrieve all server items in the output for ome_device_info.
+- Enhancement to add detailed job information for ome_discovery and ome_job_info.
+
+Minor Changes
+-------------
+
+- Module ``idrac_user`` is enhanced to configure custom privileges for a user (see the sketch below).
+- Module ``ome_application_certificate`` is enhanced to support subject alternative names.
+- Module ``ome_discovery`` is enhanced to add detailed job information of each IP discovered.
+- Module ``ome_firmware_baseline`` is enhanced to support the option to select only components with no reboot required.
+- Module ``ome_job_info`` is enhanced to return last execution details and execution histories.
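+
+A minimal sketch of the ``idrac_user`` custom-privilege enhancement noted above. The
+``custom_privilege`` option name and its numeric value are assumptions used for illustration;
+consult the module documentation for the supported range.
+
+.. code-block:: yaml
+
+   - hosts: localhost
+     connection: local
+     gather_facts: false
+     tasks:
+       - name: Create an iDRAC user with a custom privilege level
+         dellemc.openmanage.idrac_user:
+           idrac_ip: "{{ idrac_ip }}"
+           idrac_user: "{{ idrac_user }}"
+           idrac_password: "{{ idrac_password }}"
+           user_name: operator01
+           user_password: "{{ new_user_password }}"
+           custom_privilege: 217      # assumed value; see the module documentation for valid privileges
+           state: present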
+
+Bugfixes
+--------
+
+- The Chassis Power PIN value input from the module must be six numerical digits. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/492).
+- The idrac_attributes module now supports modification of IPv6 attributes on iDRAC 8. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/488).
+- ome_device_info is limited to 50 responses with a query filter. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/499).
+
+Known Issues
+------------
+
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+v8.0.0
+======
+
+Release Summary
+---------------
+
+Roles ``idrac_boot`` and ``idrac_job_queue`` are added to manage the boot order settings and the iDRAC lifecycle controller job queue, respectively. Role ``idrac_os_deployment`` is enhanced to remove the auto installation of required libraries and to support a custom ISO and kickstart file as input. Dropped support for iDRAC7-based Dell PowerEdge servers.
+
+Minor Changes
+-------------
+
+- All the module documentation and examples are updated to use true or false for Boolean values.
+- Role ``idrac_os_deployment`` is enhanced to remove the auto installation of required libraries and to support custom ISO and kickstart file as input.
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- The ``dellemc_get_firmware_inventory`` module is removed and replaced with the module ``idrac_firmware_info``.
+- The ``dellemc_get_system_inventory`` module is removed and replaced with the module ``idrac_system_info`` (see the migration sketch below).
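+
+For playbooks that still call the removed modules, a minimal migration sketch using the
+replacement modules named above (connection details are placeholders):
+
+.. code-block:: yaml
+
+   - hosts: localhost
+     connection: local
+     gather_facts: false
+     tasks:
+       - name: Retrieve the firmware inventory (replaces dellemc_get_firmware_inventory)
+         dellemc.openmanage.idrac_firmware_info:
+           idrac_ip: "{{ idrac_ip }}"
+           idrac_user: "{{ idrac_user }}"
+           idrac_password: "{{ idrac_password }}"
+
+       - name: Retrieve the system inventory (replaces dellemc_get_system_inventory)
+         dellemc.openmanage.idrac_system_info:
+           idrac_ip: "{{ idrac_ip }}"
+           idrac_user: "{{ idrac_user }}"
+           idrac_password: "{{ idrac_password }}"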
+
+Bugfixes
+--------
+
+- Job tracking is fixed for iDRAC SCP import (https://github.com/dell/dellemc-openmanage-ansible-modules/pull/504).
+- The undefined ``SNIMissingWarning`` import error from OMSDK is now handled (https://github.com/dell/omsdk/issues/33).
+
+Known Issues
+------------
+
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Roles
+---------
+
+- dellemc.openmanage.idrac_boot - Role to configure the boot order settings.
+- dellemc.openmanage.idrac_job_queue - Role to manage the iDRAC lifecycle controller job queue.
+
+v7.6.1
+======
+
+Release Summary
+---------------
+
+Removed the dependency on the community.general collection.
+
+Minor Changes
+-------------
+
+- Updated the idrac_gather_facts role to use Jinja template filters.
+
+Known Issues
+------------
+
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+v7.6.0
+======
+
+Release Summary
+---------------
+
+- Role to configure the iDRAC system, manager, and lifecycle attributes for Dell PowerEdge servers.
+- Role to modify BIOS attributes, clear pending BIOS attributes, and reset the BIOS to default settings.
+- Role to reset and restart iDRAC (iDRAC8 and iDRAC9 only) for Dell PowerEdge servers.
+- Role to configure the physical disk, virtual disk, and storage controller settings on iDRAC9 based PowerEdge servers.
+
+Known Issues
+------------
+
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Roles
+---------
+
+- dellemc.openmanage.idrac_attributes - Role to configure iDRAC attributes.
+- dellemc.openmanage.idrac_bios - Role to modify BIOS attributes, clear pending BIOS attributes, and reset the BIOS to default settings.
+- dellemc.openmanage.idrac_reset - Role to reset and restart iDRAC (iDRAC8 and iDRAC9 only) for Dell PowerEdge servers.
+- dellemc.openmanage.idrac_storage_controller - Role to configure the physical disk, virtual disk, and storage controller settings on iDRAC9 based PowerEdge servers.
+
+v7.5.0
+======
+
+Release Summary
+---------------
+
+- redfish_firmware - This module is enhanced to include job tracking.
+- ome_template - This module is enhanced to include job tracking.
+- Roles to support the iDRAC and Redfish firmware update and to manage the storage volume configuration are added.
+- The role to deploy the operating system through iDRAC is enhanced to support ESXi version 8.X and HTTP or HTTPS for the destination.
+
+Known Issues
+------------
+
+- idrac_os_deployment - Issue(260496) - OS installation supports only NFS and CIFS shares to store the custom ISO in the destination_path; HTTP, HTTPS, and FTP are not supported.
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- idrac_user - Issue(192043) - The module may error out with the message ``Unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again.
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Roles
+---------
+
+- dellemc.openmanage.idrac_firmware - Firmware update from a repository on a network share (CIFS, NFS, HTTP, HTTPS, FTP).
+- dellemc.openmanage.redfish_firmware - To perform a component firmware update using the image file available on the local or remote system.
+- dellemc.openmanage.redfish_storage_volume - Role to manage the storage volume configuration.
+
+v7.4.0
+======
+
+Release Summary
+---------------
+
+- Roles to import the server configuration profile, manage iDRAC power states, manage iDRAC certificates,
+  gather facts from iDRAC, and deploy an operating system are added.
+- The OME inventory plugin is enhanced to support environment variables for the input parameters.
+
+
+Known Issues
+------------
+
+- idrac_os_deployment - Issue(260496) - OS installation supports only NFS and CIFS shares to store the custom ISO in the destination_path; HTTP, HTTPS, and FTP are not supported.
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- idrac_user - Issue(192043) - The module may error out with the message ``Unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again.
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the following parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Roles
+---------
+
+- dellemc.openmanage.idrac_certificate - Role to manage the iDRAC certificates - generate CSR, import/export certificates, and reset configuration - for PowerEdge servers.
+- dellemc.openmanage.idrac_gather_facts - Role to gather facts from the iDRAC Server.
+- dellemc.openmanage.idrac_import_server_config_profile - Role to import iDRAC Server Configuration Profile (SCP).
+- dellemc.openmanage.idrac_os_deployment - Role to deploy specified operating system and version on the servers.
+- dellemc.openmanage.idrac_server_powerstate - Role to manage the different power states of the specified device.
+
+v7.3.0
+======
+
+Release Summary
+---------------
+
+Support for the iDRAC export Server Configuration Profile role, and for the proxy settings, import buffer, include in export, and ignore certificate warning options.
+
+Major Changes
+-------------
+
+- idrac_server_config_profile - This module is enhanced to support proxy settings, import buffer, include in export, and ignore certificate warning (see the sketch below).
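+
+A minimal sketch of an SCP export through a proxy based on the enhancement above. The
+``proxy_*`` option names and values are assumptions; verify them against the
+``idrac_server_config_profile`` documentation before use.
+
+.. code-block:: yaml
+
+   - hosts: localhost
+     connection: local
+     gather_facts: false
+     tasks:
+       - name: Export the server configuration profile to an HTTP share through a proxy
+         dellemc.openmanage.idrac_server_config_profile:
+           idrac_ip: "{{ idrac_ip }}"
+           idrac_user: "{{ idrac_user }}"
+           idrac_password: "{{ idrac_password }}"
+           command: export
+           share_name: "http://192.0.2.10/scp-share"
+           scp_components: ALL
+           export_format: JSON
+           proxy_support: true        # assumed option name and value
+           proxy_server: 192.0.2.1
+           proxy_port: 3128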
+
+Known Issues
+------------
+
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- idrac_user - Issue(192043) - The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again.
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_inventory - Issue(256257) - All hosts are not retrieved for ``Modular System`` group and corresponding child groups.
+- ome_inventory - Issue(256589) - All hosts are not retrieved for ``Custom Groups`` group and corresponding child groups.
+- ome_inventory - Issue(256593) - All hosts are not retrieved for ``PLUGIN GROUPS`` group and corresponding child groups.
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Roles
+---------
+
+- dellemc.openmanage.idrac_export_server_config_profile - Role to export iDRAC Server Configuration Profile (SCP).
+
+v7.2.0
+======
+
+Release Summary
+---------------
+
+Support for retrieving the inventory and host details of all child groups using parent groups, retrieving inventory of System and Plugin Groups, retrieving profiles with attributes, retrieving network configuration of a template, configuring controller attributes, configuring online capacity expansion, and importing the LDAP directory.
+
+Major Changes
+-------------
+
+- idrac_redfish_storage_controller - This module is enhanced to configure controller attributes and online capacity expansion.
+- ome_domain_user_groups - This module allows importing the LDAP directory groups.
+- ome_inventory - This plugin is enhanced to support inventory retrieval of System and Plugin Groups of OpenManage Enterprise.
+- ome_profile_info - This module allows retrieving profiles with attributes on OpenManage Enterprise or OpenManage Enterprise Modular.
+- ome_template_network_vlan_info - This module allows retrieving the network configuration of a template on OpenManage Enterprise or OpenManage Enterprise Modular (see the sketch below).
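+
+A minimal sketch of the two retrieval modules noted above; the ``template_id`` selector
+name is an assumption, and the connection details are placeholders.
+
+.. code-block:: yaml
+
+   - hosts: localhost
+     connection: local
+     gather_facts: false
+     tasks:
+       - name: Retrieve all profiles with attribute details
+         dellemc.openmanage.ome_profile_info:
+           hostname: "{{ ome_hostname }}"
+           username: "{{ ome_username }}"
+           password: "{{ ome_password }}"
+
+       - name: Retrieve the network configuration of a template
+         dellemc.openmanage.ome_template_network_vlan_info:
+           hostname: "{{ ome_hostname }}"
+           username: "{{ ome_username }}"
+           password: "{{ ome_password }}"
+           template_id: 12            # assumed selector; a template name based option may also exist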
+
+Known Issues
+------------
+
+- idrac_redfish_storage_controller - Issue(256164) - If an incorrect value is provided for one of the attributes in the provided attribute list for controller configuration, then this module does not exit with an error.
+- idrac_user - Issue(192043) - The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again.
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_inventory - Issue(256257) - All hosts are not retrieved for ``Modular System`` group and corresponding child groups.
+- ome_inventory - Issue(256589) - All hosts are not retrieved for ``Custom Groups`` group and corresponding child groups.
+- ome_inventory - Issue(256593) - All hosts are not retrieved for ``PLUGIN GROUPS`` group and corresponding child groups.
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Modules
+-----------
+
+- dellemc.openmanage.ome_profile_info - Retrieves profiles with attribute details.
+- dellemc.openmanage.ome_template_network_vlan_info - Retrieves the network configuration of a template.
+
+v7.1.0
+======
+
+Release Summary
+---------------
+
+Support for retrieving smart fabric and smart fabric uplink information and support for IPv6 address for OMSDK dependent iDRAC modules.
+
+Major Changes
+-------------
+
+- Support for IPv6 address for OMSDK dependent iDRAC modules.
+- ome_inventory - This plugin allows creating an inventory from the groups on OpenManage Enterprise (see the sketch below).
+- ome_smart_fabric_info - This module retrieves the list of smart fabrics in the inventory of OpenManage Enterprise Modular.
+- ome_smart_fabric_uplink_info - This module retrieves details of fabric uplinks on OpenManage Enterprise Modular.
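+
+A minimal inventory source sketch for the ``ome_inventory`` plugin noted above. Only the
+``plugin`` key is certain here; the remaining option names are assumptions to be checked
+against the plugin documentation.
+
+.. code-block:: yaml
+
+   # demo.ome_inventory.yml - inventory source file
+   plugin: dellemc.openmanage.ome_inventory
+   hostname: 192.0.2.20                 # assumed option name
+   username: admin                      # assumed option name
+   password: ome_password_placeholder   # assumed option name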
+
+Minor Changes
+-------------
+
+- redfish_firmware - This module now supports a timeout option.
+
+Known Issues
+------------
+
+- idrac_firmware - Issue(249879) - Firmware update of iDRAC9-based Servers fails if SOCKS proxy with authentication is used.
+- idrac_user - Issue(192043) - The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again.
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Plugins
+-----------
+
+Inventory
+~~~~~~~~~
+
+- dellemc.openmanage.ome_inventory - Group inventory plugin on OpenManage Enterprise.
+
+New Modules
+-----------
+
+- dellemc.openmanage.ome_smart_fabric_info - Retrieves the information of smart fabrics inventoried by OpenManage Enterprise Modular.
+- dellemc.openmanage.ome_smart_fabric_uplink_info - Retrieves details of fabric uplinks on OpenManage Enterprise Modular.
+
+v7.0.0
+======
+
+Release Summary
+---------------
+
+Rebranded from Dell EMC to Dell, enhanced idrac_firmware module to support proxy, and added support to retrieve iDRAC local user details.
+
+Major Changes
+-------------
+
+- Rebranded from Dell EMC to Dell.
+- idrac_firmware - This module is enhanced to support a proxy (see the sketch below).
+- idrac_user_info - This module allows retrieving iDRAC Local user details.
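+
+A minimal sketch combining the two enhancements above: a catalog-based firmware update through a
+proxy, followed by local user retrieval. The ``proxy_*`` option names and choices are assumptions;
+the share and proxy addresses are placeholders.
+
+.. code-block:: yaml
+
+   - hosts: localhost
+     connection: local
+     gather_facts: false
+     tasks:
+       - name: Update firmware from an online repository through a proxy
+         dellemc.openmanage.idrac_firmware:
+           idrac_ip: "{{ idrac_ip }}"
+           idrac_user: "{{ idrac_user }}"
+           idrac_password: "{{ idrac_password }}"
+           share_name: "https://downloads.dell.com"
+           catalog_file_name: Catalog.xml
+           apply_update: true
+           reboot: true
+           proxy_support: ParametersProxy   # assumed option name and choice
+           proxy_server: 192.0.2.1
+           proxy_port: 3128
+
+       - name: Retrieve iDRAC local user details
+         dellemc.openmanage.idrac_user_info:
+           idrac_ip: "{{ idrac_ip }}"
+           idrac_user: "{{ idrac_user }}"
+           idrac_password: "{{ idrac_password }}"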
+
+Known Issues
+------------
+
+- idrac_firmware - Issue(249879) - Firmware update of iDRAC9-based Servers fails if SOCKS proxy with authentication is used.
+- idrac_user - Issue(192043) - The module may error out with the message ``unable to perform the import or export operation because there are pending attribute changes or a configuration job is in progress``. Wait for the job to complete and run the task again.
+- ome_application_alerts_syslog - Issue(215374) - The module does not provide a proper error message if the destination_address is more than 255 characters.
+- ome_device_network_services - Issue(212681) - The module does not provide a proper error message if unsupported values are provided for the parameters: port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+- ome_device_power_settings - Issue(212679) - The module displays the following message if the value provided for the parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to complete the request because PowerCap does not exist or is not applicable for the resource URI.``
+- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation of multiple uplinks of the same name even though it is supported by OpenManage Enterprise Modular. If an uplink is created using the same name as an existing uplink, the existing uplink is modified.
+
+New Modules
+-----------
+
+- dellemc.openmanage.idrac_user_info - Retrieve iDRAC Local user details.
+
v6.3.0
======
@@ -315,7 +855,7 @@ Support to provide custom or organizational CA signed certificate for SSL valida
Major Changes
-------------
-- All modules can read custom or organizational CA signed certificate from the environment variables. Please refer to `SSL Certificate Validation <https://github.com/dell/dellemc-openmanage-ansible-modules#ssl-certificate-validation>`_ section in the `README.md <https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/README.md#SSL-Certificate-Validation>`_ for modification to existing playbooks or setting environment variable.
+- All modules can read custom or organizational CA signed certificate from the environment variables. Please refer to `SSL Certificate Validation <https://github.com/dell/dellemc-openmanage-ansible-modules#ssl-certificate-validation>`_ section in the `README.md <https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/README.md#SSL-Certificate-Validation>`_ for modification to existing playbooks or setting environment variable.
Bugfixes
--------
@@ -854,4 +1394,4 @@ v2.1.0
Release Summary
---------------
-The `Dell EMC OpenManage Ansible Modules <https://github.com/dell/dellemc-openmanage-ansible-modules>`_ are available on Ansible Galaxy as a collection.
+The `Dell OpenManage Ansible Modules <https://github.com/dell/dellemc-openmanage-ansible-modules>`_ are available on Ansible Galaxy as a collection.
diff --git a/ansible_collections/dellemc/openmanage/FILES.json b/ansible_collections/dellemc/openmanage/FILES.json
index 09db209d0..db8150b4c 100644
--- a/ansible_collections/dellemc/openmanage/FILES.json
+++ b/ansible_collections/dellemc/openmanage/FILES.json
@@ -8,6 +8,20 @@
"format": 1
},
{
+ "name": ".ansible-lint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1faa2fe6cb8f8029a0aae03332dc309e5144309c0a7ae2df24c25727e6f70cf",
+ "format": 1
+ },
+ {
+ "name": ".ansible-lint-ignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b4cb17a21587a17f096001e3cbd9e8b019a667eb724cf01396a35ff5bad2558",
+ "format": 1
+ },
+ {
"name": ".github",
"ftype": "dir",
"chksum_type": null,
@@ -74,21 +88,21 @@
"name": ".github/workflows/ansible-test.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ecbc3e86c750323e17fbb6b19e901e96126a8473bde151d7c37ce99317c3ebcc",
+ "chksum_sha256": "24f87fbc03181be62e4cedf4a463ea5eaab0b5bd4ec68d762a2647015712cd5a",
"format": 1
},
{
"name": ".gitignore",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7425e318ae94763315621de05efc6ebf3386a7ba4df51a8421399931e4c9d559",
+ "chksum_sha256": "3f8be16c6a53d1a7c659b0a9803866e10395d5fa466d557f8e8089e3a26cf0c3",
"format": 1
},
{
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "33d93cd495acc97b2e672a688db5b7137217a3940c8294d1549e3544198f1931",
+ "chksum_sha256": "0b095556983a9c9b8a231db06b80b6b529483932af3c6978be58d7b53e9dcc56",
"format": 1
},
{
@@ -102,7 +116,14 @@
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "39f289e03218c0a4781c684393bf036c8581169ac7e5bc8b778fc8258959e350",
+ "chksum_sha256": "f780a795a434441926ca75949a5675e2f7d2f40378e87d4ac6e918b70e6dd312",
+ "format": 1
+ },
+ {
+ "name": "bindep.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76c27d1138b5331a2bfb3b8b7e92c70cc39fd1fe793a48117b54ce2649cd7b1f",
"format": 1
},
{
@@ -116,21 +137,21 @@
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2a1f1aeaa0094c7ae47c9210c9fd5ccca2f3cbc78e3da83bd6164ec847ed9757",
+ "chksum_sha256": "a52aa92b1346f5c730d603464c8f20375eaead759c3524f64cd876eea4f3ecc4",
"format": 1
},
{
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f81e61db5decb63a54e21faa199199646405a20d8f40ec0e74aa2c8493ba22d1",
+ "chksum_sha256": "a90add93498588062af0df9b8d61283c97523cd1d53e278a5f82826d90bd0b54",
"format": 1
},
{
"name": "changelogs/config.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8af435bbe577b80a5fda86567dcac849e7762f04a7251e029833b3f073272d84",
+ "chksum_sha256": "c00012f627317aec0a7b0b4db3da8c43eb7eb63188eb3f7ee8c2319692aab2d7",
"format": 1
},
{
@@ -186,14 +207,14 @@
"name": "docs/DOCUMENTATION.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d3fd57baac6fe401783ff5171a9eeb304514bc4b78c7b1182adeb3d0eb3be9ea",
+ "chksum_sha256": "d79a78be68eeb1be24c29a4d65bb253a1c90f43eaefc661d402e3bab0b5b37f3",
"format": 1
},
{
"name": "docs/EXECUTION_ENVIRONMENT.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e3129e2e646856ae56581aee0eae12edd28f4cef976e5095449905f34d5f989",
+ "chksum_sha256": "f2e0b86dc19b76449e3bee834de2cadf3df5155623884753da9b03ae69b5b223",
"format": 1
},
{
@@ -214,7 +235,7 @@
"name": "docs/README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9e55574f9fc130356178fd08b9c0a0bd0c678542b3f4d51fe27f0df6025174e8",
+ "chksum_sha256": "8be88ea146508ad7565154a1fb86d5d52f69f55e2ef0eca32402083a1e8e14fc",
"format": 1
},
{
@@ -242,560 +263,637 @@
"name": "docs/modules/dellemc_configure_idrac_eventing.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e7dc08f312c63d95f824a9326c084cf5839f89dd87a29aea765009ea08913a79",
+ "chksum_sha256": "15a235619160acc869ef11202c81b3d702762fe22e7796a8f7dd2f09f8fe2036",
"format": 1
},
{
"name": "docs/modules/dellemc_configure_idrac_services.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f0c959f50de0a8fa989f419fae45c05f48083e96a7a6f22f5d1411ba2a9b4240",
- "format": 1
- },
- {
- "name": "docs/modules/dellemc_get_firmware_inventory.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30e7d15f0b351cfb57213e57624f40c1e8600ee91de9ffb7263f66c14dd778c8",
- "format": 1
- },
- {
- "name": "docs/modules/dellemc_get_system_inventory.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "605a7522a3d1afca04f6dba379db42b1c32fe2abdc9dbcb5b3dcbe7a69407f41",
+ "chksum_sha256": "c5da2d61f8d536f4835cad6c0b11619759d47177e8121c5f52a605ae6698239d",
"format": 1
},
{
"name": "docs/modules/dellemc_idrac_lc_attributes.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7c3ef5e5e65f160201344d5f1e9793097bd0a0697ad13f531b01c0d0da5c3e2a",
+ "chksum_sha256": "0be40c9b7e8fe75321a78edeac8f5d8e38c425ea93b85a2da535de9e6615a851",
"format": 1
},
{
"name": "docs/modules/dellemc_idrac_storage_volume.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e8e2d75172282d6c4d469d615751fa32230be32036cddd2795d61461124e755b",
+ "chksum_sha256": "8c37ba05337b65851c5996643e77bf4c4e29165693f840e39c8b4e837c70ca1b",
"format": 1
},
{
"name": "docs/modules/dellemc_system_lockdown_mode.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d3beff0c42a0ce26a45a8ee4a84d078793a413a4dbae424be2a4f07fdb5ba5ca",
+ "chksum_sha256": "2868bb81a87c50614180c0615c9b3203ba6fdbca3401b447e6feee7f55e78ca4",
"format": 1
},
{
"name": "docs/modules/idrac_attributes.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf4f956744aba6400dbf4befc937d2ff427deb864106dfb926eecba5952eecd1",
+ "chksum_sha256": "595a9dd30f4ba4e0a770c5be1df3a46031bf16435b7b3c402d192d2fee03f9dd",
"format": 1
},
{
"name": "docs/modules/idrac_bios.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "39acaa62f5d57ed094fcb0806ef32a223c90ed18ad2d24ad346b4c6faca07de0",
+ "chksum_sha256": "f6f031a7525fb6bd815beb1b5046f439b8d77b0fc196ea5883ee750b625f9839",
"format": 1
},
{
"name": "docs/modules/idrac_boot.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f92a3f16c33c26c60d13c6b7f04805a5a54fb62e768f0316a3ae94bae4856e61",
+ "chksum_sha256": "ecd08d41158980bb9feb767b6bb8c1ec198d18835d2f740be86ba0509810c864",
"format": 1
},
{
"name": "docs/modules/idrac_certificates.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d2e7fa82293b4ad1657835f3b662b26fd6e88477d58b2f162475117ac84b3fc3",
+ "chksum_sha256": "10f8fcb9881c004e3fb57910fc5abb181c4b64b10a6c517ad71fc9f5fcdf1989",
"format": 1
},
{
"name": "docs/modules/idrac_firmware.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e529552080ae3d94468acbc1e4fef21eb7d2e90924824dd1b07efb96ad447986",
+ "chksum_sha256": "cdab71f3484273cc4cb5102504a66c4e192315bbbdae56f49f62a56a956e4330",
"format": 1
},
{
"name": "docs/modules/idrac_firmware_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1be081797ba06209876b64831d73289a0593b8fd623324b297c770e2f5e49b2a",
+ "chksum_sha256": "7721e7d37e42c9c633a151701459650ffea7ec5473fb60cab1baea0dabd5cbb3",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/idrac_license.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf8bb22e1c12abed8dfe74e3eb68d33ff2f5dfadde8c3336e00272e6920078c7",
"format": 1
},
{
"name": "docs/modules/idrac_lifecycle_controller_job_status_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5235511d649d1dae82d844adcc9e23ad518c2ba036fca74d2961209ff8a6c1f0",
+ "chksum_sha256": "acc28fe56cb9951a42e2a509ed99904697406a3040d4b40e87e5aa87b0a64ee8",
"format": 1
},
{
"name": "docs/modules/idrac_lifecycle_controller_jobs.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "44ffce0f2130bd614527d649dc558299435f8dcdcc92b34786c8e642a06cfbeb",
+ "chksum_sha256": "cb9ae6a91089f33685ff3774ee97ace1e4cff6eb393d9456666a35f19db3d142",
"format": 1
},
{
"name": "docs/modules/idrac_lifecycle_controller_logs.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8179c7953a0bcf51cffc41820621a51e4e3c6396aa6c0cfd36d02edb5df0d3ab",
+ "chksum_sha256": "2da2f71d6dd23d5feed5e67935caf3de9fc1f37fbec3f390ce09c04a01be3082",
"format": 1
},
{
"name": "docs/modules/idrac_lifecycle_controller_status_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "48f9d4b1674f6d3c51c48331ac174be17e54fc397541431d644d4733d6e0be03",
+ "chksum_sha256": "68af30f4831b0eda2e449b16c60c11b93c542432bf1745ee3a77c320f61e56c4",
"format": 1
},
{
"name": "docs/modules/idrac_network.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f9bef17251a619d1b5a5ae58a3f2c80ea00fa81b99bb27bbe84a1599f0131173",
+ "chksum_sha256": "0d0088371f009e142aec401b6e9233e200ac68c2d5986e2c2369e8eb8424a306",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/idrac_network_attributes.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd8281ba848e85640dfb98c566358c8d258168c999098c457ecca0d1c045ad32",
"format": 1
},
{
"name": "docs/modules/idrac_os_deployment.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3147488b8b3f658defd0731694961fd8d39c4cd1dafa53916f23a3ff5407bd5c",
+ "chksum_sha256": "7b7ddf54fe21917b967f0ffd6bd81e05f799ce8ae356e150dd4c317a65b22d23",
"format": 1
},
{
"name": "docs/modules/idrac_redfish_storage_controller.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c540294d86abb08227e787e97062281e8af81d4eadafdbabfef5bd39ce44d016",
+ "chksum_sha256": "cd01363e3a17bbc7a0210fd2c7402ce4cc66ec6a0d786d9a0569daf9bfa877c3",
"format": 1
},
{
"name": "docs/modules/idrac_reset.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "888b687468837582fbe609b1a0329dbe9c1acb4703020b4270ef022441f72047",
+ "chksum_sha256": "4f08e626ca74cd6d0dab35f749fcd6878e90118a4d1053ebf45e45685fd0dc8d",
"format": 1
},
{
"name": "docs/modules/idrac_server_config_profile.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1ba24f4e1ad7a537fa6f7ff6491c72c8a4726dfcc30e4a2bd3f84a60ca6e1a77",
+ "chksum_sha256": "3163bc9948b760abb5c2eb6e5af336417c5a1fb2296f795d1c46db4676de674c",
"format": 1
},
{
"name": "docs/modules/idrac_syslog.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "01ef3f80fa0325f3a645b3547ebb4bdb93049009c098d6b370751cc6a5c4818b",
+ "chksum_sha256": "7230d44ecb0bb2901ff2e42191b6a4a59e471c258f0cd930ae98065bd2f61123",
"format": 1
},
{
"name": "docs/modules/idrac_system_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f1a06be853fc394c2cab75d0a7ea0b51e5a4844bff757b46eb7e27cd133a043b",
+ "chksum_sha256": "c69ff9e0600ac8b497816051f7ff2c59e434b15cd89c8456a30065923900380a",
"format": 1
},
{
"name": "docs/modules/idrac_timezone_ntp.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a81533bcc3786ccdce796e3220377693cef67afdd4bb825d3f5c272624fcf986",
+ "chksum_sha256": "e02646c6841f8212f8869ac5c125e9daf54038c78ef86388946023ab1479bfe9",
"format": 1
},
{
"name": "docs/modules/idrac_user.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d488071ac6f8ee3fa1c062ec2e8bd0f0031dd48077a14aaa7562df5c7a716f0c",
+ "chksum_sha256": "9282d3b21e78c2282d625e7ef011ccbf10f988855c5bf0e58408a179135360e2",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/idrac_user_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dbfd54f283705a64cc6a84da5dbfba7673d8d12e0a1a49cfa01f8dbfd376958b",
"format": 1
},
{
"name": "docs/modules/idrac_virtual_media.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "48cdb83e815c19a5cb03a3d9a58b7c299aa1a79df02a3fca3a0dbf74e100092d",
+ "chksum_sha256": "b7a27e6acb73cf51ab56187d2d1f10810fb5c9721ec1faae4d3d003fd72244e4",
"format": 1
},
{
"name": "docs/modules/ome_active_directory.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "268326a562c7f86cc156fab384d249adc48ce528440f70fc6d34cd922409e374",
+ "chksum_sha256": "550f341e2ef2ec6078ca235bde879963c7bf97185824f9b7d21e7034ff6177ee",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_alert_policies.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cc865e63694540cecbd0212211fdd061a39bef5eb1405087e5e091dba8955289",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_alert_policies_actions_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4d6f52e980cb1836d1ca7c5352bf17c04acc5462fbd949bdd46b0f80b575509",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_alert_policies_category_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4b8d6a58969d476a4699531df99e0a2137437a65f8564b722e99618d7f74fc3",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_alert_policies_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a202372a436597ad619380b934da694ad1a1d175566ebfcfb03e177ae9a8d142",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_alert_policies_message_id_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "428d10ae4bd13d877782495244086220166d69a939d99b112ac09c03bd787eaf",
"format": 1
},
{
"name": "docs/modules/ome_application_alerts_smtp.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "432872f2f50db4c30275de9493002ef4f4cf8a100515933565a4b5b1e164b1f2",
+ "chksum_sha256": "a00fbbcff1e1de06909d434ad763b6f1497e9acaf617041a9f5de38f0c44a21b",
"format": 1
},
{
"name": "docs/modules/ome_application_alerts_syslog.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e50fff16a925677caf682764f2c042c24bdf54df3739515a5bd8b93fcdf559d3",
+ "chksum_sha256": "18605643bc8d76ca3804e19686f96f0450abe023a0914e587be8434812d5c2e6",
"format": 1
},
{
"name": "docs/modules/ome_application_certificate.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "716271a0b867c3196027821f7263cf48bbf84c5e9721eda0a1c3f60adab115ab",
+ "chksum_sha256": "788a9fd904afaa8824f07b0668d0b5d04acb05db3ad81c47fe3f671dc2405592",
"format": 1
},
{
"name": "docs/modules/ome_application_console_preferences.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4ffe9ecf5bf8ebe75da17bc6052e420d90ab78e7a24ee822783140cdfda7065a",
+ "chksum_sha256": "ca4376734fb3ae51179cb231bb2e8a0b6e128056f277a75a00fa00d5dcbe0248",
"format": 1
},
{
"name": "docs/modules/ome_application_network_address.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "00ae99a219dcbcd8e828ac36f8613fb11fd838bbc81f65c9ca836434bb88018c",
+ "chksum_sha256": "b9b03c1fa75d1bb56a672dd5b59d0736a33580f5e6c51f4d0707f1cb27812ce6",
"format": 1
},
{
"name": "docs/modules/ome_application_network_proxy.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "de03d8f58756723fb93199312cdd6b2da4ebfda2447ed3cb34d009275e8ced66",
+ "chksum_sha256": "b3ad1795dd6d386e0ec41d6a30af7f35d6abff67c25f84a938b22d5cb56de4d3",
"format": 1
},
{
"name": "docs/modules/ome_application_network_settings.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a9cd87b6e95d8692f3e351ae92641aa005966c18fde6922994060735ac37235e",
+ "chksum_sha256": "913d8eb24f87c9a6067713fa4de41b6b48f9961831feb38e5829370f84ce6998",
"format": 1
},
{
"name": "docs/modules/ome_application_network_time.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "58b88646e11c35d898cda6e9223d86fdd0aad2b541e216c62cbbca64100a8ff5",
+ "chksum_sha256": "5f774a1362558799886f5b90831219e7136a0286a335d9c3f2a9924c687b9a56",
"format": 1
},
{
"name": "docs/modules/ome_application_network_webserver.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d59a6f47e46c6ffd8ba4fb5b7f78fc1747124e299641e4667372afb3657de8df",
+ "chksum_sha256": "d2fae7e588d022a7ecbc50be03fbf84eb38e41f49933bb1d5fdd1fc6a398a76a",
"format": 1
},
{
"name": "docs/modules/ome_application_security_settings.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f6954d37ac2c89de427b3fd076a7e3cc80f2c43a22a11cbaac346034931ed340",
+ "chksum_sha256": "4ea4e898cab891699e9006122e6a8ba9a70d20c299da60742ff397a6916137f4",
"format": 1
},
{
"name": "docs/modules/ome_chassis_slots.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c14e2b5f0bc7ce3df14a190d857dac3c90eea09585d0d242181ea4dbfdac914b",
+ "chksum_sha256": "7c4d578297728a799abe5ffd7dba313380895513a565107438a17a55ee431866",
"format": 1
},
{
"name": "docs/modules/ome_configuration_compliance_baseline.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "27b77e4b2c33b96aff213f762bd43f3867311d5ff959de456bf028900fbaa333",
+ "chksum_sha256": "7715d2c934990d7048614a5fcd3319e79bedcb9a0cbbc3632b2f56fff566dba1",
"format": 1
},
{
"name": "docs/modules/ome_configuration_compliance_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "585af29399c6395d976a6039d931cb7c159df5334d59ef2b9a170db119a8f250",
+ "chksum_sha256": "3354f92e90c63162cd9e77c0d125a47e9c831b056ec44dfb2056a305e93a0891",
"format": 1
},
{
"name": "docs/modules/ome_device_group.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8c1fceaa89a11c61408b78a964b59b5350a9eb7cc188703c866cf0cb653ac64b",
+ "chksum_sha256": "558d22aaf32ef3df83d9da91a3e6c0b4d3f07d2ceeedd5e3879156dcfe1a0d55",
"format": 1
},
{
"name": "docs/modules/ome_device_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "aba861ece6349eacae599821f44e863abadd45b155c440f5c3a28b319afe164a",
+ "chksum_sha256": "053b25f1b3afd23790b7f8fdbfe965422848f7da3fd28a864e0b63e469daa386",
"format": 1
},
{
"name": "docs/modules/ome_device_local_access_configuration.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "234109a943b1157c8583226757f1f2c592bba0a8e1b9ed22ea3a2a5b26abdfb7",
+ "chksum_sha256": "6a7cc7259449e562a31a750dcb902c30af50ef1416f53c4ec4bcc3c143221443",
"format": 1
},
{
"name": "docs/modules/ome_device_location.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3f3fd616d9cb1f3587a31efe2be8e604edb1043de5dd07e005af2f7b627c2039",
+ "chksum_sha256": "ef6e85e39bfc4de890bcf9168816ec761aaa2692b850f634e1286a329ccef81d",
"format": 1
},
{
"name": "docs/modules/ome_device_mgmt_network.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7323a62e564f3990ca74c34179afefaebb264d313b1799888620ce236532455e",
+ "chksum_sha256": "1b887414a02438629a3acc83ecd225f6573293cf81aff71215c9e7a47b7a83fb",
"format": 1
},
{
"name": "docs/modules/ome_device_network_services.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f6e7cdd552c4910c2a2dcc3b659d3b1176d9a4e404f76d409d97ad66c833b314",
+ "chksum_sha256": "61099f10bb3e80e56332bacf60f4f6dda3be03a0d5bae8c9945de89b2ae98ef1",
"format": 1
},
{
"name": "docs/modules/ome_device_power_settings.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "61f69b8173d714735cee021295d212cd3e4d82ce3e8f43668396dad6e719005d",
+ "chksum_sha256": "515913e4e4658681b8cc5cf7da7fd70112d64a74acacb1ed3539a10b0e3dc34e",
"format": 1
},
{
"name": "docs/modules/ome_device_quick_deploy.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "acad4ef2ecbd4ca878f1f3c65779cba67cdba7ba5a323ce9645ccfd34a15aa8c",
+ "chksum_sha256": "e98c65583cb4ca7fdddebf1f33b91ffdc2f00fdf3b0dcc67782551e8cac0b577",
"format": 1
},
{
"name": "docs/modules/ome_devices.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d951d5e26fd41565b879285a9492a9a625070152fa3885549d407df56ca870d3",
+ "chksum_sha256": "b3a493600c1706800ff504b61724823017744ff0336509b0edf555957105e9c0",
"format": 1
},
{
"name": "docs/modules/ome_diagnostics.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a209635ba378e6725e75f001d26ed19438172e04f6a76bf5ccdbf504051c4498",
+ "chksum_sha256": "a6ac0f361b89eb6beb02b3288b1b11dbd263d6a2ef624cd1d564872a1df96433",
"format": 1
},
{
"name": "docs/modules/ome_discovery.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a69136c164915d893ab8068517c0c048db4cb5fc06bfa602f2f2b2a9f8c05f82",
+ "chksum_sha256": "fb562eaadab71e87fedaf360f12cfb9b5676b43fdd5584fd61d53ec61c38280c",
"format": 1
},
{
"name": "docs/modules/ome_domain_user_groups.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6bfadb6b820c870834a370f7ffb6e971e8758adc58c6eb7078a9141fb0b56f2f",
+ "chksum_sha256": "e6f655e0ac9dfe1fafaf7ea9e9e9cdbebe2c2efa5c314852427376c3b1775f7e",
"format": 1
},
{
"name": "docs/modules/ome_firmware.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "332ec42546f08695245525769a4dc5e45c7096bda5d2b6d4eb99807b1038fcf9",
+ "chksum_sha256": "ddee2a12586396c8678b11cc1ae1e1dfe2a04c87e43f4c31325d16ddd78d4423",
"format": 1
},
{
"name": "docs/modules/ome_firmware_baseline.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4967891ecf5d69206d3b9d0c60f7f2eaaf450b9ade4ca902858d34b45e00c153",
+ "chksum_sha256": "a4d5b803ac5016911fd4bc3ad926861e93bc73eee282a40bea3dd2a55158c15d",
"format": 1
},
{
"name": "docs/modules/ome_firmware_baseline_compliance_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "864f64325e318d0f15b329795d4b58128347105a6800d99d9a85edfcbbadfe97",
+ "chksum_sha256": "0811710e22209c9e995c81a9d263cacf9639d33c3ea3f8c2d9bb0fe40e90f102",
"format": 1
},
{
"name": "docs/modules/ome_firmware_baseline_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc83bb2f009bedce749480f16dfabb8fc5e9371b44591be107eee71c26154c80",
+ "chksum_sha256": "735740831ade31895dbfa49a19cddf12d2d82d3836fc35bfa6e295c34a4f4170",
"format": 1
},
{
"name": "docs/modules/ome_firmware_catalog.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "279326b24c52170adb5dff1ce923c47a15a2560dd50b20ce7a546096f6bf384c",
+ "chksum_sha256": "04484a34dec1e25ef3444e0258b612f71aab29bbbee70c07145740c00ac760fe",
"format": 1
},
{
"name": "docs/modules/ome_groups.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a096b583ae88c5bc5e6c2be3baad7cd60cf3843c4f932b1970660ad753f8fbf1",
+ "chksum_sha256": "aaaf374a259564074eced1eefc734fd65c19803f8a4557262f78aefc30adcc41",
"format": 1
},
{
"name": "docs/modules/ome_identity_pool.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "84c0f4be18d5e3247a88d0860376d533fba33fba2f7b5c0660c2407c7ee32b2d",
+ "chksum_sha256": "6a0a40cc5aefcdd5d4683474015e5bfac0a7902b8fffd1cf79ad1f1f45f105b2",
"format": 1
},
{
"name": "docs/modules/ome_job_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3a33363116c9d992d3c556898c239a01dd48f070c1a7dc0ea4493bb22b0c1f9a",
+ "chksum_sha256": "58c1431a6b58bf1e0c14fe1c461b71887456679aa307a47b9afa7530f024d3c8",
"format": 1
},
{
"name": "docs/modules/ome_network_port_breakout.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ee0535da07d00419882e821ae00cea6cfe28d9bfae9217e2673166e06059fa0c",
+ "chksum_sha256": "2dbfb63ba6660e46088091c3b606befd0a32b9ccacc1455861d973afea3c605c",
"format": 1
},
{
"name": "docs/modules/ome_network_vlan.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "914c7bd96c695211a571dc33119df25ebb50aeb33182cbc60968959317e25ecd",
+ "chksum_sha256": "b586d89ff753eb1590f1b320739791d08b5fdb84f2cd641e91e56d9d275dded1",
"format": 1
},
{
"name": "docs/modules/ome_network_vlan_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a99a0a50385c7111edf89d20581736f0b4b058e931d25b1a47532ea94daa002e",
+ "chksum_sha256": "051c30fcff66703db65f1affd6b5d0694b1624f201ba31fc360d75c66d175a9f",
"format": 1
},
{
"name": "docs/modules/ome_powerstate.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "708a394de7c9bf69ee36e79298c0a7a823554c36e6e6cb99dc1e5377a870549f",
+ "chksum_sha256": "b1e7bd216d6245c298ff31b9e9f09ed3a86f925da0ccd4ffefcbd4435bb3331e",
"format": 1
},
{
"name": "docs/modules/ome_profile.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "50a7b05d6853cbe55087f02e3d221b68a0acc2086ebabd7bcb4c865942272a3b",
+ "chksum_sha256": "8d4b6451dc04d7e0759759d53e112f0542e380b9ece9756ee59f95f9ee38d511",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_profile_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0f891ac994a3dc6f86b8a1cf20bd7f7dca9e59ffbc5a34cdd2980f668e27e36",
"format": 1
},
{
"name": "docs/modules/ome_server_interface_profile_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7037f9dea23e53a78adf07f622361f869c67e4aff716aefc3b15f59e0fea2f4e",
+ "chksum_sha256": "a81231f6eb702cad2b4dadc9e328f09207493c95a3c4ddcc26d2bfac6b1a7cdb",
"format": 1
},
{
"name": "docs/modules/ome_server_interface_profiles.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d24f7e16d1d3a2c73bb67d7f8db22de47ccbce64d0e6f4091ab409e06191f565",
+ "chksum_sha256": "965b12325e46ccebc57a9e35846325c0abfc8cacc179c09b94c0f2ecd15942df",
"format": 1
},
{
"name": "docs/modules/ome_smart_fabric.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c21899815d4a0ac3d70d1d0735aa5fc5e72648dab6044abe8227247e1ebf351d",
+ "chksum_sha256": "d0647d3498526246714b4f6baeef18576f44a9cb422b5b4bae3febddc1025fcc",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_smart_fabric_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e83b11d41e04c07eb26b4cc125a461fee36c5f88dd67224ce52f13eeab1e280e",
"format": 1
},
{
"name": "docs/modules/ome_smart_fabric_uplink.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1d7004088e81fd324b804615b266a79bb4db1371ec5e96ebc9228ded55041ebb",
+ "chksum_sha256": "9587cc3af635051d2bd261dbfbbefcb7e818fe00b24323cc4795b3dad314fe66",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_smart_fabric_uplink_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc8261cc3a57c431a71cafca0737970c2e56d41b56f3d686563506ed4fc9a9cf",
"format": 1
},
{
"name": "docs/modules/ome_template.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "79763ff81bfead903a906964d1108ed21097319860976aa8719065266c55af08",
+ "chksum_sha256": "8755b2b7ea80ec6d275153e325ab764ba585c9056a4fe3c4868ce55c55270ab4",
"format": 1
},
{
"name": "docs/modules/ome_template_identity_pool.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c188ae92f5a2972e9fb594f0424deb1f79d30cfca6b06d35e10f395d5f53ad02",
+ "chksum_sha256": "33e0114359d664f81f2dd3740f0cfc1d9a2647a16180993eafe2a2a3138c31dc",
"format": 1
},
{
"name": "docs/modules/ome_template_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9c0227f0fe04b75993ae186171002f5f9e54bf015880c3982f7ae69683c84d7a",
+ "chksum_sha256": "5f082648ad3106f0ff53750f2d0eb4fe802d55898d9229207883428428e4af47",
"format": 1
},
{
"name": "docs/modules/ome_template_network_vlan.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8dc5270d4a4486699adbf1a62fcc92cc044cf7922802b4d46a8bc999fe9e7d6e",
+ "chksum_sha256": "2d3ac25a1114e76392bb80b3f515c88c5ea32681b2328d65c26e6090c9c048b4",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/ome_template_network_vlan_info.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c2564dcba6d877a3f84518295f5b8819d22a4871b73773069e0490bf70c2c7c",
"format": 1
},
{
"name": "docs/modules/ome_user.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7df833bd369a4b367c592ae97d7bab2ea489a1be2266cfcc953749ee922d2baf",
+ "chksum_sha256": "0b3bc47a7e05896ce44fa54a0bc32aa9a7caf371fccbbb592883423775ca7864",
"format": 1
},
{
"name": "docs/modules/ome_user_info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5d1fcfc5ad0dff3d3bd5fbe179de9301a45af464bd7cfd8cd65a58a2ef96c2fe",
+ "chksum_sha256": "9d771343ae8d467df5ba57726a24cc02ed665fb3635c45acf89bc15cd761009b",
"format": 1
},
{
"name": "docs/modules/redfish_event_subscription.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "80daa0edd8aad83fa2aa24f60930db7ded8acc68155da8c0ae7e3e1862d7e3b7",
+ "chksum_sha256": "c5598c90fd6fc719915c94c66508e658504044cb929a7176af7812a6a581689b",
"format": 1
},
{
"name": "docs/modules/redfish_firmware.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f46c91e2c9ce4f584a25ade7454617ae74f3811540f2a9b39b14e5192da25378",
+ "chksum_sha256": "147b4be1cb5993f3419161982de26ae9a74a1d1e2dcccca3e6d470b002733f72",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/redfish_firmware_rollback.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "948db5ad72847ed27ab771e8c879021d9e791a70e6296646fc773baf93f54ec2",
"format": 1
},
{
"name": "docs/modules/redfish_powerstate.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bb3c460a2e44539da06de6bc5c01cc9c580e4cb1b4e1df1c7611ca640920bdd1",
+ "chksum_sha256": "d4dbb35bf868492afe99c72b8efe328f4cc916b37169ceddc2f7b1fbb893b332",
"format": 1
},
{
"name": "docs/modules/redfish_storage_volume.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eaf09f31c67fa3ff0707779f894d7108fe306eaed30076e87eaa73231b88cd6c",
+ "chksum_sha256": "b32f32754226f2ac4dfbad0a6c4b156c8f5c1692d016b6e6c3208e7f9e94881c",
"format": 1
},
{
@@ -809,14 +907,14 @@
"name": "meta/execution-environment.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6fecf89b56560e79560dba9f860201828a8df82323d02c3a6a4e5fbfaa0aed3a",
+ "chksum_sha256": "565e8d831c58bf2a1f80168b5ee92f526fa5fe48b2ec71a9387371c0d50ca9a9",
"format": 1
},
{
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6a7ab22a7f198e1918e75a2750bf0697251cae5f7f71efc8caeeaae813074bc9",
+ "chksum_sha256": "b20215f8fccdf256ff05c7e1b24161dfac781b8fda913345dad2828a180a54b7",
"format": 1
},
{
@@ -837,7 +935,7 @@
"name": "playbooks/idrac/dellemc_idrac_storage_volume.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c4e923ec8f3493b1a5d20303f00332d26cc5cb5c4b9b3a3e5da70bfb352ac1be",
+ "chksum_sha256": "9903d13a50785663a1e781690f981645334d81e112a6872865118b6bac24a52e",
"format": 1
},
{
@@ -851,203 +949,210 @@
"name": "playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "322fb5ab9587243f1b6437ceeed4779c5eb5494f854cda1ae098329c73d61a46",
+ "chksum_sha256": "333f32a604a8273f9048d333fb835e9717dd561b4583360503885f3ef49cfa41",
"format": 1
},
{
"name": "playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0636a02c804ef71a43eae32813bd7bc8d8ccea7b11b4fa40d895492987c5f402",
- "format": 1
- },
- {
- "name": "playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "28578e8a776d1f0fbfb31127e75e0ab58fc472c77b2fd3abba1344db5fb6c17e",
- "format": 1
- },
- {
- "name": "playbooks/idrac/deprecated/dellemc_get_system_inventory.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e1a86c91fe04e4c081a4227e0042a557ab6556f3c01f5420921798235ef1ba45",
+ "chksum_sha256": "82a62b3e894b89054dcc4d01e56614212501a76ed120d0b98c310bb609ef9836",
"format": 1
},
{
"name": "playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cc31c68c48ac4bee5ecddb652ff404f84f79ad703d4af87986e6711bae3a6eca",
+ "chksum_sha256": "965bcf06720fc6b878fed5cc6d77e64b4830027670406a0de451fb519277b842",
"format": 1
},
{
"name": "playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2b12db44b841dd333d7f0d25f15b30ff98c0821142377656c2d23c5b47a73a2c",
+ "chksum_sha256": "d8ec2bdb41b68c6c16783cab302a56739474ba2db2a144340906d83d411d8549",
"format": 1
},
{
"name": "playbooks/idrac/deprecated/idrac_network.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "94bda1741ed253a935332f581dc5672a261306f2f571c70183d6070898c20140",
+ "chksum_sha256": "75b64d730751d3543b829f7baa60bfe506b5c36a068aad0f52db166c7c6b7f15",
"format": 1
},
{
"name": "playbooks/idrac/deprecated/idrac_timezone_ntp.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "60326f75e44105ea46591fb5cdcfada01c291c43e296dc06a0da35f7597f081c",
+ "chksum_sha256": "7fbea4b5c1db843d6a0091e3b19d96062a724fa1059bbb23651692663386b13d",
"format": 1
},
{
"name": "playbooks/idrac/idrac_attributes.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eccd3c328599cbcf02f39f7b3b48246d2a7b201f90d6f3af6c07a08c04867e7e",
+ "chksum_sha256": "335fab9fb27b80e9bb72c6b1b312dc5b344d2f1ae9b6614255f854bc5ca6b3a8",
"format": 1
},
{
"name": "playbooks/idrac/idrac_bios.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "082c12b369fa03c9a40c1c945f539e7d65ac50066bcd380256479b370c84996a",
+ "chksum_sha256": "2f4892fed0e96b2bd7532645fe590d2ae4456bc1de316b405dceb9ef2c10c8fa",
"format": 1
},
{
"name": "playbooks/idrac/idrac_boot.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2137b8237ddf8764aba72b884d53f4fa6aa7c728e848dc4c9c85b33bda2f5d2c",
+ "chksum_sha256": "a3f384d9f351aca517327d735422ff3a7d7e48fd0a8370f9d41dd1da6dd1295b",
"format": 1
},
{
"name": "playbooks/idrac/idrac_boot_virtual_media_workflow.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4f06808baeed1b3d6e2875c11c7d972a8d8a35e6a0a6b1949fd26e95601bbcdd",
+ "chksum_sha256": "542c406f1a42e6130c8c4a75b97a2ea5fbb8f0c0cfeea41e724972855457e486",
"format": 1
},
{
"name": "playbooks/idrac/idrac_certificates.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a5f5c85475347df18d3a2966b1b61a1d252b0276e7dc5bac66d16f50866a896b",
+ "chksum_sha256": "304bead36bc953c79bdb064a6c3372945953d0b6d8addbdc4be45d0e59c51fcc",
"format": 1
},
{
"name": "playbooks/idrac/idrac_firmware.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2cd01e8c28b213252d3cd833f76c9db3da03d1394ed29c762183bf52fa141de1",
+ "chksum_sha256": "971813db1ea19f8f3e3b9d513f7f15ab2e8015c202323836cb43a3075d0d1b90",
"format": 1
},
{
"name": "playbooks/idrac/idrac_firmware_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cb0aeea474dc6cd72d00a85b319f233e467c214d88a8345d226d0cb5228e9e2f",
+ "chksum_sha256": "eaa3b675ac75e62eead40e8ccab34205de55128cbfdeddac97cff7c1ad7dbbec",
+ "format": 1
+ },
+ {
+ "name": "playbooks/idrac/idrac_license.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de01f4b83a795770f2b12c9c3d5fbadeb6f9351334083f5fc21bef71d29609b9",
"format": 1
},
{
"name": "playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "99fc8d52c8e7dc90298ee3230de3db5c00a39e00d18103fd6b9a71a2401c3f96",
+ "chksum_sha256": "453984f1feb8ed3332755cd58dd3b3d13eacf365db73f0a4231e929c28913ec1",
"format": 1
},
{
"name": "playbooks/idrac/idrac_lifecycle_controller_jobs.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ac0275f30fc430f75856502da81fbc34b8af2e353d886dac4de10a66243bcd15",
+ "chksum_sha256": "2a3912a6f0afe6778d57a2eed40df6183a3ed2838400a16cfdc2eaa4755ea14d",
"format": 1
},
{
"name": "playbooks/idrac/idrac_lifecycle_controller_logs.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "99f8a8ae26678d1d7a84d4d99e387971d7c6c756ce1d7fe5933d3630926c6f93",
+ "chksum_sha256": "65432bf367476a0dddc30fbfa2ad9bc77a5423daf5dad6518447fc1bea124cd2",
"format": 1
},
{
"name": "playbooks/idrac/idrac_lifecycle_controller_status_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e30a3612d0c4518b5c2848ffb4e530817ed1fff4fe5dc7b29841342af7817b3c",
+ "chksum_sha256": "faff447c9a135cfba87c04161408d2f9491d42fe85da06223c821a30cd3c611f",
+ "format": 1
+ },
+ {
+ "name": "playbooks/idrac/idrac_network_attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7dd0ba1946e68eacb3df926df9a2f7d8fdc9f922ea4044b304de3945b0af77d",
"format": 1
},
{
"name": "playbooks/idrac/idrac_os_deployment.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f5cf86fd3833eb3e5fcbd19a053ac17bd5a78b2beb720318f2f69b68a99dc6d3",
+ "chksum_sha256": "42fc0b056a6282757e5b0be577e896fc6b62ab4ce0ce4c673eec2f99012d12d6",
"format": 1
},
{
"name": "playbooks/idrac/idrac_redfish_storage_controller.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "52d7e3f086ad6e5df4d5d30d43e42ad0dfad6e9423ea514c800ff36c7715bfcb",
+ "chksum_sha256": "0f12fa63252f024245e3644b89527a023e718107110a8580cc332c8b57c19b22",
"format": 1
},
{
"name": "playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b27d21824a5be3572c3cce6626e1c422510d4a5c03bc22c631998f08a285ec07",
+ "chksum_sha256": "a417f1624b45912c41ce6f116140e65bdd241cb60473269ee25efc428d5c2cc9",
"format": 1
},
{
"name": "playbooks/idrac/idrac_reset.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6c6c3a2132bb6071d1282f87980cc4ebac0345a2c2ea95024032bcd007dc2879",
+ "chksum_sha256": "d2e2806fc4f6d092db801230b3ed1a437edae17bf98590b3ef7de0692fb0b2e0",
"format": 1
},
{
"name": "playbooks/idrac/idrac_reset_result_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a44b8833cf72fb84947759b7d73c7c3114748a738e6da7b2b3874a717a2251ba",
+ "chksum_sha256": "1065ba2ac114119031a719e466be0b1cf3d70a05590d7a021855502dc9877f08",
"format": 1
},
{
"name": "playbooks/idrac/idrac_server_config_profile.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "07699a1ad62ca26961f1411576dc0707ccc5569ad2e31dfb619b9d81880a4b6c",
+ "chksum_sha256": "95fbf44d10f20da1cb0a866ec7578e2546e215e535c094b0b0270317eefa8bcb",
"format": 1
},
{
"name": "playbooks/idrac/idrac_syslog.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "de87c5144721e93561b13196929340e9fc44878aca57ce51650e103e04a96634",
+ "chksum_sha256": "be1eeae26000a909eb88c2f7ae2f45d5c492a0716cca5d56ce8f9e709aefaac9",
"format": 1
},
{
"name": "playbooks/idrac/idrac_system_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5f471523a4b5190be049e9d12cfc08e78da059c0f8fd58b760f6dcc57c52afc7",
+ "chksum_sha256": "e385033356b74133e34ecdd3aa63f6ad1d134cc5857ad48bd847f4fc6766c350",
"format": 1
},
{
"name": "playbooks/idrac/idrac_user.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b22a4a0e57cd943dcf4d9e5a2ef2e99c1e0e8a5c472a3f9f9a6b3bd74cbf67ba",
+ "chksum_sha256": "d3aedae3410210ff0893afd6e160929f8e7581a5337928f5ee00f0b89a81a9f9",
+ "format": 1
+ },
+ {
+ "name": "playbooks/idrac/idrac_user_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5068eb579dea93aca8ef6edab4e4254bc341c00d2ef373cfdffb50a34a64c755",
"format": 1
},
{
"name": "playbooks/idrac/idrac_virtual_media.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f2d60a70a720f44cd85081cdbb5b5f42eaae581ac8e391ccfb3111bdd246bba1",
+ "chksum_sha256": "466e5e23ac6ac17de823878df10bc89f90762170e96ca4682626b4ef95d028bc",
"format": 1
},
{
@@ -1058,6 +1163,48 @@
"format": 1
},
{
+ "name": "playbooks/ome/alert_policy",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/alert_policy/ome_alert_policies.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0d6be7aa3a27a5adaa464911c691ec919d592384a57a651a23fd377f28df60d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/alert_policy/ome_alert_policies_actions_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d26bc85e086d2fe2560c3b15a0cef97d58380ffa4a670cd86d1521746250d55a",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/alert_policy/ome_alert_policies_category_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdfb33544a84e3accabf9b330d92733266c8747b56a4a89d0fdfc14d531d9f75",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/alert_policy/ome_alert_policies_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4ed4df04af14ae90aec17feffb5fb5ce93617856f43b270072e0501328580397",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/alert_policy/ome_alert_policies_message_id_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b98604773e4979bb9807e2652fa6217e3ce2c7414597998a84be4592c9d078c4",
+ "format": 1
+ },
+ {
"name": "playbooks/ome/application",
"ftype": "dir",
"chksum_type": null,
@@ -1068,91 +1215,91 @@
"name": "playbooks/ome/application/ome_application_alerts_smtp.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "544c2f793802573b9f19bbdfd1ce4ae24a6e25bde7b17f576b2730a0cb7f63f9",
+ "chksum_sha256": "d2eb95a8fcea5239a320840fc4b4ff2321d071a06638ae5d5860651e1b3a4587",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_alerts_syslog.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a776cb3ce6ae73720e3b550c0255b703eeb0e1c159dfd5037dcf016bf716b479",
+ "chksum_sha256": "56af4d077ccfe2b4bdb46de7587fbfb03609ff7484108df4840b69d4118d0c2c",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_certificate.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1f23b249bf4e4439bfa6a0a1ef54e2b1c20ad1ef5e3c9926d75eaf8f2def66ae",
+ "chksum_sha256": "451395c662a84f8d3a030be78a6c1cb7bc32dd5223c2f0c21c4ee6a6433e7d56",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_console_preferences.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f5ee4e96f76ca81645647da6d68f65bd4b159e2124aebc989a266dbb3309d61e",
+ "chksum_sha256": "d69507043f5d3ab6e177a969f319b3dcbf8edfb5678e58e348653c8f6b8556b2",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_address.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ef48ea765db99c34ba07c1be262208bdd050a2f2eeb764fcc9615a15e7feb6dd",
+ "chksum_sha256": "2941e928032f4e93b02e7f56db09bb784702d1782e1e3c87c094a8b2e68bceef",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_address_with_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3656f48e615f71c05a8d38a7526fbbbe51bd376e09e86a2ff5103f650e1c1336",
+ "chksum_sha256": "3375ed92eb8f5143b01108debff662d6f83c98483fd340328315618aa4e801fd",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_proxy.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "360c0590089be634f0b4e45470737a040f81d1aa9fa14b2123fc965e7c9f6f04",
+ "chksum_sha256": "afbcdc3fd81f79fec948b8898ed212e48d494d521af66e2c3cfa91538ebfeaa7",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_settings.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0e3f0479c2adab5a95bf5c932e47b13ce4ecc54de305e5d16be8fc1486a5ecc0",
+ "chksum_sha256": "d61531249118fb36496a87f9bfc892ae34483d6997ff5cb09155f0269a7ae329",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_time.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fd1db03d4ebefc98e7184c0695b741eb9171e4e2f7f15692e435c9acb834ccc4",
+ "chksum_sha256": "5d54fa4f854c3871c631114f2bd1a70e33a6f6b87a593529ac4b7bc2a620bb48",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_time_zone_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d30a435e8ec0030ca04fee849029f392ef17633308f065a868dc0ee2d51128e7",
+ "chksum_sha256": "b66d01170f512a566b92b06b43aae3faa1755f0e453d3ef0110521b68e995889",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_webserver.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8a1c0390466bc5b526a92e6bd6f8c1c83f879d316002989b64e50bcaaa76af5",
+ "chksum_sha256": "2e2e19608508da9365570746e726a36e5a5403e16f0563c2de3e6d775b411216",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7e35ab885e81de689a47db1b9d39e7006ee14822e200c1c2479deefb61c2ba90",
+ "chksum_sha256": "d060d4922337f1b9357e6196bc5dc2dae238f126c76481d65d46e00b3b1d3d79",
"format": 1
},
{
"name": "playbooks/ome/application/ome_application_security_settings.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "44ac28893c235740bb266a81004ca7704f35a008174f089053f2acd5cb1f1226",
+ "chksum_sha256": "e423185bb24ce6d7641db2cec93e76029728c4631a0b84c307da0a375a247526",
"format": 1
},
{
@@ -1166,21 +1313,21 @@
"name": "playbooks/ome/compliance/ome_configuration_compliance_baseline.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ca546c9b11fd07d4e3a86135083f9460cc29343095efce34e132bb8577dd0276",
+ "chksum_sha256": "fc3a2176d0d655f312c52f97bfbbbefe8fc5dd45e88b32301aaa56388a52076d",
"format": 1
},
{
"name": "playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "357998e3518962f59d84c8b8d4bde12dfef469974ffbc36f55722a3c21393b15",
+ "chksum_sha256": "6fb650fdb90cefcbc32a59f7fc5facf3413f43908a40bdbd93bde43f3570de8a",
"format": 1
},
{
"name": "playbooks/ome/compliance/ome_configuration_compliance_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6f30f74f501c6b2cab2c82581964f619bae2d59d01d80b43cd4c20aaeafb0ae2",
+ "chksum_sha256": "e248d9baf3d9a443dd968b7dea92c70aba87d542b52a34d692daf7c3f595587e",
"format": 1
},
{
@@ -1208,42 +1355,42 @@
"name": "playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e4f4fba305e8ba3b9687ccd7165bc0fefc8fe37aef496eae36408d82c01bd4c3",
+ "chksum_sha256": "dbc8e4f4a6f5581fc0479f3fccef4984fb26ef93650a41fa0ff2efc04e741a09",
"format": 1
},
{
"name": "playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "161590e42d75ddc7ab16a8d90c0f6ffa8a43b391c04c636f7e3b81c4fde7cd41",
+ "chksum_sha256": "3d2d40aaca55f42d88a21c9556b08ddfb60ee692a2ff95c2674e33ecaed3a291",
"format": 1
},
{
"name": "playbooks/ome/firmware/baseline/ome_firmware_baseline.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "25d3dcb15e46c2c88873163dfe1a01ff37c0dda93cd951c078b3c96b4753e5c1",
+ "chksum_sha256": "7b80747cd3904de0e8d86046cc233e411bc4083f44e2c6f6f8a875ade32dd4c6",
"format": 1
},
{
"name": "playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cddff7a98e07d0e2d24433e757d0f430b0e11aed2f4aa66ace94b25bbe9aef19",
+ "chksum_sha256": "1fec86960e36907b5574822b9a60531ad5812bc465a5473cc57ccd3054f076de",
"format": 1
},
{
"name": "playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f902f16153e996b6fb701d1ade662509238e76e6f0ba84f05cd79216af275c6d",
+ "chksum_sha256": "1ee87a539547297b73866e1d075f7c81fc3ad2147b443c6fbf3b2454367d6b47",
"format": 1
},
{
"name": "playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "71287900811e03a38e685b904e6e09b989b626ec598f2fdec5904997a6282358",
+ "chksum_sha256": "3b297907849be26643ed73ca919c987c17057759b307c99eaa2176c8502d8294",
"format": 1
},
{
@@ -1257,217 +1404,266 @@
"name": "playbooks/ome/firmware/catalog/ome_firmware_catalog.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d449dc412c5b1d6b3bd8efd3f0c333ad5a6597d512fcfc13c0b43e979aa86db3",
+ "chksum_sha256": "d6919243b6c8339e13e6fba8d3b97d0205734da51d8b7858bbb450ebe017af45",
"format": 1
},
{
"name": "playbooks/ome/firmware/ome_firmware.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "737487a1c9a931ea1afe538951ade5b79771cacc9b7661949d5f80d15ba1319a",
+ "chksum_sha256": "7d25058ce25ee9d8ebc654d51279c54cd41fba51abe357c0221d3f41606180a0",
"format": 1
},
{
"name": "playbooks/ome/firmware/ome_firmware_with_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d04f917b100916a94dfcaad72fcb9a5cc94bfa2bea7aae1f8adfdc76a71f5d09",
+ "chksum_sha256": "0a466f3d8237f5442bd13e312ca31d2a6482d63f0f7ca22f38c826422a1ddbca",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/inventory/ome_inventory_all_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea301f8125ffc2fb7889cc791e0558f8f7e85b3516e3d6d6fcdd4b8d83557988",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/inventory/ome_inventory_by_group_with_host_vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6864ace739760c7c3d8bbf3efe1f34c77fa57b7562817febf7f8d0926b7a2f4c",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/inventory/ome_inventory_by_groups.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72904dae71c2171978e02bef29d887e94c4f588177cffdac576c67df93ac3b97",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/inventory/ome_inventory_groups_with_host_and_group_vars.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a36a617cbe7d9292a04383d2bf0a5716fc645f69c1c7959eca82ceda5d541ab9",
"format": 1
},
{
"name": "playbooks/ome/ome_active_directory.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ac51ececa4b91a8e6365da60f6ce5c5a11a49232872d53f542edb41871521d3c",
+ "chksum_sha256": "dd4d2026942aca6b2fad68425dbb9c6795ba3017fbffe2dd1fd14e9c72ee0d81",
"format": 1
},
{
"name": "playbooks/ome/ome_chassis_slots.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3906545b9e6ad6b47992b4d861750e93fcdacebafe4aa198d1cafed1d3d39f3",
+ "chksum_sha256": "fb87fe3c86cd4506226de40728064dcba9b759220b566342da9c2f0b2e40331e",
"format": 1
},
{
"name": "playbooks/ome/ome_device_group.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "57f5bbb13a97f70c4b455879f054fe91f687256b0cd2d3966ced9c50b3147b3c",
+ "chksum_sha256": "5669a05a743b069ef53e1dadcf80954feb963fe8fd56932676fb92ddda9ca35e",
"format": 1
},
{
"name": "playbooks/ome/ome_device_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a3f5581491a4f964d974de1f0f46eb145d6396aae2ae3d213aeb25420792a7a9",
+ "chksum_sha256": "ef61ae932b9e642e8db5552525670b6a9645a52fb39de9add50551fcd721190a",
"format": 1
},
{
"name": "playbooks/ome/ome_device_local_access_configuration.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d98875b5dda6bd407f8bc834349419e89ea35b2d602ebf8c809e37b6917f392",
+ "chksum_sha256": "cb3e4d71a9422a25d8bbe4cf2ca376df39d5a5f922de2cfb9facc8f5721c5a88",
"format": 1
},
{
"name": "playbooks/ome/ome_device_location.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "68f148f9362f3414166ad37b147ba314480a69ed0734900ce968dfb9cc4cbe4d",
+ "chksum_sha256": "497ed8e7f6b757cfbc60ccfcecaec04e742b47a6ed80861b58339f79f2f85adf",
"format": 1
},
{
"name": "playbooks/ome/ome_device_mgmt_network.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "18f3a9b956e41112b3ade4e47dda3a80499633111c159be5cdb15ae980c13172",
+ "chksum_sha256": "89748790b471a573b3921ff6e0fff48422b97ec16fb2a1aeae568ccb3fcb013e",
"format": 1
},
{
"name": "playbooks/ome/ome_device_network_services.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9ef5220d73ae9b58102ddebd5b8090e91a75afdb3fc9fcd3b7095c56fb7755b0",
+ "chksum_sha256": "2fbe1308921f98e05468b4e40687fb1954e365d4c140d73bce5ae550874d95cd",
"format": 1
},
{
"name": "playbooks/ome/ome_device_power_settings.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c95532bb6ec7d855a8b4aeb16836150d74b944f6018b0e17e4f6a825391e31df",
+ "chksum_sha256": "18029724ae0804c84143000e04c892cb042fdca99d8cb70eebe301bb964eb9ca",
"format": 1
},
{
"name": "playbooks/ome/ome_device_quick_deploy.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cfd6cbe0ac40ba5f7e729c7eba16014ff3fd42a0a7992f05a5f4f8422ba8b355",
+ "chksum_sha256": "acb2a7a5c7e73052bced88fbe4ba9dcffab7505be19212d9a11bd7aed261a5e8",
"format": 1
},
{
"name": "playbooks/ome/ome_devices.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "611add2a18b80c04b6470bc768459117d0b2c8ab76456cc0a2613f4244a06796",
+ "chksum_sha256": "c1331fd531251cd1d82f77bbff149fccae42ed7ad035cb01f3b61afbd76cb508",
"format": 1
},
{
"name": "playbooks/ome/ome_diagnostics.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f05b3c6d7f35cd3b463b01fa3548efa203f0ba070cc7ff19347a5181c0e3922e",
+ "chksum_sha256": "b67e463419986332c846988c6c8ba7a877e12fb42b4e15ac9354f1fffac788eb",
"format": 1
},
{
"name": "playbooks/ome/ome_discovery.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0e22c8dc2476de8eab891577e8da856ccb49c9c5e5c41b2a13f7430a7a754c23",
+ "chksum_sha256": "d38b03a955441fecd26fcf8f01f75e65d7d459927ac58040d041299009fab7aa",
"format": 1
},
{
"name": "playbooks/ome/ome_domain_user_groups.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "44fda4cdcbbf62a75180e9513f9ca67ce65478a1ede4a323c73acac12f6cd015",
+ "chksum_sha256": "1ec28a9e158a70907e3da5f2a56894028e75daad9e3393666ceb54d5baa96b37",
"format": 1
},
{
"name": "playbooks/ome/ome_group_device_action.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d40958dea3987aadfcc269ff813f8fd9f784f0651132954ca063ddc5b4c46ccd",
+ "chksum_sha256": "e28a69eee5cc5383903d9c96e9c6b12a76cf76a8a92d7c9dae9457456bb774a4",
"format": 1
},
{
"name": "playbooks/ome/ome_groups.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "57211cd828202d0aaf0b402bdb0517c606f589b3d6c4b25f73041b3e08ddbcd1",
+ "chksum_sha256": "c8af511d1623d2d4cf697b1e3480fede2228f9f0759bc73af8c88c72d20b6cd8",
"format": 1
},
{
"name": "playbooks/ome/ome_identity_pool.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c150987a26461e26fafae4376d23aa0781dea40f8d2970f8bf3a07b0be3244c",
+ "chksum_sha256": "371d65813bfc3a77962caccfc53aa30ab4767402de617f35d2db5734803b29e2",
"format": 1
},
{
"name": "playbooks/ome/ome_job_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1e9d6427fee7e3c8ba92e51df45daf3dfab6a870ee460a51596c70db9d20503c",
+ "chksum_sha256": "7a159c605c87d21e18f581b789ec99a5ec0431f22d4cd223e20483708175b814",
"format": 1
},
{
"name": "playbooks/ome/ome_network_port_breakout.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2a754a0c3de07eb3509967c427cbc461051ca185f771c0d0987a5c9e00c1f4c9",
+ "chksum_sha256": "39bd40122da135d913272454af6d830ba40307d59e66e487aa709a9d1272afec",
"format": 1
},
{
"name": "playbooks/ome/ome_network_port_breakout_job_traking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1c46ea16722e826c7db8517caa61623b917794d3601886b45077cb58abe58ea8",
+ "chksum_sha256": "111f9977f82b1ca2241b5307535fe6833e70c115871c3e856e15c98f2f507eaa",
"format": 1
},
{
"name": "playbooks/ome/ome_network_vlan.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7f4fa99f16d190b0f4885a52547ded02f23f3133edc4aaf6b4628410fa60dfb8",
+ "chksum_sha256": "5fc6914c37f5a6b280047c45bc51dde5156fc7f17fcf6ea7b31b4fb52902e867",
"format": 1
},
{
"name": "playbooks/ome/ome_network_vlan_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2f0d58633b8fb443663af749de213075f5e94ae69663d0c6ecd12ddcf133d92d",
+ "chksum_sha256": "de7ce01fbf0f07f87edbbd56adbc85904b327ab25c3568904a85df7c1635c77c",
"format": 1
},
{
"name": "playbooks/ome/ome_server_interface_profile_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f6ed36fb1d3f8bebccd89b848fbb374f26a79132eaf33e298705ca16085cba17",
+ "chksum_sha256": "af58749f1aa4451723ccb7fde4f31313eea771ce6818b56df7b4d3450b432509",
"format": 1
},
{
"name": "playbooks/ome/ome_server_interface_profile_workflow.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "09ac99eedc5d5d88280c9ec2cbc3eadb2dfa88fd8fab9a6098dcf8ae6183cf34",
+ "chksum_sha256": "88b5f582aba465eac8f1ab17c7e40865721921bb7864af0d07f83511879928af",
"format": 1
},
{
"name": "playbooks/ome/ome_server_interface_profiles.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "881a358b39e8378718af6e682ed10e666079a3994924af5d1d96d5b5cb20919d",
+ "chksum_sha256": "d0e8826dc3911a89aa5733c42495dd1fa40690d391c65c5c4c48bea10716a56f",
"format": 1
},
{
"name": "playbooks/ome/ome_smart_fabric.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8ee25defbd00757d41a854400eda3cabe2e64e0df88bb8ed0fc011cdbc5a247f",
+ "chksum_sha256": "03c70e63d7713fe24c11cfb6400669fbe422459b3695ccb6214f5ae5656e4f30",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/ome_smart_fabric_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecaca37de3625197151d6684472b54c9b3bc712bc9c47d055beb8a1ecdc401e0",
"format": 1
},
{
"name": "playbooks/ome/ome_smart_fabric_uplink.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cc88f3b1dea0d8e3100913370ee019e362a1e80ba1435254284a459d03986a50",
+ "chksum_sha256": "e9980c7e8d795f73469a974aa6a19c09f998081fe4c40163f9525b5db5596216",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/ome_smart_fabric_uplink_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fda088244750b408b164dcf07d55bbbdf3168b0eb27627c4f938afd0a8658fb",
"format": 1
},
{
"name": "playbooks/ome/ome_template_identity_pool.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "884cefd59aace543ad64ba50025fdb798a37c21989264b8dee86fbceb590fcbd",
+ "chksum_sha256": "97ec21aeab7e209f22107f692b67ed54bc80e0d0205561d2a2b73d1f494e0fba",
"format": 1
},
{
@@ -1481,14 +1677,14 @@
"name": "playbooks/ome/powerstate/ome_powerstate.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b3a69ad92917c874fa543aed3403b5d2558252d891b47fe6dea9db527456ccfa",
+ "chksum_sha256": "ecddfa8e0d0c89d732131128cdb5b5eb2796f2bafc908751866dac67d8896c66",
"format": 1
},
{
"name": "playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f4d5b3d2830d9f98c494f97c75c6645f283aff5eab1371acc75df6a9f80cee8e",
+ "chksum_sha256": "5e7ef89910b84426e2251e5e8900139b87e539f5edc8c2866664dc41be18a3ec",
"format": 1
},
{
@@ -1502,28 +1698,35 @@
"name": "playbooks/ome/profile/ome_profile.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bb6fca02f836fe844b3af07802a8c9161ee0ade5b1d67ae40d5b16a4cd17682d",
+ "chksum_sha256": "6ceb1d703cab609a550730151e605a3e85f690e9459efa1c467fbd92e223cb30",
"format": 1
},
{
"name": "playbooks/ome/profile/ome_profile_assign_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "063b91c58e92b166063205d412ab82b37230dd8e4ffda760373e9eb4e28b0384",
+ "chksum_sha256": "09dd5305a95e0c59fd985365a2f96c6f929603f973794bded1405b9316d9c6db",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/profile/ome_profile_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee5cb574348141c0f41436b8c267660f94a0df9bcf76b67fb7bc555b1b45b687",
"format": 1
},
{
"name": "playbooks/ome/profile/ome_profile_migrate_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6801e62c9ac8c1f44fa9ea203c66f1e3a0cbcfbc3d76341954fd2d6d783b5cab",
+ "chksum_sha256": "832510e01a20607c99b212a65d194fd6c87b0b6d135f395215c50bd4ef2e50d7",
"format": 1
},
{
"name": "playbooks/ome/profile/ome_profile_unassign_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f49b8e2766a212e544d1c50b5e5212443be9c9bcb45ceffc7e53618fdc8a0395",
+ "chksum_sha256": "91cdfc259bfb71732f8ecc5b64fc69ae06d5053f886f9136aaf78b43d5c9539a",
"format": 1
},
{
@@ -1537,49 +1740,56 @@
"name": "playbooks/ome/template/ome_template.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8738432b573fbc779bd22e434c68cb1d87cbc0948ff4a718657469b620ad55a9",
+ "chksum_sha256": "3c90c9b41ff444b1bf174211b0ce260bd7bdd80c7a1a012c51314d03ade5400f",
"format": 1
},
{
"name": "playbooks/ome/template/ome_template_create_modify_lcd_display.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b723fb3b8e855c02630df9a5b4550bfd86dc8df22e8a1436a01b171404497d34",
+ "chksum_sha256": "5abdf1b033e3f22ba9c5d532fccb5e960bd9f04c6fffa4fbae799207e754a877",
"format": 1
},
{
"name": "playbooks/ome/template/ome_template_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fe843587c05f8478ee9c0277febf97d18bae114680dddabf7279ed7ffba41bf6",
+ "chksum_sha256": "7606d5cfee0c932ccc67a090a8a58df3fb85d370de5a727df937d84b871f93e3",
"format": 1
},
{
"name": "playbooks/ome/template/ome_template_info_with_filter.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "28e95655a53e5e79a8629a52baa3c9659fc480a4f3406550f8d48bee8d7edbb3",
+ "chksum_sha256": "9b38315b410882eb04f6a19bb0676ae5ddb146831b571572f5d15ff70d430fe2",
"format": 1
},
{
"name": "playbooks/ome/template/ome_template_lcd_display_string_deploy.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3b448ba7018559dee692e9fdbfd3b1d40949d3c751af3b81731d814e2ed3f711",
+ "chksum_sha256": "4c802f28d538ce55438af15f296e755c1a5f08872f5d55b582e0eb776446975f",
"format": 1
},
{
"name": "playbooks/ome/template/ome_template_network_vlan.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fbb9b0c74a22c30a93958302c7dfe1390d96fa3540f274cefda7e59780469432",
+ "chksum_sha256": "d72b4eadc6cdf60f554394e325a86d8ffee38bb3e38ae3f38b0945e25864770d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/ome/template/ome_template_network_vlan_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf7ebd240aced312384fd6700dcee5f4572c189b18878dd3f2ad1a600a41241c",
"format": 1
},
{
"name": "playbooks/ome/template/ome_template_with_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e9e6697191b9135c4e75f94021f7432c948126398f79fb37d8d3996004aa9a2",
+ "chksum_sha256": "28cb65035fb452f9d1b688b11f2a9152f010e6502cdf980fdb1fa674da1c613a",
"format": 1
},
{
@@ -1593,14 +1803,14 @@
"name": "playbooks/ome/user/ome_user.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "65b37b05b2bcbf5d41f1a6466df0f07e72fc65b143feb30c07c6d97a3c9218d5",
+ "chksum_sha256": "cab96264060d057d6bd286f21e5ba810a6c9747014fb5c6ef4fa12118adb88e1",
"format": 1
},
{
"name": "playbooks/ome/user/ome_user_info.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d75ebdd03fc9c5d1ae875f9ad341ff686ff06fea4e51f56df4d75afa9b82ac2",
+ "chksum_sha256": "ed38db95276274d797ccc43cec2d8b50d53be9bd1e5e092c9ec6f030b7f5c6fc",
"format": 1
},
{
@@ -1621,35 +1831,42 @@
"name": "playbooks/redfish/firmware/redfish_firmware.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1ccf1a697f3f96f8463001be793b23e2c511226a40033d56f5bdcfe4d1a82bff",
+ "chksum_sha256": "e9831056b86afc4319cb850d6ed4a723e08ee7a5e2406e178d060bcc0c669796",
"format": 1
},
{
"name": "playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e8b6dade0af8249dfe1b4bba5eedea3751ca67a9331e94e00027633a1422f106",
+ "chksum_sha256": "7e1785959ab3ca10bc07a64de60a1fa8faad059df4b1b7a68a493be98b18d0f0",
"format": 1
},
{
"name": "playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7908ad83d6f8be21b4136e88cdd0526e1b7e165de80313da9b981e9be4b940f7",
+ "chksum_sha256": "da71c6817f0338f16b2efceec75919579ab4783695d256eb50050e50dd732ace",
+ "format": 1
+ },
+ {
+ "name": "playbooks/redfish/firmware/redfish_firmware_rollback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67bedde1fa194d706233c12d6aeae0504214b2598a4f848373e31eddcdc571c0",
"format": 1
},
{
"name": "playbooks/redfish/redfish_event_subscription.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "129aed58783f80733247d34c27a507546985b6b2d97793f4dc03021022fbf166",
+ "chksum_sha256": "f901dc08010a5c76bf0fbc8c8f1ba60c1eff5d075878469b3fe1197ad2dc4589",
"format": 1
},
{
"name": "playbooks/redfish/redfish_powerstate.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d15e424913842eeaef979265128a62ce872cde5b1f95933773f86597f728ca86",
+ "chksum_sha256": "92461b5efd3384e8da363a9a091a18eb762680e516407b78ad55c1139d03c6e6",
"format": 1
},
{
@@ -1663,35 +1880,336 @@
"name": "playbooks/redfish/storage/redfish_storage_volume.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b8d0bfd51d4fe7176b4bc50169f7ef67d8f56a806fbf32a022df648ae0d137ae",
+ "chksum_sha256": "071ebfb5602ec9046db2ed9209b8697ff9e9cbc66069236b875bbf65a59af625",
"format": 1
},
{
"name": "playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ce3a0ee4237d2835d73bc50a308d1bca27f086cf9b7e2e83de6d038f3f766d3a",
+ "chksum_sha256": "180a6c686410517af84f74f13ee71d1451ea7a736855e521355da7f7b7a36948",
"format": 1
},
{
"name": "playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "afc62ace65f0cfcca325e6c82db1ab2fe75a0d44d79be5d2437e16275da99fba",
+ "chksum_sha256": "54dad57ad6a79cf3d1613917157698b84eadf3f10e456776ce23fd4a1a8c4e61",
"format": 1
},
{
"name": "playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "990cfcc7f5f58fa573a0018cfd52f9e10ef1fb851b8c37d9c1dc4b49c3a9aa4a",
+ "chksum_sha256": "2c4a6b9df13e5c02c6f74324453b3e2e93607f4fe6cc723a1a2d3337883ea21e",
"format": 1
},
{
"name": "playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a5839ad66c1e6e5eea70cdf830d87a9b4476663012c820f66b8b68d6ba555750",
+ "chksum_sha256": "96a4103655813d28c473885a73cdba3b48eafaf01521931f7761f89de461c91e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abd9f8c450fda2dfbff41f9ee2bbc613150d94e027e8a560e48d3ec14aad97fd",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_attributes",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_attributes/idrac_attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b558f4d845b9f870ad3c89c34e3361c41acfcef9437f83bdcf342ce3e7c111d0",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_bios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_bios/idrac_bios.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "46eb1addcc8166ef1e320678c15b5c0bcb494e68166b3a4cea65341764af3832",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_certificate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_certificate/idrac_certificate.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc5d65aad27029c224cfbe3945b27725caac02a8bc20db57837283efbac05c85",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_export_server_config_profile",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_export_server_config_profile/idrac_export_server_config_profile.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5a5df7a2c70cdfc5b97c9a752dddf82b134ba6b5358ab6216097b097c72a533",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_firmware/idrac_firmware.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce0d0ab244e8a7831b43abafdcc2e610b3b4507ff0a8785000792d94769e71c0",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_gather_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_gather_facts/idrac_gather_facts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "99a54fcb95b0805ac8ca5f2ce85541caf9dd9666ee3ae0cd1401c90a3a3dbcf8",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_import_server_config_profile",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_import_server_config_profile/idrac_import_server_config_profile.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac1ed229206d33006e498e8c0ec6f46ea190dcbcb34698b3560ac7c45859e645",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_job_queue",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_job_queue/idrac_job_queue.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7495a4784f344f1557f1f65ea5c51f56b31d025e304f66ddbb9ebbaffb2991e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_os_deployment",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_os_deployment/idrac_os_deployment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "484bf0716912aac6c405531874feb47643b82e5ae5bb4048e492adb7269328f1",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_reset",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_reset/idrac_reset.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7260501d8b6889b34b43524d75c1bf06a6457f44ea303b36aacb2190ff8fa4e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_server_powerstate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_server_powerstate/idrac_server_powerstate.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b27bab99e8b95ed92bddd14d59e53341a3cfa8b3828cfce59af8bed8328ac52",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_storage_controller",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/idrac_storage_controller/idrac_storage_controller.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0792768055d63117233dbbca23e2e7f434da81c3e77eaeb67554602d17ea7fd3",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f78483b3cf76c53e1539f5c47bcae79a21890faa859bc5f0cdea52cbeedc046e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/redfish_firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/redfish_firmware/redfish_firmware.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b40bf1bf5fa84bfd210377c89e4b4566076bac113347918c06dcca053c463a8",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/redfish_storage_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/redfish_storage_volume/redfish_storage_volume.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c8d35ac12005d8a06ca9fbb891edd16c926b4740b7f1a4743df7fcace3404da",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/attributes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e760a0cb6126d76b49613aac9e612f88ae7e27925160a52b2e0b255b5f47bb2",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/bios.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d5c75310a93c6d513c747db70e9bea927c1eb5dd6ef75ba5fa766d4c2baaf9e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/certificates.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91099951ff4f500c14e95ce2a6950ddc012521daf34c1253221cd774e75647ed",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/credentials.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "959f184f2bd6eec8a6302784bfc6518c10a712a496e92b6f20be4f5e166be41a",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/export.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa812c14b39721cf11488674c9a0a93b8ca8e94406f335f7722a58b20475a08d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/firmware.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ddbe55c1ad9eaa8f33cc617ed5ceeaaf99efd7d43bb5f33d60ea98fd966241c",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/import.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90d3589259f331152d96d8ae0c773ed3958c7ef24bb9604387bbfa040a20bb6e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/osd.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1e6e58675cb4af4c9f800681df1be3ea613b4ee09aa6f8c5eb2e8f05f4b2df9",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/reset.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "690bd2b370bbff4b2d9fc3e158f7a85276e343e12167d724d7a30ae7fd7ae7b5",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/storage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65e7a405ea04245a99ced19727310b2bdf295578fe6bc2869ee764eeb2c4235d",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/storage_controller.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ccc017e74bede8274235bc7358cfa6192c7c7430876016ddd07c827657f16e8",
"format": 1
},
{
@@ -1705,7 +2223,7 @@
"name": "plugins/README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b8714f96c428a41767a9a643e047e5c72ca3fa5e0d3b7129bbb611187904c3c9",
+ "chksum_sha256": "aaa3a9ba0daeec5a41786ee8e307e4663217563d1b1cdd1adf2cd4813ab6e9d0",
"format": 1
},
{
@@ -1726,42 +2244,56 @@
"name": "plugins/doc_fragments/idrac_auth_options.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "31e6a083ae597d771eae6213edf24ce2e729f3c35b20dc669f787a4bb43a7009",
+ "chksum_sha256": "4559c52de6583c9d98de9aa9d7ba905a3d1ce3dcaf349e2b3ae8f841037af07a",
"format": 1
},
{
"name": "plugins/doc_fragments/network_share_options.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "98d8877a3ceb891a40e3ecb802f3843c6775aba98a9e2760f01bf913ed69a964",
+ "chksum_sha256": "9aef12b8caa7df49c02dde445a8923c811228558bd61893c720ef185b2e7c182",
"format": 1
},
{
"name": "plugins/doc_fragments/ome_auth_options.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1b4468bf5d1469949eae6d6bc6a8b4f861353e7ab7b392ad63ac51c0e7e1caf4",
+ "chksum_sha256": "add3bfb55a976a99dbcd7a2ef87cdc2547991eb4e52e9fd97e0d456341dbde8b",
"format": 1
},
{
"name": "plugins/doc_fragments/omem_auth_options.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "90d5031fb1cad35dd5b4450ccc8682f09709ae17d2871c24694190e8fb2efcba",
+ "chksum_sha256": "ee12d1210af2e46053f3f0b1dc5156395c45aaa30303833b5a14a5c788a89062",
"format": 1
},
{
"name": "plugins/doc_fragments/oment_auth_options.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8fcdde0c2d0d36bb9eb6ab67a4c5a897096b2e45d87a6d3b70d58bc5947569e5",
+ "chksum_sha256": "6b5cfb79f913eaa3a1515e9fc0ea060f2c5a27bc4face2fa3a0506fc866a516c",
"format": 1
},
{
"name": "plugins/doc_fragments/redfish_auth_options.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9d321d8ac3b53cf3196a98682f445482e0de8de3740613113e58e7cc62a80af4",
+ "chksum_sha256": "437dc8e47ff125164d7a46c2ac329cd99bef1d30d6253ef13ff7cabcb28ffdf0",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/ome_inventory.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb2597106d2e3822ce9f74ae0551904f6f6ccd6cde597bd3ddccf96509bd1e3c",
"format": 1
},
{
@@ -1782,35 +2314,35 @@
"name": "plugins/module_utils/dellemc_idrac.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2318a704f1d71853ce4d50180c758378d21ffa37a6bf88dfda2182b17b634428",
+ "chksum_sha256": "3d9f01ddb66c08650f51804e15be43c971f57e8f7960b9e8eb755b1694f655f3",
"format": 1
},
{
"name": "plugins/module_utils/idrac_redfish.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6c1fc94a9adb9dce2477b746d05482a9fc1de9c236e4f3a8dc095a2ac5a62f32",
+ "chksum_sha256": "f58eacbdb2501466181f9fd4e48bc1f9ffc599113e2f558c85a6b1a11d03a45c",
"format": 1
},
{
"name": "plugins/module_utils/ome.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "23fbd69b9f491d36896c0cd996e6514ef74bed23fc4057ed1580eb57bc52147a",
+ "chksum_sha256": "a2b26551a3677c43e45b54a110bbd75fa9bba0108b2ffc5cbc71962d926160c2",
"format": 1
},
{
"name": "plugins/module_utils/redfish.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "94570535cf3dd3f0381ebac589c272030211329043813cabea6fc805b7178a3e",
+ "chksum_sha256": "edd7f7b228d441288a51ef80a2d760a3aa5b6458bb9830fe5c2148d16e1799e2",
"format": 1
},
{
"name": "plugins/module_utils/utils.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "80a1eb37b3d39259af40ba8c206f40ea9c19d069b594ee8873106576a9a620bf",
+ "chksum_sha256": "9b089f767c4d9c3d6f396ddcfb50b7ee3280f4f9f4350171ef445d0f20f1abb6",
"format": 1
},
{
@@ -1831,644 +2363,5621 @@
"name": "plugins/modules/dellemc_configure_idrac_eventing.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ceb170c26435678f63273556f894583b64ab733868c8033fdbee39ef0788cc2a",
+ "chksum_sha256": "8972622a58eb34b314304c1891c770b0cfa0c35a766303f5d2fb4614f2bc9ca9",
"format": 1
},
{
"name": "plugins/modules/dellemc_configure_idrac_services.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b07cbe10a36aaecb250fe8351499f05ffca31472815f6f73b40d904f306184bb",
- "format": 1
- },
- {
- "name": "plugins/modules/dellemc_get_firmware_inventory.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "59a8f7f805ca38912b39b0c41e28108afc34eab2cdf485e2b33b1464002f8770",
- "format": 1
- },
- {
- "name": "plugins/modules/dellemc_get_system_inventory.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bcd3478f09ce0a00cec8c43409f536be51eef85e3e13350e618ef612f35bfef4",
+ "chksum_sha256": "244953d637c27180cf67aec39005498a0abe58688d3a2b05b1655a6df81a8db9",
"format": 1
},
{
"name": "plugins/modules/dellemc_idrac_lc_attributes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3774739c8c7a941ced8b22cf985ca09b9133af3b927cee98960ca4feca9bdfe",
+ "chksum_sha256": "79c874570d8c94e5373a26c30e4e1b64576a54a14f62f4499b674c7b2f99bb8d",
"format": 1
},
{
"name": "plugins/modules/dellemc_idrac_storage_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc8ad88f9f12af0efd986284122a2d25063540d5247f41ea1bbed43a23bec5f2",
+ "chksum_sha256": "33401cd52a68385aba75f18e26cb79b0a6dd180a9d6f7770a14adb86ea65c8ec",
"format": 1
},
{
"name": "plugins/modules/dellemc_system_lockdown_mode.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2fe9b423e68e3e81fe2b98ea82a10c33fa25f1e81d0e27ee8e4bb1a627a7569d",
+ "chksum_sha256": "fafecf1bd67f65c03480e16c097a0dab930c2bfaff25a4efda57f46a90f27b5c",
"format": 1
},
{
"name": "plugins/modules/idrac_attributes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "31f9b22864a15832ab4882f0cae0f9699204b0ec0b2702c78c8e64aeb247d512",
+ "chksum_sha256": "22cfae5ac4ac90520b412fb9377f72d891920470dffed5f722f0ac8b159b4f51",
"format": 1
},
{
"name": "plugins/modules/idrac_bios.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8ca114a526185fb5d09074bc38101d362f83edc55b794ea604b062b7c9b7617c",
+ "chksum_sha256": "314547ca6619f5e0e397f85dedf7f43e4c7b33bd9db81b8f361c5f05f6844968",
"format": 1
},
{
"name": "plugins/modules/idrac_boot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3214788e021feeb9001fbc3ec6badf14e178e29f2be9825b5983c9987b0d9a07",
+ "chksum_sha256": "0052eaeedf12e24462e6564f2b767e26450b4f01f1c1a3d09bca324154a1682a",
"format": 1
},
{
"name": "plugins/modules/idrac_certificates.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c44feb0cfdcc6485f8c0bba85444269ca555c28c08773665700e74f9513e007d",
+ "chksum_sha256": "49d033ed9292e6644411b0e252f792c4d7f767470cabbdb2983147e0fa238cd8",
"format": 1
},
{
"name": "plugins/modules/idrac_firmware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fa03d293a819d57ab6ee9ff5c799aa16c24ff1b950fe201a3c73eedb91a1ae41",
+ "chksum_sha256": "7cabfda75d3c291689c0c2fee932be51f3021e862f692bcb25ed00a053a1e2c6",
"format": 1
},
{
"name": "plugins/modules/idrac_firmware_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "43090db00165687d027122b2035c9353a1c4f9945171907442f83efa2cbbb7d7",
+ "chksum_sha256": "a040502c70727b44937a478d5e33d78ecc90ad8c08873325b5217e80b3e69fab",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_license.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66af69170bff339b9c17c072f987cb6dc74d454e5aa060498aac61ecce1a65de",
"format": 1
},
{
"name": "plugins/modules/idrac_lifecycle_controller_job_status_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "29cc921c4cacd2dd7094ba803d6168595b3e3db3d3c6deaef0cf70f2357248c0",
+ "chksum_sha256": "0030947d9d1bc3f70ed13147d429fa4696627321dc9f75a467f00df10adb8402",
"format": 1
},
{
"name": "plugins/modules/idrac_lifecycle_controller_jobs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "480a35714b5ae18d8c902c470b9c46e72b8cfa2e40518722ded7127e2c96c7ac",
+ "chksum_sha256": "8b57b0f86959fee2a4919d83ef943dd5856fd3b137842fb700478d07159b403f",
"format": 1
},
{
"name": "plugins/modules/idrac_lifecycle_controller_logs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1883741f413c7c5b7670bb152f76e4672cf25abaf44409cb1e9d45cd8af8df38",
+ "chksum_sha256": "b374487c9c0f19acad5da9f3fb8fe24fa98b672e2df72f92b9b5eaa2d43ed865",
"format": 1
},
{
"name": "plugins/modules/idrac_lifecycle_controller_status_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a3eb8b534fa96b830fd73ac8b8c8d88b6b6c8b3aaa0a0c47e982c49b9dd25e55",
+ "chksum_sha256": "158d02c1d50ef175e26c7ffa66831575034e077e233ddd992c39d594e004fa38",
"format": 1
},
{
"name": "plugins/modules/idrac_network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9913f4317fafacc168e30ea3e7e677bc87098f01e6b878c6513b3b35a74ae4fc",
+ "chksum_sha256": "4c1ea93a690a1400cba57600c7ffe9378813c502057a7d76f593ec2b126e5d85",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_network_attributes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "670868f9e7533d5019975faf0e3cf3119187248c7e255eeebefdbe59c59b45a6",
"format": 1
},
{
"name": "plugins/modules/idrac_os_deployment.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e7fcf34fe7d1bbb017765842e399827402cb3001891f0b80619726b8f5b75d15",
+ "chksum_sha256": "fde92ac2396a616812df831e2c576a591bb0b1e0f38e86e8f99cff8f5c75df3d",
"format": 1
},
{
"name": "plugins/modules/idrac_redfish_storage_controller.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "be4045a15b9013f38a90ba84e303fbde55f7ef08dc9c9a068e1a55e14cd40998",
+ "chksum_sha256": "187f4af6b3da0bd0350f782bbb930c30d6e5926b0c64495b7466f209bd37469d",
"format": 1
},
{
"name": "plugins/modules/idrac_reset.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "83356cad46959040735dc8210e53a70ba252aecbf4c4eaf08d2c7f7ec0abed4f",
+ "chksum_sha256": "68fb61b540612a4ac7c86874e56fdeb1bb706d8dc7df382af6ec6060081ce69c",
"format": 1
},
{
"name": "plugins/modules/idrac_server_config_profile.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "26c2126af51f3017c525d1c4324d7ada615b6ebb7d0cbc7eeab911863d0d13b1",
+ "chksum_sha256": "175505c7f6ed9b09a2b1998e0c9dc44ccb619218ed4ac1a665e955b7d2c8b686",
"format": 1
},
{
"name": "plugins/modules/idrac_syslog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3785b5d4739f47dd34c9a766b5e705310a67fd7f8cc731e5f75773c63e6845ab",
+ "chksum_sha256": "8a79f5615815e37ffd0e0dd8544a2df2782ba792bb5e283df134c3403b8d4f8c",
"format": 1
},
{
"name": "plugins/modules/idrac_system_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fb989a436c357d2a5275838f727fe2c19be39fb47b1352a08be5a0771f41dc31",
+ "chksum_sha256": "ba0422032d21f80ccf2e138499b8dd10821b58ea7e09a4d470446f7929cc5695",
"format": 1
},
{
"name": "plugins/modules/idrac_timezone_ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "77bb42b0c2ffd8597dd1bc4d855e38aab9535562c394fc703abf1dbb0ae2c09a",
+ "chksum_sha256": "540b6fec19d767e940128d79991a7517716052293091e1a98caca550de5d0d8a",
"format": 1
},
{
"name": "plugins/modules/idrac_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e4343e4bf3645750d1d8e35ad511c3d3c5eacde15469cca1aea5648097ba6834",
+ "chksum_sha256": "6670badc421ded85d316b969b60a24b54c0f93dab90d388f62ac82b12f127ca1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/idrac_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e93386626a7c28ae94efea17aa6af84e7e59966a59e84136af01e19f6d84aa4",
"format": 1
},
{
"name": "plugins/modules/idrac_virtual_media.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4fd6086e474f1344834c5a6bd85f4619125733840b5b0194b7a020e7f899a13a",
+ "chksum_sha256": "1616e52d126a4c25b6cb7a2a47939a6345e5ae673e975e8b89a9b1eb8c7a0c14",
"format": 1
},
{
"name": "plugins/modules/ome_active_directory.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8a6e1062ac7734548dcab7d3175572cfa2d1a3a6b322511b316ab1d6af6fe33d",
+ "chksum_sha256": "47a38b69a0862bf550972a674a37d877b2f70d82a27a1bf68ac5b54c2f0678bf",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_alert_policies.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "744da973ab0686c1f55b81676c86837a846276b5736b0dc364a927b9254f3f28",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_alert_policies_actions_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55e49caeb47cec5c51c53fb0b7cf50570eed92d6fc2fceae4b4e8eee2be6611d",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_alert_policies_category_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b83f7a82cb0fdf713db87623567512b70b3199ddf55b737bbc4ce7f6484b2de3",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_alert_policies_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd3cb7cf50c1d71743a3340912db5a58676728727d83c2a91a139dc1776de4db",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_alert_policies_message_id_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff3bb7fa4f1edfb5025cb6ab0040dc8ee03f79cd20d00dbdd3a5951e5d5a28a1",
"format": 1
},
{
"name": "plugins/modules/ome_application_alerts_smtp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "82edb362407d109837d2695564b40b766caeddb96720d093b56e0b801aee7db5",
+ "chksum_sha256": "de78ff29cce75aa6d57f5e84beed3ca0dd2e91a06ddff572bd9a2bae93aaed85",
"format": 1
},
{
"name": "plugins/modules/ome_application_alerts_syslog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f27c3801fe3ea34752dd2d205d0458599e2e37d51189c216bda2c897c4adac23",
+ "chksum_sha256": "01e7c7432aafaf16db12d49ac5276f15cf75d6f878c304ad4658ae8e3fc9abeb",
"format": 1
},
{
"name": "plugins/modules/ome_application_certificate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ce57def988dc3d482b4be0f482ec7d43076ede102f9dee391ad1ec63e1dab9a1",
+ "chksum_sha256": "17abd0bae13c93b2d2100f3150f2dc3e865fd2e7f7c941727c13e553d0c886fe",
"format": 1
},
{
"name": "plugins/modules/ome_application_console_preferences.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1504367066c5ba86568df43a31258637c52e950d40a134f401ecfb555bb0e4ef",
+ "chksum_sha256": "e35ef322bf87e64517906464c5c605ac560dbefe3a5d93acbb63c5388401b192",
"format": 1
},
{
"name": "plugins/modules/ome_application_network_address.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "564853dbfd3706607e5dd89d9eeaf1be01aa87e2feeb8d4ac89672e3bdec5c28",
+ "chksum_sha256": "c9844f1c4d958fee67122018e0e1b7d651ee52f747708e6c8f12f5c455e90242",
"format": 1
},
{
"name": "plugins/modules/ome_application_network_proxy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "96dcb54a8ea5fa8a34b7b8b3c846d7725788fbd71833890506ee3d4e20045650",
+ "chksum_sha256": "1366d581d27bbd7ae40ea96506bc8d6a066adbc3222ff66974733b666f2633c5",
"format": 1
},
{
"name": "plugins/modules/ome_application_network_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "315eb7f6c14a82286497e9204c5a1c8430fb93d3496e85e5c865f2f0f25e8327",
+ "chksum_sha256": "9bbaaa51f9acf57b58fae95988ed0afde5172cb4610b8c81d3ad56288f0a5bf4",
"format": 1
},
{
"name": "plugins/modules/ome_application_network_time.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d011173df4e1d7d9b1215d49a3695cf28d3225eaccc457c0bf2183829d24ba18",
+ "chksum_sha256": "7b8fbc8e986b8a44703a5901f6a596b24491313dffabe4c9368ef3f331dd1234",
"format": 1
},
{
"name": "plugins/modules/ome_application_network_webserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "034b29abff49479cfefa20ba569d09f13b700c1cecf301334ed66f8611ec903f",
+ "chksum_sha256": "f16ad3c4fd7f81bf6dc43c2131bffb2989392efbd7cbabb0d145c0cb08598a82",
"format": 1
},
{
"name": "plugins/modules/ome_application_security_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f13f076503572a0720d756cdd21125e904b886b4aef50d630cdbc78e9c8bfebd",
+ "chksum_sha256": "ce4ea923efc1aad0fc2b22d8bc4039ddbf6dbe4a34424d844b22bfd141fc128d",
"format": 1
},
{
"name": "plugins/modules/ome_chassis_slots.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5c3934ac69f6a2d3b5c4cc7c1e97b0542eed1886f807cb534feb7c2e87a9dc73",
+ "chksum_sha256": "fe72d30c858c612064ba9118b3d7f1e97cdcb908c5e2005e74026f5707681e81",
"format": 1
},
{
"name": "plugins/modules/ome_configuration_compliance_baseline.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3dc4e8b58d37a525e9e0520e22b7012e9169833bbd31cd6d2410b143703f792e",
+ "chksum_sha256": "6aafa9fc0f313c12a464ea9c69047b8e9026749bebc709d93319a71887ebf99b",
"format": 1
},
{
"name": "plugins/modules/ome_configuration_compliance_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d9aef3c20ecd24903646033f2b5230b79cca997ea86ca73a1c2fc370cba5b669",
+ "chksum_sha256": "4f291a981ee898107c4cef6dab34a8140f703f1cd36914665175566ede35cca2",
"format": 1
},
{
"name": "plugins/modules/ome_device_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9c4ee487708d0b773fa38e831a76f71ae094effe0736d91705f86598506c8e42",
+ "chksum_sha256": "1287863b5aea15224cd308bb4f327cfb0f2a198c4415c6d248d25db7ba11728e",
"format": 1
},
{
"name": "plugins/modules/ome_device_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6104b52e807ef7772aae114878af1b1016616a8a74c8e9dbcb7d8f19a7500d13",
+ "chksum_sha256": "89d586c756522fa223e28b30ac9efa5f18d9dfd17ed6ac95a6478b6121e0529f",
"format": 1
},
{
"name": "plugins/modules/ome_device_local_access_configuration.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b6ac6f03a4157a1cddfc9e6990364b2847b035e5fe87dd5bbbbcc1b12959a26a",
+ "chksum_sha256": "0a00d9f17e5021eb03f67917415d1953bb64b3e9b5dbcbe9356d81f72115f9a2",
"format": 1
},
{
"name": "plugins/modules/ome_device_location.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a6476d5af6166b0cd9e5dbc7887a95247658081eb090f9b2aea47010a30334e9",
+ "chksum_sha256": "3bf02a20d37644dcab75df8f862d01e26b309468b8b611e3bc4832411eb1a415",
"format": 1
},
{
"name": "plugins/modules/ome_device_mgmt_network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c2a132a8e7f55be9d1bc16097ae7f5f66f293d446deb53612747a704bb2e0eb",
+ "chksum_sha256": "b56771c54c1de0cd75fae19fc36c0fe07d3511c815941acfb32836025f0a69be",
"format": 1
},
{
"name": "plugins/modules/ome_device_network_services.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "355d08452f5c2ed3b50052b48d08d4fc52f967d8f3add1db2dc1526682dfecb7",
+ "chksum_sha256": "a49b72402fbc42fab67786768dff6975d5fe81eb0c0418e975ed8536552d8d63",
"format": 1
},
{
"name": "plugins/modules/ome_device_power_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b77d544ee955b28494a1a86c8db48557624a58a3714376795b41aeea5da347fe",
+ "chksum_sha256": "cc6cabcd711d6117ff88bc5e58a8a24c721952671f47bfcf242ab12c9690c3d5",
"format": 1
},
{
"name": "plugins/modules/ome_device_quick_deploy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c13650b999e2ec1c5fe9937579081a2a1f97e9b6b3740954574220f3e702f09d",
+ "chksum_sha256": "5b702d20b9e2474c9ca9bc14f86c6fb6d7dc80350e77435ec6519e9976bd9627",
"format": 1
},
{
"name": "plugins/modules/ome_devices.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "26b6db0d1eda5962466408a52329b943651c149f5ffb1bfec87c3355834d070c",
+ "chksum_sha256": "792fd9d7344be270366daed2872417a3e76e367667ac7c4c24dcb08b1119e284",
"format": 1
},
{
"name": "plugins/modules/ome_diagnostics.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f0bde9547a9320d763bf7e9bf2343a2529bae27f196b009ee2fd176a9827391a",
+ "chksum_sha256": "f37335945247b3e94a4402fb36530d7505c1d3dbafa35553d17e444abc8ae5d7",
"format": 1
},
{
"name": "plugins/modules/ome_discovery.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7c103d3c7d685088923393b0878b2b53ffbe9fc4f64a954e082bdace0c34616f",
+ "chksum_sha256": "b8ca1eb7df50d781df18ecbdcb179bee2903a7e23fbbad9ac2b0044d60c251a1",
"format": 1
},
{
"name": "plugins/modules/ome_domain_user_groups.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "263a02ed96c8ab2f109291493ceab109efaa9f69d4dbc2bbfb8f0a78f6cd43a4",
+ "chksum_sha256": "c099a1fbe9767fff6644351cf7e728c2fd5413b20ed3a0428a61b2f02133d768",
"format": 1
},
{
"name": "plugins/modules/ome_firmware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fe6bda6df29da0f03efcb1e8e9ba3c8a4601be4723edd7dc898da7cbd988444e",
+ "chksum_sha256": "d95825cb26dabcc64c1d153652c2466ef5eda18960cb9ee90270202d3d3c2f11",
"format": 1
},
{
"name": "plugins/modules/ome_firmware_baseline.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "045d19af4d6e166a8e4a23c7aef9304b6a7b1de2c1c8f4230ebf86358532d5b6",
+ "chksum_sha256": "af5f2e984223b1ffb9d78648bd8ab8c18b627d81b0dd0900f05a4f16989e451a",
"format": 1
},
{
"name": "plugins/modules/ome_firmware_baseline_compliance_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ca3c12a956151dd75036d5f8cb11ef2478d0d0c10e4fc3cc5d545e6f302385c1",
+ "chksum_sha256": "a0d08bc596d09894a2d95b76871108de4c87b825115b9b237206981289a672ff",
"format": 1
},
{
"name": "plugins/modules/ome_firmware_baseline_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "48e72d3d61c747396545aa29ade0e4efb946f00b05c2d7ca865c7cbd090e2498",
+ "chksum_sha256": "b59977ca8d111c3d82f1315a4a5f1d9fdd77ffb18c6d00a049f60e8f77ed859d",
"format": 1
},
{
"name": "plugins/modules/ome_firmware_catalog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d58b5ac3f9a9122260f068fb8754368d5e18fbf6e1f9e73e710ad29a843cb0c",
+ "chksum_sha256": "f3e965fd3781e330c127f0b0c861a2f21b3dfb4b65d0900274c8c0ff7e9682f4",
"format": 1
},
{
"name": "plugins/modules/ome_groups.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "05847ecea350efe57fc3a54881cb5450997aad795ceae0d795a37d5eaa62a862",
+ "chksum_sha256": "2794ed60e06fc533af5b610c6bebbd71a932eeea6be4a446e429c7a07ee33d49",
"format": 1
},
{
"name": "plugins/modules/ome_identity_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8f4c0a0b8f9d532f957bf8064eb3738c958a3b254485d2511b2fa7165be101e0",
+ "chksum_sha256": "ce87d1090d90ff57b3d3a527bd2efe95776eb91c3e80640ae6428f4dbaab0835",
"format": 1
},
{
"name": "plugins/modules/ome_job_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "413bb6a7e717a290cff61bacc4773513ca0624a8803742f5bbe1c78ffa805b8d",
+ "chksum_sha256": "0e575ebfd1a2b6247abdd51e73efe2082f0f22e607f26104c43b6d77dd4ced21",
"format": 1
},
{
"name": "plugins/modules/ome_network_port_breakout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9acfd349291413a4edfc5750772e38a8c67bc724c7d9ec000cb4305d2ea6c368",
+ "chksum_sha256": "aab67ab9d418614cf47102f6883afb629ab72b98be540c904e4e2432888789bf",
"format": 1
},
{
"name": "plugins/modules/ome_network_vlan.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e9265ca6ae16c19449eff9250938e00203c7ad32aa5bac730175dbaac1f4201b",
+ "chksum_sha256": "9fe22c4fa7055e0b395315234e4f87e556c6495d13ea43437158f4ab91c3627c",
"format": 1
},
{
"name": "plugins/modules/ome_network_vlan_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "27d41fa508d95a3355f9bb0db06cdbc5b30bada29b433cf4331783d9320fb9b0",
+ "chksum_sha256": "f41caea42efc7ab9a4ae7144182986992d37e7490dceccd1ec262c2369650096",
"format": 1
},
{
"name": "plugins/modules/ome_powerstate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b42c64d44be41fa1181e738343fc0ca10cf7b697556139594d59b9a8a8d5efc9",
+ "chksum_sha256": "b15c98e5a0722cc06a6d060275cd527526536bb3a0fb4b1aad0d463676afaf75",
"format": 1
},
{
"name": "plugins/modules/ome_profile.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ff602c4917fd764dc2166c8c5db3156d2edd6485e85cc509b2dd58701c71364b",
+ "chksum_sha256": "1668387d3adaf9a68264089ef46e011601502512ffeb72aec08c4c0d91264c8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_profile_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e95d312e668311e09b210ea9e21b0b63d46cb5b6cd1fa0e7e2f306ab507a489b",
"format": 1
},
{
"name": "plugins/modules/ome_server_interface_profile_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cd019157af898d93fcfa244bb0ca2c7d8fb0cb740977f5ff60444f7fead493f4",
+ "chksum_sha256": "7828e3370b2228cebd3f442916fe81c26a43fa4c5239c186c98f3df7d39455d7",
"format": 1
},
{
"name": "plugins/modules/ome_server_interface_profiles.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf98010dfb823d0845e276973896aedcae48ee1a79669e7e00c2b6b4f788ee3c",
+ "chksum_sha256": "c27dedba93da5122c64cb09bbad215adf60f6ca640adafaf11cebafb93b853e5",
"format": 1
},
{
"name": "plugins/modules/ome_smart_fabric.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ae5fa8026a21d5eb90c18a6284d2ffdf0f82a2ac3513575b39800f4471d1791c",
+ "chksum_sha256": "f55fad4eb0a37b26c225016ce0a62f14289987e8447f60a13d750e724ab3067e",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_smart_fabric_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9aa3f9bfa28ec74952f45d6929b8941e467fc72d593d781d4b9dfd4b07573925",
"format": 1
},
{
"name": "plugins/modules/ome_smart_fabric_uplink.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7bfc89fbd055371a3488f64107a80bb92a0b3d610e76fcf94d7a777ef27202dc",
+ "chksum_sha256": "8b6ec3f94eb2c2926d269de9b35f4f693311a3261e9025b3a08ec665ac2be65b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_smart_fabric_uplink_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0fc404675b2d5f4800e7fe19053556cdaf9e62c415b6867a17a9fc696846655d",
"format": 1
},
{
"name": "plugins/modules/ome_template.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f1c169dba48c6563e1d30fec5334f67fc7f0771e48311fca90de7c6da05de0b2",
+ "chksum_sha256": "e91802d2fd041c04f346ce43ab803596ec548cc29c63c403bf6cb9553903dc38",
"format": 1
},
{
"name": "plugins/modules/ome_template_identity_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "94d2c5e2a7db82c8119b8651e35d26f0e791e944257608d79ef6fe5be48e2025",
+ "chksum_sha256": "b15832e64fb39bba8d419fda96a0f0d893b7dd3c3f182a51816a5a09dfd9ef41",
"format": 1
},
{
"name": "plugins/modules/ome_template_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8975be230630553feb5243db9e01d054735f27caaa680bdfcb31a6c7f2892b51",
+ "chksum_sha256": "6b1a8cc5c499d76538aa4321ea545ecee0413733526c75cceeebe6e676ef5cc5",
"format": 1
},
{
"name": "plugins/modules/ome_template_network_vlan.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "80b6eddf62c19f45f7d4bc992d1d6f0e704b7885b8cfffe53c85b24938bd7c35",
+ "chksum_sha256": "dc39292571dca59e1370eff4f3fabaa5a7767029617a24a14b21477207b6408a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/ome_template_network_vlan_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd0ea6e5dbb418ace51af5af341b7a704b3cf0ff05163935f7ab1b4eac0a993e",
"format": 1
},
{
"name": "plugins/modules/ome_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "399d69ddd06d2b39875a38cd10bc062721b0780c135970b5e205a4aac8d910fd",
+ "chksum_sha256": "7a91a7adb4966965349d38badd708bab4713dc69b7f4007d1393d08c6605902c",
"format": 1
},
{
"name": "plugins/modules/ome_user_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4916a4f9ae03fe5f5de7f3de1ecfc1df0797d437288b3b8d13bd94d9cec6cfd8",
+ "chksum_sha256": "6609af8ddab5e1073d5041d40038cd70f8eed65f5d1d3285f77523b6a34126e9",
"format": 1
},
{
"name": "plugins/modules/redfish_event_subscription.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ae1a0df42858a2276f00a574c71e05565070aa789ff9b0645d8bdcacf84c0359",
+ "chksum_sha256": "675a96365b20bf6bf8808916b467f6810e418271a4c914565c79f41155af8f49",
"format": 1
},
{
"name": "plugins/modules/redfish_firmware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "792d527767437fa41784d6f458c9638bbd3858e6d4b5285c15aa8f88e31e7ed7",
+ "chksum_sha256": "380f9d2772004466c6e4c8fc33481b52f0bb05a3dfc3635660f9d8e5b447182c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/redfish_firmware_rollback.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "644c7c2972589c5aa58cd7a909119acc7b3d8a4650f8c9f726d6fedea3a9f1ef",
"format": 1
},
{
"name": "plugins/modules/redfish_powerstate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d94dd14da6dc9c2c63e630201382eb2c6fae7e725b561cd4d9aa5d652822824d",
+ "chksum_sha256": "c511e178a205942312cbb1523270a27bf4bb8506b999ad29e8c86a0d9e44b976",
"format": 1
},
{
"name": "plugins/modules/redfish_storage_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "98d36b7e595fac936671741fb344c1c5f7e85211a9028d4215dd9497ec9872a9",
+ "chksum_sha256": "43bc45da1a74df993f68a839aa1e4c743e0d774bd19e318f0e48abca127f51fa",
"format": 1
},
{
"name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ee48376dbe970009538ca8b62c580b70cb7c6f8085e1b0ff2474cd252dfd71bd",
+ "chksum_sha256": "d9edf1a05b19caf48aab674c2d9e34c1b817a9b480255e91b73cf0f41e401b96",
"format": 1
},
{
"name": "requirements.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "730cd2af3466c3077bd9e8f684da582f2ed7d5d43cacb7281446046ad108d26a",
+ "chksum_sha256": "32d916481c121c551a11659f3e0ba3b2e38c580ef5dea95cd1047560f3df514b",
"format": 1
},
{
- "name": "tests",
+ "name": "roles",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/.gitignore",
+ "name": "roles/README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "67b0f799f12726b149fd9677334c10b059e231bf2fa7150d55e9b4d5fd242062",
+ "chksum_sha256": "8091502909248e459a4b18ea05ca7bf36c1022419c31b23e4765684315129c22",
"format": 1
},
{
- "name": "tests/README.md",
+ "name": "roles/idrac_attributes",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "84332312381ea3f7b69a0d14fdb69eafe91071bd64fdc007c82b1cd17b0b68eb",
+ "chksum_sha256": "245dd1c52d38db33b91d948c3a4403bb0739cf172ffd4e63dbe444477c395902",
"format": 1
},
{
- "name": "tests/__init__.py",
+ "name": "roles/idrac_attributes/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccd23c69bb78d5c4da4314139c4f34ec24adb35d0bdfa16c096e2b5c20f9a8c5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4efd533c8f0a3c63e2ad84713dbb875c27953ade61183e3b98526db8816bbf52",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f061ffd4298bd768dcb142d03cbdc74c96c5b3e6db84ca86c663f04c4ed083bc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a7e4d3e398ea14415197ac551e718bfc71cce0229df4f4f61a35981096dc47e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c58427eeb90ebf651a050a72f1a592dcb67808c53b5460041c67a8b9db98d3e0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c3ff5ada33af88f7d89035e74a24e9f7ebd0bd1ce9aea711a11e456babcedeb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/idrac_attr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/idrac_attr/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2eaa72c7f6eb429b320d8c6340ed73e43509fb9b6396c73001e924be3f4b7183",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/idrac_attr/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a7bdba9a94a875c15ec6f83690f3d06fe6c078a228c5c2fecd3c220b344f2cc1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/idrac_attr/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/lifecycle_controller_attr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/lifecycle_controller_attr/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2eaa72c7f6eb429b320d8c6340ed73e43509fb9b6396c73001e924be3f4b7183",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/lifecycle_controller_attr/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf6b840896de4b786ee1a8c174e9c6330883bb9a29f5fad3116c88652328c937",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/lifecycle_controller_attr/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/system_attr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/system_attr/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2eaa72c7f6eb429b320d8c6340ed73e43509fb9b6396c73001e924be3f4b7183",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/system_attr/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3eef0d294a285ae534ef217c2b07156e28ce8017c57ac02dd3b7d8c71db471bf",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/molecule/system_attr/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c2023b600b957bbafd3a31e5ec79d21662dffa1fc218927b65078b2b3de54a3",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50d768c3b61c1deb820db1bda56cf4a916b4232f9ed448da92035eca8e654b93",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_attributes/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e55a77ecab73769ee48b7cee96d383945f3b74732b244fae6e9b02e0aad3713f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6c25e672dc7aff51b545c0b96e4660ec49c095f46b7de93843c22e37a2da95e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "568c7c8e3c076cbb645325bfe8fbac4651420942e7b58902f3001b4472f12cb2",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa8e7c5541c5df8b0c7580807b182ff8d7e5d5cca42e3bbbf3c1aff1ab9cd6b7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4fc4895ae1ad8e0a5b64667a60940146e93e076c4bd6cf189c27d63ce936dcb3",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00c6edc47afb99834b3ffa0191b0ecb680cd57182b35087c5d0eda7611072d40",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/__get_data.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28a6d3f13871bd19937f4b4d446f9262917979e0a6ccf754b2acd47ed40b3e19",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/clear_pending_attributes",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/clear_pending_attributes/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "833e9d4a796e3d6e80f7ef1091590baf9263cfe66623e468326bb99c12a9c7f9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/clear_pending_attributes/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/clear_pending_attributes/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b18eaefea67777d2cca717116654500a21607506a8097e180d6b91346f88f687",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4780273b956a4ca22a04c2c52fe1342f40dec5d42ad58389353f061e0f2cc0d9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c3ff5ada33af88f7d89035e74a24e9f7ebd0bd1ce9aea711a11e456babcedeb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d905bfe4faacf3a7e02a41f538327385ba1dc7bf778a95bf2787447a404db67d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c3ff5ada33af88f7d89035e74a24e9f7ebd0bd1ce9aea711a11e456babcedeb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/reset_bios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/reset_bios/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2140db934705e067910e794e47080b35e66052808077143b96b902638a0fec8c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/reset_bios/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de8583bbc3924432cfe625f9899beb6ad7848058e61d1ecabd745ec810ee5498",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39dea23cd0f22d83a336b801858f334e655c683729059bab6526419e3c023f66",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de8583bbc3924432cfe625f9899beb6ad7848058e61d1ecabd745ec810ee5498",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/resources",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/resources/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9fa5332de56ac39992c0dfabb7ca55ba96d75b8856879b1d7feaa53b6c06c63c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/resources/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1c3c3706f74819eef75c5a6f7fc7a3c8adb78b0ec008d4ef41e382b779b19ef",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_immediate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_immediate/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3a6e4124abf9d96581688f23ba05ed99f6e6d49a67dc43e0e2f3477e12f39f4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_immediate/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0916dfb0e05312c73d1f5e8a7cfc822cfbc0a16e9ce0a837df4110347690994a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "416f65beecc2e8e53ad7abf57e03a7f089401dbbd0f4eb16789de510950706c9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0916dfb0e05312c73d1f5e8a7cfc822cfbc0a16e9ce0a837df4110347690994a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_on_reset",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_on_reset/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "223982198fa78ae74e7e1cbbaa1fad058036fc89c58918ccf42009d51ca54d56",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_on_reset/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dae48a016bbbe31b14341b09c78f16078af041ad7d83f232072bbf07f960fab9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f53e8d4b80c2dac745f033695febdb4f0c2dd222b7b8ec447164ffb3b44f210",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dae48a016bbbe31b14341b09c78f16078af041ad7d83f232072bbf07f960fab9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_with_maintenance_window",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_with_maintenance_window/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8884874fc4b2a0445cf1e5cea84a17ece3dbf7e3455dc17691d8f52c91e418d1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/molecule/update_attributes_with_maintenance_window/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dae48a016bbbe31b14341b09c78f16078af041ad7d83f232072bbf07f960fab9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b45790ebf23ed966d7e99a7ba15314ed882cd366284db9f773386e96687f71e3",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee667193e6334e84e496371613b4e028fa9c0067811e07cf364b57064ce93839",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_bios/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f5bc4cf4b6dd8b6d33de490d471c654413d7a5dbe292d025a58e779e14020cf",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92a0c9cdb08215dd0131c2f5e91d125e2c58a1e9771093c40bf71097f035559a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbc348c1aaf27aaa035a1b34a9e496b64dc515388e9a543689c28dc50a597bb2",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "867b8c424c2b712712af162203358ba5061e926389d7ad4d8a3ee40a0d5b3920",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86cced715db95d2ee43d4cf20d223992c4f5aaf08f8c38b60267fde2f11572e5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e247f0476f74edd90842e7e2507e0efc3f21427f80e411e0d740c74292c55c6f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fcfcac8b7c470be7db84a5ed8c0b958c6a056d0ef05b2873f81eededd75a3ed9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4123a151aeef273229f90ea9d97454a56c6bc2614ab1b82e46d1b5a63bf4ead6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf2c97a9b021ec60a46c21fb122c575cf5a4c5cb931ca25deb80d8a3af1e4df3",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "29cae1e6b242c1d9129666e724c14c023d54c0dab247a6df3ff78dc6a02c23f4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d19f060b5c483683cb13e4bb7d5bcbcb8285a93185df49e9da280941fc9ea7a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1eb678af41caaa30a1a302cbf75849714d9e0e804ae64c8145ff3a8f6518660",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe2b08a47a39e288193df820dac93dedff7c9b1e0f81790201b8d34865db94dd",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75317e78c11cd0f2c999b0a24869c569f6eb137a83d1e3831fb0e8d3db9656d4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e7a204475806673210f3155629e8fc017020e826606bc7cb67b78e7f3b3e556",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ef1e42f64b6b1036298447aa4188460ae395dc4af25ab0d0b139d15fbe47df4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c7c031cbadc0f08f5dfacd846de58729e2900c340492871c191e4ac6b504ddc9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15e5c2cab8d295c1d6957b2fef3bdbc2222f003b023a0aba83dadb606f11bd03",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "365ea1cad2e4ffb62f1a086752e377bc9a3fa010685b7c15211be1dec90813c4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_boot/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50ca20875f667975a478f6fd4cf78866cdfbfc89891bbceede99a1150aa258ff",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c9a1ba53346f348b656ce43ae836d01e341d173aa793536b7b83d39a478664b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cf00eee21d159b121fb581e5c186c22803b7605acdd260c54e5d58969a94520",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66d821f9940918fd71847d8db4b05eb937a6de3f457ad7b9b6412762d51d1f41",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bcfa9cc16612b7726b60e9091be9e85bff3ea73991cbde070e977c434eeb827",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b2a5d6be0d2bb806d5549884a7ac88cd841662629e2b15a9a7ab67177250f7bf",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CA",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CA/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "920f4e9cb7ca5ef8393b92b8df4f47d7e92455e39cb0e0d56eac1411e2238cef",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CA/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CSC",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CSC/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9877fa2c96f05981f6afc09470ec0b6feadda2f501d1e6380d8d438a8a367c83",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CSC/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CTC",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CTC/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db852bf66f0d560599d1631d6178abf0aea7e7c768553bf3e5163ab5ca3c9a80",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CTC/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CustomCertificate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CustomCertificate/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "56fea8f40c9d9eca3d4c42519c87a21da0a603f323a705d7eb9bc022e594e449",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/CustomCertificate/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/HTTPS",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/HTTPS/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9858eda2d16422a41010c07c064193667ee573a295bd4393a7681cf0f159383",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/HTTPS/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/SSLKEY",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/SSLKEY/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30c5c2fc158089dc6f39444bae637bb01e3ad81865b56fa72903b702580987d6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/SSLKEY/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/__delete_directory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5acaf39dad31445f79924aadfd59f40857cce066a2bd4ef3dc1c3fd9ed2de0c7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/__extract_firmware_version.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2c1e0fbf65e8b1c4ecd02182ada8dbbd320fd9828f0e50eea57450892e9c5c5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/__get_helper.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d510c89d7a712cd198e199f8b885057154f906a27dc18f90734362ad192b783a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/__get_ssl_key.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7367f06df38c4c2fe8b9a280dfda2ac001eae5e3c8d7c62daec8856e9dd8d55e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "462bfb673d20d3ea0a5b9a6731feacd316b74db4025013bad12141083bf62d1d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/generateCSR",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/generateCSR/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08b7b587facfba070a4b05e0a7cc17e3936f660d0d57b39a69c63a9955b9ee79",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/generateCSR/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/reset",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/reset/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6caadbdbb48e6ab7e9c2e326b312ca540813cecd18e5caedc7cf9d9f401abd90",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/molecule/reset/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tasks/export.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "80cffca71ccf99a8cbbdcc7e4cad320663e0064ac47faf66b1a62ab2a751d177",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tasks/generate_csr.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aef02f22f523b789c3a397b9f9b7c39c166e754b978f4920177bcbc53067e211",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tasks/import.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a6a2edb98266da8d17726347dc0b3a0c5c270dd44bc0dadeca28a493163b6cc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8500b36628049770984d5c5737419aa9e2e209cda3eae776b981ab8bb70885b5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tasks/reset.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82b70c0d2bb2c126a73c04c392513fbfa84f4626a6d5f2c855e45bb1bd5e0df7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d906aecb61d076a967d9ffe8821c7b04b063f72df9d9e35b33ef36b1c0d98f16",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05252420e22b201c04db4b4e79f650a9bbb561aea0caec3d3e253d825637f11d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_certificate/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a35cfc80bcacd3b412a6505f8c5e599a8eee9487788a4dd7c8b69a6882b07c59",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36902b75877782020af54e7c969630ae84f2ee9872c4ec4350a6ae79e06c930b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35809f9a6cc33e2577870d883ad657e720b46ee3a2a0777f895d490ffa845ca1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bf8b1e4781f050ca3d526439f50cead1680b8fbebfe4ba140b8f05c648ccbf7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7df7a3c628dee71fa4ce697d0da28ea42f1eebf667853e84124e2971af446c42",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a7dec7c1e8eb54a57fc78788b66c44e036b676ad62bd998114d7b87fd32e431",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/molecule/default/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ebde9ca53897de40df1400880e6a2a91d81c7a83e56541ee410b675d781a063",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a0d846dff7ca3876c76b1e6cfd625ab62ff93133385f09983e3419025e53a0c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c0d5bf979a0ad1541b496f173165b11f0ad14283391efde2c86ee35c477eb43",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/molecule/default/verify.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4108b95a87aad1d805cade4df5e3071720a3c449023378b488796e8d1b0baaff",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1558146a88520a40c77e451c4a7ab0f54fc2cf83de382bf80b3d608b9eae29a9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/pre_req.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "121b8bc546708066ec1bd268af42e70cb396f1d2a99450d89ecb3eebbacf3c29",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/scp_export_cifs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee2d348b52f7e2711260348db0ff8ad9e0f7c4eb4a391c1de53da94e1002a406",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/scp_export_http.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8ec53a81eacd4b32bb0b7f5e209d85c386b001c541f581d51c17e38f7095836",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/scp_export_https.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ef7408260c8297ac606633989ae2a26f24a15c493dfbdb7f38ae0c4e20abb84",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/scp_export_local.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3d2296ccd8268dbd87cd0dfb3f82bacd1b93cba1ebe898797ac01d681f30f1e5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tasks/scp_export_nfs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce2b8a6305affda2aaac530edf3e05695c8ed2ff994799fe4e84b77050a33617",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d5634aec64528ab58532686cef5828500fbfcd3b729e0333e20b63365f4a080",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_export_server_config_profile/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac68a419593778d913a88752aa73cf05946ca0438db7f4584d6174b04c0c400a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2aa85fe4ddb50b123a7922cbdc78e9b8b479816d298a5f77b462c00c4f0a2d0d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d35d5e950afdd8a3f36385d957aef5f2348add525025b7b6e786070e4d1f2a9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc76fe5e14c723905484243dd4f75b8158ae7ad1c7c85f21c275ad1f3aafe886",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f0cf3892bdc532a0bd6dd0bc448f090139919f4fc9e1500bafe47d2fcda9aca",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8afa3f929d14d3f681ce7379d32b9ac1f64d820d03d79b272a083deb6676017",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/cifs_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/cifs_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2a085cfe51d93783380fcf3c0900e392f570b0331849d3f6d475da29df6d19ce",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/cifs_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5aa353429e55ed0e91057cdcbd8e4b8791d31c0f4e60d85f4ab62bf972d86fa6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/ftp_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/ftp_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49d98dc1fc2fce07f662446a0063c02e7f8cd93571cf38d56a7f85a278fa963c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/ftp_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/http_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/http_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5471b0eb5b4eb38016f0201623470b2dbed5d4a0c0cb849d582490795a13350",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/http_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/https_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/https_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "defd19ca5cd9cece94b5805b6fa2b0329f0bf38bcf8920a164875d49380acf0e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/https_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/httpsproxy_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/httpsproxy_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b89bbf2005ff14ca995095080b6f5408139c77fdf5b05df787f0299f5a64060",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/httpsproxy_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/negative_scenarios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/negative_scenarios/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b9dd2af9cb698278463c46b3afbf3833cb9b8bc203d97eba7cad9d95fe65688",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/negative_scenarios/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/nfs_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/nfs_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c8f6e3d66fdc1d105151bc0df8d444dd0ebd4e6bd986f59dbadeaca3a72b9d4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/molecule/nfs_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9878250caee484ccd585db63e5c38597e606587c475e3e993431329b1d14440d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "892ec6c42a76c0f6ba5fc628f5455f78700271bffabdbafde5eed32df1699d92",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_firmware/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76a25139790ece31962351e91c95883e5233a87e63d93e6c044dbf5a0c0604ae",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "babcf9ea882157e54c32df007487cbb8432e214d7c3d5ebbba6631241792cbc3",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e9f5e643ff9212b823aab7dfeb2658e1b1743ee6e296fe7cec64b64c814dd1f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f872a794379898660d17bfd9e1ae91bb13f8725a1cf4b16b901d80919192caba",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75d820d28ed29f3ab34caf04022a9d5b3023f9179806d13cb88deb25bbf1d58c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3bf6b5dc34c2b73a1e1db79a12c608502254e34ec556ba2c261fe607fbfc6ad1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/backplane",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/backplane/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f060c709ab360c2ea3299558afe6853d338a5ea57d673d419361c87525e44c69",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/backplane/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/bios",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/bios/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19940920328ca99471e6c48105d744234a8601ac23be9f43ebc47f4dc199ee80",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/bios/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/controller",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/controller/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa250d5db7bb8ba429a89882d0b7496f555b742634df347fb0d9832780032523",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/controller/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/cpu",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/cpu/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03f5f6bec9178116f17d4a1f7e49aa0607b3b34751e35ee4d86f26d4f9912383",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/cpu/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b14aa1092665e1c96e60baa45c8be0219a08702211efdadd89977ce8bd11bdc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/enclosure",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/enclosure/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "42dad150ec77379f3d5ec4dd2d5d009c4e0b1eb61f4bb6e9e0b88da5a0527c62",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/enclosure/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/enclosureemm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/enclosureemm/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4533ddb492d9a730609c0e45b1edf770dcc9f1aaa12a04f60d47dbb33ddb2bb4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/enclosureemm/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/fan",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/fan/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87af8814a77e8103470cdf7e312c103d299006a6a2a358c4135f63feb41a2e08",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/fan/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/firmware/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b86b895de8e9be2509eeaaef22b3e120df8c73da0de26a46276ffe96af33e44c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/firmware/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/hostnic",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/hostnic/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d233cea48c1048fe9ac74653a6f05a9e471b178adcc448612d3e0ee44ac7f58",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/hostnic/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/idrac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/idrac/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d5ca5f3141059ad14d844544b22e52ebaf2ab9d44fcb797d940b92dadfb3737",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/idrac/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/license",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/license/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db2cddb39bc38b89e5db58beda357a60f7d4c5cae9ca6662ab0d42fd1136396c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/license/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/memory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/memory/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30e86739e7e1e1e18877223dbe17deca255fad88c9d955da03693161aaec7498",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/memory/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/negative",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/negative/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71f132a153b37c0facdb202a3134776049908e882a419fd7142a3fb9f01a185a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/negative/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/nic",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/nic/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "896dc582912c1c53fed8e72bb323f260616d2dfc79a2ed43fbd5bccad815c297",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/nic/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/passensor",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/passensor/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf90cf12505b3178a5fd41ebc0b9288aab73b841ec7e7ccd759247625766c624",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/passensor/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/pciedevice",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/pciedevice/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5df2dbc3b20bf098c08428c4dc85b79ecb447431305bcdf35b26e8320af87a11",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/pciedevice/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/physicaldisk",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/physicaldisk/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d8d5bdceb4660aae165f96fd61a3b0361d0118e3db38f45fe89e10d7646843",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/physicaldisk/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/powersupply",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/powersupply/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4593adf60c90356bc9aa47f91bea16c718884b95a2ce860608995727c3645bb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/powersupply/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/secureboot",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/secureboot/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6e792336231dcfa4207c118ba29d754f4cf4cc5a1beca44ed9d7188f4367e85",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/secureboot/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsbattery",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsbattery/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b784dcb655f275d30c967cb253e5b3ffe09f4a3f3ddd3d5fbc1166133e962dd",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsbattery/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsintrusion",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsintrusion/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3c8cbcd170c5cc941748c35b35206c8b55d2371b663d068ebab44330a758cba",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsintrusion/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsvoltage",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsvoltage/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72f0734accc6e0116d5cc038683efd3661b86e8cce47590edec9b09e62613aab",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/sensorsvoltage/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/systemmetrics",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/systemmetrics/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "299c5b02f0b782f856fe72ab5d2a7e30d81cacafddb4abf843c6e2e8db42af29",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/systemmetrics/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/virtualdisk",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/virtualdisk/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a38bd071d17033ff2eb821b827f460c2a3a6d2ae415e6fef83ac523a1c74afe",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/molecule/virtualdisk/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_attributes_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e4ddc302cfdb0520c6fa9e0a73708a3491412bea218571b184b94926c695030",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_backplane_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1ee6473d831ef5fa2b080fbd7e6c545f65cd0b8ddd75af862f3fa314ccd8b71",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_battery_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "148c547f8686797aba487fb562ac318ae933609914899419bc7b08c45f92f76d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_bios_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d625b06d054c58b3434ee76881384abc724a66b58fe7ecf40fdf870c2e7b0b3",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_controller_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ef30c6af08535b3ec7ae2485bdd9d4aa79675e2b1bbb6b07be726ac7c36651c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_cpu_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "151ae93ca668eb1a8900addcfabe7ae999e670e821ad4ced7f06fc95735bd51a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_enclosure_emm_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39af5a227279476db905f227e7c4321756537f807b697dfd02fe16e5af40ec82",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_enclosure_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "deb7f669cdd9686dc3c1e2d129ce062ea6ac39582fb1c4d1ebadb9d33aaa0672",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_fan_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bb1df958e92876346cb1fffead2be0436ca155725be6326174f6be124812afb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_firmware_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "024fd4797f5075b0030f378f4b23696e23f193eee0ae3d286793028ac54f45f9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_host_nic_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cbe6b638f4f6bf98263cc35f2ab8e9dc9c20d4f4781179ea84b25dc837940f49",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_intrusion_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfa01c92ff7a53762840eac467d93619b0191e4fe771715be349eef092e96b64",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_license_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61c857b5a12886ad7dde1dee0dec583d999cc435615dd4ffa211ea52e11cea56",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_memory_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f1db24485b5870b7a722e07807e8e0df8bebdd162cbb67e12167cbb2b35a3a9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_metrics_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5bf5239cfc7ea06a7bb797c84272d5ca0f814a1ad8d0150299384670cbd2a8a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_nic_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bfc885a21855a4c875dcd2d8d30decb7c970dedeb5c4912a549edf88e9fb9a99",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_pas_sensor_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e9d20d3c5dcfbd9ba045bdd03395fb421c1b6174f3b4bf17a442855b28b3b84",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_pcie_device_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09ded655e39f0510b2432d9e26f24bbcc03d147dae5aef9a4c53ad85b2f8468b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_physical_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c08ebd517f4809864571641b9bbd54e720e54d3eb9f608b86c2de0e21d5e9154",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_power_supply_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b768e6b3a6d27240d49d04a0f59493047f2052f2d9ba94ede986198682525f77",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_resource_id.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71b9b902e3e06e3effa5e21acd8a6f8be1d0dbe49500ff72d7d8cb27def8555c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_secure_boot_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cdb9d35aed51e1bb8f4626028b87fed75d52df4a29e933a096eb6e1e526b47a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_system_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e94f83a0bcae476b8005195648d8fd69b894b0f96d24fa3f837f021f46537d7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_virtual_disk_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "91690948737d7d0249d914032490bc42d6260226a1b8fd55922653a1b46b2a61",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/get_voltage_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "baf9e3a82d2f19ccc21fa8721c09004ef99ab237f790d5f676bc533aaa523e0e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f082f14db62efdca0aa691bfe928fedb660df3fc98f6e32f367cb8dc068335c9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/backplane_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "814795a1a735b378e46304827d8315436b94113bc2a229b35ed354886db6b927",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/controller_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "48bffad0e6952ab27d7721095ef84c4f13fdc6ab07ee859f36a380409ef6e594",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/cpu_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e8af65eb0a9b3e627d4d8b0a352de3cb82218bae97998be2101290194825c25",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/enclosure_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "763ba0a9850c0b62d5224e40cfa49bb970e86f3c49991117ba21254211d11c24",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/enclosureemm_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b409927ca4278e7eea565b40cd36ecc0e212901effc08ea13f8264e323224bf0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/fan_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92924ad83dc9f5fe1b6801d09dced4bb70c1209ede2394ca0ced2d25c3c72eba",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/firmware_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3dc966c3a3c7ec1c7dd3a3bfb2b4e3a97c10ae9f279c4a56d3ccab5313857afe",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/hostnic_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e84c587ac238b74f292b0382d465ee29ecfdb92a53099410bb0691878aa8990b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/lc_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "170dcd03bdb5031bb82a24cc19cc70e1815865a2572e4f14b892221324ef95b5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/license_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4d3e30cd29d21fa947ac9f1b43c3f4d73c60c85187d0d9d01025faa1cd969792",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/manager_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60be5b10f57f9ec1bfaa913610ef448320b476d27cd33b122a7037ea31a95c21",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/memory_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6b16f28108610ba17f95350a75e72ee33ba0db357cc239e74a018e7aebc81057",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/mmetrics_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28c0563664fa0a63bc2654edf6bb78c89208920d7414fc9bf2c9eb7408835cf0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/nic_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb3184e49419f93cf390a53562c01203db04037606af82aeab51783e21a5fbb1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/passensor_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2796df243ae251d9a6d0358aabd93e12fbc56aaff0208243fc71be0e20bb4182",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/pciedevice_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f4bfc03967189c3fa4c2752b75174294075ad6a2649d4c7e0f6c82dc0b0bd55",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/physicaldisk_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "611944e5b3bed5181b59b6cd940e8d6673076bff6209db3c7a4cd9d12608b984",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/powersupply_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d0c8c6212e46587f25b7366001ec10fbd5a9884b53339505815abdee19164c78",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/psmetrics_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ade4267e18da8e945add979e001b40d832e77f7b3435cc3efd55f29bd4647b6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/secureboot_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6876b8b8892d421dd0ddd59dcc1986d54361e6c376d8239f42d91a0db707fff",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/sensorsvoltage_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e26f5f551fe9d943743159b24c9a01c3c4c804543311330b12ff87ed2f0b7079",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/system_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78c885b55767b51ba6c4be7998d67e9f9599f9e7518ec683f9bb1b7bd3f337b2",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/tmetrics_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7cd404817eae0e42ab4a29329d93ecf2226b7084464c1613dcd85464b83ddd4e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/asserts/virtualdisk_assert.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e8bd9e08382df2ea34a9b9830b92086569ca4f16e453286cb92c1705f69aa7e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "71250fe59fab062631b04fe173573783fcd89b77e8f5ae2c042720d10cd2f266",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_gather_facts/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "330b6e0ccfb1e2805e8fb61b43ecd5170d98d1fdffaadd905de5ffb2c4339d09",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67521b0bf1ceb30e3ca348cd5f07d4c54aee0c56d7b3935c542fd920c11df288",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d569d3374f9ba9960023d769bdc7245afb9f635656bfdb48ac1ad61d0279a53d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c31a046bb009c7b68dd25c11a096f2b08aad4e49903648580a05dfbec4da9d2f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1daf5cd34934f57365ef41dd1bdb8eadf73c96faee44442d8b607d3770921a6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a77ddd7746819905a9dd4511b45439de974b04c6628f8f89c5b89490bc9bcc7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/cifs_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/cifs_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20c76524c38d431acea8853cd59b3deacc7a933da50ceb12ee5c9686608f686",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/cifs_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/cifs_share/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76dc6ec3bbc45acaa1363e8af18098ebdf641fdb97460659095a38744ff11f0c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3f5fdbd0243581c13e09c4a9347c3197712b89de7ccf5c19bf040002a8e0967",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2c3ab569ca359ac2fa8acfa7a3d3b77e5c1e44b8fee6a13b87888f32abe8ac0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bc993e0fb0f926cb49ad73e9afdb41ff60906c02739ede68fc1c817070577a7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e24bb4df02d6d331effe0c6bc95db3c0d7b38776ccf64b0bcf680cb3cee453d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b03af0b95e571adc26e9e17d74cbaa0d0ad65e5764b24c0c063ffa073bb70408",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "671c0272e561dfd3c8a4cf4b62dc1a0b2fc3212d389be919bb50d2dd842fb120",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_json",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_json/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "244a0b6dc7b307d86c9fdae8c9e4b4af60f67691ada7486a9313c9404397d25f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_json/molecule.yml",
"ftype": "file",
"chksum_type": "sha256",
"chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
"format": 1
},
{
- "name": "tests/requirements.txt",
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_json/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_xml",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_xml/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e2a677c8f26d168e7bf5b5e5efa07f121f70750d82e3d9bc1e384690ea55b7c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_xml/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_buffer_xml/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_multiple_target",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_multiple_target/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f76858a1818cf9026b33f774c00006dab25c933ca94d07e951e7d8bf7d225e92",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_multiple_target/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/import_multiple_target/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/nfs_share",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/nfs_share/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "762ff43cdf20d5c5107f8aeb15af3678adf24b75793fe3a15c886880f16e09e7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/nfs_share/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/nfs_share/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49b649577157352b249d241cab5c9f05d2f14e72c6b886ef809b1ec006a6eb0b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/resources",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/resources/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/molecule/resources/tests/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "043e0949892dddbbf4f51d9b5146cf2fba7777d102dc55c3f5c5a9d2a7fbd73e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8a329bb5b5a0c7ce2855fddb6eb3b16ab43524ad86e7f12defe55577933fb636",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4acfc541204ebf3d6c37f48c79e48f8f273d016aa7a2f6af7fef715905245eba",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_import_server_config_profile/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a072ea3135c9e0109ae66ae42d86a3947cd27c8f7fde4ea1d62bf14a75ff7b6a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3e961b7f75abd408cc8ab2d76fab6a3175b32ddca1f3e87c64b009ff8e53daf",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf4f24cabcc7843106afbcf1ad295d130bc31c4b9431d8e149ef9611ec66f0e4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ae7c77fc2f339f8db094a30043ad551606c6bca9e5bdfbd9e34a79563f7334c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e63d726e475ad59773fa26fe3918bd6cb5b042b508834e49c8c344fbf4945ccd",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "081550bbea995e8776778cb69f4f3003f5dc9ef2148439bb0ab441f28fd67949",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/clear_job_queue",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/clear_job_queue/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6230b7be5eb08101f84d73735097bc06c18160388f47bb8fcaafc602eb70d5e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/clear_job_queue/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d81bc68e8c8b2d39739998d44f3e799e80e6025dc671c773664a0e1c475066fb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/delete_job",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/delete_job/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6821a330491cf5c51f659fefc17c1151818c5f8fbd9595df438aab1f149c7557",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/molecule/delete_job/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6128fe631d272bed3adb4c98223a2b04d554093dc05a2c499eeb9e6d80c9ac7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tasks/clear_jobs_with_api.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32e48bc5f0ceedcf0077e0ab7f6931e5dc1f4c34324aef0957ef440b44f69369",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tasks/clear_jobs_with_wsman.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc5e2cbcef3dffe45fbae0ccdb4310621cde5b940b81f9051ec1039126e894a0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tasks/delete_job_with_id.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5aec3e437d0a24ee7b8a41737abd2db6889a1c4a0c0543943212573d06a2fc83",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tasks/get_idrac_firmware_version.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a193b7f5af28f17972f0da7e52e5fc0ecd34f4b8ae6398bd1b04769a7cffa453",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tasks/main.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ef1902714bbfbe85a80e4ce867174506597fec7b7867402bcbd1011d0fb32673",
+ "chksum_sha256": "edba2b2d2854747abff9b2b993b2ac119117eaa60533c517b5d7586af869d784",
"format": 1
},
{
- "name": "tests/sanity",
+ "name": "roles/idrac_job_queue/tasks/validate_input_data.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0c862aa3048f9061f02fff2a55ec1324b7b1c81b6175617d3e739eeda382d7ee",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/templates",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/sanity/ignore-2.10.txt",
+ "name": "roles/idrac_job_queue/templates/idrac_delete_job_queue.j2",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2701753b2debbb5754db1f7cdd5bfc4e7cf93df20a06fbc86c7ea979c3151db7",
+ "chksum_sha256": "f83008a98424af87bd8817bb29a82a175e175a8459146267c86e7cbd1895c31b",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.11.txt",
+ "name": "roles/idrac_job_queue/templates/idrac_delete_job_queue_force.j2",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2701753b2debbb5754db1f7cdd5bfc4e7cf93df20a06fbc86c7ea979c3151db7",
+ "chksum_sha256": "b777b52ecc380bc79f8a903a8bd2fb8613e98ea665d41e01efa2532fc9f9ef5b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
- "name": "tests/sanity/ignore-2.12.txt",
+ "name": "roles/idrac_job_queue/tests/inventory",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2701753b2debbb5754db1f7cdd5bfc4e7cf93df20a06fbc86c7ea979c3151db7",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.9.txt",
+ "name": "roles/idrac_job_queue/tests/test.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "77ca00eb5c1ea0580f4e5d12778ac5029c3bb190d5355d5e8ffae6a3a53b97fb",
+ "chksum_sha256": "8898537e0e2c4d23b06938b4e5181cc005b3eedad5c6f3c848c78a8a93f7ad21",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_job_queue/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12bd53e6ac521d02cf9d16a83ef864467aa1ca7398b0b20e15836de87c74f9cb",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e800016c9724604f5c12210c94b830bbc27bd4996205fcd74a31768f754b0a1f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/defaults/main",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/defaults/main/esxi.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5393035ed221c2ced358b9d585fd1881817a8b31b0dd030ba32e81ecd8a7f4df",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/defaults/main/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "759f139407f609b8655150fe9df73e69025bbaadc01b6c69e07729867539d583",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/defaults/main/rhel.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e2a5308265f0edf9cdb641063bbf2c2e877a9693154b04cf3a4b9a784f3934c0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "82cb53c7b2545e1a7883e32efdebbc34bd69bbad9ccbeabd190c3028f7f05777",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e69ff8ddd86e522acf91f9ca4efd9099947ecb857dd1138e8c7aef4793d816a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db297ffe635bcee661eaee0844312d69cc364d67cc427bab545865674401c9b9",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/clean_up",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/clean_up/clean_up_destinations.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9a4258e490a7d6f72372a85f128243594c709f26020b61761ff2064d6837d46",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/clean_up/clean_up_working_directory.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6cb3708d332825d39b859aecd1df938ef1b3bc63b92213c1195432e7ffa9d0aa",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/common",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/common/copy_iso_to_destination.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "163156845f185684a862eb1ba7f0023119f51798898d9bb79209a8f15ea80b20",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/common/create_working_directory_path.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5d3c043120b0788f2a4a4e5f8dcd26adc600065c47c423f92eeca3907c21d4a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/common/download_or_copy_source_files.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec7e75367105039f8833bb8bcacf522f137bd7456162c99987aa0a8ddb2761de",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/common/validate_inputs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e70fbc15dd06a835d5cbf840d4bd93f8c1f1d0d4214f6555e68cbbd4c2256e1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/esxi",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/esxi/compile_iso.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea92a3796b8d00ba6e0e65146d6089201a6bd4f6fdfb01467279414ac69fd024",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/idrac",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/idrac/attach_iso_to_virtual_media.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2d7ed30e4c808fc88df7f59a669b8bb1b561bf002c332b3a8296c73e19d6c660",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/idrac/clean_up_virtual_media_slot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f03831c4aa6b588e969bccdce38b75d49969eae2f5543b8ba9e94510cb18eb29",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/idrac/set_boot_mode_and_restart.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0502bbe2d8e5411fda7e5e87820654f14f2641a500eeab7c8a5a5636892cce2",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/iso",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/iso/extract_iso.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "09edb456108a3a29a94573ac4568446c5b401930b1f7bf8b051502fd2d064a7b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/iso/generate_kickstart_file.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aa2a8cd54823f99a1f61b0453048b228e440bdf1c4e274b0f0ae7635990d083e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7414db3ca8e33e14b34126b64b5eddefc24d27ac2e3d3d0b9a33284acba29c16",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/rhel",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/rhel/compile_iso.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d4b130336f4349cd1c3bd96c1f52f1fcd81615bcead7e9fa39d46883ebf41f6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/tracking",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tasks/tracking/track_for_os_deployment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df751495e4839501919602cebe8c24677e11d191394353b13e5b6448af44459e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/templates/ESXI_8.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "998e80a725d3e21a3b68e6368decadd5c66196afdc74dc9947ff2282dcec144f",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/templates/RHEL_8.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1b54a6b4b5d2ccfeca7376a758389d0de0d5cb2c489189f2690616909b0e6ab",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/templates/RHEL_9.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54d4812cce2199771afaaeedf6fc8d813152f824097195dfb76284bbdebfadb4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0647d824fd28557a0b72bcdc079671c07571c1ee008efd92728e1291b7ca3eac",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9d3fffd8a4834828c2dab606405d1fe11f12c5e4b14142c99221f689597c857",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_os_deployment/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6d9f6ecca54f00e015debd5807eecd29aee9c3118d6a710768074c6eb625b9b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00e77ec0ddaa418973cd9cad99137bcef33738171102464f50d3f49f5cf2b3f4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cd98c8b578501e6b24cec9c004bc5963e0f479ca4435aabc58ddfa7e63033a0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0ec20499ab70254c4498ed1783b01bff5fdde39aca5f81f9c7e3f979ba8b384a",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6af174177901d928f439e83c69caad0fccc601a710ac5b434af7bf5240f70418",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0195a66c4e4ac7e78a39c4a8fa223ee0b2d049a09c398eab91080868422c8049",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/tasks/lcstatus_check.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "377ab984113ad5d61f3164c7578677c42d727d35eb35c79f61275b794bdb970e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17feb74dd2b4bbec2b65ad4044f5af9e8f9660c66a9594cce342a79e178c1225",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/templates/idrac_lifecycle_controller_status.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cacc1f259dc7dbef66e2f715f138d3a75ffd5852ad56a2fa4e0f17358f30b4ab",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43fa08ab4acc6414c37f0fd53406310d4aa81f970d122feeb5604b40251317d5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_reset/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e4a4575c14328832a6976cc4aed6b19c07dbfe5997a10ae733b99e50702fa0e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "241acc485326568db77e4c7c6aa738605779ea7949337b58425a58cd2d26bfc5",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "016ff520beef55bf26d90d19c4ff8187fcf861cc6697999b71d4c756e8cd1af4",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fb9435cbde9641256ffb3cad4e82e8169969ecf0faebed24c6e1eca315a17c6",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "689583bd8c529dfe982c3dcdf84e968eeee49eea7da2eac65ad34f74afc2dd3c",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f1119ecdb9b37bfa6f3860f8c767174cca920a521a95946f04a93f5c2ca4cd0e",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8790919530e9b792ac0a0bb5aa7f39756369e469fdcdead5ed849377be400f55",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3f5fdbd0243581c13e09c4a9347c3197712b89de7ccf5c19bf040002a8e0967",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/forceoff",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/forceoff/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d81e28400e1ecd4d4f63e3880ad12efcad626f1b482aea24e3781db13463872",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/forceoff/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/forcerestart",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/forcerestart/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1fc6007dac86a8108d44f5d57cafbbedd7f31ec317258a090cdd20ca7338d5d7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/forcerestart/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/gracefulrestart",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/gracefulrestart/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "495911d67e2d1421a451dc2ecc4d52705486791f8be17321134fd2d91fb4aa9b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/gracefulrestart/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/gracefulshutdown",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/gracefulshutdown/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb25fbf9767297093fc2c5fcb74c38fdae2de2870dd4a7f1d264e32ab3fd2f34",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/gracefulshutdown/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/nmi",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/nmi/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c163e9d4d92dfc89ad31c68ee2172162ce2cf9f2c8c1a65f28d5f42437bbc90d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/nmi/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/on",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/on/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d71c25f25f9e3ba08d70dd9f7dc33d304e69950e6245781aeb8ed71ac00d23d1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/on/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/powercycle",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/powercycle/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be3e16e9141db31d632e532d034f33ed6d72a9151f1c3194080d16b1ab4961f2",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/powercycle/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/pushpowerbutton",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/pushpowerbutton/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4566f9dc07e3d658986932a9190fadcd98950e65e54829810aac38898b43e77",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/molecule/pushpowerbutton/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/tasks/init.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "28a5dd2d4dd0c53d832afbc13fc43dd71180b4d8579e5e37d62073e2fa51ce07",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a60392a56123662801eb3957572a4c57d858026c0c0222d8e7a3b2c04a24c5f1",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_server_powerstate/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f92fe5299b10e093b76f7527715368bf5d9a243323f74a69b3a74bc75b172886",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64f1956a2b0638551ece563c4f072e2c27553b76e4f381c22316d9e18d47d6b7",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "15213c1a91c203930ede7c03d11efa362d86d0f94e6822268e714e8e7a98b14d",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "527bd186562deac98b997410a770da9d22afee10005f965d699859e2a4df5ce0",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "57c999b43cebe8874baf8ce6a3fbe7a603c69bc0101577c73d7eb1d8720fa066",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3eda64bf454dd351cebf7063807750d690c44072601bae3af6144460a5323e6b",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ea1e319497a59c9d00d925cec6f1bcaf7c672b1b3e6d9a671c95a1c9e89c9f77",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6992dd1f8393638377d59efa1ddbd6e728bffc188f6997d3b5c98208ce710853",
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/idrac_storage_controller/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "981bc0f2d638983323e5081845acf0ac97ddf1cd5e603cf517051f69661a9cec",
+ "format": 1
+ },
+ {
+ "name": "roles/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a681a11158030f89670466b71c1cc02663571a4b551b2e3a7924ad23096e48cd",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e5c84a2ed15c9a447689ca3a576997db0b740caf3318d6c53f5f4bd94ee8ad26",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db1cab86f255f2f7829eb72d7f45c792a3c1560d5c23fc42076c70893f4dd784",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c039d1e5934a6dac58d3fa5dae336ebd5cf878c9649b6acbba4206f6ee0e3e0d",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8553c37be4eecc702220849c497ff5cb9dc0712567fc85d3cbed123c450f89f",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "669ae8e69c647cf300677acbc0990147d93ded846aa44fd40c45664768c9d6c6",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "12286deeca54ad309b8ad6898fbe72fecbcfd6c35885753b2e8a63de6d2666c1",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2a39596d4b1e4313eca1a4958921f7d8c57b84115d3f81080704d742edf30b1",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/negative",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/negative/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca7ab87d800e87cded135f5d6b008e71d7784794dca7aed1c67e87db1cb3d1ad",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/negative/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2a39596d4b1e4313eca1a4958921f7d8c57b84115d3f81080704d742edf30b1",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/resources",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/resources/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e0599c04f5f5203a69b80ed580e6a03f0586e029a482936479beef09f03ef165",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/molecule/resources/prepare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "70be84b8715d7ea5e18f87fbfd34763293a17ebfa3210f7bd4aa767e0b3ab8fc",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c256607b253027b52ff6b9ffa117ac4ee5adf9f6fd3a0adfe23251aedcade66",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec798a2b3ebd1096273bc1776f157f23a7b4a7e33d45669856dd185fb9f6b064",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_firmware/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95897ec515cd6dc828104d2b67e61b8842802aec9e73eac05462c52cd111f653",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "efb82c5ab08b6d604fa2dfa92911904119f33b20799fca3cccbc0ed9aa9633a8",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a5a221d2fa9a6fc8985007dca4b7b46ea33c4fee921a405ff021bb576740473",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6877ee1583a2e33f3b3baf52a4a633c1f0bddea5bafda3e91241952bb5f2f19f",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8098e64b110bba2ed1aaf28aa39abc87c5085f84149271719bb97376d1181aa3",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "310a9f23099629e5643b4dfdfa22af40cd4f78f3037fe1a5f6915c322e12feb0",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID0",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID0/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "79d8cf155feafc68b8d2d03b79c6a04b5c66e9e5f9e87741ac71c57f54a1c089",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID0/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID1",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID1/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf3b39d9fc6450c880a0e5ab61e904844fb9c1e1b112d72be49da7401eef7ba9",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID1/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID10",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID10/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c443aef978e1312010f754d04cb5c6da231557f5a044f4aa77e6832ae9526908",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID10/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID5",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID5/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2aa8df2c045cd181b56ee21f6bc7c7f3460797cf3d989de75fafd7025f741ca5",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID5/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID50",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID50/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86b09f71d7e2238b359eb8ec3be7860f5f531dd8cba020281d8760b6101e8c24",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID50/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID6",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID6/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd8a43244b35a395e735e9f9662d01d52a0c2b626e3f11b044c0e2f4d599ecf1",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID6/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID60",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID60/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d33d97618ffcc391d674d3345e4701a2ce217eaf2676694346cc76ad008da05e",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/RAID60/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/__delete_virtual_drive.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8d167fa3a1ec0e778272338f9d77af0bb416be6789875be061dcab8375103df6",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/__extract_storage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "971c811200c00ba90bf9bd999d45ec7750ba63d9f5e2d75c3929ae89f82b9f72",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/__get_helper.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2107089195728fc04d99dd63d9f0d8f61b2d7e519f5127627921ef7ee4139df2",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/__idrac_reset.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c004e1ee7b85b34bde8daf3ce4b82d794013745abff33fd2c27801dbf89bf4a",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/__lc_status.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "602b7c37a4a50ca9f143caa2479ff5c737e7592d1c802f8f6ffadbc7579077b1",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65331bfaa443cd7ff715e673d30abb2be2e3acad8ff0c0161cc002b4df3ef030",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_immediate",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_immediate/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "defd2475ab056ad28cf76397ae6f42ac3f7440c041546b6ecf0c488517fbd2d4",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_immediate/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a14046372f6e7f33908931a7edeb09e73dc4ec5bde6e1778b61fe23054a2735",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec06873a01c77fa0cfe3d4e85c647f5c41ee33675b9b254a7cc697451a8c2a87",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/default",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/default/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee862d62b26aee6030c4da1ca247d8d8e8b26f53ad6c388877fa0cb68d881c74",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/default/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/initialization",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/initialization/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d44b362192c52c569bb6fbea45fb8a09be10b9e44c6f48f8df4eec18e7a4905a",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/molecule/initialization/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d504045131a1a6e693af5e3be831c677039d54c70765cb14bb5ab1de3d8eb054",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/tests/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/tests/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c4aa87d44c3b522744c37ca811b31c1648dfde1bf2e75a231499f5d8210396fd",
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/redfish_storage_volume/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5d396acd98a3478d4a32965b818a75ebfddf7cc5725e8a0fac6093830ef34ad9",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/.gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67b0f799f12726b149fd9677334c10b059e231bf2fa7150d55e9b4d5fd242062",
+ "format": 1
+ },
+ {
+ "name": "tests/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e37abcd3cbb5e2200f5702471e8a808aa759c9bf012137d4391f605700af12b",
+ "format": 1
+ },
+ {
+ "name": "tests/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b21de52fbe6ce3aa2369ef41e5ee7fc7e2204d20d6232f2d29fe58886899f10",
"format": 1
},
{
@@ -2514,10 +8023,24 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/module_utils/test_idrac_redfish.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06eee949e14418d09370c421d7d50cb5a92f19a1a49c8ee654ade95220350869",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/module_utils/test_ome.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "37c80e12462773e83d956cf3a3f8617b05963c88e269da05315b00647da9f8c2",
+ "chksum_sha256": "00f667133dfcb6305f2ecfde0900211733c873a35c6acb8f680adc4dbfa45a5a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_redfish.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d9bfc8945d1ed9d6f162a9146c9316d859615eb9af5aa162acd850be16be262d",
"format": 1
},
{
@@ -2538,574 +8061,665 @@
"name": "tests/unit/plugins/modules/common.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "960b3c2958d995b6074a108530432a7ab2597d47fe73a7b16ee160198a29b775",
+ "chksum_sha256": "2f706a3caa873b5e81b6f4a3076628f2383e570cda58814a4e2d874e22989cc1",
"format": 1
},
{
"name": "tests/unit/plugins/modules/conftest.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5646cb89ba9ea0220ca9ab036024052501028194d70145c435afb769a3494852",
+ "chksum_sha256": "1d50a8a54e1c284f2db79461ec1b50a7555f1b2dfd71aa1d99ffa1c0271c6977",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "92185c0bcbe6d290c52f7a1ee53e28f25385fcc945f32c3144fac277eccdc545",
+ "chksum_sha256": "51b5435f256be611f442d7e14924493b476e7e965150d8f001e2080255e1ca45",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "31c7b0ce637329346756df7d707ef66ec3b516ffe37ad05410a609392863c6be",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "192b51bc6c5d5b120f6bed0654157a75d12ef0e7adaa5ce7ccf4c25dfb29d4d9",
- "format": 1
- },
- {
- "name": "tests/unit/plugins/modules/test_dellemc_get_system_inventory.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "342e7173304f3621bbfcc9b8fdbd8766a14cfb9fe16b6bbaf99f41f5920b0ca6",
+ "chksum_sha256": "e8b05f177533512dc258068adbc93270d8fc076de433893f64457a782c37df7b",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "91b13c79c1af8aa457d6bc98a074a5d6b0150bf33aefac418a86e29fba52da3e",
+ "chksum_sha256": "3aa7c78efc385f8f23769186e578cfea349e4918a7039391b2dad85ad866e9c4",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6d1e840a6652513c91b19cb21c6c35a38bc75df95cef68fc40caa437fe5d518b",
+ "chksum_sha256": "7172c977250e7d810c1b37c93ac2c9d9e27c778f350c5be3f323305a9ef82d8a",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dcb93f93630fc1ced41ddf55471a6fb5f826768e2732f004d18344f81999da87",
+ "chksum_sha256": "9cc17795c8921e9b46e4b8f5ceeb4b48cedeb7f1c9d0f674841c49fb98096cc7",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_attributes.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f859e8548c7e75db09982441ff4acb324c52f98021f9a4ed916a6c1b32723cdb",
+ "chksum_sha256": "443eea5340887b4af7d7a796e34d3ed99c07394a8fb9ac0af804757fb790f897",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_bios.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "33822ba4405b88f1a76869fd98c72fbc9e59727e0517e13f90b0f24e43072232",
+ "chksum_sha256": "56961e8894b7997b6bafec9e21398a444cbafe2073f2263752a82d55f794a61d",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_boot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d517d38965a354afaad821408a0978ae1d8e2a24bc39f95c94b4651d9fd5e519",
+ "chksum_sha256": "f4066f490341d728623ed7b407319e3b467f9960aeb6f4d82ce441cfdcb0dd6c",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_certificates.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a02e9ca6da9dd543e87e1b02fb7323efc3bd16517d144964e888a7642a5aa99c",
+ "chksum_sha256": "69ddcc325468f32296935a5aba441ebd2ad2983f9b898cd8475c3e49ac545831",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_firmware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "88890ddae0fe9a5ea035d56fca9b672b901d1a80a17a6d822f7eaa7be78adbe6",
+ "chksum_sha256": "ba3a0726790706f990aa56f1a75091fb47622d722d0fd58e6ac635d6e167ff54",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_firmware_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1da9b9002bb4b546225a66e57164938f8969d4fc1e7368c8f8b87062b0ed39ca",
+ "chksum_sha256": "5d60aee695e2ea2a27d566dff581483cf66c27cdcc1601fdd7fc3f325d2cefca",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_idrac_license.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7efe5b4fdf172a307c837bdcd8cf55e5b911cd17feb5db80509207ff0d813923",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "acb928f15232a99435d45e6e684196e236d712d6a2e7c27e30d3650195f051de",
+ "chksum_sha256": "c18b501718e37c52efe369dcbd0779cc522d919107ed2828bb9b3396ecf1b49d",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "40f7f1b4c42cbb536c38792f03dddf62dc62a2c3c51ca9c20f1339675c4989e5",
+ "chksum_sha256": "860c64f7be6696f28329e37411e69d02dd3c8ebb85f06dc41a7ef8bcfd231b97",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b805dc930fde093b383a4ff27d6aa7ef12f378506ebb5d940cd15225c36acc64",
+ "chksum_sha256": "be3d0c6ae7c2127c97172c75dbe50d2739d7a14f07eae6b6b1d566ddddb15dae",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "57decc6ea27edfc32a8229433b1194ccfc59376f8891b0eac045525204c84a90",
+ "chksum_sha256": "45e4e9c2e94584738e18f1be859172e06bb9ff3b66b89d857529b77cfab01a57",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7abff9586412878c5e681cb012f95800045ae16ca4ad5f698bcb7d5f87647fd6",
+ "chksum_sha256": "bff1e11af44e955de3e2b3f93de22efe39676b27e587083531fa3ad55504ccee",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_idrac_network_attributes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "10c2a11793c47a4b49723d09f7e370875bc0bc3eb9e01ed5c02a2e06683752bb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_os_deployment.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "50b2386c930591f9337bd433807a88269e9a1a85ae73492852388bce34d5529d",
+ "chksum_sha256": "4e13dd9169319f1f6b659c372203b2537d9702a0da6f52c1278b618f7eec9d2e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2ef7a9070d3f865d5b84d55c00681dab6078d368d496e6cf6172927a946e59ef",
+ "chksum_sha256": "df154e600f856210e4847b458fff5d98def22d85724bae5e85ecb02f347fbe49",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_reset.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4fc21d1e73ed80f994fd7fd9964357cbf532bf8ec632399569bf37519bef88c2",
+ "chksum_sha256": "755381915a5d3433313f42e08e5169624a522675a8bf1d147aa9acf502c99b74",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_server_config_profile.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "277b5672b6891e6d80a3ab14b29b9b095173a842e33e4a1949a540d4f392a11f",
+ "chksum_sha256": "e014b78fdccacfb78721b7f2955cedeb4581e2dd12e75f38751ca3855e6bd9b1",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_syslog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8dc717a1ebfd0ccc1328769d917479cb2d3cc398c67e271348f7e1184596c00e",
+ "chksum_sha256": "bf10fcb460fa9bc23a992d7bdfd5b2dfeb9b6ee83a918f257c5ae470c434f5d4",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_system_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c1ade1cb0073aecac90d23f082d461074b5bf63af4f0af77e9dc98cec85cde97",
+ "chksum_sha256": "c2dd4206fb7a8fd1b9fd0075e62ec831f85d4678370a3cf39485df2bd27fd53e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_timezone_ntp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "33f6bb244cd45cbb59227e509d668bad071d06160d52b6319fac2d271b67d745",
+ "chksum_sha256": "8b4464785500ba413275080c10cbc08884ae3a1219ced7b374ae1d4f1125bb76",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "46c9919f488c45f869999e0c840eeacfb65ed956e5e5ac205aa436f3a96eec76",
+ "chksum_sha256": "2d7fbcf3629f88219471cb399b853bc28ea397291e7eafd7f67f71bbca7b62c1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_idrac_user_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04bf9afbb2aea8e129c1592329ff3701c13ed332487432674d7b86a989488334",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_idrac_virtual_media.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "41fba36528bc9af58e7462e3bec64abb938ce399e214134ee8b44894a28deda4",
+ "chksum_sha256": "c40d6af3e3701c4578b48ecdcd4d07848ae5399b0624295f636539ee862360f5",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_active_directory.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9db993dfd0c18c5eb3777adeb6e955151b2f179cc7b0e00419646aef923c2fab",
+ "chksum_sha256": "3f031be7fd8a4448bd473d19b67b8c12ac1f5978b7a12fb5814b7604e3c71af0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_alert_policies.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bafaade223c90287303f473dfd824732bc557b3fd757522ea3be72e276f77881",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_alert_policies_actions_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44a90dc50b1678910dc345a2b88d1692a63a00a0d575ecc7ce9f235c9f19949b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_alert_policies_category_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40124ea4b56fdc618ac554ed78b05746fa9f83e0e075cabddd639e193ace14ae",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_alert_policies_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eef7bb09da512ee64a1528dc8e639db7bbef21aa2835e20e8cca8144c6dfc967",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_alert_policies_message_id_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cac02856eaad4cb77781ab7c481660e7e4efa2bc72ef8530d68c62fa4e7012d7",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_alerts_smtp.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7afa094e8262bdbd5fb9e9aaf875f82b3bf810a9237b01b04ec071771bd132da",
+ "chksum_sha256": "a0f68b19dd63014f99f993b5049322aa10c962f37abc8b01f0c1edc056e14dc2",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_alerts_syslog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5e6544e27d7b35884ea54cc11816f1713f8a9222220c1b591f3a89ddfc8f5607",
+ "chksum_sha256": "1562c23b20553b15252713cd5443dd6f7ffcea445f0f0841885121af22385c06",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_certificate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0280f7afa09beaad65dfb503782bace0a6f04ff767447970c1950b216b4982d7",
+ "chksum_sha256": "4cf9865aade153e9f8263120ed885fb3d56a07f810396649be3c8fe26d6e2e3a",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_console_preferences.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "d9caa4436405ad6911180207944159f0db1118781e6b0c1763ca50cd332313ed",
+ "chksum_sha256": "265fd707c0cf99eda2b9f20feaccf3328d1f78c1ae45e196f8ce88b9b4e1c726",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_network_address.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "15297916606687affd3c076c580f0c56572415e85728bbe23fedd1a0c6806be2",
+ "chksum_sha256": "ba7257a64dfa1324277c9c8159f0a9e246162e2ef68c0a07b8dff4213c518bac",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_network_proxy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ba4cd21055ebb897c785bf1687f912882d577b4813d3054b164f0c13a519e8fe",
+ "chksum_sha256": "523422cb1cdbc42857be9f768b501b95274c93fa4bf5b1ccba5514cc3501bd90",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_network_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a8b8136508a8e29d4c9ffd399fa65ac64c85c5e42a141ea5d12b5c74c02483a4",
+ "chksum_sha256": "b8ae4cb9602e2cf66fcbc81e803fcc423875deb008eeb3cfe9965ac44a5dfa1a",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_network_time.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6d5459e09e63dacb0cd57368db8f3fb7d90ee83ea4c58707557708480e390237",
+ "chksum_sha256": "42985f90c0f14499d6455b3be358b826d90e443bed4aef6948dca564befe2366",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_network_webserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f3d502a99e19c97418dde416d97c4acb8bc37800b43a648ebb546b7e6373c7f9",
+ "chksum_sha256": "c5dc8fbbc99361e13eb3a4617f6e260c55044b69c53c552b7e0921580e608de6",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_application_security_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3a28065c767dadb2abf22fb897cd041c14da1e25749f678a29c58ae7e268a42f",
+ "chksum_sha256": "e7bf2f655672078476cc228eb5fb457beaebf1f955b34e5d624eb3ed1b0c346f",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_chassis_slots.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c9416329532e9c2b8a49292f17457cfd03bbe20357b4762e30e59985b00e3225",
+ "chksum_sha256": "e4668f13509305834df305adef43400dfb621032bc29580ad15b52bead4b3083",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3bae45e27cd4bebba88c9c63c5207ce8026abcd6d1049de3e7210a8291497142",
+ "chksum_sha256": "337df6e350bd487256890123de46fb9c1717f0961345f9899ca4b5deb2219e67",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_configuration_compliance_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "09aec29c2fba54c5647c9f31eb7ae4593c545b6cb5f0c7e57f509c18353523b2",
+ "chksum_sha256": "2bac6fc0e5be8cd2c4841f6ee758233db54442c8eb00fb4ada5ee4a044160447",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "18b7c5a817f2aeda11912f9fa3a8d5d422201f2f44746f6879fd5c8fe378d74b",
+ "chksum_sha256": "fe679abfeb71c0791e644ac6d501639c229c1ea6c64ae06ce0058d0930079490",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c041b4e641a484fe40b34ad4cf922891bfe04405dd39134a37829792650963c6",
+ "chksum_sha256": "f3427f3a4be484d1349156864ae1476badd4c88ad84a901ac5afd832df2fa724",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_local_access_configuration.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "281c3b191312c53b58a9fa44004c90471e9fbf99313674e148c779cd6f2f2b71",
+ "chksum_sha256": "4de5185ae43025acd83012bd9eaccf2c05d9b94d00bd985483f04b15ee502bbb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_location.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "239b6a2129a8be0d24445fe02c651822082ff9f9f3628ef84eb899c6ce70630b",
+ "chksum_sha256": "5f624abfb88ee235fd29dbb68f5ddcc77463085603e06b5f9bbb03c7471c5b32",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_mgmt_network.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9a8e0119234dceafca94eb8e1513bd6bc841cdafb91be67d211d6db09fdb31f9",
+ "chksum_sha256": "4d419a694ec5d1ba0fdebc86f1dc68aa7ee25c3a2cccb787e57d003741dadf66",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_network_services.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "09d03bf7553daf2d1bb54674150f0362e37b8b900f9807f0ddf1b22ed9a950b6",
+ "chksum_sha256": "01b4ac372b95cbdea27248e39999953945a4a668be10194887f924668ee5808b",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_power_settings.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dde98180f35d1fda81f26fb545d3fdec1936c6164445767f98906944ba2846a3",
+ "chksum_sha256": "8fe2d243278b33bf748001911a1be0704134fafe7bb59f66b6e5485cca2fe12a",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_device_quick_deploy.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c2eccc9c43322c647a17eda27c85e874226ff98ec9ff25d68ce925ee67b311ad",
+ "chksum_sha256": "e0acd2b6ae2cbaf52be36bfe9ba2886c4116c79fab354111c65714eedcef47c2",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_devices.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9ef1681f638521e22f07bfd3926e6e79f16eec1074ccbb84e15abc52974a1abd",
+ "chksum_sha256": "c1d265161a08069cf7e386ab5d56d4ba62dbf4e501da32b69d05bd31450c349e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_diagnostics.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c5654d0976ffb20d78aa32c04312233790b0f810673db2bc46e0e90dfa56494",
+ "chksum_sha256": "3fb5b4940be19ce98ef0a2d8290019b5f06093a5230a4f8e9b46eef8cc13a1bb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_discovery.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0bbe3fffa0bc1d54658fcac812d787feb3ee5de3869341c2037dc1197faa32c3",
+ "chksum_sha256": "d8d8270271f1f92ab4b4efe84f40de6e1c03fd6d434f75ccbed76699e3bd63f4",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_domain_user_groups.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0df53445f6faaafc19973b47d9f6d643fd3b6cda327df3f34ec185cae7d3790e",
+ "chksum_sha256": "502d0f2033c22d39a971906066eb5e2533a915d5079d0ba4a42347a2da76eb22",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_firmware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "17e93cead0aad7aad613fa61ab9eb5b5c3e0eb2157247ad006a32596ab5e7017",
+ "chksum_sha256": "2a88df0a8140232d3e9aeeba3686515d430d1626d96b518d5f7fcb28eb8d61e1",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_firmware_baseline.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "976f2556e6e2be1b42ebe07fa7e4410418551243050a4ed40f1c905e18acfdd9",
+ "chksum_sha256": "10f367578ed301729cfa4190a45191545ed5796d902248f34590c4481fdcd5eb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3d8f1ea2ad7ef1ac0c41e59726581910afe3bb9c2bbcd3e41ac6506ec4d0bfd2",
+ "chksum_sha256": "e387e2956c280621c1d898b18785bcf281ed4f00c181aeb334697c492108d2bc",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_firmware_baseline_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3282786c382feb202b2ae38de4a51603f1f9e1fc762e3460bb7b01ed91068312",
+ "chksum_sha256": "2b2b8d194c62941b6912236c9e0f6781c9c2940f9c19f81f56ee0b9350115642",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_firmware_catalog.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "efeef89081bb08744cbf92322a30ae18e01bee531c2d383ff32c4b13ce27bfd5",
+ "chksum_sha256": "3b16df578978033fd82dcd342f3e3286b807a480dc76723f5698dc1b5eb2b61d",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_groups.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8fe26ff99f76dc75ede732ee72ea1bd1a07d8fb8066fb0c96d0a7f06929d56d7",
+ "chksum_sha256": "dafbbc8ff84e5d7195c1dfa0c4aac877cfc84524318fb9b93daee52ace901fa1",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_identity_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "335ccc1edd47cc0153682dc450ed579339d3b0d0f7e7adc9cacdcefd058d8d7b",
+ "chksum_sha256": "df5bb7d2ab7b18151f47ee0cd2a4304de9e6d11517015148b0d58c405f0b868f",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_job_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3ada75733f8a26f1e792bb93a337bf227c78f9ef2509060825329fcc55e9cd7f",
+ "chksum_sha256": "528679c836f439cfadd6dede9b9cb7790b32329e8ddb834dff7acee88827f529",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_network_port_breakout.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c7a9f682f5b6132c7b4cf54681e248fdd6cee77c729b5951098e78168e702004",
+ "chksum_sha256": "98c509977de9377cc6fb865530149d1a58f3230edf111cea2da5fe0a8da6fc20",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_network_vlan.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "57fc1a04a1dbea86e0b2c436f8f5f678d70332c50e7c70af96604a09bfddf652",
+ "chksum_sha256": "d764453f483885c2265e3de93cd2750af9248d6c90e3e27a82b8e67b2a03a2cf",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_network_vlan_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8536f0b33ee05b6973f38e77c2a50446186b99b424192bdfc593d1b6c8197326",
+ "chksum_sha256": "e2b407402c38936eff8f9f2e20e33ca7e356f664c51d8aa6984d27dd84302f5a",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_powerstate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a187e509fdf9269072ffd282da85693af212cd85817f874ea5062783c3e2f887",
+ "chksum_sha256": "3b5979e60e2a0d99df0b1a598cb28763134e0e19bbae5ebbf2b48762d4a3f860",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_profile.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7d23c1d700c72b0663cd5620d64d81f3a59bf22e07914a6763e477c32a9781c2",
+ "chksum_sha256": "1ef49f3f85e0a585ae05ee9fb61322533d1863cd8853a477bb26d3f81874c7b5",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_profile_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4aea850eca33936d709e5fc967578b5ccc077388f94cc2ae7541dfaf1342ef5d",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_server_interface_profile_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2c23a8f4a91c1e3af9f5f951e61ff6f1b9d3f4e13b4d1ab2ee35d8d36ca1f167",
+ "chksum_sha256": "33e27297dbf2ba70e8a54ac1dfaf6967cd25e3c5d0fa67992661561bddc3d13e",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_server_interface_profiles.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5beec3e90640353ae9ea8de8300c5f8d3df704e8c6f17c559074c6c7f7460a42",
+ "chksum_sha256": "0f48cb24a32aabf756cb9576ba969840c1575574a63a44a705d8778776b342ff",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_smart_fabric.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea1d81efc9c430d920c9ab37abd1deb39bb9db13072867fd7dcf65818a9b7d8d",
+ "chksum_sha256": "050f3d3f67a9f3da7dd18e92cb31548bc26ede439bfbf27d2413a6da02cc4c9d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_smart_fabric_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f91e9b2df7be4c4127d990c3a63f0e2c02c201deb40fb9a3627b14cc9a05e9f1",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f7041c4e812a11db9b4ca755729910d8d07199eb6d409f9665dbafc6150627ac",
+ "chksum_sha256": "738841207e64e35da9d00cfc93fbf21efb040fbd0fbabb10ba4f08d469a2f98a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_smart_fabric_uplink_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be29eacdaab10d28070c5c410b3de63e6427c76dbed8f9690837847a31124cd8",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_template.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "00074a9ce2a71a0d38af3bcff76f25e11f37e5f54af1d8830223db15a12c9857",
+ "chksum_sha256": "435c50720b4e475953395e40d1e728a5167da0efc799a2bd49d9dbc199b7c391",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_template_identity_pool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "da23834fc8cf6bd9d27ecfa4a5a07765a568dee144224defdc25344ccfef1c3c",
+ "chksum_sha256": "650fc4c361e3a0d79f6a794ad3e43efac1a102a9b49ea75a1d3ae9dbd2b3cb3b",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_template_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bf7dcd18048f2ddcd377ba3eb024b18d17b7df0731283eb5c5be80f4d7caacdb",
+ "chksum_sha256": "35eb0483714d6a253d32010fdcd4421c5006dd565bd4c4f5bad984ad25f7b941",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_template_network_vlan.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "97daf83dcd98ab11b99af9a79583cb74c21cd23291d7c61af8383cea043cfe04",
+ "chksum_sha256": "23b09e9db14b8c7c758d72b118178c18c96418478b5e276a35d69ae06d4d2553",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_ome_template_network_vlan_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53f47c4f82c035207a38e1c6989461f33f39faaf7e8949aba963dd37a1cedaeb",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_user.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6f5f6782d929465d30fb064978558615967eae893165a1f2df440dbcadc7c6b7",
+ "chksum_sha256": "72117dcb003f648d2b2e0d58612903eb64e2dc1c120eaef5f3571331a79e0f3f",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_ome_user_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7ac605d2d50cf2cd3d064bb11519585c970b846f3be3a5c45a418cd96181b83a",
+ "chksum_sha256": "d953dab52deacad582e4bf060b3c2e6d82d490b2e0f872f2cbec4c7eac95df81",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_redfish_event_subscription.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9da2edc940d5473608e1655f3d22fe0fba52ee05b68bf9d0293eb972c2663018",
+ "chksum_sha256": "8b63ef2ac94d46a42ad2ff543ac70afde793e5caf967bc940566fa6ee6c289a2",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_redfish_firmware.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "38a6470438ce17ee89338e5663590cf990ca250607863d92fbbb72ac902d8889",
+ "chksum_sha256": "2661f131bdea868cfe15bf380f4bd2e465c15c95533f12f82f680d6fa6d67691",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_redfish_firmware_rollback.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b74f2f92f2785380b6a40972f3fe64936d8111ec990b61498d198b2bfccf6f68",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_redfish_powerstate.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "795fd10edde86cb737936299817cfd5f81f4bfc44af76bd1a26db9f514975c39",
+ "chksum_sha256": "711d5df101c83c29218d70c8952a3cf185658e3de1ac4f9a737e9ba082d9bdf4",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_redfish_storage_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b67db8d0c6d281a7a671d00e90c1a52c861baff11906f072979c0666e9f0aae7",
+ "chksum_sha256": "54ccd9a59a3da074cbc61424bac980ccbe07ba4b01b2cb4116523c42f339fb9d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6dd69e26e1abab9e11a3c0d8e6212b37d8619036e394b351ccc99e480976da28",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5ec603ab1d2b3071743853324fd0db34d886f78f1543c1fa700ad1c904a6fd25",
"format": 1
}
],
diff --git a/ansible_collections/dellemc/openmanage/MANIFEST.json b/ansible_collections/dellemc/openmanage/MANIFEST.json
index c99e320f6..bf8b402bc 100644
--- a/ansible_collections/dellemc/openmanage/MANIFEST.json
+++ b/ansible_collections/dellemc/openmanage/MANIFEST.json
@@ -2,17 +2,22 @@
"collection_info": {
"namespace": "dellemc",
"name": "openmanage",
- "version": "6.3.0",
+ "version": "8.7.0",
"authors": [
- "Jagadeesh N V <Jagadeesh_N_V@Dell.com>",
- "Felix Stephen <Felix_S@Dell.com>",
- "Sachin Apagundi <Sachin_Apagundi@Dell.com>",
+ "Jagadeesh N V <Jagadeesh.N.V@Dell.com>",
+ "Felix Stephen <Felix.S@Dell.com>",
+ "Sachin Apagundi <Sachin.Apagundi@Dell.com>",
"Husniya Hameed <Husniya.Hameed@Dellteam.com>",
"Abhishek Sinha <Abhishek.Sinha10@Dell.com>",
- "Kritika Bhateja <Kritika.Bhateja@Dell.com>"
+ "Kritika Bhateja <Kritika.Bhateja@Dell.com>",
+ "Shivam Sharma <Shivam.Sharma3@Dell.com>",
+ "Rajshekar P <Rajshekar.P@Dell.com>",
+ "Jennifer John <Jennifer.John@Dell.com>",
+ "Lovepreet Singh <Lovepreet.Singh1@dell.com>"
],
"readme": "README.md",
"tags": [
+ "dell",
"dellemc",
"openmanage",
"infrastructure",
@@ -28,7 +33,10 @@
"description": "Dell OpenManage Ansible Modules allows data center and IT administrators to use RedHat Ansible to automate and orchestrate the configuration, deployment, and update of Dell PowerEdge Servers and modular infrastructure by leveraging the management automation capabilities in-built into the Integrated Dell Remote Access Controller (iDRAC), OpenManage Enterprise and OpenManage Enterprise Modular.",
"license": [],
"license_file": "LICENSE",
- "dependencies": {},
+ "dependencies": {
+ "ansible.utils": ">=2.10.2",
+ "ansible.windows": ">=1.14.0"
+ },
"repository": "https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections",
"documentation": "https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/docs",
"homepage": "https://github.com/dell/dellemc-openmanage-ansible-modules",
@@ -38,7 +46,7 @@
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "82aa686e05007eff6aad01f6bc9016f567b6949b57492af62a6d84371138e9da",
+ "chksum_sha256": "889b7354ab86fd7d07cb93c6efa113b3f470fb53c397a27b5b464adaf803e17e",
"format": 1
},
"format": 1
diff --git a/ansible_collections/dellemc/openmanage/README.md b/ansible_collections/dellemc/openmanage/README.md
index 66f04c779..b9ccc4cbb 100644
--- a/ansible_collections/dellemc/openmanage/README.md
+++ b/ansible_collections/dellemc/openmanage/README.md
@@ -3,7 +3,7 @@
[![Contributor Covenant](https://img.shields.io/badge/Contributor%20Covenant-v2.1%20adopted-ff69b4.svg)](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/CODE_OF_CONDUCT.md)
[![License](https://img.shields.io/github/license/dell/dellemc-openmanage-ansible-modules)](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/LICENSE)
[![Python version](https://img.shields.io/badge/python-3.9.6+-blue.svg)](https://www.python.org/downloads/)
-[![Ansible version](https://img.shields.io/badge/ansible-2.13.0+-blue.svg)](https://pypi.org/project/ansible/)
+[![Ansible version](https://img.shields.io/badge/ansible-2.15.6+-blue.svg)](https://pypi.org/project/ansible/)
[![GitHub release (latest by date including pre-releases)](https://img.shields.io/github/v/release/dell/dellemc-openmanage-ansible-modules?include_prereleases&label=latest&style=flat-square)](https://github.com/dell/dellemc-openmanage-ansible-modules/releases)
Dell OpenManage Ansible Modules allows data center and IT administrators to use RedHat Ansible to automate and orchestrate the configuration, deployment, and update of Dell PowerEdge Servers and modular infrastructure by leveraging the management automation capabilities in-built into the Integrated Dell Remote Access Controller (iDRAC), OpenManage Enterprise (OME) and OpenManage Enterprise Modular (OMEM).
@@ -23,22 +23,21 @@ OpenManage Ansible Modules simplifies and automates provisioning, deployment, an
* [Additional Information](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/docs/ADDITIONAL_INFORMATION.md)
## Supported Platforms
- * iDRAC7 based Dell PowerEdge Servers with firmware versions 2.63.60.62 and above.
- * iDRAC8 based Dell PowerEdge Servers with firmware versions 2.82.82.82 and above.
- * iDRAC9 based Dell PowerEdge Servers with firmware versions 5.10.50.00 and above.
- * Dell OpenManage Enterprise versions 3.8.3 and above.
- * Dell OpenManage Enterprise Modular versions 1.40.20 and above.
+ * iDRAC8 based Dell PowerEdge Servers with firmware versions 2.84.84.84 and above.
+ * iDRAC9 based Dell PowerEdge Servers with firmware versions 6.10.80.00 and above.
+ * Dell OpenManage Enterprise versions 3.10.2 and 4.0.0.
+ * Dell OpenManage Enterprise Modular versions 2.10.10 and above.
## Prerequisites
- * [Ansible >= 2.13.2](https://github.com/ansible/ansible)
+ * [Ansible Core >= 2.16.2 and 2.15.8](https://github.com/ansible/ansible)
* Python >= 3.9.6
* To run the iDRAC modules, install OpenManage Python Software Development Kit (OMSDK)
using either ```pip install omsdk --upgrade``` or ```pip install -r requirements.txt```.
OMSDK can also be installed from [Dell OpenManage Python SDK](https://github.com/dell/omsdk)
* Operating System
- * Red Hat Enterprise Linux (RHEL) 8.6 and 9.0
- * SUSE Linux Enterprise Server (SLES) 15 SP3 and 15 SP4
- * Ubuntu 22.04 and 20.04.04
+ * Red Hat Enterprise Linux (RHEL) 9.3 and 8.9
+ * SUSE Linux Enterprise Server (SLES) 15 SP5 and 15 SP4
+ * Ubuntu 22.04.3 and 22.04.2
## Installation
@@ -53,4 +52,4 @@ Install the collection from the github repository using the latest commit on the
```ansible-galaxy collection install git+https://github.com/dell/dellemc-openmanage-ansible-modules.git,collections```
## About
-Dell OpenManage Ansible Modules is 100% open source and community-driven. All components are available under [GPL-3.0 license](https://www.gnu.org/licenses/gpl-3.0.html) on GitHub.
+Dell OpenManage Ansible Modules is 100% open source and community-driven. All components are available under [GPL-3.0-only](https://www.gnu.org/licenses/gpl-3.0.html) on GitHub.
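
The README changes above update the supported platforms, the Ansible Core and Python prerequisites, and the install instructions. As a quick smoke test once the collection is installed, a minimal playbook sketch such as the one below can query an OpenManage Enterprise appliance; the address and credentials are placeholders, and the options shown are the collection's common authentication parameters.

```yaml
# inventory_check.yml -- minimal post-install check (placeholder credentials).
- hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Fetch device inventory from OpenManage Enterprise
      dellemc.openmanage.ome_device_info:
        hostname: "192.168.0.1"   # OME appliance address (placeholder)
        username: "admin"          # placeholder
        password: "password"       # placeholder
        validate_certs: false      # lab only; keep certificate validation on in production
      register: ome_devices
```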
diff --git a/ansible_collections/dellemc/openmanage/bindep.txt b/ansible_collections/dellemc/openmanage/bindep.txt
new file mode 100644
index 000000000..130d1f7f9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/bindep.txt
@@ -0,0 +1,4 @@
+xorriso
+syslinux
+isomd5sum
+wget \ No newline at end of file
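
The new bindep.txt lists distribution packages (xorriso, syslinux, isomd5sum, wget) that the collection's ISO-related tasks expect on the controller; linking them to the idrac_os_deployment role is a reading of the change, not something the diff states. A sketch of preinstalling them with the generic package module follows (package names can vary slightly between distributions).

```yaml
# prepare_controller.yml -- sketch: install the system packages from bindep.txt.
- hosts: localhost
  become: true
  tasks:
    - name: Install OS packages listed in bindep.txt
      ansible.builtin.package:
        name:
          - xorriso
          - syslinux
          - isomd5sum
          - wget
        state: present
```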
diff --git a/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml
index 73891787e..7f908f563 100644
--- a/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/dellemc/openmanage/changelogs/.plugin-cache.yaml
@@ -1,12 +1,82 @@
-objects: {}
+objects:
+ role:
+ idrac_attributes:
+ description: Role to configure the iDRAC attribute
+ name: idrac_attributes
+ version_added: 7.6.0
+ idrac_bios:
+ description: Modify and clear BIOS attributes, and reset BIOS settings
+ name: idrac_bios
+ version_added: 7.6.0
+ idrac_boot:
+ description: Configure the boot order settings
+ name: idrac_boot
+ version_added: 8.0.0
+ idrac_certificate:
+ description: This role allows to generate certificate signing request, import,
+ and export certificates on iDRAC
+ name: idrac_certificate
+ version_added: 7.4.0
+ idrac_export_server_config_profile:
+ description: Role to export iDRAC Server Configuration Profile (SCP)
+ name: idrac_export_server_config_profile
+ version_added: 7.3.0
+ idrac_firmware:
+ description: Firmware update from a repository on a network share (CIFS, NFS,
+ HTTP, HTTPS, FTP)
+ name: idrac_firmware
+ version_added: 7.5.0
+ idrac_gather_facts:
+ description: Role to get the facts from the iDRAC Server
+ name: idrac_gather_facts
+ version_added: 7.4.0
+ idrac_import_server_config_profile:
+ description: Import iDRAC Server Configuration Profile (SCP)
+ name: idrac_import_server_config_profile
+ version_added: 7.4.0
+ idrac_job_queue:
+ description: Role to manage the iDRAC lifecycle controller job queue.
+ name: idrac_job_queue
+ version_added: 8.0.0
+ idrac_os_deployment:
+ description: Role to deploy operating system on the iDRAC servers
+ name: idrac_os_deployment
+ version_added: 7.5.0
+ idrac_reset:
+ description: Role to reset and restart iDRAC
+ name: idrac_reset
+ version_added: 7.6.0
+ idrac_server_powerstate:
+ description: Role to manage the different power states of the specified device
+ name: idrac_server_powerstate
+ version_added: 7.4.0
+ idrac_storage_controller:
+ description: Configures the physical disk, virtual disk, and storage controller
+ settings
+ name: idrac_storage_controller
+ version_added: 7.6.0
+ redfish_firmware:
+ description: Update a component firmware using the image file available on the
+ local or remote system
+ name: redfish_firmware
+ version_added: 7.5.0
+ redfish_storage_volume:
+ description: Role to manage the storage volume configuration
+ name: redfish_storage_volume
+ version_added: 7.5.0
plugins:
become: {}
cache: {}
callback: {}
cliconf: {}
connection: {}
+ filter: {}
httpapi: {}
- inventory: {}
+ inventory:
+ ome_inventory:
+ description: Group inventory plugin on OpenManage Enterprise.
+ name: ome_inventory
+ version_added: 7.1.0
lookup: {}
module:
dellemc_configure_idrac_eventing:
@@ -19,16 +89,6 @@ plugins:
name: dellemc_configure_idrac_services
namespace: ''
version_added: 1.0.0
- dellemc_get_firmware_inventory:
- description: Get Firmware Inventory
- name: dellemc_get_firmware_inventory
- namespace: ''
- version_added: 1.0.0
- dellemc_get_system_inventory:
- description: Get the PowerEdge Server System Inventory
- name: dellemc_get_system_inventory
- namespace: ''
- version_added: 1.0.0
dellemc_idrac_lc_attributes:
description: Enable or disable Collect System Inventory on Restart (CSIOR) property
for all iDRAC/LC jobs
@@ -45,11 +105,27 @@ plugins:
name: dellemc_system_lockdown_mode
namespace: ''
version_added: 1.0.0
+ idrac_attributes:
+ description: Configure the iDRAC attributes.
+ name: idrac_attributes
+ namespace: ''
+ version_added: 6.0.0
idrac_bios:
- description: Configure the BIOS attributes
+ description: Modify and clear BIOS attributes, reset BIOS settings and configure
+ boot sources
name: idrac_bios
namespace: ''
version_added: 2.1.0
+ idrac_boot:
+ description: Configure the boot order settings.
+ name: idrac_boot
+ namespace: ''
+ version_added: 6.1.0
+ idrac_certificates:
+ description: Configure certificates for iDRAC
+ name: idrac_certificates
+ namespace: ''
+ version_added: 5.5.0
idrac_firmware:
description: Firmware update from a repository on a network share (CIFS, NFS,
HTTP, HTTPS, FTP)
@@ -86,13 +162,19 @@ plugins:
name: idrac_network
namespace: ''
version_added: 2.1.0
+ idrac_network_attributes:
+ description: Configures the iDRAC network attributes
+ name: idrac_network_attributes
+ namespace: ''
+ version_added: 8.4.0
idrac_os_deployment:
description: Boot to a network ISO image
name: idrac_os_deployment
namespace: ''
version_added: 2.1.0
idrac_redfish_storage_controller:
- description: Configures the storage controller settings
+ description: Configures the physical disk, virtual disk, and storage controller
+ settings
name: idrac_redfish_storage_controller
namespace: ''
version_added: 2.1.0
@@ -126,17 +208,67 @@ plugins:
name: idrac_user
namespace: ''
version_added: 2.1.0
+ idrac_user_info:
+ description: Retrieve details of all users or a specific user on iDRAC.
+ name: idrac_user_info
+ namespace: ''
+ version_added: 7.0.0
+ idrac_virtual_media:
+ description: Configure the Remote File Share settings.
+ name: idrac_virtual_media
+ namespace: ''
+ version_added: 6.3.0
ome_active_directory:
description: Configure Active Directory groups to be used with Directory Services
- on OpenManage Enterprise
name: ome_active_directory
namespace: ''
version_added: 4.0.0
+ ome_alert_policies:
+ description: Manage OME alert policies.
+ name: ome_alert_policies
+ namespace: ''
+ version_added: 8.3.0
+ ome_alert_policies_actions_info:
+ description: Get information on actions of alert policies.
+ name: ome_alert_policies_actions_info
+ namespace: ''
+ version_added: 8.2.0
+ ome_alert_policies_category_info:
+ description: Retrieves information of all OME alert policy categories.
+ name: ome_alert_policies_category_info
+ namespace: ''
+ version_added: 8.2.0
+ ome_alert_policies_info:
+ description: Retrieves information of one or more OME alert policies.
+ name: ome_alert_policies_info
+ namespace: ''
+ version_added: 8.2.0
+ ome_alert_policies_message_id_info:
+ description: Get message ID information of alert policies.
+ name: ome_alert_policies_message_id_info
+ namespace: ''
+ version_added: 8.2.0
+ ome_application_alerts_smtp:
+ description: This module allows to configure SMTP or email configurations
+ name: ome_application_alerts_smtp
+ namespace: ''
+ version_added: 4.3.0
+ ome_application_alerts_syslog:
+ description: Configure syslog forwarding settings on OpenManage Enterprise and
+ OpenManage Enterprise Modular
+ name: ome_application_alerts_syslog
+ namespace: ''
+ version_added: 4.3.0
ome_application_certificate:
description: This module allows to generate a CSR and upload the certificate
name: ome_application_certificate
namespace: ''
version_added: 2.1.0
+ ome_application_console_preferences:
+ description: Configure console preferences on OpenManage Enterprise.
+ name: ome_application_console_preferences
+ namespace: ''
+ version_added: 5.2.0
ome_application_network_address:
description: Updates the network configuration on OpenManage Enterprise
name: ome_application_network_address
@@ -147,6 +279,12 @@ plugins:
name: ome_application_network_proxy
namespace: ''
version_added: 2.1.0
+ ome_application_network_settings:
+ description: This module allows you to configure the session inactivity timeout
+ settings
+ name: ome_application_network_settings
+ namespace: ''
+ version_added: 4.4.0
ome_application_network_time:
description: Updates the network time on OpenManage Enterprise
name: ome_application_network_time
@@ -157,6 +295,11 @@ plugins:
name: ome_application_network_webserver
namespace: ''
version_added: 2.1.0
+ ome_application_security_settings:
+ description: Configure the login security properties
+ name: ome_application_security_settings
+ namespace: ''
+ version_added: 4.4.0
ome_chassis_slots:
description: Rename sled slots on OpenManage Enterprise Modular
name: ome_chassis_slots
@@ -174,7 +317,8 @@ plugins:
namespace: ''
version_added: 3.2.0
ome_device_group:
- description: Add devices to a static device group on OpenManage Enterprise
+ description: Add or remove device(s) from a static device group on OpenManage
+ Enterprise
name: ome_device_group
namespace: ''
version_added: 3.3.0
@@ -184,6 +328,43 @@ plugins:
name: ome_device_info
namespace: ''
version_added: 2.0.0
+ ome_device_local_access_configuration:
+ description: Configure local access settings on OpenManage Enterprise Modular.
+ name: ome_device_local_access_configuration
+ namespace: ''
+ version_added: 4.4.0
+ ome_device_location:
+ description: Configure device location settings on OpenManage Enterprise Modular
+ name: ome_device_location
+ namespace: ''
+ version_added: 4.2.0
+ ome_device_mgmt_network:
+ description: Configure network settings of devices on OpenManage Enterprise
+ Modular
+ name: ome_device_mgmt_network
+ namespace: ''
+ version_added: 4.2.0
+ ome_device_network_services:
+ description: Configure chassis network services settings on OpenManage Enterprise
+ Modular
+ name: ome_device_network_services
+ namespace: ''
+ version_added: 4.3.0
+ ome_device_power_settings:
+ description: Configure chassis power settings on OpenManage Enterprise Modular
+ name: ome_device_power_settings
+ namespace: ''
+ version_added: 4.2.0
+ ome_device_quick_deploy:
+ description: Configure Quick Deploy settings on OpenManage Enterprise Modular.
+ name: ome_device_quick_deploy
+ namespace: ''
+ version_added: 5.0.0
+ ome_devices:
+ description: Perform device-specific operations on target devices
+ name: ome_devices
+ namespace: ''
+ version_added: 6.1.0
ome_diagnostics:
description: Export technical support logs(TSR) to network share location
name: ome_diagnostics
@@ -195,13 +376,13 @@ plugins:
namespace: ''
version_added: 3.3.0
ome_domain_user_groups:
- description: Create, modify, or delete an Active Directory user group on OpenManage
- Enterprise and OpenManage Enterprise Modular.
+ description: Create, modify, or delete an Active Directory/LDAP user group on
+ OpenManage Enterprise and OpenManage Enterprise Modular
name: ome_domain_user_groups
namespace: ''
version_added: 4.0.0
ome_firmware:
- description: Firmware update of PowerEdge devices and its components through
+ description: Update firmware on PowerEdge devices and its components through
OpenManage Enterprise
name: ome_firmware
namespace: ''
@@ -272,17 +453,44 @@ plugins:
name: ome_profile
namespace: ''
version_added: 3.1.0
+ ome_profile_info:
+ description: Retrieve profiles with attribute details
+ name: ome_profile_info
+ namespace: ''
+ version_added: 7.2.0
+ ome_server_interface_profile_info:
+ description: Retrieves the information of server interface profile on OpenManage
+ Enterprise Modular.
+ name: ome_server_interface_profile_info
+ namespace: ''
+ version_added: 5.1.0
+ ome_server_interface_profiles:
+ description: Configure server interface profiles
+ name: ome_server_interface_profiles
+ namespace: ''
+ version_added: 5.1.0
ome_smart_fabric:
description: Create, modify or delete a fabric on OpenManage Enterprise Modular
name: ome_smart_fabric
namespace: ''
version_added: 2.1.0
+ ome_smart_fabric_info:
+ description: Retrieves the information of smart fabrics inventoried by OpenManage
+ Enterprise Modular
+ name: ome_smart_fabric_info
+ namespace: ''
+ version_added: 7.1.0
ome_smart_fabric_uplink:
description: Create, modify or delete a uplink for a fabric on OpenManage Enterprise
Modular
name: ome_smart_fabric_uplink
namespace: ''
version_added: 2.1.0
+ ome_smart_fabric_uplink_info:
+ description: Retrieve details of fabric uplink on OpenManage Enterprise Modular.
+ name: ome_smart_fabric_uplink_info
+ namespace: ''
+ version_added: 7.1.0
ome_template:
description: Create, modify, deploy, delete, export, import and clone a template
on OpenManage Enterprise
@@ -306,6 +514,11 @@ plugins:
name: ome_template_network_vlan
namespace: ''
version_added: 2.0.0
+ ome_template_network_vlan_info:
+ description: Retrieves network configuration of template.
+ name: ome_template_network_vlan_info
+ namespace: ''
+ version_added: 7.2.0
ome_user:
description: Create, modify or delete a user on OpenManage Enterprise
name: ome_user
@@ -317,12 +530,22 @@ plugins:
name: ome_user_info
namespace: ''
version_added: 2.0.0
+ redfish_event_subscription:
+ description: Manage Redfish Subscriptions
+ name: redfish_event_subscription
+ namespace: ''
+ version_added: 4.1.0
redfish_firmware:
description: To perform a component firmware update using the image file available
on the local or remote system
name: redfish_firmware
namespace: ''
version_added: 2.1.0
+ redfish_firmware_rollback:
+ description: To perform a component firmware rollback using component name
+ name: redfish_firmware_rollback
+ namespace: ''
+ version_added: 8.2.0
redfish_powerstate:
description: Manage device power state
name: redfish_powerstate
@@ -336,5 +559,6 @@ plugins:
netconf: {}
shell: {}
strategy: {}
+ test: {}
vars: {}
-version: 4.0.0
+version: 8.5.0
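
The plugin cache now registers the ome_inventory dynamic inventory plugin (added in 7.1.0). A minimal inventory source sketch is shown below; the appliance address and credentials are placeholders, and further options (device groups, host and group variables) are documented with the plugin itself.

```yaml
# ome_inventory.yml -- sketch of an inventory source for the new plugin
# (placeholder address and credentials).
plugin: dellemc.openmanage.ome_inventory
hostname: "192.168.0.1"
username: "admin"
password: "password"
```

Running `ansible-inventory -i ome_inventory.yml --list` should return the device groups known to the appliance.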
diff --git a/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml b/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml
index f1b64d483..63ab4cdee 100644
--- a/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml
+++ b/ansible_collections/dellemc/openmanage/changelogs/changelog.yaml
@@ -2,839 +2,876 @@ ancestor: null
releases:
2.1.0:
changes:
- release_summary: The `Dell EMC OpenManage Ansible Modules <https://github.com/dell/dellemc-openmanage-ansible-modules>`_
+ release_summary:
+ The `Dell OpenManage Ansible Modules <https://github.com/dell/dellemc-openmanage-ansible-modules>`_
are available on Ansible Galaxy as a collection.
release_date: '2020-07-29'
2.1.1:
changes:
deprecated_features:
- - The dellemc_configure_bios module is deprecated and replaced with the idrac_bios
- module.
- - The dellemc_configure_idrac_network module is deprecated and replaced with
- the idrac_network module.
- - The dellemc_configure_idrac_timezone module is deprecated and replaced with
- the idrac_timezone_ntp module.
- - The dellemc_delete_lc_job and dellemc_delete_lc_job_queue modules are deprecated
- and replaced with the idrac_lifecycle_controller_jobs module.
- - The dellemc_export_lc_logs module is deprecated and replaced with the idrac_lifecycle_controller_logs
- module.
- - The dellemc_get_lc_job_status module is deprecated and replaced with the idrac_lifecycle_controller_job_status_info
- module.
- - The dellemc_get_lcstatus module is deprecated and replaced with the idrac_lifecycle_controller_status_info
- module.
- - The dellemc_idrac_reset module is deprecated and replaced with the idrac_reset
- module.
- - The dellemc_setup_idrac_syslog module is deprecated and replaced with the
- idrac_syslog module.
+ - The dellemc_configure_bios module is deprecated and replaced with the idrac_bios
+ module.
+ - The dellemc_configure_idrac_network module is deprecated and replaced with
+ the idrac_network module.
+ - The dellemc_configure_idrac_timezone module is deprecated and replaced with
+ the idrac_timezone_ntp module.
+ - The dellemc_delete_lc_job and dellemc_delete_lc_job_queue modules are deprecated
+ and replaced with the idrac_lifecycle_controller_jobs module.
+ - The dellemc_export_lc_logs module is deprecated and replaced with the idrac_lifecycle_controller_logs
+ module.
+ - The dellemc_get_lc_job_status module is deprecated and replaced with the idrac_lifecycle_controller_job_status_info
+ module.
+ - The dellemc_get_lcstatus module is deprecated and replaced with the idrac_lifecycle_controller_status_info
+ module.
+ - The dellemc_idrac_reset module is deprecated and replaced with the idrac_reset
+ module.
+ - The dellemc_setup_idrac_syslog module is deprecated and replaced with the
+ idrac_syslog module.
major_changes:
- - Standardization of ten iDRAC ansible modules based on ansible guidelines.
- - Support for OpenManage Enterprise Modular.
+ - Standardization of ten iDRAC ansible modules based on ansible guidelines.
+ - Support for OpenManage Enterprise Modular.
release_summary: Support for OpenManage Enterprise Modular and other enhancements.
modules:
- - description: Configure the BIOS attributes
- name: idrac_bios
- namespace: ''
- - description: Get the status of a Lifecycle Controller job
- name: idrac_lifecycle_controller_job_status_info
- namespace: ''
- - description: Delete the Lifecycle Controller Jobs
- name: idrac_lifecycle_controller_jobs
- namespace: ''
- - description: Export Lifecycle Controller logs to a network share or local path.
- name: idrac_lifecycle_controller_logs
- namespace: ''
- - description: Get the status of the Lifecycle Controller
- name: idrac_lifecycle_controller_status_info
- namespace: ''
- - description: Configures the iDRAC network attributes
- name: idrac_network
- namespace: ''
- - description: Reset iDRAC
- name: idrac_reset
- namespace: ''
- - description: Enable or disable the syslog on iDRAC
- name: idrac_syslog
- namespace: ''
- - description: Configures time zone and NTP on iDRAC
- name: idrac_timezone_ntp
- namespace: ''
+ - description: Configure the BIOS attributes
+ name: idrac_bios
+ namespace: ''
+ - description: Get the status of a Lifecycle Controller job
+ name: idrac_lifecycle_controller_job_status_info
+ namespace: ''
+ - description: Delete the Lifecycle Controller Jobs
+ name: idrac_lifecycle_controller_jobs
+ namespace: ''
+ - description: Export Lifecycle Controller logs to a network share or local path.
+ name: idrac_lifecycle_controller_logs
+ namespace: ''
+ - description: Get the status of the Lifecycle Controller
+ name: idrac_lifecycle_controller_status_info
+ namespace: ''
+ - description: Configures the iDRAC network attributes
+ name: idrac_network
+ namespace: ''
+ - description: Reset iDRAC
+ name: idrac_reset
+ namespace: ''
+ - description: Enable or disable the syslog on iDRAC
+ name: idrac_syslog
+ namespace: ''
+ - description: Configures time zone and NTP on iDRAC
+ name: idrac_timezone_ntp
+ namespace: ''
release_date: '2020-08-26'
2.1.2:
changes:
bugfixes:
- - Documentation improvement request `#140 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/140>`_
- - Executing dellemc_configure_idrac_users twice fails the second attempt `#100
- <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/100>`_
- - dellemc_change_power_state fails if host is already on `#132 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/132>`_
- - dellemc_change_power_state not idempotent `#115 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/115>`_
- - dellemc_configure_idrac_users error `#26 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/26>`_
- - dellemc_configure_idrac_users is unreliable - errors `#113 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/113>`_
- - idrac_server_config_profile improvement requested (request) `#137 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/137>`_
- - ome_firmware_catalog.yml example errors `#145 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/145>`_
+ - Documentation improvement request `#140 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/140>`_
+ - Executing dellemc_configure_idrac_users twice fails the second attempt `#100
+ <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/100>`_
+ - dellemc_change_power_state fails if host is already on `#132 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/132>`_
+ - dellemc_change_power_state not idempotent `#115 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/115>`_
+ - dellemc_configure_idrac_users error `#26 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/26>`_
+ - dellemc_configure_idrac_users is unreliable - errors `#113 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/113>`_
+ - idrac_server_config_profile improvement requested (request) `#137 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/137>`_
+ - ome_firmware_catalog.yml example errors `#145 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/145>`_
deprecated_features:
- - The dellemc_change_power_state module is deprecated and replaced with the
- redfish_powerstate module.
- - The dellemc_configure_idrac_users module is deprecated and replaced with the
- idrac_user module.
+ - The dellemc_change_power_state module is deprecated and replaced with the
+ redfish_powerstate module.
+ - The dellemc_configure_idrac_users module is deprecated and replaced with the
+ idrac_user module.
minor_changes:
- - The idrac_server_config_profile module supports a user provided file name
- for an export operation.
- release_summary: The dellemc_change_power_state and dellemc_configure_idrac_users
+ - The idrac_server_config_profile module supports a user provided file name
+ for an export operation.
+ release_summary:
+ The dellemc_change_power_state and dellemc_configure_idrac_users
modules are standardized as per ansible guidelines. 8 GitHub issues are fixed.
modules:
- - description: Configure settings for user accounts
- name: idrac_user
- namespace: ''
- - description: Manage device power state
- name: redfish_powerstate
- namespace: ''
+ - description: Configure settings for user accounts
+ name: idrac_user
+ namespace: ''
+ - description: Manage device power state
+ name: redfish_powerstate
+ namespace: ''
release_date: '2020-09-23'
2.1.3:
changes:
- release_summary: Network configuration service related modules ome_network_vlan,
+ release_summary:
+ Network configuration service related modules ome_network_vlan,
ome_network_port_breakout and ome_network_vlan_info are added.
modules:
- - description: This module allows to automate the port portioning or port breakout
- to logical sub ports
- name: ome_network_port_breakout
- namespace: ''
- - description: Create, modify & delete a VLAN
- name: ome_network_vlan
- namespace: ''
- - description: Retrieves the information about networks VLAN(s) present in OpenManage
- Enterprise
- name: ome_network_vlan_info
- namespace: ''
+ - description:
+ This module allows to automate the port portioning or port breakout
+ to logical sub ports
+ name: ome_network_port_breakout
+ namespace: ''
+ - description: Create, modify & delete a VLAN
+ name: ome_network_vlan
+ namespace: ''
+ - description:
+ Retrieves the information about networks VLAN(s) present in OpenManage Enterprise
+ name: ome_network_vlan_info
+ namespace: ''
release_date: '2020-10-29'
2.1.4:
changes:
known_issues:
- - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation
- of multiple uplinks of the same name even though this is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.'
- release_summary: Fabric management related modules ome_smart_fabric and ome_smart_fabric_uplink
- are added.
+ - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation
+ of multiple uplinks of the same name even though this is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.'
+ release_summary:
+ Fabric management related modules ome_smart_fabric and ome_smart_fabric_uplink are added.
modules:
- - description: Create, modify or delete a fabric on OpenManage Enterprise Modular
- name: ome_smart_fabric
- namespace: ''
- - description: Create, modify or delete a uplink for a fabric on OpenManage Enterprise
- Modular
- name: ome_smart_fabric_uplink
- namespace: ''
+ - description: Create, modify or delete a fabric on OpenManage Enterprise Modular
+ name: ome_smart_fabric
+ namespace: ''
+ - description:
+ Create, modify or delete a uplink for a fabric on OpenManage Enterprise Modular
+ name: ome_smart_fabric_uplink
+ namespace: ''
release_date: '2020-11-25'
2.1.5:
changes:
bugfixes:
- - Identity pool does not reset when a network VLAN is added to a template in
- the ome_template_network_vlan module. `#169 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues
- /169>`_
- - Missing parameter added in ome_smart_fabric_uplink module documenation. `#181
- <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/181>`_
- known_issues:
- - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation
- of multiple uplinks of the same name even though this is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.'
- - 'Issue 2(187956): If an invalid job_id is provided, idrac_lifecycle_controller_job_status_info
- returns an error message. This error message does not contain information
- about the exact issue with the invalid job_id.'
- - 'Issue 3(188267): While updating the iDRAC firmware, the idrac_firmware module
- completes execution before the firmware update job is completed. An incorrect
- message is displayed in the task output as ''DRAC WSMAN endpoint returned
- HTTP code ''400'' Reason ''Bad Request''''. This issue may occur if the target
- iDRAC firmware version is less than 3.30.30.30'
+ - Identity pool does not reset when a network VLAN is added to a template in
+ the ome_template_network_vlan module. `#169 <https://github.com/dell/dellemc-openmanage-ansible-modules/issues
+ /169>`_
+      - Missing parameter added in ome_smart_fabric_uplink module documentation. `#181
+ <https://github.com/dell/dellemc-openmanage-ansible-modules/issues/181>`_
+ known_issues:
+ - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation
+ of multiple uplinks of the same name even though this is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.'
+ - 'Issue 2(187956): If an invalid job_id is provided, idrac_lifecycle_controller_job_status_info
+ returns an error message. This error message does not contain information
+ about the exact issue with the invalid job_id.'
+ - "Issue 3(188267): While updating the iDRAC firmware, the idrac_firmware module
+ completes execution before the firmware update job is completed. An incorrect
+ message is displayed in the task output as 'DRAC WSMAN endpoint returned
+ HTTP code '400' Reason 'Bad Request''. This issue may occur if the target
+ iDRAC firmware version is less than 3.30.30.30"
minor_changes:
- - The idrac_server_config_profile module supports IPv6 address format.
- release_summary: The idrac_firmware module is enhanced to include checkmode
+ - The idrac_server_config_profile module supports IPv6 address format.
+ release_summary:
+ The idrac_firmware module is enhanced to include checkmode
support and job tracking.
release_date: '2020-12-30'
3.0.0:
changes:
bugfixes:
- - GitHub issue fix - Module dellemc_idrac_storage_volume.py broken. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/212)
- - GitHub issue fix - ome_smart_fabric Fabric management is not supported on
- the specified system. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/179)
- - 'Known issue fix #187956: If an invalid job_id is provided, the idrac_lifecycle_controller_job_status_info
- module returns an error message with the description of the issue.'
- - 'Known issue fix #188267: No error message is displayed when the target iDRAC
- with firmware version less than 3.30.30.30 is updated.'
- - Sanity fixes as per ansible guidelines to all modules.
+ - GitHub issue fix - Module dellemc_idrac_storage_volume.py broken. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/212)
+ - GitHub issue fix - ome_smart_fabric Fabric management is not supported on
+ the specified system. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/179)
+ - 'Known issue fix #187956: If an invalid job_id is provided, the idrac_lifecycle_controller_job_status_info
+ module returns an error message with the description of the issue.'
+ - 'Known issue fix #188267: No error message is displayed when the target iDRAC
+ with firmware version less than 3.30.30.30 is updated.'
+ - Sanity fixes as per ansible guidelines to all modules.
deprecated_features:
- - The ``dellemc_get_firmware_inventory`` module is deprecated and replaced with
- ``idrac_firmware_info``.
- - The ``dellemc_get_system_inventory`` module is deprecated and replaced with
- ``idrac_system_info``.
- known_issues:
- - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation
- of multiple uplinks of the same name even though this is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.'
+ - The ``dellemc_get_firmware_inventory`` module is deprecated and replaced with
+ ``idrac_firmware_info``.
+ - The ``dellemc_get_system_inventory`` module is deprecated and replaced with
+ ``idrac_system_info``.
+ known_issues:
+ - 'Issue 1(186024): ome_smart_fabric_uplink module does not allow the creation
+ of multiple uplinks of the same name even though this is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.'
major_changes:
- - Removed the existing deprecated modules.
+ - Removed the existing deprecated modules.
minor_changes:
- - Coding Guidelines, Contributor Agreement, and Code of Conduct files are added
- to the collection.
- - New deprecation changes for ``dellemc_get_system_inventory`` and ``dellemc_get_firmware_inventory``
- ignored for ansible 2.9 sanity test.
- - The modules are standardized as per ansible guidelines.
- release_summary: Deprecations, issue fixes, and standardization of modules as
- per ansible guidelines.
+ - Coding Guidelines, Contributor Agreement, and Code of Conduct files are added
+ to the collection.
+ - New deprecation changes for ``dellemc_get_system_inventory`` and ``dellemc_get_firmware_inventory``
+ ignored for ansible 2.9 sanity test.
+ - The modules are standardized as per ansible guidelines.
+ release_summary:
+ Deprecations, issue fixes, and standardization of modules as per ansible guidelines.
release_date: '2021-01-25'
3.1.0:
changes:
bugfixes:
- - ome_firmware_baseline_compliance_info - OMEnt firmware baseline compliance
- info pagination support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/171)
- - ome_network_proxy - OMEnt network proxy check mode support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/187)
- known_issues:
- - ome_smart_fabric - Issue(185322) Only three design types are supported by
- OpenManage Enterprise Modular but the module successfully creates a fabric
- when the design type is not supported.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
+ - ome_firmware_baseline_compliance_info - OMEnt firmware baseline compliance
+ info pagination support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/171)
+ - ome_network_proxy - OMEnt network proxy check mode support added (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/187)
+ known_issues:
+ - ome_smart_fabric - Issue(185322) Only three design types are supported by
+ OpenManage Enterprise Modular but the module successfully creates a fabric
+ when the design type is not supported.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
release_summary: OpenManage Enterprise profiles management support added.
modules:
- - description: Create, modify, delete, assign, unassign and migrate a profile
- on OpenManage Enterprise
- name: ome_profile
- namespace: ''
+ - description:
+ Create, modify, delete, assign, unassign and migrate a profile on OpenManage Enterprise
+ name: ome_profile
+ namespace: ''
release_date: '2021-02-24'
3.2.0:
changes:
known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_configuration_compliance_info - Issue(195592) Module may error out with
- the message ``unable to process the request because an error occurred``. If
- the issue persists, report it to the system administrator.
- - ome_smart_fabric - Issue(185322) Only three design types are supported by
- OpenManage Enterprise Modular but the module successfully creates a fabric
- when the design type is not supported.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_configuration_compliance_info - Issue(195592) Module may error out with
+ the message ``unable to process the request because an error occurred``. If
+ the issue persists, report it to the system administrator.
+ - ome_smart_fabric - Issue(185322) Only three design types are supported by
+ OpenManage Enterprise Modular but the module successfully creates a fabric
+ when the design type is not supported.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
minor_changes:
- - ome_template - Allows to deploy a template on device groups.
+ - ome_template - Allows to deploy a template on device groups.
release_summary: Configuration compliance related modules added
modules:
- - description: Create, modify, and delete a configuration compliance baseline
- and remediate non-compliant devices on OpenManage Enterprise
- name: ome_configuration_compliance_baseline
- namespace: ''
- - description: Device compliance report for devices managed in OpenManage Enterprise
- name: ome_configuration_compliance_info
- namespace: ''
+ - description:
+ Create, modify, and delete a configuration compliance baseline
+ and remediate non-compliant devices on OpenManage Enterprise
+ name: ome_configuration_compliance_baseline
+ namespace: ''
+ - description: Device compliance report for devices managed in OpenManage Enterprise
+ name: ome_configuration_compliance_info
+ namespace: ''
release_date: '2021-03-24'
3.3.0:
changes:
known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_configuration_compliance_info - Issue(195592) Module may error out with
- the message ``unable to process the request because an error occurred``. If
- the issue persists, report it to the system administrator.
- - ome_smart_fabric - Issue(185322) Only three design types are supported by
- OpenManage Enterprise Modular but the module successfully creates a fabric
- when the design type is not supported.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_configuration_compliance_info - Issue(195592) Module may error out with
+ the message ``unable to process the request because an error occurred``. If
+ the issue persists, report it to the system administrator.
+ - ome_smart_fabric - Issue(185322) Only three design types are supported by
+ OpenManage Enterprise Modular but the module successfully creates a fabric
+ when the design type is not supported.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
minor_changes:
-  - ome_firmware_baseline - Allows to retrieve the device even if it is not in
-    the first 50 device IDs
- release_summary: OpenManage Enterprise device group and device discovery support
+  - ome_firmware_baseline - Allows to retrieve the device even if it is not in
+    the first 50 device IDs
+ release_summary:
+ OpenManage Enterprise device group and device discovery support
added
modules:
- - description: Add devices to a static device group on OpenManage Enterprise
- name: ome_device_group
- namespace: ''
- - description: Create, modify, or delete a discovery job on OpenManage Enterprise
- name: ome_discovery
- namespace: ''
+ - description: Add devices to a static device group on OpenManage Enterprise
+ name: ome_device_group
+ namespace: ''
+ - description: Create, modify, or delete a discovery job on OpenManage Enterprise
+ name: ome_discovery
+ namespace: ''
release_date: '2021-04-28'
3.4.0:
changes:
known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
major_changes:
- - ome_firmware_baseline - Module supports check mode, and allows the modification
- and deletion of firmware baselines.
- - ome_firmware_catalog - Module supports check mode, and allows the modification
- and deletion of firmware catalogs.
+ - ome_firmware_baseline - Module supports check mode, and allows the modification
+ and deletion of firmware baselines.
+ - ome_firmware_catalog - Module supports check mode, and allows the modification
+ and deletion of firmware catalogs.
minor_changes:
- - ome_firmware_catalog - Added support for repositories available on the Dell
- support site.
- - ome_template_network_vlan - Added the input option which allows to apply the
- modified VLAN settings immediately on the associated modular-system servers.
- release_summary: OpenManage Enterprise firmware baseline and firmware catalog
+ - ome_firmware_catalog - Added support for repositories available on the Dell
+ support site.
+ - ome_template_network_vlan - Added the input option which allows to apply the
+ modified VLAN settings immediately on the associated modular-system servers.
+ release_summary:
+ OpenManage Enterprise firmware baseline and firmware catalog
    modules updated to support check mode.
release_date: '2021-05-26'
3.5.0:
changes:
bugfixes:
- - Handled invalid share and unused imports cleanup for iDRAC modules (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/268)
- known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
+ - Handled invalid share and unused imports cleanup for iDRAC modules (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/268)
+ known_issues:
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
major_changes:
- - idrac_server_config_profile - Added support for exporting and importing Server
- Configuration Profile through HTTP/HTTPS share.
- - ome_device_group - Added support for adding devices to a group using the IP
- addresses of the devices and group ID.
+ - idrac_server_config_profile - Added support for exporting and importing Server
+ Configuration Profile through HTTP/HTTPS share.
+ - ome_device_group - Added support for adding devices to a group using the IP
+ addresses of the devices and group ID.
release_summary: Support for managing static device groups on OpenManage Enterprise.
modules:
- - description: Manages static device groups on OpenManage Enterprise
- name: ome_groups
- namespace: ''
+ - description: Manages static device groups on OpenManage Enterprise
+ name: ome_groups
+ namespace: ''
release_date: '2021-06-28'
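
As an aside to the ome_device_group enhancement recorded in the 3.5.0 major_changes above, a minimal sketch of such a task is shown below. It assumes the dellemc.openmanage collection namespace; the ip_addresses parameter name, the connection options, and all values are illustrative assumptions drawn from the change description, not a verified example.

    - name: Add devices to a static group by IP address (illustrative sketch)
      dellemc.openmanage.ome_device_group:
        hostname: "192.168.0.1"     # OpenManage Enterprise address (placeholder)
        username: "admin"           # placeholder credentials
        password: "password"
        name: "Storage Servers"     # target static group
        ip_addresses:               # assumed parameter per the 3.5.0 change note
          - 192.35.0.1
          - 10.36.0.0-10.36.0.255   # range shown only as an illustration
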
3.6.0:
changes:
bugfixes:
- - dellemc_idrac_storage_volume - Module fails if the BlockSize, FreeSize, or
- Size state of the physical disk is set to "Not Available".
- known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
-  release_summary: Support for configuring device slot name and exporting SupportAssist
+ - dellemc_idrac_storage_volume - Module fails if the BlockSize, FreeSize, or
+ Size state of the physical disk is set to "Not Available".
+ known_issues:
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
+ release_summary:
+    Support for configuring device slot name and exporting SupportAssist
device collections from OpenManage Enterprise and OpenManage Enterprise Modular.
modules:
- - description: Rename sled slots on OpenManage Enterprise Modular
- name: ome_chassis_slots
- namespace: ''
-  - description: Export technical support logs (TSR) to network share location
- name: ome_diagnostics
- namespace: ''
+ - description: Rename sled slots on OpenManage Enterprise Modular
+ name: ome_chassis_slots
+ namespace: ''
+  - description: Export technical support logs (TSR) to network share location
+ name: ome_diagnostics
+ namespace: ''
release_date: '2021-07-28'
4.0.0:
changes:
known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though this
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
-  release_summary: Support for configuring Active Directory user group on OpenManage Enterprise and OpenManage Enterprise Modular.
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though this
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
+ release_summary:
+    Support for configuring Active Directory user group on OpenManage
+ Enterprise and OpenManage Enterprise Modular.
modules:
- - description: Configure Active Directory groups to be used with Directory Services
- on OpenManage Enterprise and OpenManage Enterprise Modular
- name: ome_active_directory
- namespace: ''
- - description: Create, modify, or delete an Active Directory user group on OpenManage
- Enterprise and OpenManage Enterprise Modular
- name: ome_domain_user_groups
- namespace: ''
+ - description:
+ Configure Active Directory groups to be used with Directory Services
+ on OpenManage Enterprise and OpenManage Enterprise Modular
+ name: ome_active_directory
+ namespace: ''
+ - description:
+ Create, modify, or delete an Active Directory user group on OpenManage
+ Enterprise and OpenManage Enterprise Modular
+ name: ome_domain_user_groups
+ namespace: ''
release_date: '2021-08-27'
4.1.0:
changes:
+ known_issues:
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though it
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
major_changes:
- - ome_firmware - Added option to stage the firmware update and support for selecting components and devices for baseline-based firmware update.
+ - ome_firmware - Added option to stage the firmware update and support for selecting
+ components and devices for baseline-based firmware update.
minor_changes:
- - ome_template_network_vlan - Enabled check_mode support.
- known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though it
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
- release_summary: Support for Redfish event subscriptions and enhancements to ome_firmware module.
+ - ome_template_network_vlan - Enabled check_mode support.
+ release_summary:
+ Support for Redfish event subscriptions and enhancements to
+ ome_firmware module.
modules:
- - description: Manage Redfish Subscriptions
- name: redfish_event_subscription
- namespace: ''
+ - description: Manage Redfish Subscriptions
+ name: redfish_event_subscription
+ namespace: ''
release_date: '2021-09-28'
4.2.0:
changes:
known_issues:
- - idrac_user - Issue(192043) Module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
- not allow the creation of multiple uplinks of the same name even though it
- is supported by OpenManage Enterprise Modular. If an uplink is created using
- the same name as an existing uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) The ome_device_power_settings module
- errors out with the following message if the value provided for the
- parameter ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to
- complete the request because PowerCap does not exist or is not applicable
- for the resource URI.``
- release_summary: Support to configure OME Modular devices network, power, and location settings.
+ - idrac_user - Issue(192043) Module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_device_power_settings - Issue(212679) The ome_device_power_settings module
+ errors out with the following message if the value provided for the parameter
+ ``power_cap`` is not within the supported range of 0 to 32767, ``Unable to
+ complete the request because PowerCap does not exist or is not applicable
+ for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) ome_smart_fabric_uplink module does
+ not allow the creation of multiple uplinks of the same name even though it
+ is supported by OpenManage Enterprise Modular. If an uplink is created using
+ the same name as an existing uplink, the existing uplink is modified.
+ release_summary:
+ Support to configure OME Modular devices network, power, and
+ location settings.
modules:
- - description: Configure network settings of devices on OpenManage Enterprise Modular
- name: ome_device_mgmt_network
- namespace: ''
- - description: Configure device location settings on OpenManage Enterprise Modular
- name: ome_device_location
- namespace: ''
- - description: Configure chassis power settings on OpenManage Enterprise Modular
- name: ome_device_power_settings
- namespace: ''
+ - description: Configure device location settings on OpenManage Enterprise Modular
+ name: ome_device_location
+ namespace: ''
+ - description:
+ Configure network settings of devices on OpenManage Enterprise Modular
+ name: ome_device_mgmt_network
+ namespace: ''
+ - description: Configure chassis power settings on OpenManage Enterprise Modular
+ name: ome_device_power_settings
+ namespace: ''
release_date: '2021-10-27'
4.3.0:
changes:
known_issues:
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - idrac_user - Issue(192043) The module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
- of multiple uplinks of the same name even though it is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module errors out with the
- following message if the value provided for the parameter ``power_cap`` is
- not within the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support to configure network services, syslog forwarding, and SMTP settings.
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module errors out with the
+ following message if the value provided for the parameter ``power_cap`` is
+ not within the supported range of 0 to 32767, ``Unable to complete the request
+ because PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ release_summary:
+ Support to configure network services, syslog forwarding, and SMTP settings.
modules:
- - description: Configure chassis network services settings on OpenManage Enterprise Modular
- name: ome_device_network_services
- namespace: ''
- - description: This module allows to configure SMTP or email configurations
- name: ome_application_alerts_smtp
- namespace: ''
- - description: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular
- name: ome_application_alerts_syslog
- namespace: ''
+ - description: This module allows to configure SMTP or email configurations
+ name: ome_application_alerts_smtp
+ namespace: ''
+ - description:
+ Configure syslog forwarding settings on OpenManage Enterprise and
+ OpenManage Enterprise Modular
+ name: ome_application_alerts_syslog
+ namespace: ''
+ - description:
+ Configure chassis network services settings on OpenManage Enterprise Modular
+ name: ome_device_network_services
+ namespace: ''
release_date: '2021-11-26'
4.4.0:
changes:
bugfixes:
- - ome_device_location - The issue that applies values of the location settings only in lowercase is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/341)
+ - ome_device_location - The issue that applies values of the location settings
+ only in lowercase is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/341)
+ known_issues:
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module errors out with the
+ following message if the value provided for the parameter ``power_cap`` is
+ not within the supported range of 0 to 32767, ``Unable to complete the request
+ because PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
minor_changes:
- - ome_firmware - The module is enhanced to support check mode and idempotency (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/274)
- - ome_template - An example task is added to create a compliance template from reference device (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/339)
- known_issues:
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - idrac_user - Issue(192043) The module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
- of multiple uplinks of the same name even though it is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module errors out with the
- following message if the value provided for the parameter ``power_cap`` is
- not within the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support to configure login security, session inactivity timeout, and local access settings.
+ - ome_firmware - The module is enhanced to support check mode and idempotency
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/274)
+ - ome_template - An example task is added to create a compliance template from
+ reference device (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/339)
+ release_summary:
+ Support to configure login security, session inactivity timeout,
+ and local access settings.
modules:
- - description: Configure the login security properties
- name: ome_application_security_settings
- namespace: ''
- - description: This module allows you to configure the session inactivity timeout settings
- name: ome_application_network_settings
- namespace: ''
- - description: Configure local access settings on OpenManage Enterprise Modular
- name: ome_device_local_access_configuration
- namespace: ''
+ - description:
+ This module allows you to configure the session inactivity timeout settings
+ name: ome_application_network_settings
+ namespace: ''
+ - description: Configure the login security properties
+ name: ome_application_security_settings
+ namespace: ''
+ - description: Configure local access settings on OpenManage Enterprise Modular
+ name: ome_device_local_access_configuration
+ namespace: ''
release_date: '2021-12-24'
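
The ome_firmware check mode and idempotency enhancement listed in the 4.4.0 minor_changes above pairs naturally with Ansible's built-in check mode. A minimal sketch, assuming a baseline-based update and the parameter name baseline_name (the connection options and values are placeholders; check the module documentation for the exact option names):

    - name: Preview a baseline firmware update without applying it (sketch)
      dellemc.openmanage.ome_firmware:
        hostname: "192.168.0.1"        # placeholder connection details
        username: "admin"
        password: "password"
        baseline_name: "fw_baseline"   # assumed parameter; see module docs
      check_mode: true                 # report what would change without changing it

The same dry-run behaviour can be obtained for a whole playbook with ansible-playbook --check.
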
5.0.0:
changes:
- major_changes:
- - All modules now support SSL over HTTPS and socket level timeout.
breaking_changes:
- - HTTPS SSL certificate validation is a **breaking change** and will require modification in the
- existing playbooks. Please refer to `SSL Certificate Validation <https://github.com/dell/dellemc-openmanage-ansible-modules#ssl-certificate-validation>`_ section in the `README.md <https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/README.md#SSL-Certificate-Validation>`_ for modification to existing playbooks.
+ - HTTPS SSL certificate validation is a **breaking change** and will require
+ modification in the existing playbooks. Please refer to `SSL Certificate Validation
+ <https://github.com/dell/dellemc-openmanage-ansible-modules#ssl-certificate-validation>`_
+ section in the `README.md <https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/README.md#SSL-Certificate-Validation>`_
+ for modification to existing playbooks.
bugfixes:
- - idrac_bios - The issue while configuring boot sources is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/347)
- known_issues:
- - ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message
- if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - idrac_user - Issue(192043) The module may error out with the message ``unable
- to perform the import or export operation because there are pending attribute
- changes or a configuration job is in progress``. Wait for the job to complete
- and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
- of multiple uplinks of the same name even though it is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module errors out with the
- following message if the value provided for the parameter ``power_cap`` is
- not within the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
+ - idrac_bios - The issue while configuring boot sources is fixed (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/347)
+ known_issues:
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module errors out with the
+ following message if the value provided for the parameter ``power_cap`` is
+ not within the supported range of 0 to 32767, ``Unable to complete the request
+ because PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ major_changes:
+ - All modules now support SSL over HTTPS and socket level timeout.
release_summary: HTTPS SSL support for all modules and quick deploy settings.
modules:
- - description: Configure Quick Deploy settings on OpenManage Enterprise Modular
- name: ome_device_quick_deploy
- namespace: ''
+ - description: Configure Quick Deploy settings on OpenManage Enterprise Modular
+ name: ome_device_quick_deploy
+ namespace: ''
release_date: '2022-01-27'
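
To illustrate the 5.0.0 breaking change above: because certificate validation is now enforced, existing playbooks either need a CA-signed certificate on the appliance or an explicit opt-out. A minimal sketch, assuming the dellemc.openmanage namespace and the standard connection options (the module and values are placeholders; validate_certs is the argument referenced in the 5.0.1 notes below):

    - name: Gather device inventory with certificate validation handled explicitly (sketch)
      dellemc.openmanage.ome_device_info:
        hostname: "192.168.0.1"   # placeholder connection details
        username: "admin"
        password: "password"
        validate_certs: false     # defaults to true from 5.0.0; disable only for lab or self-signed setups
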
5.0.1:
changes:
- major_changes:
- - All modules can read custom or organizational CA signed certificate from the environment variables.
- Please refer to `SSL Certificate Validation <https://github.com/dell/dellemc-openmanage-ansible-modules#ssl-certificate-validation>`_ section in the `README.md <https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/README.md#SSL-Certificate-Validation>`_ for modification to existing playbooks or setting environment variable.
bugfixes:
- - The ome_application_network_time and ome_application_network_proxy modules are breaking due
- to the changes introduced for SSL validation.(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/360)
- - All playbooks require modification because the validate_certs argument is set to True by default
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/357)
+ - All playbooks require modification because the validate_certs argument is
+ set to True by default (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/357)
+ - The ome_application_network_time and ome_application_network_proxy modules
+ are breaking due to the changes introduced for SSL validation.(https://github.com/dell/dellemc-openmanage-ansible-modules/issues/360)
known_issues:
- - ome_device_quick_deploy - Issue(216352) - The module does not display a proper error message
- if an unsupported value is provided for the ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
- of multiple uplinks of the same name even though it is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- ome_device_power_settings - Issue(212679) - The module errors out with the
following message if the value provided for the parameter ``power_cap`` is
not within the supported range of 0 to 32767, ``Unable to complete the request
because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support to provide custom or organizational CA signed certificate for SSL validation from the environment variable.
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ major_changes:
+  - All modules can read custom or organizational CA signed certificate from the
+    environment variables. Please refer to `SSL Certificate Validation
+    <https://github.com/dell/dellemc-openmanage-ansible-modules#ssl-certificate-validation>`_
+    section in the `README.md <https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/README.md#SSL-Certificate-Validation>`_
+    for modification to existing playbooks or setting environment variable.
+ release_summary:
+ Support to provide custom or organizational CA signed certificate
+ for SSL validation from the environment variable.
release_date: '2022-02-11'
5.1.0:
changes:
bugfixes:
-  - idrac_firmware - Issue(220130) The socket.timeout issue that occurs
-    during the wait_for_job_completion() job is fixed.
- minor_changes:
- - ome_identity_pool - The module is enhanced to support check mode and
- idempotency.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/328)
- - ome_template_identity_pool - The module is enhanced to support check
- mode and idempotency.
- - ome_application_network_address - The module is enhanced to support
- check mode and idempotency.
- - redfish_event_subscription - The module is enhanced to support check
- mode and idempotency.
- - ome_identity_pool - The iSCSI Initiator and Initiator IP Pool
- attributes are not mandatory to create an identity pool.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/329)
- - ome_device_info - The module is enhanced to return a blank list when
- devices or baselines are not present in the system.
- - ome_firmware_baseline_compliance_info - The module is enhanced to
- return a blank list when devices or baselines are not present in the system.
- - ome_firmware_baseline_info - The module is enhanced to return a blank
- list when devices or baselines are not present in the system.
+  - idrac_firmware - Issue(220130) The socket.timeout issue that occurs during
+    the wait_for_job_completion() job is fixed.
known_issues:
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
- - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
- of multiple uplinks of the same name even though it is supported by OpenManage
- Enterprise Modular. If an uplink is created using the same name as an existing
- uplink, the existing uplink is modified.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- ome_device_power_settings - Issue(212679) - The module errors out with the
following message if the value provided for the parameter ``power_cap`` is
not within the supported range of 0 to 32767, ``Unable to complete the request
because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support for OpenManage Enterprise Modular server interface management.
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ minor_changes:
+ - ome_application_network_address - The module is enhanced to support check
+ mode and idempotency.
+ - ome_device_info - The module is enhanced to return a blank list when devices
+ or baselines are not present in the system.
+ - ome_firmware_baseline_compliance_info - The module is enhanced to return a
+ blank list when devices or baselines are not present in the system.
+ - ome_firmware_baseline_info - The module is enhanced to return a blank list
+ when devices or baselines are not present in the system.
+ - ome_identity_pool - The iSCSI Initiator and Initiator IP Pool attributes are
+ not mandatory to create an identity pool. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/329)
+ - ome_identity_pool - The module is enhanced to support check mode and idempotency.
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/328)
+ - ome_template_identity_pool - The module is enhanced to support check mode
+ and idempotency.
+ - redfish_event_subscription - The module is enhanced to support check mode
+ and idempotency.
+ release_summary:
+ Support for OpenManage Enterprise Modular server interface
+ management.
modules:
- - description: Configures server interface profiles on OpenManage Enterprise Modular.
- name: ome_server_interface_profiles
- namespace: ''
- - description: Retrieves the information of server interface profile on OpenManage Enterprise Modular.
- name: ome_server_interface_profile_info
- namespace: ''
+ - description:
+ Retrieves the information of server interface profile on OpenManage
+ Enterprise Modular.
+ name: ome_server_interface_profile_info
+ namespace: ''
+ - description: Configures server interface profiles on OpenManage Enterprise Modular.
+ name: ome_server_interface_profiles
+ namespace: ''
release_date: '2022-02-24'
5.2.0:
changes:
- minor_changes:
- - ome_template - The module is enhanced to support check mode and
- idempotency.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/255)
- - ome_template - The module is enhanced to support modifying a template
- based on the attribute names instead of the ID.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/358)
- - ome_profile - The module is enhanced to support check mode and
- idempotency.
- - ome_profile - The module is enhanced to support modifying a profile
- based on the attribute names instead of the ID.
- - ome_diagnostics - The module is enhanced to support check mode and
- idempotency.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/345)
- - ome_diagnostics - This module is enhanced to extract log from
- lead chassis.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/310)
- - idrac_redfish_storage_controller - This module is enhanced to support
- the following settings with check mode and idempotency - UnassignSpare,
- EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID,
- ConvertToNonRAID, ChangePDStateToOnline, ChangePDStateToOffline.
- known_issues:
- - ome_application_console_preferences - Issue(224690) - The module does
- not display a proper error message when an unsupported value is provided
- for the parameters report_row_limit, email_sender_settings, and
- metric_collection_settings, and the value is applied on OpenManage Enterprise.
- - ome_device_quick_deploy - Issue(216352) - The module does not display a
- proper error message if an unsupported value is provided for the
- ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
+ known_issues:
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_console_preferences - Issue(224690) - The module does not
+ display a proper error message when an unsupported value is provided for the
+ parameters report_row_limit, email_sender_settings, and metric_collection_settings,
+ and the value is applied on OpenManage Enterprise.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module displays the following
- message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
+ minor_changes:
+ - idrac_redfish_storage_controller - This module is enhanced to support the
+ following settings with check mode and idempotency - UnassignSpare, EnableControllerEncryption,
+ BlinkTarget, UnBlinkTarget, ConvertToRAID, ConvertToNonRAID, ChangePDStateToOnline,
+ ChangePDStateToOffline.
+ - ome_diagnostics - The module is enhanced to support check mode and idempotency.
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/345)
+ - ome_diagnostics - This module is enhanced to extract log from lead chassis.
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/310)
+ - ome_profile - The module is enhanced to support check mode and idempotency.
+ - ome_profile - The module is enhanced to support modifying a profile based
+ on the attribute names instead of the ID.
+ - ome_template - The module is enhanced to support check mode and idempotency.
+ (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/255)
+ - ome_template - The module is enhanced to support modifying a template based
+ on the attribute names instead of the ID. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/358)
release_summary: Support to configure console preferences on OpenManage Enterprise.
modules:
- - description: Configures console preferences on OpenManage Enterprise.
- name: ome_application_console_preferences
- namespace: ''
+ - description: Configures console preferences on OpenManage Enterprise.
+ name: ome_application_console_preferences
+ namespace: ''
release_date: '2022-03-29'
5.3.0:
changes:
- minor_changes:
- - redfish_storage_volume - The module is enhanced to support check mode and idempotency.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/245)
- - ome_smart_fabric_uplink - The module is enhanced to support idempotency.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/253)
- - ome_diagnostics - The module is enhanced to support debug logs.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/308)
- - ome_diagnostics - Added "supportassist_collection" as a choice for the log_type argument
- to export SupportAssist logs.
- (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/309)
- known_issues:
- - ome_application_console_preferences - Issue(224690) - The module does
- not display a proper error message when an unsupported value is provided
- for the parameters report_row_limit, email_sender_settings, and
- metric_collection_settings, and the value is applied on OpenManage Enterprise.
- - ome_device_quick_deploy - Issue(216352) - The module does not display a
- proper error message if an unsupported value is provided for the
- ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
+ known_issues:
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_console_preferences - Issue(224690) - The module does not
+ display a proper error message when an unsupported value is provided for the
+ parameters report_row_limit, email_sender_settings, and metric_collection_settings,
+ and the value is applied on OpenManage Enterprise.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module displays the following
- message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Added check mode and idempotency support for redfish_storage_volume
- and idempotency support for ome_smart_fabric_uplink. For ome_diagnostics, added
- support for debug logs and added supportassist_collection as a choice for the log_type
- argument to export SupportAssist logs.
+ minor_changes:
+ - ome_diagnostics - Added "supportassist_collection" as a choice for the log_type
+ argument to export SupportAssist logs. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/309)
+ - ome_diagnostics - The module is enhanced to support debug logs. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/308)
+ - ome_smart_fabric_uplink - The module is enhanced to support idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/253)
+ - redfish_storage_volume - The module is enhanced to support check mode and
+ idempotency. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/245)
+ release_summary:
+ Added check mode and idempotency support for redfish_storage_volume
+ and idempotency support for ome_smart_fabric_uplink. For ome_diagnostics,
+ added support for debug logs and added supportassist_collection as a choice
+ for the log_type argument to export SupportAssist logs.
release_date: '2022-04-26'
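
Building on the 5.3.0 ome_diagnostics notes above, a minimal sketch of exporting a SupportAssist collection is shown below. The log_type value comes from the change note; the share-related parameter names and all values are assumptions, so verify them against the module documentation.

    - name: Export a SupportAssist collection to a network share (sketch)
      dellemc.openmanage.ome_diagnostics:
        hostname: "192.168.0.1"              # placeholder connection details
        username: "admin"
        password: "password"
        validate_certs: false                # see the 5.0.0/5.0.1 SSL notes above
        log_type: supportassist_collection   # choice added in 5.3.0 per the change note
        share_address: "192.168.0.2"         # assumed parameter names for the target share
        share_type: NFS
        share_name: "/mnt/diagnostics"
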
5.4.0:
changes:
- major_changes:
- - idrac_server_config_profile - The module is enhanced to support export,
- import, and preview the SCP configuration using Redfish and added support
- for check mode.
- known_issues:
- - ome_application_console_preferences - Issue(224690) - The module does
- not display a proper error message when an unsupported value is provided
- for the parameters report_row_limit, email_sender_settings, and
- metric_collection_settings, and the value is applied on OpenManage Enterprise.
- - ome_device_quick_deploy - Issue(216352) - The module does not display a
- proper error message if an unsupported value is provided for the
- ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
+ known_issues:
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_console_preferences - Issue(224690) - The module does not
+ display a proper error message when an unsupported value is provided for the
+ parameters report_row_limit, email_sender_settings, and metric_collection_settings,
+ and the value is applied on OpenManage Enterprise.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module displays the following
- message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support for export, import, and preview the Server Configuration
+ major_changes:
+ - idrac_server_config_profile - The module is enhanced to support exporting,
+ importing, and previewing the SCP configuration using Redfish, and support
+ for check mode is added.
+ release_summary:
+ Support for exporting, importing, and previewing the Server Configuration
Profile (SCP) configuration using Redfish and added support for check mode.
release_date: '2022-05-26'
5.5.0:
changes:
- minor_changes:
- - redfish_firmware - This module is updated to use the Job Service URL instead of
- Task Service URL for job tracking.
- - idrac_redfish_storage_controller - This module is updated to use the Job Service URL
- instead of Task Service URL for job tracking.
- - idrac_server_config_profile - This module is updated to use the Job Service URL
- instead of Task Service URL for job tracking.
bugfixes:
- - ome_application_console_preferences - Issue(224690) - The module does
- not display a proper error message when an unsupported value is provided
- for the parameters report_row_limit, email_sender_settings, and
- metric_collection_settings, and the value is applied on OpenManage Enterprise
- - idrac_server_config_profile - Issue(234817) – When an XML format is exported
- using the SCP, the module breaks while waiting for the job completion.
- known_issues:
- - ome_device_quick_deploy - Issue(216352) - The module does not display a
- proper error message if an unsupported value is provided for the
- ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
+ - "idrac_server_config_profile - Issue(234817) \u2013 When an XML format is
+ exported using the SCP, the module breaks while waiting for the job completion."
+ - ome_application_console_preferences - Issue(224690) - The module does not
+ display a proper error message when an unsupported value is provided for the
+ parameters report_row_limit, email_sender_settings, and metric_collection_settings,
+ and the value is applied on OpenManage Enterprise.
+ known_issues:
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module displays the following
- message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support to generate certificate signing request, import, and export
- certificates on iDRAC.
+ minor_changes:
+ - idrac_redfish_storage_controller - This module is updated to use the Job Service
+ URL instead of Task Service URL for job tracking.
+ - idrac_server_config_profile - This module is updated to use the Job Service
+ URL instead of Task Service URL for job tracking.
+ - redfish_firmware - This module is updated to use the Job Service URL instead
+ of Task Service URL for job tracking.
+ release_summary:
+ Support for generating a certificate signing request and for importing and
+ exporting certificates on iDRAC.
modules:
- description: Configure certificates for iDRAC.
name: idrac_certificates
@@ -842,43 +879,44 @@ releases:
release_date: '2022-06-29'
6.0.0:
changes:
- major_changes:
- - The share parameters are deprecated from the following modules - idrac_network,
- idrac_timezone_ntp, dellemc_configure_idrac_eventing, dellemc_configure_idrac_services,
- dellemc_idrac_lc_attributes, dellemc_system_lockdown_mode.
- - Added collection metadata for creating execution environments.
- - Refactored the Markdown (MD) files and content for better readability.
known_issues:
- - ome_device_quick_deploy - Issue(216352) - The module does not display a
- proper error message if an unsupported value is provided for the
- ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module displays the following
- message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Added collection metadata for creating execution environments,
+ major_changes:
+ - Added collection metadata for creating execution environments.
+ - Refactored the Markdown (MD) files and content for better readability.
+ - The share parameters are deprecated from the following modules - idrac_network,
+ idrac_timezone_ntp, dellemc_configure_idrac_eventing, dellemc_configure_idrac_services,
+ dellemc_idrac_lc_attributes, dellemc_system_lockdown_mode.
+ release_summary:
+ Added collection metadata for creating execution environments,
deprecation of share parameters, and support for configuring iDRAC attributes
using idrac_attributes module.
modules:
@@ -888,106 +926,806 @@ releases:
release_date: '2022-07-28'
6.1.0:
changes:
- major_changes:
- - ome_devices - Support for performing device-specific operations on OpenManage Enterprise.
- - idrac_boot - Support for configuring the boot settings on iDRAC.
- - ome_device_group - The module is enhanced to support the removal of devices from a static device group.
- minor_changes:
- - ome_configuration_compliance_info - The module is enhanced to report single device compliance information.
known_issues:
- - ome_device_quick_deploy - Issue(216352) - The module does not display a
- proper error message if an unsupported value is provided for the
- ipv6_prefix_length and vlan_id parameters.
- - ome_device_local_access_configuration - Issue(217865) - The module does not
- display a proper error message if an unsupported value is provided for the
- user_defined and lcd_language parameters.
- - ome_device_local_access_configuration - Issue(215035) - The module reports
- ``Successfully updated the local access setting`` if an unsupported value is
- provided for the parameter timeout_limit. However, this value is not
- actually applied on OpenManage Enterprise Modular.
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- - ome_application_alerts_smtp - Issue(212310) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_smtp - Issue(212310) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_local_access_configuration - Issue(215035) - The module reports
+ ``Successfully updated the local access setting`` if an unsupported value
+ is provided for the parameter timeout_limit. However, this value is not actually
+ applied on OpenManage Enterprise Modular.
+ - ome_device_local_access_configuration - Issue(217865) - The module does not
+ display a proper error message if an unsupported value is provided for the
+ user_defined and lcd_language parameters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(216352) - The module does not display a proper
+ error message if an unsupported value is provided for the ipv6_prefix_length
+ and vlan_id parameters.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
- - ome_device_power_settings - Issue(212679) - The module displays the following
- message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support for device-specific operations on OpenManage Enterprise and configuring boot settings on iDRAC.
+ major_changes:
+ - idrac_boot - Support for configuring the boot settings on iDRAC.
+ - ome_device_group - The module is enhanced to support the removal of devices
+ from a static device group.
+ - ome_devices - Support for performing device-specific operations on OpenManage
+ Enterprise.
+ minor_changes:
+ - ome_configuration_compliance_info - The module is enhanced to report single
+ device compliance information.
+ release_summary:
+ Support for device-specific operations on OpenManage Enterprise
+ and configuring boot settings on iDRAC.
modules:
- - description: Perform device-specific operations on target devices
- name: ome_devices
- namespace: ''
- description: Configure the boot order settings.
name: idrac_boot
namespace: ''
+ - description: Perform device-specific operations on target devices
+ name: ome_devices
+ namespace: ''
release_date: '2022-08-26'
6.2.0:
changes:
+ known_issues:
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
major_changes:
- idrac_bios - The module is enhanced to support clear pending BIOS attributes,
reset BIOS to default settings, and configure BIOS attributes using Redfish.
+ release_summary:
+ Added idrac_bios enhancements to clear pending BIOS attributes, reset BIOS
+ to default settings, and configure BIOS attributes using Redfish.
+ release_date: '2022-09-28'
+ 6.3.0:
+ changes:
known_issues:
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
- port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
+ major_changes:
+ - idrac_redfish_storage_controller - This module is enhanced to support LockVirtualDisk
+ operation.
+ - idrac_virtual_media - This module allows you to configure Remote File Share settings.
+ release_summary:
+ Support for the LockVirtualDisk operation and for configuring Remote
+ File Share settings using the idrac_virtual_media module.
+ modules:
+ - description: Configure the virtual media settings.
+ name: idrac_virtual_media
+ namespace: ''
+ release_date: '2022-10-28'
+ 7.0.0:
+ changes:
+ known_issues:
+ - idrac_firmware - Issue(249879) - Firmware update of iDRAC9-based Servers fails
+ if SOCKS proxy with authentication is used.
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- ome_device_power_settings - Issue(212679) - The module displays the following
message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Added clear pending BIOS attributes, reset BIOS to default settings,
- and configure BIOS attribute using Redfish enhancements for idrac_bios.
- release_date: '2022-09-28'
- 6.3.0:
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ major_changes:
+ - Rebranded from Dell EMC to Dell.
+ - idrac_firmware - This module is enhanced to support proxy.
+ - idrac_user_info - This module allows you to retrieve iDRAC Local user
+ details.
+ release_summary:
+ Rebranded from Dell EMC to Dell, enhanced idrac_firmware module
+ to support proxy, and added support to retrieve iDRAC local user details.
+ modules:
+ - description: Retrieve iDRAC Local user details.
+ name: idrac_user_info
+ namespace: ''
+ release_date: '2022-11-28'
+ 7.1.0:
changes:
+ known_issues:
+ - idrac_firmware - Issue(249879) - Firmware update of iDRAC9-based Servers fails
+ if SOCKS proxy with authentication is used.
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
major_changes:
- - idrac_virtual_media - This module allows to configure Remote File Share settings.
- - idrac_redfish_storage_controller - This module is enhanced to support
- LockVirtualDisk operation.
+ - Support for IPv6 address for OMSDK dependent iDRAC modules.
+ - ome_inventory - This plugin allows you to create an inventory from a group
+ on OpenManage Enterprise.
+ - ome_smart_fabric_info - This module retrieves the list of smart fabrics in
+ the inventory of OpenManage Enterprise Modular.
+ - ome_smart_fabric_uplink_info - This module retrieves details of fabric uplinks
+ on OpenManage Enterprise Modular.
+ minor_changes:
+ - redfish_firmware - This module supports the timeout option.
+ release_summary:
+ Support for retrieving smart fabric and smart fabric uplink
+ information and support for IPv6 address for OMSDK dependent iDRAC modules.
+ modules:
+ - description:
+ Retrieves the information of smart fabrics inventoried by OpenManage
+ Enterprise Modular
+ name: ome_smart_fabric_info
+ namespace: ''
+ - description: Retrieve details of fabric uplink on OpenManage Enterprise Modular.
+ name: ome_smart_fabric_uplink_info
+ namespace: ''
+ plugins:
+ inventory:
+ - description: Group inventory plugin on OpenManage Enterprise.
+ name: ome_inventory
+ namespace: null
+ release_date: '2022-12-28'
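As a rough sketch of the 7.1.0 ome_inventory plugin listed above, an inventory source file might look like the following; the plugin name is taken from the entry, while the option names (hostname, username, password, ome_group_name) are assumptions to be verified against the plugin documentation.

  # ome_inventory.yml - illustrative inventory source (option names assumed)
  plugin: dellemc.openmanage.ome_inventory
  hostname: "192.168.0.1"                 # OpenManage Enterprise address (placeholder)
  username: "admin"                       # placeholder credentials
  password: "password"
  ome_group_name: "Dell iDRAC Servers"    # group whose devices become hosts (assumed option)

Such a file could then be inspected with, for example, ansible-inventory -i ome_inventory.yml --list.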
+ 7.2.0:
+ changes:
known_issues:
- - ome_device_network_services - Issue(212681) - The module does not provide a
- proper error message if unsupported values are provided for the parameters-
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
+ - idrac_user - Issue(192043) The module may error out with the message ``unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
- - ome_application_alerts_syslog - Issue(215374) - The module does not provide a
- proper error message if the destination_address is more than 255 characters.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_inventory - Issue(256257) - All hosts are not retrieved for ``Modular
+ System`` group and corresponding child groups.
+ - ome_inventory - Issue(256589) - All hosts are not retrieved for ``Custom Groups``
+ group and corresponding child groups.
+ - ome_inventory - Issue(256593) - All hosts are not retrieved for ``PLUGIN GROUPS``
+ group and corresponding child groups.
+ - ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
+ of multiple uplinks of the same name even though it is supported by OpenManage
+ Enterprise Modular. If an uplink is created using the same name as an existing
+ uplink, the existing uplink is modified.
+ major_changes:
+ - idrac_redfish_storage_controller - This module is enhanced to configure controller
+ attributes and online capacity expansion.
+ - ome_domain_user_groups - This module allows you to import LDAP directory groups.
+ - ome_inventory - This plugin is enhanced to support inventory retrieval of
+ System and Plugin Groups of OpenManage Enterprise.
+ - ome_profile_info - This module allows you to retrieve profiles with attributes
+ on OpenManage Enterprise or OpenManage Enterprise Modular.
+ - ome_template_network_vlan_info - This module allows you to retrieve the network
+ configuration of a template on OpenManage Enterprise or OpenManage Enterprise
+ Modular.
+ release_summary:
+ Support for retrieving the inventory and host details of all
+ child groups using parent groups, retrieving inventory of System and Plugin
+ Groups, retrieving profiles with attributes, retrieving network configuration
+ of a template, configuring controller attributes, configuring online capacity
+ expansion, and importing the LDAP directory.
+ modules:
+ - description: Retrieve profiles with attribute details
+ name: ome_profile_info
+ namespace: ''
+ - description: Retrieves network configuration of template.
+ name: ome_template_network_vlan_info
+ namespace: ''
+ release_date: '2023-01-30'
+ 7.3.0:
+ changes:
+ known_issues:
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
- idrac_user - Issue(192043) The module may error out with the message ``unable
to perform the import or export operation because there are pending attribute
changes or a configuration job is in progress``. Wait for the job to complete
and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the parameters-
+ port_number, community_name, max_sessions, max_auth_retries, and idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_inventory - Issue(256257) - All hosts are not retrieved for ``Modular
+ System`` group and corresponding child groups.
+ - ome_inventory - Issue(256589) - All hosts are not retrieved for ``Custom Groups``
+ group and corresponding child groups.
+ - ome_inventory - Issue(256593) - All hosts are not retrieved for ``PLUGIN GROUPS``
+ group and corresponding child groups.
- ome_smart_fabric_uplink - Issue(186024) - The module does not allow the creation
of multiple uplinks of the same name even though it is supported by OpenManage
Enterprise Modular. If an uplink is created using the same name as an existing
uplink, the existing uplink is modified.
+ major_changes:
+ - idrac_server_config_profile - This module is enhanced to support proxy settings,
+ import buffer, include in export, and ignore certificate warning.
+ release_summary:
+ Support for iDRAC export Server Configuration Profile role
+ and proxy settings, import buffer, include in export, and ignore certificate
+ warning.
+ objects:
+ role:
+ - description: Role to export iDRAC Server Configuration Profile (SCP).
+ name: idrac_export_server_config_profile
+ namespace: null
+ release_date: '2023-02-28'
+ 7.4.0:
+ changes:
+ known_issues:
+ - idrac_os_deployment - Issue(260496) - OS installation supports only NFS
+ and CIFS shares to store the custom ISO in the destination_path; HTTP, HTTPS,
+ and FTP are not supported.
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
+ - idrac_user - Issue(192043) The module may error out with the message ``Unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
- ome_device_power_settings - Issue(212679) - The module displays the following
message if the value provided for the parameter ``power_cap`` is not within
- the supported range of 0 to 32767, ``Unable to complete the request
- because PowerCap does not exist or is not applicable for the resource URI.``
- release_summary: Support for LockVirtualDisk operation and to configure Remote File
- Share settings using idrac_virtual_media module.
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ release_summary:
+ "- Roles to support the Import server configuration profile,
+ Manage iDRAC power states, Manage iDRAC certificate,\n Gather facts from
+ iDRAC, and Deploy operating system are added.\n- The OME inventory plugin is
+ enhanced to support the environment variables for the input parameters.\n"
+ objects:
+ role:
+ - description:
+ Role to manage the iDRAC certificates - generate CSR, import/export
+ certificates, and reset configuration - for PowerEdge servers.
+ name: idrac_certificate
+ namespace: null
+ - description: Role to gather facts from the iDRAC Server.
+ name: idrac_gather_facts
+ namespace: null
+ - description: Role to import iDRAC Server Configuration Profile (SCP).
+ name: idrac_import_server_config_profile
+ namespace: null
+ - description:
+ Role to deploy specified operating system and version on the
+ servers.
+ name: idrac_os_deployment
+ namespace: null
+ - description: Role to manage the different power states of the specified device.
+ name: idrac_server_powerstate
+ namespace: null
+ release_date: '2023-03-30'
+ 7.5.0:
+ changes:
+ known_issues:
+ - idrac_os_deployment - Issue(260496) - OS installation supports only NFS
+ and CIFS shares to store the custom ISO in the destination_path; HTTP, HTTPS,
+ and FTP are not supported.
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
+ - idrac_user - Issue(192043) The module may error out with the message ``Unable
+ to perform the import or export operation because there are pending attribute
+ changes or a configuration job is in progress``. Wait for the job to complete
+ and run the task again.
+ - ome_application_alerts_syslog - Issue(215374) - The module does not provide
+ a proper error message if the destination_address is more than 255 characters.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ release_summary:
+ '- redfish_firmware - This module is enhanced to include job
+ tracking.
+
+ - ome_template - This module is enhanced to include job tracking.
+
+ - Role to support the iDRAC and Redfish firmware update and manage storage
+ volume configuration is added.
+
+ - Role to deploy the iDRAC operating system is enhanced to support ESXi version
+ 8.X and HTTP or HTTPS for the destination.'
+ objects:
+ role:
+ - description:
+ Firmware update from a repository on a network share (CIFS, NFS,
+ HTTP, HTTPS, FTP).
+ name: idrac_firmware
+ namespace: null
+ - description:
+ To perform a component firmware update using the image file available
+ on the local or remote system.
+ name: redfish_firmware
+ namespace: null
+ - description: Role to manage the storage volume configuration.
+ name: redfish_storage_volume
+ namespace: null
+ release_date: '2023-04-30'
+ 7.6.0:
+ changes:
+ known_issues:
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ release_summary:
+ '- Role to configure the iDRAC system, manager, and lifecycle
+ attributes for Dell PowerEdge servers.
+
+ - Role to modify BIOS attributes, clear pending BIOS attributes, and reset
+ the BIOS to default settings.
+
+ - Role to reset and restart iDRAC (iDRAC8 and iDRAC9 only) for Dell PowerEdge
+ servers.
+
+ - Role to configure the physical disk, virtual disk, and storage controller
+ settings on iDRAC9 based PowerEdge servers.'
+ objects:
+ role:
+ - description: Role to configure iDRAC attributes.
+ name: idrac_attributes
+ namespace: null
+ - description:
+ Role to modify BIOS attributes, clear pending BIOS attributes,
+ and reset the BIOS to default settings.
+ name: idrac_bios
+ namespace: null
+ - description:
+ Role to reset and restart iDRAC (iDRAC8 and iDRAC9 only) for
+ Dell PowerEdge servers.
+ name: idrac_reset
+ namespace: null
+ - description:
+ Role to configure the physical disk, virtual disk, and storage
+ controller settings on iDRAC9 based PowerEdge servers.
+ name: idrac_storage_controller
+ namespace: null
+ release_date: '2023-05-30'
+ 7.6.1:
+ changes:
+ known_issues:
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ minor_changes:
+ - Updated the idrac_gather_facts role to use jinja template filters.
+ release_summary: Removed the dependency of community general collections.
+ release_date: '2023-06-01'
+ 8.0.0:
+ changes:
+ bugfixes:
+ - Job tracking is fixed for iDRAC SCP import (https://github.com/dell/dellemc-openmanage-ansible-modules/pull/504).
+ - OMSDK is handled for import error ``SNIMissingWarning`` that is undefined
+ (https://github.com/dell/omsdk/issues/33).
+ known_issues:
+ - idrac_redfish_storage_controller - Issue(256164) - If incorrect value is provided
+ for one of the attributes in the provided attribute list for controller configuration,
+ then this module does not exit with error.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ minor_changes:
+ - All the module documentation and examples are updated to use true or false
+ for Boolean values.
+ - Role ``idrac_os_deployment`` is enhanced to remove the auto installation of
+ required libraries and to support custom ISO and kickstart file as input.
+ release_summary:
+ Roles ``idrac_boot`` and ``idrac_job_queue`` are added to manage
+ the boot order settings and the iDRAC Lifecycle Controller job queue, respectively.
+ Role ``idrac_os_deployment`` is enhanced to remove the auto installation
+ of required libraries and to support a custom ISO and kickstart file as input.
+ Dropped support for iDRAC7-based Dell PowerEdge servers.
+ removed_features:
+ - The ``dellemc_get_firmware_inventory`` module is removed and replaced with
+ the module ``idrac_firmware_info``.
+ - The ``dellemc_get_system_inventory`` module is removed and replaced with the
+ module ``idrac_system_info``.
+ objects:
+ role:
+ - description: Configure the boot order settings
+ name: idrac_boot
+ namespace: null
+ - description: Role to manage the iDRAC lifecycle controller job queue.
+ name: idrac_job_queue
+ namespace: null
+ release_date: '2023-06-30'
+ 8.1.0:
+ changes:
+ bugfixes:
+ - The Chassis Power PIN value input from the module must be six numerical
+ digits. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/492).
+ - The idrac_attributes module now supports modification of IPv6 attributes on
+ iDRAC 8. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/488).
+ - ome_device_info is limited to 50 responses with a query filter. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/499).
+ known_issues:
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ minor_changes:
+ - Module ``idrac_user`` is enhanced to configure custom privileges for a user.
+ - Module ``ome_application_certificate`` is enhanced to support subject alternative
+ names.
+ - Module ``ome_discovery`` is enhanced to add detailed job information of each
+ IP discovered.
+ - Module ``ome_firmware_baseline`` is enhanced to support the option to select
+ only components with no reboot required.
+ - Module ``ome_job_info`` is enhanced to return last execution details and execution
+ histories.
+ release_summary:
+ '- Support for subject alternative names while generating certificate
+ signing requests on OME.
+
+ - Create a user on iDRAC using custom privileges.
+
+ - Create a firmware baseline on OME with the filter option of no reboot required.
+
+ - Retrieve all server items in the output for ome_device_info.
+
+ - Enhancement to add detailed job information for ome_discovery and ome_job_info.'
+ release_date: '2023-07-30'
+ 8.2.0:
+ changes:
+ bugfixes:
+ - Updated the documentation on how to use the collection with Ansible. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/393).
+ known_issues:
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ minor_changes:
+ - Module ``ome_diagnostics`` is enhanced to update changed flag status in response.
+ - Module ``ome_firmware_catalog`` is enhanced to support IPv6 address.
+ - Module ``redfish_firmware`` is enhanced to support IPv6 address.
+ release_summary:
+ '- The redfish_firmware and ome_firmware_catalog modules are enhanced
+ to support IPv6 addresses.
+
+ - Module to support firmware rollback of server components.
+
+ - Support for retrieving alert policies, actions, categories and message id
+ information of alert policies for OME and OME Modular.
+
+ - ome_diagnostics module is enhanced to update changed flag status in response.'
modules:
- - description: Configure the virtual media settings.
- name: idrac_virtual_media
+ - description: Get information on actions of alert policies.
+ name: ome_alert_policies_action_info
namespace: ''
- release_date: '2022-10-28'
-
+ - description: Retrieves information of all OME alert policy categories.
+ name: ome_alert_policies_category_info
+ namespace: ''
+ - description: Retrieves information of one or more OME alert policies.
+ name: ome_alert_policies_info
+ namespace: ''
+ - description: Get message ID information of alert policies.
+ name: ome_alert_policies_message_id_info
+ namespace: ''
+ - description: To perform a component firmware rollback using component name.
+ name: redfish_firmware_rollback
+ namespace: ''
+ release_date: '2023-08-31'
+ 8.3.0:
+ changes:
+ bugfixes:
+ - ome_device_quick_deploy - If the blade is not present, then the module can
+ assign a static IP to the slot (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/532).
+ known_issues:
+ - idrac_firmware - Issue(276335) - This module fails on the Python 3.11.x
+ version with NFS shares. Use a different Python version or Share type.
+ - ome_device_quick_deploy - Issue(275231) - This module does not deploy a
+ new configuration to a slot that has disabled IPv6.
+ - ca_path missing - Issue(275740) - The roles idrac_attributes, redfish_storage_volume,
+ and idrac_server_powerstate have a missing parameter ca_path.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ minor_changes:
+ - Module ``ome_firmware`` is enhanced to support reboot type options.
+ - Module ``redfish_storage_volume`` is enhanced to support RAID6 and RAID60.
+ release_summary: '- Module to manage OME alert policies.
+
+ - Support for RAID6 and RAID60 for module ``redfish_storage_volume``.
+
+ - Support for reboot type options for module ``ome_firmware``.'
+ modules:
+ - description: Manage OME alert policies.
+ name: ome_alert_policies
+ namespace: ''
+ release_date: '2023-09-29'
+ 8.4.0:
+ changes:
+ bugfixes:
+ - idrac_firmware - Issue(276335) - This module fails on the Python 3.11.x version
+ with NFS share. Use a different Python version or Share type.
+ - idrac_server_config_profile - The import for Server Configuration Profile
+ (SCP) operation fails to handle the absence of a file and incorrectly reports
+ success instead of the expected failure. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/544).
+ known_issues:
+ - ca_path missing - Issue(275740) - The roles idrac_attributes, redfish_storage_volume,
+ and idrac_server_powerstate have a missing parameter ca_path.
+ - idrac_firmware - Issue(279282) - This module does not support firmware update
+ using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+ - idrac_network_attributes - Issue(279049) - If unsupported values are provided
+ for the parameter ``ome_network_attributes``, then this module does not provide
+ a correct error message.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+ parameters- port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(275231) - This module does not deploy a new
+ configuration to a slot that has disabled IPv6.
+ - ome_smart_fabric_uplink - Issue(186024) - Despite being supported by
+ OpenManage Enterprise Modular, the module does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, the existing uplink is modified.
+ release_summary: Module to manage iDRAC network attributes.
+ modules:
+ - description:
+ This module allows you to configure the port and partition network
+ attributes on the network interface cards.
+ name: idrac_network_attributes
+ namespace: ''
+ release_date: '2023-10-30'
+ 8.5.0:
+ changes:
+ bugfixes:
+ - ome_inventory - The plugin returns 50 results when a group is specified.
+ No results are shown when a group is not specified. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/575).
+ minor_changes:
+ - Module ``redfish_storage_volume`` is enhanced to support reboot options and job tracking operation.
+ - Ansible lint issues are fixed for the collections.
+ known_issues:
+ - ome_diagnostics - Issue(279193) - Export of SupportAssist collection logs to
+ the share location fails on OME version 4.0.0.
+ - idrac_firmware - Issue(279282) - This module does not support firmware update
+ using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+ - idrac_network_attributes - Issue(279049) - If unsupported values are provided
+ for the parameter ``ome_network_attributes``, then this module does not provide
+ a correct error message.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+    parameters: port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(275231) - This module does not deploy a new
+ configuration to a slot that has disabled IPv6.
+  - ome_smart_fabric_uplink - Issue(186024) - The module is supported by
+    OpenManage Enterprise Modular; however, it does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, then the existing uplink is modified.
+ release_summary: '- Ansible lint issues are fixed for the collections.
+
+ - redfish_storage_volume module is enhanced to support reboot options and job tracking operation.'
+ release_date: '2023-11-30'
+ 8.6.0:
+ changes:
+ release_summary: '- Added support for the environment variables as fallback for
+ credentials for all modules of iDRAC, OME, and Redfish.
+
+ - Enhanced idrac_certificates module and idrac_certificate role
+ to support `CUSTOMCERTIFICATE` and import `HTTPS` certificate with the SSL key.'
+ bugfixes:
+  - For the idrac_certificates module, the `email_address` parameter is now
+    optional. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/582).
+  - Fixed the issue where the environment variable `NO_PROXY` was
+    ignored. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/554)
+ major_changes:
+ - All iDRAC and Redfish modules are enhanced to support the environment variables
+ `IDRAC_USERNAME` and `IDRAC_PASSWORD` as fallback for credentials.
+ - All OME modules are enhanced to support the environment variables `OME_USERNAME`
+ and `OME_PASSWORD` as fallback for credentials.
+ - idrac_certificates - The module is enhanced to support the import and export of `CUSTOMCERTIFICATE`.
+ minor_changes:
+  - "For the idrac_certificates module, the following enhancements are made: Added
+    support for the import operation of an `HTTPS` certificate with the SSL key.
+    The `email_address` parameter is now optional."
+  - For the idrac_certificate role, added support for the import operation of an `HTTPS` certificate with the SSL key.
+ known_issues:
+ - ome_diagnostics - Issue(279193) - Export of SupportAssist collection logs to
+ the share location fails on OME version 4.0.0.
+ - idrac_firmware - Issue(279282) - This module does not support firmware update
+ using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+ - idrac_network_attributes - Issue(279049) - If unsupported values are provided
+ for the parameter ``ome_network_attributes``, then this module does not provide
+ a correct error message.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+    parameters: port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+ - ome_device_quick_deploy - Issue(275231) - This module does not deploy a new
+ configuration to a slot that has disabled IPv6.
+  - ome_smart_fabric_uplink - Issue(186024) - The module is supported by
+    OpenManage Enterprise Modular; however, it does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, then the existing uplink is modified.
+ release_date: '2023-12-30'
+ 8.7.0:
+ changes:
+ release_summary: '- Module to manage iDRAC licenses.
+
+ - idrac_gather_facts role is enhanced to add storage controller details in the role output and provide support for secure boot.'
+ bugfixes:
+  - Fixed the issue with deploying a new configuration on a quick deploy slot when IPv6
+    is disabled. (https://github.com/dell/dellemc-openmanage-ansible-modules/issues/533)
+ major_changes:
+ - idrac_gather_facts - This role is enhanced to support secure boot.
+ - idrac_license - The module is introduced to configure iDRAC licenses.
+ minor_changes:
+ - For idrac_gather_facts role, added storage controller details in the role output.
+ known_issues:
+ - ome_diagnostics - Issue(279193) - Export of SupportAssist collection logs to
+ the share location fails on OME version 4.0.0.
+ - idrac_firmware - Issue(279282) - This module does not support firmware update
+ using HTTP, HTTPS, and FTP shares with authentication on iDRAC8.
+ - idrac_network_attributes - Issue(279049) - If unsupported values are provided
+ for the parameter ``ome_network_attributes``, then this module does not provide
+ a correct error message.
+ - ome_device_network_services - Issue(212681) - The module does not provide
+ a proper error message if unsupported values are provided for the following
+    parameters: port_number, community_name, max_sessions, max_auth_retries, and
+ idle_timeout.
+ - ome_device_power_settings - Issue(212679) - The module displays the following
+ message if the value provided for the parameter ``power_cap`` is not within
+ the supported range of 0 to 32767, ``Unable to complete the request because
+ PowerCap does not exist or is not applicable for the resource URI.``
+  - ome_smart_fabric_uplink - Issue(186024) - The module is supported by
+    OpenManage Enterprise Modular; however, it does not allow the creation of multiple
+ uplinks of the same name. If an uplink is created using the same name as an
+ existing uplink, then the existing uplink is modified.
+ modules:
+ - description:
+    This module allows you to import, export, and delete licenses on iDRAC.
+ name: idrac_license
+ namespace: ''
+ release_date: '2024-01-31'
diff --git a/ansible_collections/dellemc/openmanage/changelogs/config.yaml b/ansible_collections/dellemc/openmanage/changelogs/config.yaml
index cfc04bfa7..605228ca1 100644
--- a/ansible_collections/dellemc/openmanage/changelogs/config.yaml
+++ b/ansible_collections/dellemc/openmanage/changelogs/config.yaml
@@ -10,22 +10,22 @@ notesdir: fragments
prelude_section_name: release_summary
prelude_section_title: Release Summary
sections:
-- - major_changes
- - Major Changes
-- - minor_changes
- - Minor Changes
-- - breaking_changes
- - Breaking Changes / Porting Guide
-- - deprecated_features
- - Deprecated Features
-- - removed_features
- - Removed Features (previously deprecated)
-- - security_fixes
- - Security Fixes
-- - bugfixes
- - Bugfixes
-- - known_issues
- - Known Issues
-title: Dell EMC OpenManage Ansible Modules
+ - - major_changes
+ - Major Changes
+ - - minor_changes
+ - Minor Changes
+ - - breaking_changes
+ - Breaking Changes / Porting Guide
+ - - deprecated_features
+ - Deprecated Features
+ - - removed_features
+ - Removed Features (previously deprecated)
+ - - security_fixes
+ - Security Fixes
+ - - bugfixes
+ - Bugfixes
+ - - known_issues
+ - Known Issues
+title: Dell OpenManage Ansible Modules
trivial_section_name: trivial
use_fqcn: true
diff --git a/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md b/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md
index b89afba7f..9119699c1 100644
--- a/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md
+++ b/ansible_collections/dellemc/openmanage/docs/DOCUMENTATION.md
@@ -8,7 +8,10 @@ You may obtain a copy of the License at
https://www.gnu.org/licenses/gpl-3.0.txt
-->
## Playbooks and Tutorials
-* For the latest sample playbooks and examples, see [playbooks](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/playbooks).
+* For the latest sample playbooks and examples for idrac, see [idrac playbooks](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/playbooks/idrac).
+* For the latest sample playbooks and examples for redfish, see [redfish playbooks](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/playbooks/redfish).
+* For the latest sample playbooks and examples for ome, see [ome playbooks](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/playbooks/ome).
+* For the latest sample playbooks and examples for roles, see [roles playbooks](https://github.com/dell/dellemc-openmanage-ansible-modules/tree/collections/playbooks/roles).
* For the tutorials and sample use cases, see the tutorials available at [developer.dell.com](https://developer.dell.com/).
## Module documentations
@@ -36,5 +39,5 @@ You may obtain a copy of the License at
```export REQUESTS_CA_BUNDLE=/usr/share/ssl-certs/ca-cert.pem```
### Ignore SSL certificate validation
-It is common to run a test environment without a proper SSL certificate configuration. To disable the certificate validation for a module, set the validate_certs module argument to ```False``` in the playbook.
+It is common to run a test environment without a proper SSL certificate configuration. To disable the certificate validation for a module, set the validate_certs module argument to ```false``` in the playbook.
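+
+A minimal sketch of such a task, assuming the `dellemc.openmanage.idrac_system_info` module and placeholder connection details that you would replace with your own:
+
+```yaml
+- name: Get system inventory without certificate validation (test environments only)
+  dellemc.openmanage.idrac_system_info:
+    idrac_ip: "192.168.0.1"
+    idrac_user: "user_name"
+    idrac_password: "user_password"
+    validate_certs: false
+  delegate_to: localhost
+```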
diff --git a/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md b/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md
index a688c5c27..c5f556a72 100644
--- a/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md
+++ b/ansible_collections/dellemc/openmanage/docs/EXECUTION_ENVIRONMENT.md
@@ -1,4 +1,4 @@
-# Build execution environment with Dell OpenManage Ansible Modules
+# Using Ansible Automation Platform with Dell OpenManage Ansible Modules
Creating automation execution environments using the OpenManage Ansible Modules enables your automation teams to define, build, and update their automation environment themselves. Execution environments provide a common language to communicate automation dependency between automation developers, architects, and platform administrators.
In this tutorial, you will learn how to build the execution environment image, push the image to a registry, and then create the execution environment in Ansible Automation Platform.
@@ -13,59 +13,68 @@ While Ansible Galaxy is good for testing the latest and greatest developer conte
- Premium support enables you to get help directly from Red Hat if you have any issue with an official Red Hat collection or certified partner collection.
- Red Hat subscription provides free and unlimited access to any content available.
-## Why AWX
-Ansible AWX provides an open-source version of Ansible Automation Platform and is the foundation on which Ansible Automation Platform was developed. With Ansible AWX, you have all the enterprise features for an unlimited number of nodes. However, one drawback to note is that Ansible AWX undergoes minimal testing and quality engineering testing.
## Workflow
In this tutorial, you will learn how to:
-1. [Build custom execution environment image.](#build-custom-execution-environment-image)
+1. [Build execution environment image.](#build-execution-environment-image)
2. [Use Ansible Runner to verify the execution environment (Optional).](#use-ansible-runner-to-verify-the-execution-environment)
3. [Upload the execution environment to a registry.](#upload-the-execution-environment-to-a-registry)
4. [Create execution environment in Ansible Automation Platform.](#create-execution-environment-in-ansible-automation-platform)
-## Build custom execution environment image
-Build a custom image with the required OpenManage Ansible collections ([dellemc.openmanage](https://github.com/dell/dellemc-openmanage-ansible-modules) ) and libraries (omsdk and netaddr), and then upload it to a registry of your choice. In this tutorial, you will learn how to create a Docker image.
+## Build execution environment image
+Build an image with the required Ansible collections and libraries, and then upload it to a registry of your choice. In this tutorial, you will learn how to create a Podman image.
1. Create the following files in your local directory:
- - *execution_environment.yml*
- - *requirement.yml*
+ - *execution-environment.yml*
+ - *requirements.yml*
- *requirements.txt*
2. For installing OpenManage collections and their dependencies, copy the metadata from the [dellemc.openmanage](https://github.com/dell/dellemc-openmanage-ansible-modules) GitHub repository.
-The following are the sample files:
+ The following are the sample files:
-**execution_environment.yml**
+ **execution-environment.yml**
-```yaml
-version: 1
-dependencies:
- galaxy: requirements.yml
- python: requirements.txt
-```
+ ```yaml
+ version: 3
+ dependencies:
+ galaxy: requirements.yml
+ python: requirements.txt
+ system: bindep.txt
+ ```
+
+   You can modify the execution environment file as per your requirements, based on the guidelines mentioned [here](https://docs.ansible.com/automation-controller/latest/html/userguide/ee_reference.html).
+
-**requirement.yml**
-```yaml
-collections:
- - name: dellemc.openmanage
-```
+ **requirements.yml**
+ ```yaml
+ collections:
+ - dellemc.openmanage
+ - ansible.utils
+ - ansible.windows
+ ```
+ Note: The content of the *requirements.yml* can be found [here](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/requirements.yml)
-**requirements.txt**
-```yaml
-omsdk
-netaddr>=0.7.19
-```
+ **requirements.txt**
+ ```yaml
+ omsdk
+ netaddr>=0.7.19
+ jmespath
+ ```
-3. Build the Docker image using the following syntax:
+ Note: The content of the *requirements.txt* can be found [here](https://github.com/dell/dellemc-openmanage-ansible-modules/blob/collections/requirements.txt)
-`ansible-builder build -f<path>/execution-environment.yml --container-runtime=<container> -c build_context --tag <container.io>/<org_name or username>/<imagename>:<tag>`
+3. Build the Podman image using the following syntax:
- In this tutorial, the following command is used to build the Docker image with the name "*execution_environment*".
+ `ansible-builder build -f <path>/execution-environment.yml --container-runtime=<container> -c build_context --tag <container.io>/<org_name or username>/<imagename>:<tag>`
-```yaml
-ansible-builder build -f execution-environment.yml --container-runtime=docker -c build_context --tag docker.io/delluser/execution_environment:<tag>
-docker build -f context/Dockerfile -t docker.io/delluser/execution_environment context
-Complete! The build context can be found at: /context
-```
+   In this tutorial, the following command is used to build the image with the name "*dell-openmanage-ee*" using Podman.
+
+ ```yaml
+ $ ansible-builder build -f execution-environment.yml --container-runtime=podman -c build_context --tag quay.io/delluser/dell-openmanage-ee:<tag>
+
+ podman build -f context/Containerfile -t quay.io/delluser/dell-openmanage-ee context
+ Complete! The build context can be found at: /context
+ ```
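+
+   Optionally, you can review the generated build context before pushing the image. A minimal sketch, assuming the `build_context` directory produced by the command above:
+
+   ```yaml
+   # Review the generated Containerfile that drives the image build
+   ls build_context
+   cat build_context/Containerfile
+   ```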
## Use Ansible Runner to verify the execution environment
@@ -86,7 +95,7 @@ runner-example/
└── project
└── testplaybook.yml
```
-
+
2. Create a host file with the following entries:
```yaml
@@ -94,34 +103,30 @@ runner-example/
192.168.0.1
[idrac:vars]
-ansible_python_interpreter=/usr/bin/python3.8
+ansible_python_interpreter=/usr/bin/python3.9
user=user
password=password
```
3. Create a playbook.
```yaml
-- hosts: idrac
- connection: local
- name: Get system inventory
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Get system inventory
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Get system inventory.
- idrac_system_info:
- idrac_ip: "{{ inventory_hostname }}"
- idrac_user: "{{ user }}"
- idrac_password: "{{ password }}"
- validate_certs: False
-
+ - name: Get system inventory.
+ dellemc.openmanage.idrac_system_info:
+ idrac_ip: "{{ inventory_hostname }}"
+ idrac_user: "{{ user }}"
+ idrac_password: "{{ password }}"
+ validate_certs: false
+ delegate_to: localhost
```
4. Run the playbook using the following command:
```yaml
-ansible-runner run --process-isolation --process-isolation-executable docker --container-image docker.io/delluser/execution_environment -p sysinfo.yml ./runner-example/ -v
+ansible-runner run --process-isolation --process-isolation-executable podman --container-image quay.io/delluser/dell-openmanage-ee -p sysinfo.yml ./runner-example/ -v
No config file found; using defaults
PLAY [Get system inventory] ****************************************************
@@ -167,33 +172,33 @@ runner-example/
Now that you have built the image, you can upload the execution environment image to a registry. The following steps describe how to upload the image to a Docker registry. You can upload the image to a registry of your choice (https://quay.io or https://docker.io).
-1. Log in to docker.io.
+1. Log in to quay.io.
```yaml
-docker login docker.io
+podman login quay.io
```
2. To view the list of images, run the following command:
```yaml
-docker image list
+podman image list
```
Output:
```yaml
REPOSITORY TAG IMAGE ID CREATED SIZE
-docker.io/delluser/execution_environment latest 6ea6337881f5 36 seconds ago 908MB
+quay.io/delluser/dell-openmanage-ee latest 6ea6337881f5 36 seconds ago 908MB
<none> <none> bab8f0c1f372 3 hours ago 959MB
<none> <none> 26e61b6f31b6 3 hours ago 779MB
```
3. Upload the image to the repository using the following command:
```yaml
-docker push docker.io/delluser/execution_environment
+podman push quay.io/delluser/dell-openmanage-ee
```
Output:
```yaml
Using default tag: latest
-The push refers to repository [docker.io/delluser/execution_environment]
+The push refers to repository [quay.io/delluser/dell-openmanage-ee]
6a938007b4eb: Pushed
c1a7a8b69adb: Pushed
75f55eeed6f1: Pushed
@@ -217,7 +222,6 @@ aadc47c09f66: Layer already exists
101e6c349551: Layer already exists
latest: digest: sha256:7be5110235abf72e0547cac016a506d59313addefc445d35e5dff68edb0a9ad6 size: 4726
<none> 26e61b6f31b6 3 hours ago 779MB
-
```
## Create execution environment in Ansible Automation Platform
@@ -227,8 +231,8 @@ Now that you uploaded the image to a registry, you can now create the execution
1. Log in to Ansible Automation Platform.
2. On the navigation pane, click **Administration > Execution Environments**.
-2. On the **Execution Environments** page, click **Add**.
-3. On the **Create new execution environment** page, enter the following details, and click **Save**.
+3. On the **Execution Environments** page, click **Add**.
+4. On the **Create new execution environment** page, enter the following details, and click **Save**.
- **Name**: Enter a name for the execution environment (required).
- **Image**: Enter the image name (required). The image name requires its full location (repo), the registry, image name, and version tag
- **Pull**: From the **Pull** drop-down list, select **Only pull the image if not present before running**.
@@ -248,7 +252,7 @@ A Project is a logical collection of Ansible playbooks.
- In the **Source Control URL**, specify the source control URL. That is your repository link.
### Create Credential Types
-This tutorial uses a custom credential type. You can create credential types depending on your data center environment. For more information, see [Credential Types](https://docs.ansible.com/automation-controller/4.0.0/html/userguide/credentials.html#credential-types).
+This tutorial uses a custom credential type. You can create credential types depending on your data center environment. For more information, see [Credential Types](https://docs.ansible.com/automation-controller/4.2.1/html/userguide/credentials.html#credential-types).
To create a credential type:
@@ -263,16 +267,16 @@ This tutorial uses a custom credential type. The following are the input configu
```yaml
fields:
- -id: username
+ - id: username
type: string
label: Username
- -Id: password
+ - id: password
type: string
label: Password
secret: true
-Required:
- -username
- -password
+required:
+ - username
+ - password
```
**Injector configuration:**
@@ -295,7 +299,7 @@ extra_vars:
1. On the navigation pane, click **Resources > Inventories**.
2. On the **Inventories** page, click **Add**.
3. On the **Create New Inventory** page, enter the details and click **Save**.
-4. Add groups and hosts to the inventory.
+4. Add Groups and Hosts to the inventory.
## Create Job Templates
@@ -326,22 +330,10 @@ You can add an Ansible python interpreter to a Template or Inventory.
`ansible_python_interpreter: /usr/bin/python<version>`
```yaml
-ansible_python_interpreter: /usr/bin/python3.8
+ansible_python_interpreter: /usr/bin/python3.9
```
## Documentation references
- [https://www.redhat.com/en/technologies/management/ansible](https://www.redhat.com/en/technologies/management/ansible)
- [https://www.redhat.com/en/blog/what-ansible-automation-hub-and-why-should-you-use-it](https://www.redhat.com/en/blog/what-ansible-automation-hub-and-why-should-you-use-it)
-- [https://becloudready.com/ansible-awx-vs-ansible-tower-the-key-to-automation/](https://becloudready.com/ansible-awx-vs-ansible-tower-the-key-to-automation/)
-
-
-
-
-
-
-
-
-
-
-
-
+- [https://www.ansible.com/blog/unlocking-efficiency-harnessing-the-capabilities-of-ansible-builder-3.0](https://www.ansible.com/blog/unlocking-efficiency-harnessing-the-capabilities-of-ansible-builder-3.0)
diff --git a/ansible_collections/dellemc/openmanage/docs/README.md b/ansible_collections/dellemc/openmanage/docs/README.md
index a96bf0daf..4f39dbc96 100644
--- a/ansible_collections/dellemc/openmanage/docs/README.md
+++ b/ansible_collections/dellemc/openmanage/docs/README.md
@@ -1,5 +1,5 @@
<!--
-Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+Copyright (c) 2023 Dell Inc., or its subsidiaries. All Rights Reserved.
Licensed under the GPL, Version 3.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,13 +12,12 @@ You may obtain a copy of the License at
### iDRAC Modules
- [dellemc_configure_idrac_eventing](modules/dellemc_configure_idrac_eventing.rst)
- [dellemc_configure_idrac_services](modules/dellemc_configure_idrac_services.rst)
-- [dellemc_get_firmware_inventory](modules/dellemc_get_firmware_inventory.rst)
-- [dellemc_get_system_inventory](modules/dellemc_get_system_inventory.rst)
- [dellemc_idrac_lc_attributes](modules/dellemc_idrac_lc_attributes.rst)
- [dellemc_idrac_storage_volume](modules/dellemc_idrac_storage_volume.rst)
- [dellemc_system_lockdown_mode](modules/dellemc_system_lockdown_mode.rst)
- [idrac_attributes](modules/idrac_attributes.rst)
- [idrac_bios](modules/idrac_bios.rst)
+- [idrac_boot](modules/idrac_boot.rst)
- [idrac_certificates](modules/idrac_certificates.rst)
- [idrac_firmware](modules/idrac_firmware.rst)
- [idrac_firmware_info](modules/idrac_firmware_info.rst)
@@ -26,6 +25,7 @@ You may obtain a copy of the License at
- [idrac_lifecycle_controller_job_status_info](modules/idrac_lifecycle_controller_job_status_info.rst)
- [idrac_lifecycle_controller_logs](modules/idrac_lifecycle_controller_logs.rst)
- [idrac_lifecycle_controller_status_info](modules/idrac_lifecycle_controller_status_info.rst)
+- [idrac_network_attributes](modules/idrac_network_attributes.rst)
- [idrac_network](modules/idrac_network.rst)
- [idrac_os_deployment](modules/idrac_os_deployment.rst)
- [idrac_redfish_storage_controller](modules/idrac_redfish_storage_controller.rst)
@@ -35,6 +35,7 @@ You may obtain a copy of the License at
- [idrac_system_info](modules/idrac_system_info.rst)
- [idrac_timezone_ntp](modules/idrac_timezone_ntp.rst)
- [idrac_user](modules/idrac_user.rst)
+- [idrac_user_info](modules/idrac_user_info.rst)
- [idrac_virtual_media](modules/idrac_virtual_media.rst)
- [redfish_event_subscription](modules/redfish_event_subscription.rst)
- [redfish_firmware](modules/redfish_firmware.rst)
@@ -43,6 +44,11 @@ You may obtain a copy of the License at
### OpenManage Enterprise Modules
- [ome_active_directory](modules/ome_active_directory.rst)
+- [ome_alert_policies](modules/ome_alert_policies.rst)
+- [ome_alert_policies_message_id_info](modules/ome_alert_policies_message_id_info.rst)
+- [ome_alert_policies_info](modules/ome_alert_policies_info.rst)
+- [ome_alert_policies_actions_info](modules/ome_alert_policies_actions_info.rst)
+- [ome_alert_policies_category_info](modules/ome_alert_policies_category_info.rst)
- [ome_application_alerts_smtp](modules/ome_application_alerts_smtp.rst)
- [ome_application_alerts_syslog](modules/ome_application_alerts_syslog.rst)
- [ome_application_certificate](modules/ome_application_certificate.rst)
@@ -64,6 +70,7 @@ You may obtain a copy of the License at
- [ome_device_network_services](modules/ome_device_network_services.rst)
- [ome_device_power_settings](modules/ome_device_power_settings.rst)
- [ome_device_quick_deploy](modules/ome_device_quick_deploy.rst)
+- [ome_devices](modules/ome_devices.rst)
- [ome_diagnostics](modules/ome_diagnostics.rst)
- [ome_discovery](modules/ome_discovery.rst)
- [ome_domain_user_groups](modules/ome_domain_user_groups.rst)
@@ -80,14 +87,17 @@ You may obtain a copy of the License at
- [ome_network_vlan_info](modules/ome_network_vlan_info.rst)
- [ome_powerstate](modules/ome_powerstate.rst)
- [ome_profile](modules/ome_profile.rst)
+- [ome_profile_info](modules/ome_profile_info.rst)
- [ome_server_interface_profile_info](modules/ome_server_interface_profile_info.rst)
- [ome_server_interface_profiles](modules/ome_server_interface_profiles.rst)
+- [ome_smart_fabric_info](modules/ome_smart_fabric_info.rst)
- [ome_smart_fabric](modules/ome_smart_fabric.rst)
+- [ome_smart_fabric_uplink_info](modules/ome_smart_fabric_uplink_info.rst)
- [ome_smart_fabric_uplink](modules/ome_smart_fabric_uplink.rst)
- [ome_template](modules/ome_template.rst)
- [ome_template_identity_pool](modules/ome_template_identity_pool.rst)
- [ome_template_info](modules/ome_template_info.rst)
- [ome_template_network_vlan](modules/ome_template_network_vlan.rst)
+- [ome_template_network_vlan_info](modules/ome_template_network_vlan_info.rst)
- [ome_user](modules/ome_user.rst)
- [ome_user_info](modules/ome_user_info.rst)
-
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst
index d0e59981b..e9a4ee437 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_eventing.rst
@@ -20,8 +20,8 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- omsdk >= 1.2.488
-- python >= 3.8.6
+- omsdk >= 1.2.503
+- python >= 3.9.6
@@ -93,7 +93,7 @@ Parameters
smtp_ip_address (optional, str, None)
- SMTP IP address for communication.
+    Enter the IPv4 or IPv6 address, FQDN, or DNS name of the SMTP server.
smtp_port (optional, str, None)
@@ -125,11 +125,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -148,7 +148,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -163,9 +164,9 @@ Examples
---
- name: Configure the iDRAC eventing attributes
dellemc.openmanage.dellemc_configure_idrac_eventing:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
destination_number: "2"
destination: "1.1.1.1"
@@ -191,11 +192,11 @@ msg (always, str, Successfully configured the iDRAC eventing settings.)
Successfully configured the iDRAC eventing settings.
-eventing_status (success, dict, AnsibleMapping([('CompletionTime', '2020-04-02T02:43:28'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_12345123456'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+eventing_status (success, dict, {'CompletionTime': '2020-04-02T02:43:28', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_12345123456', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Configures the iDRAC eventing attributes.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst
index 02e803fec..73eac109f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_configure_idrac_services.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -45,11 +45,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -145,7 +145,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -160,24 +161,24 @@ Examples
---
- name: Configure the iDRAC services attributes
dellemc.openmanage.dellemc_configure_idrac_services:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- enable_web_server: "Enabled"
- http_port: 80
- https_port: 443
- ssl_encryption: "Auto_Negotiate"
- tls_protocol: "TLS_1_2_Only"
- timeout: "1800"
- snmp_enable: "Enabled"
- snmp_protocol: "SNMPv3"
- community_name: "public"
- alert_port: 162
- discovery_port: 161
- trap_format: "SNMPv3"
- ipmi_lan:
- community_name: "public"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_web_server: "Enabled"
+ http_port: 80
+ https_port: 443
+ ssl_encryption: "Auto_Negotiate"
+ tls_protocol: "TLS_1_2_Only"
+ timeout: "1800"
+ snmp_enable: "Enabled"
+ snmp_protocol: "SNMPv3"
+ community_name: "public"
+ alert_port: 162
+ discovery_port: 161
+ trap_format: "SNMPv3"
+ ipmi_lan:
+ community_name: "public"
@@ -188,11 +189,11 @@ msg (always, str, Successfully configured the iDRAC services settings.)
Overall status of iDRAC service attributes configuration.
-service_status (success, dict, AnsibleMapping([('CompletionTime', '2020-04-02T02:43:28'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_12345123456'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+service_status (success, dict, {'CompletionTime': '2020-04-02T02:43:28', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_12345123456', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Details of iDRAC services attributes configuration.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst
deleted file mode 100644
index 14b844316..000000000
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_firmware_inventory.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-.. _dellemc_get_firmware_inventory_module:
-
-
-dellemc_get_firmware_inventory -- Get Firmware Inventory
-========================================================
-
-.. contents::
- :local:
- :depth: 1
-
-
-Synopsis
---------
-
-Get Firmware Inventory.
-
-
-
-Requirements
-------------
-The below requirements are needed on the host that executes this module.
-
-- omsdk >= 1.2.488
-- python >= 3.8.6
-
-
-
-Parameters
-----------
-
- idrac_ip (True, str, None)
- iDRAC IP Address.
-
-
- idrac_user (True, str, None)
- iDRAC username.
-
-
- idrac_password (True, str, None)
- iDRAC user password.
-
-
- idrac_port (optional, int, 443)
- iDRAC port.
-
-
- validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
-
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
-
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
-
-
- ca_path (optional, path, None)
- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
-
-
- timeout (optional, int, 30)
- The socket level timeout in seconds.
-
-
-
-
-
-Notes
------
-
-.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
- - This module supports ``check_mode``.
-
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
- ---
- - name: Get Installed Firmware Inventory
- dellemc.openmanage.dellemc_get_firmware_inventory:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
-
-
-
-
-
-Status
-------
-
-
-- This module will be removed in version
- .
- *[deprecated]*
-
-
-Authors
-~~~~~~~
-
-- Rajeev Arakkal (@rajeevarakkal)
-
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst
deleted file mode 100644
index 3babb0325..000000000
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_get_system_inventory.rst
+++ /dev/null
@@ -1,107 +0,0 @@
-.. _dellemc_get_system_inventory_module:
-
-
-dellemc_get_system_inventory -- Get the PowerEdge Server System Inventory
-=========================================================================
-
-.. contents::
- :local:
- :depth: 1
-
-
-Synopsis
---------
-
-Get the PowerEdge Server System Inventory.
-
-
-
-Requirements
-------------
-The below requirements are needed on the host that executes this module.
-
-- omsdk >= 1.2.488
-- python >= 3.8.6
-
-
-
-Parameters
-----------
-
- idrac_ip (True, str, None)
- iDRAC IP Address.
-
-
- idrac_user (True, str, None)
- iDRAC username.
-
-
- idrac_password (True, str, None)
- iDRAC user password.
-
-
- idrac_port (optional, int, 443)
- iDRAC port.
-
-
- validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
-
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
-
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
-
-
- ca_path (optional, path, None)
- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
-
-
- timeout (optional, int, 30)
- The socket level timeout in seconds.
-
-
-
-
-
-Notes
------
-
-.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
- - This module supports ``check_mode``.
-
-
-
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
- ---
- - name: Get System Inventory
- dellemc.openmanage.dellemc_get_system_inventory:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
-
-
-
-
-
-Status
-------
-
-
-- This module will be removed in version
- .
- *[deprecated]*
-
-
-Authors
-~~~~~~~
-
-- Rajeev Arakkal (@rajeevarakkal)
-
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst
index 0459b5a45..774442469 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_lc_attributes.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -73,11 +73,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -96,7 +96,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -111,11 +112,11 @@ Examples
---
- name: Set up iDRAC LC Attributes
dellemc.openmanage.dellemc_idrac_lc_attributes:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- csior: "Enabled"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ csior: "Enabled"
@@ -126,11 +127,11 @@ msg (always, str, Successfully configured the iDRAC LC attributes.)
Overall status of iDRAC LC attributes configuration.
-lc_attribute_status (success, dict, AnsibleMapping([('CompletionTime', '2020-03-30T00:06:53'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_1234512345'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+lc_attribute_status (success, dict, {'CompletionTime': '2020-03-30T00:06:53', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_1234512345', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Collect System Inventory on Restart (CSIOR) property for all iDRAC/LC jobs is configured.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst
index d15aee678..f7e425dbc 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_idrac_storage_volume.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -129,11 +129,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -151,7 +151,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -204,7 +205,7 @@ Examples
span_length: 3
span_depth: 1
drives:
- location: [7,3,5]
+ location: [7, 3, 5]
disk_cache_policy: "Disabled"
write_cache_policy: "WriteBack"
read_cache_policy: "NoReadAhead"
@@ -260,7 +261,7 @@ msg (always, str, Successfully completed the view storage volume operation)
Overall status of the storage configuration operation.
-storage_status (success, dict, AnsibleMapping([('Id', 'JID_XXXXXXXXX'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageId', 'XXX123'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+storage_status (success, dict, {'Id': 'JID_XXXXXXXXX', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageId': 'XXX123', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Storage configuration job and progress details from the iDRAC.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst
index 66d9c7b80..1fca74e82 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/dellemc_system_lockdown_mode.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -73,11 +73,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -96,7 +96,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module does not support ``check_mode``.
@@ -111,11 +112,11 @@ Examples
---
- name: Check System Lockdown Mode
dellemc.openmanage.dellemc_system_lockdown_mode:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- lockdown_mode: "Disabled"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ lockdown_mode: "Disabled"
@@ -126,11 +127,11 @@ msg (always, str, Successfully completed the lockdown mode operations.)
Lockdown mode of the system is configured.
-system_lockdown_status (success, dict, AnsibleMapping([('Data', AnsibleMapping([('StatusCode', 200), ('body', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Successfully Completed Request'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'Base.1.0.Success'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'None'), ('Severity', 'OK')])])]))])), ('Message', 'none'), ('Status', 'Success'), ('StatusCode', 200), ('retval', True)]))
+system_lockdown_status (success, dict, {'Data': {'StatusCode': 200, 'body': {'@Message.ExtendedInfo': [{'Message': 'Successfully Completed Request', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'Base.1.0.Success', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'None', 'Severity': 'OK'}]}}, 'Message': 'none', 'Status': 'Success', 'StatusCode': 200, 'retval': True})
Storage configuration job and progress details from the iDRAC.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst
index b0512bd13..13ffa9fa3 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_attributes.rst
@@ -30,19 +30,19 @@ Parameters
idrac_attributes (optional, dict, None)
Dictionary of iDRAC attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above, see, https://*idrac_ip*/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1 and https://*idrac_ip*/redfish/v1/Registries/ManagerAttributeRegistry.
- For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity').
+ For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity').
system_attributes (optional, dict, None)
Dictionary of System attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above, see, https://*idrac_ip*/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1 and https://*idrac_ip*/redfish/v1/Registries/ManagerAttributeRegistry.
- For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'ThermalSettings.1.ThermalProfile').
+ For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'ThermalSettings.1.ThermalProfile').
lifecycle_controller_attributes (optional, dict, None)
Dictionary of Lifecycle Controller attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry.To view the list of attributes in Attribute Registry for iDRAC9 and above, see, https://*idrac_ip*/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1 and https://*idrac_ip*/redfish/v1/Registries/ManagerAttributeRegistry.
- For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'LCAttributes.1.AutoUpdate').
+ For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile. If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'LCAttributes.1.AutoUpdate').
resource_id (optional, str, None)
@@ -66,11 +66,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -90,7 +90,7 @@ Notes
.. note::
- Run this module from a system that has direct access to Dell iDRAC.
- This module supports ``check_mode``.
- - For iDRAC7 and iDRAC8 based servers, the value provided for the attributes are not be validated. Ensure appropriate values are passed.
+   - For iDRAC8 based servers, the values provided for the attributes are not validated. Ensure appropriate values are passed.
@@ -215,7 +215,7 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
idrac_attributes:
- Time.1.TimeZone: CST6CDT
+ Time.1.Timezone: CST6CDT
NTPConfigGroup.1.NTPEnable: Enabled
NTPConfigGroup.1.NTP1: 192.168.0.5
NTPConfigGroup.1.NTP2: 192.168.0.6
@@ -245,11 +245,11 @@ msg (always, str, Successfully updated the attributes.)
Status of the attribute update operation.
-invalid_attributes (on invalid attributes or values., dict, AnsibleMapping([('LCAttributes.1.AutoUpdate', 'Invalid value for Enumeration.'), ('LCAttributes.1.StorageHealthRollupStatus', 'Read only Attribute cannot be modified.'), ('SNMP.1.AlertPort', 'Not a valid integer.'), ('SNMP.1.AlertPorty', 'Attribute does not exist.'), ('SysLog.1.PowerLogInterval', 'Integer out of valid range.'), ('ThermalSettings.1.AirExhaustTemp', 'Invalid value for Enumeration.')]))
+invalid_attributes (on invalid attributes or values., dict, {'LCAttributes.1.AutoUpdate': 'Invalid value for Enumeration.', 'LCAttributes.1.StorageHealthRollupStatus': 'Read only Attribute cannot be modified.', 'SNMP.1.AlertPort': 'Not a valid integer.', 'SNMP.1.AlertPorty': 'Attribute does not exist.', 'SysLog.1.PowerLogInterval': 'Integer out of valid range.', 'ThermalSettings.1.AirExhaustTemp': 'Invalid value for Enumeration.'})
Dict of invalid attributes provided.
-error_info (when attribute value is invalid., dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', "The value 'false' for the property LCAttributes.1.BIOSRTDRequested is of a different type than the property can accept."), ('MessageArgs', ['false', 'LCAttributes.1.BIOSRTDRequested']), ('MessageArgs@odata.count', 2), ('MessageId', 'Base.1.12.PropertyValueTypeError'), ('RelatedProperties', ['#/Attributes/LCAttributes.1.BIOSRTDRequested']), ('RelatedProperties@odata.count', 1), ('Resolution', 'Correct the value for the property in the request body and resubmit the request if the operation failed.'), ('Severity', 'Warning')])]), ('code', 'Base.1.12.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))]))
+error_info (when attribute value is invalid., dict, {'error': {'@Message.ExtendedInfo': [{'Message': "The value 'false' for the property LCAttributes.1.BIOSRTDRequested is of a different type than the property can accept.", 'MessageArgs': ['false', 'LCAttributes.1.BIOSRTDRequested'], 'MessageArgs@odata.count': 2, 'MessageId': 'Base.1.12.PropertyValueTypeError', 'RelatedProperties': ['#/Attributes/LCAttributes.1.BIOSRTDRequested'], 'RelatedProperties@odata.count': 1, 'Resolution': 'Correct the value for the property in the request body and resubmit the request if the operation failed.', 'Severity': 'Warning'}], 'code': 'Base.1.12.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information'}})
Error information of the operation.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst
index 72f6bc733..569b36553 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_bios.rst
@@ -23,7 +23,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.490
-- python >= 3.8.6
+- python >= 3.9.6
@@ -92,7 +92,7 @@ Parameters
*job_wait* is not applicable. The module waits till the completion of this task.
- This feature is deprecated, please use :ref:`idrac_boot <idrac_boot_module>` for configuring boot sources.
+ This feature is deprecated. Use :ref:`dellemc.openmanage.idrac_boot <dellemc.openmanage.idrac_boot_module>` for configuring boot sources.
clear_pending (optional, bool, None)
@@ -128,7 +128,7 @@ Parameters
``graceful_restart`` Gracefully reboot the host system.
- This is applicable for *attributes*, and *reset_bios*.
+ This is applicable for *reset_bios*, and *attributes* when *apply_time* is ``Immediate``.
job_wait (optional, bool, True)
@@ -140,7 +140,7 @@ Parameters
job_wait_timeout (optional, int, 1200)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
idrac_ip (True, str, None)
@@ -160,11 +160,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -185,6 +185,7 @@ Notes
- omsdk is required to be installed only for *boot_sources* operation.
- This module requires 'Administrator' privilege for *idrac_user*.
- Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip*.
- This module supports ``check_mode``.
@@ -199,20 +200,20 @@ Examples
---
- name: Configure generic attributes of the BIOS
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
attributes:
- BootMode : "Bios"
+ BootMode: "Bios"
OneTimeBootMode: "Enabled"
BootSeqRetry: "Enabled"
- name: Configure PXE generic attributes
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
attributes:
PxeDev1EnDis: "Enabled"
@@ -224,82 +225,82 @@ Examples
- name: Configure BIOS attributes at Maintenance window
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
apply_time: AtMaintenanceWindowStart
maintenance_window:
start_time: "2022-09-30T05:15:40-05:00"
duration: 600
attributes:
- BootMode : "Bios"
+ BootMode: "Bios"
OneTimeBootMode: "Enabled"
BootSeqRetry: "Enabled"
- name: Clear pending BIOS attributes
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- clear_pending: yes
+ clear_pending: true
- name: Reset BIOS attributes to default settings.
dellemc.openmanage.idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_pwd }}"
- validate_certs: False
- reset_bios: yes
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ validate_certs: false
+ reset_bios: true
- name: Configure boot sources
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-2-3"
- Enabled : true
- Index : 0
+ - Name: "NIC.Integrated.1-2-3"
+ Enabled: true
+ Index: 0
- name: Configure multiple boot sources
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Enabled : true
- Index : 0
- - Name : "NIC.Integrated.2-2-2"
- Enabled : true
- Index : 1
- - Name : "NIC.Integrated.3-3-3"
- Enabled : true
- Index : 2
+ - Name: "NIC.Integrated.1-1-1"
+ Enabled: true
+ Index: 0
+ - Name: "NIC.Integrated.2-2-2"
+ Enabled: true
+ Index: 1
+ - Name: "NIC.Integrated.3-3-3"
+ Enabled: true
+ Index: 2
- name: Configure boot sources - Enabling
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Enabled : true
+ - Name: "NIC.Integrated.1-1-1"
+ Enabled: true
- name: Configure boot sources - Index
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Index : 0
+ - Name: "NIC.Integrated.1-1-1"
+ Index: 0
@@ -314,7 +315,7 @@ msg (success, dict, {'CompletionTime': '2020-04-20T18:50:20', 'Description': 'Jo
Status of the job for *boot_sources* or status of the action performed on bios.
-invalid_attributes (on invalid attributes or values., dict, {'NumLock': 'Invalid value for Enumeration.', 'SystemModelName': 'Read only Attribute cannot be modified.', 'AlertPort': 'Not a valid integer.', 'AssetTag': 'Attribute does not exist.', 'PowerLogInterval': 'Integer out of valid range.', 'AirExhaustTemp': 'Invalid value for Enumeration.'})
+invalid_attributes (on invalid attributes or values., dict, {'PxeDev1VlanId': 'Not a valid integer.', 'AcPwrRcvryUserDelay': 'Integer out of valid range.', 'BootSeqRetry': 'Invalid value for Enumeration.', 'Proc1Brand': 'Read only Attribute cannot be modified.', 'AssetTag': 'Attribute does not exist.'})
Dict of invalid attributes provided.
@@ -338,4 +339,5 @@ Authors
- Felix Stephen (@felixs88)
- Anooja Vardhineni (@anooja-vardhineni)
- Jagadeesh N V (@jagadeeshnv)
+- Shivam Sharma (@shivam-sharma)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst
index 3825c0a62..980423ee2 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_boot.rst
@@ -75,7 +75,7 @@ Parameters
``continuous`` The system boots to the target specified in the *boot_source_override_target* until this property is set to Disabled.
- The state is set to ``once`` for the one-time boot override and ``continuous`` for the remain-active-until—canceled override. If the state is set ``once``, the value is reset to ``disabled`` after the *boot_source_override_target* actions have completed successfully.
+ The state is set to ``once`` for the one-time boot override and ``continuous`` for the remain-active-until-canceled override. If the state is set to ``once`` or ``continuous``, the value is reset to ``disabled`` after the *boot_source_override_target* actions have completed successfully.
Changes to this option do not alter the BIOS persistent boot order configuration.
@@ -117,9 +117,9 @@ Parameters
reset_type (optional, str, graceful_restart)
``none`` Host system is not rebooted and *job_wait* is not applicable.
- ``force_reset`` Forcefully reboot the Host system.
+ ``force_restart`` Forcefully reboot the Host system.
- ``graceful_reset`` Gracefully reboot the Host system.
+ ``graceful_restart`` Gracefully reboot the Host system.
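As a sketch of how the corrected choice names combine with a one-time override, the following task requests a PXE boot with a forced restart; the ``boot_source_override_enabled`` and ``boot_source_override_target`` option names are assumed from this module's option list, and the remaining values are illustrative.

.. code-block:: yaml+jinja

    - name: One-time PXE boot with a forced restart
      dellemc.openmanage.idrac_boot:
        idrac_ip: "192.168.0.1"
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"
        boot_source_override_enabled: once   # assumed option name for the once/continuous state
        boot_source_override_target: pxe     # assumed option name for the boot target
        reset_type: force_restart
        job_wait: true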
job_wait (optional, bool, True)
@@ -131,7 +131,7 @@ Parameters
job_wait_timeout (optional, int, 900)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
resource_id (optional, str, None)
@@ -155,11 +155,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst
index fb7f4ead5..747f15381 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_certificates.rst
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python \>= 3.8.6
@@ -28,37 +28,47 @@ Parameters
----------
command (optional, str, generate_csr)
- ``generate_csr``, generate CSR. This requires *cert_params* and *certificate_path*. This is applicable only for ``HTTPS``
+ \ :literal:`generate\_csr`\ , generate CSR. This requires \ :emphasis:`cert\_params`\ and \ :emphasis:`certificate\_path`\ . This is applicable only for \ :literal:`HTTPS`\
- ``import``, import the certificate file. This requires *certificate_path*.
+ \ :literal:`import`\ , import the certificate file. This requires \ :emphasis:`certificate\_path`\ .
- ``export``, export the certificate. This requires *certificate_path*.
+ \ :literal:`export`\ , export the certificate. This requires \ :emphasis:`certificate\_path`\ .
- ``reset``, reset the certificate to default settings. This is applicable only for ``HTTPS``.
+ \ :literal:`reset`\ , reset the certificate to default settings. This is applicable only for \ :literal:`HTTPS`\ .
certificate_type (optional, str, HTTPS)
Type of the iDRAC certificate.
- ``HTTPS`` The Dell self-signed SSL certificate.
+ \ :literal:`HTTPS`\ The Dell self-signed SSL certificate.
- ``CA`` Certificate Authority(CA) signed SSL certificate.
+ \ :literal:`CA`\ Certificate Authority(CA) signed SSL certificate.
- ``CSC`` The custom signed SSL certificate.
+ \ :literal:`CUSTOMCERTIFICATE`\ The custom PKCS12 certificate and private key. Export of custom certificate is supported only on iDRAC firmware version 7.00.00.00 and above.
- ``CLIENT_TRUST_CERTIFICATE`` Client trust certificate.
+ \ :literal:`CSC`\ The custom signing SSL certificate.
+
+ \ :literal:`CLIENT\_TRUST\_CERTIFICATE`\ Client trust certificate.
certificate_path (optional, path, None)
- Absolute path of the certificate file if *command* is ``import``.
+ Absolute path of the certificate file if \ :emphasis:`command`\ is \ :literal:`import`\ .
- Directory path with write permissions if *command* is ``generate_csr`` or ``export``.
+ Directory path with write permissions if \ :emphasis:`command`\ is \ :literal:`generate\_csr`\ or \ :literal:`export`\ .
passphrase (optional, str, None)
The passphrase string if the certificate to be imported is passphrase protected.
+ ssl_key (optional, path, None)
+ Absolute path of the private or SSL key file.
+
+ This is applicable only when \ :emphasis:`command`\ is \ :literal:`import`\ and \ :emphasis:`certificate\_type`\ is \ :literal:`HTTPS`\ .
+
+ Uploading the SSL key to iDRAC is supported on firmware version 6.00.02.00 and above.
+
+
cert_params (optional, dict, None)
Certificate parameters to generate signing request.
@@ -83,7 +93,7 @@ Parameters
The country code of the country where the entity applying for certification is located.
- email_address (True, str, None)
+ email_address (optional, str, None)
The email associated with the CSR.
@@ -103,13 +113,13 @@ Parameters
reset (optional, bool, True)
To reset the iDRAC after the certificate operation.
- This is applicable when *command* is ``import`` or ``reset``.
+ This is applicable when \ :emphasis:`command`\ is \ :literal:`import`\ or \ :literal:`reset`\ .
wait (optional, int, 300)
Maximum wait time for iDRAC to start after the reset, in seconds.
- This is applicable when *command* is ``import`` or ``reset`` and *reset* is ``True``.
+ This is applicable when \ :emphasis:`command`\ is \ :literal:`import`\ or \ :literal:`reset`\ and \ :emphasis:`reset`\ is \ :literal:`true`\ .
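A sketch of a reset operation that waits for iDRAC to come back up, using only the options described above, could look like this:

.. code-block:: yaml+jinja

    - name: Reset the HTTPS certificate to default settings
      dellemc.openmanage.idrac_certificates:
        idrac_ip: "192.168.0.1"
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"
        command: "reset"
        certificate_type: "HTTPS"
        reset: true   # restart iDRAC after the certificate operation
        wait: 300     # maximum seconds to wait for iDRAC to start after the reset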
idrac_ip (True, str, None)
@@ -129,11 +139,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If \ :literal:`false`\ , the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure \ :literal:`false`\ only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version \ :literal:`5.0.0`\ , the \ :emphasis:`validate\_certs`\ is \ :literal:`false`\ by default.
ca_path (optional, path, None)
@@ -151,9 +161,10 @@ Notes
-----
.. note::
- - The certificate operations are supported on iDRAC firmware 5.10.10.00 and above.
+ - The certificate operations are supported on iDRAC firmware version 6.10.80.00 and above.
- Run this module from a system that has direct access to Dell iDRAC.
- - This module supports ``check_mode``.
+ - This module supports \ :literal:`check\_mode`\ .
+ - This module supports IPv4 and IPv6 addresses.
@@ -195,6 +206,17 @@ Examples
certificate_type: "HTTPS"
certificate_path: "/path/to/cert.pem"
+ - name: Import an HTTPS certificate along with its private key.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+ ssl_key: "/path/to/private_key.pem"
+
- name: Export a HTTPS certificate.
dellemc.openmanage.idrac_certificates:
idrac_ip: "192.168.0.1"
@@ -215,6 +237,17 @@ Examples
certificate_type: "CSC"
certificate_path: "/path/to/cert.pem"
+ - name: Import a custom certificate with a passphrase.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "/path/to/idrac_cert.p12"
+ passphrase: "cert_passphrase"
+ reset: false
+
- name: Export a Client trust certificate.
dellemc.openmanage.idrac_certificates:
idrac_ip: "192.168.0.1"
@@ -230,7 +263,7 @@ Examples
Return Values
-------------
-msg (always, str, Successfully performed the operation generate_csr.)
+msg (always, str, Successfully performed the 'generate_csr' certificate operation.)
Status of the certificate configuration operation.
@@ -238,7 +271,7 @@ certificate_path (when I(command) is C(export) or C(generate_csr), str, /home/an
The csr or exported certificate file path
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -256,4 +289,6 @@ Authors
~~~~~~~
- Jagadeesh N V(@jagadeeshnv)
+- Rajshekar P(@rajshekarp87)
+- Kristian Lamb V(@kristian_lamb)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst
index 99c5f147a..1cbb31095 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware.rst
@@ -26,8 +26,8 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- omsdk >= 1.2.488
-- python >= 3.8.6
+- omsdk >= 1.2.503
+- python >= 3.9.6
@@ -61,25 +61,73 @@ Parameters
ignore_cert_warning (optional, bool, True)
- Specifies if certificate warnings are ignored when HTTPS share is used. If ``True`` option is set, then the certificate warnings are ignored.
+ Specifies if certificate warnings are ignored when an HTTPS share is used. If set to ``true``, the certificate warnings are ignored.
apply_update (optional, bool, True)
- If *apply_update* is set to ``True``, then the packages are applied.
+ If *apply_update* is set to ``true``, then the packages are applied.
- If *apply_update* is set to ``False``, no updates are applied, and a catalog report of packages is generated and returned.
+ If *apply_update* is set to ``false``, no updates are applied, and a catalog report of packages is generated and returned.
reboot (optional, bool, False)
Provides the option to apply the update packages immediately or in the next reboot.
- If *reboot* is set to ``True``, then the packages are applied immediately.
+ If *reboot* is set to ``true``, then the packages are applied immediately.
- If *reboot* is set to ``False``, then the packages are staged and applied in the next reboot.
+ If *reboot* is set to ``false``, then the packages are staged and applied in the next reboot.
Packages that do not require a reboot are applied immediately irrespective of *reboot*.
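For instance, a report-only run combines these two options so that nothing is staged or applied and only the catalog report is returned. This sketch reuses the HTTPS repository options shown in the examples below.

.. code-block:: yaml+jinja

    - name: Generate a catalog report without applying updates
      dellemc.openmanage.idrac_firmware:
        idrac_ip: "192.168.0.1"
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"
        share_name: "https://downloads.dell.com"
        catalog_file_name: "Catalog.xml"
        apply_update: false   # only report the applicable packages
        reboot: false
        job_wait: true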
+ proxy_support (optional, str, Off)
+ Specifies if a proxy should be used.
+
+ Proxy parameters are applicable on ``HTTP``, ``HTTPS``, and ``FTP`` share type of repositories.
+
+ ``ParametersProxy``, sets the proxy parameters for the current firmware operation.
+
+ ``DefaultProxy``, iDRAC uses the proxy values set by default.
+
+ Default Proxy can be set in the Lifecycle Controller attributes using :ref:`dellemc.openmanage.idrac_attributes <dellemc.openmanage.idrac_attributes_module>`.
+
+ ``Off``, will not use the proxy.
+
+ For iDRAC8 based servers, use a proxy server with basic authentication.
+
+ For iDRAC9 based servers, ensure that you use digest authentication for the proxy server; basic authentication is not supported.
+
+
+ proxy_server (optional, str, None)
+ The IP address of the proxy server.
+
+ This IP will not be validated. The download job will be created even for invalid *proxy_server*. Please check the results of the job for error details.
+
+ This is required when *proxy_support* is ``ParametersProxy``.
+
+
+ proxy_port (optional, int, None)
+ The Port for the proxy server.
+
+ This is required when *proxy_support* is ``ParametersProxy``.
+
+
+ proxy_type (optional, str, None)
+ The proxy type of the proxy server.
+
+ This is required when *proxy_support* is ``ParametersProxy``.
+
+ Note: SOCKS4 proxy does not support IPv6 address.
+
+
+ proxy_uname (optional, str, None)
+ The user name for the proxy server.
+
+
+ proxy_passwd (optional, str, None)
+ The password for the proxy server.
+
+
idrac_ip (True, str, None)
iDRAC IP Address.
@@ -97,11 +145,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -119,9 +167,10 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- Module will report success based on the iDRAC firmware update parent job status if there are no individual component jobs present.
- For servers with iDRAC firmware 5.00.00.00 and later, if the repository contains unsupported packages, then the module will return success with a proper message.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip* and *share_name*.
- This module supports ``check_mode``.
@@ -141,9 +190,9 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.0:/share"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
catalog_file_name: "Catalog.xml"
- name: Update firmware from repository on a CIFS Share
@@ -155,9 +204,9 @@ Examples
share_name: "full_cifs_path"
share_user: "share_user"
share_password: "share_password"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
catalog_file_name: "Catalog.xml"
- name: Update firmware from repository on a HTTP
@@ -167,9 +216,9 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "http://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
- name: Update firmware from repository on a HTTPS
dellemc.openmanage.idrac_firmware:
@@ -178,9 +227,26 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "https://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
+
+ - name: Update firmware from repository on a HTTPS via proxy
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: ParametersProxy
+ proxy_server: 192.168.1.10
+ proxy_type: HTTP
+ proxy_port: 80
+ proxy_uname: "proxy_user"
+ proxy_passwd: "proxy_pwd"
- name: Update firmware from repository on a FTP
dellemc.openmanage.idrac_firmware:
@@ -188,10 +254,10 @@ Examples
idrac_user: "user_name"
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "ftp://ftp.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ share_name: "ftp://ftp.mydomain.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
@@ -202,7 +268,7 @@ msg (always, str, Successfully updated the firmware.)
Overall firmware update status.
-update_status (success, dict, AnsibleMapping([('InstanceID', 'JID_XXXXXXXXXXXX'), ('JobState', 'Completed'), ('Message', 'Job completed successfully.'), ('MessageId', 'REDXXX'), ('Name', 'Repository Update'), ('JobStartTime', 'NA'), ('Status', 'Success')]))
+update_status (success, dict, {'InstanceID': 'JID_XXXXXXXXXXXX', 'JobState': 'Completed', 'Message': 'Job completed successfully.', 'MessageId': 'REDXXX', 'Name': 'Repository Update', 'JobStartTime': 'NA', 'Status': 'Success'})
Firmware Update job and progress details from the iDRAC.
@@ -221,4 +287,5 @@ Authors
- Rajeev Arakkal (@rajeevarakkal)
- Felix Stephen (@felixs88)
+- Jagadeesh N V (@jagadeeshnv)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst
index b6eda6ae4..04c46a32e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_firmware_info.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -45,11 +45,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -67,7 +67,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip*.
- This module supports ``check_mode``.
@@ -82,10 +83,10 @@ Examples
---
- name: Get Installed Firmware Inventory
dellemc.openmanage.idrac_firmware_info:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
@@ -96,11 +97,11 @@ msg (always, str, Successfully fetched the firmware inventory details.)
Fetching the firmware inventory details.
-firmware_info (success, dict, AnsibleMapping([('Firmware', [AnsibleMapping([('BuildNumber', '0'), ('Classifications', '10'), ('ComponentID', '102573'), ('ComponentType', 'FRMW'), ('DeviceID', None), ('ElementName', 'Power Supply.Slot.1'), ('FQDD', 'PSU.Slot.1'), ('HashValue', None), ('IdentityInfoType', 'OrgID:ComponentType:ComponentID'), ('IdentityInfoValue', 'DCIM:firmware:102573'), ('InstallationDate', '2018-11-22T03:58:23Z'), ('InstanceID', 'DCIM:INSTALLED#0x15__PSU.Slot.1'), ('IsEntity', 'true'), ('Key', 'DCIM:INSTALLED#0x15__PSU.Slot.1'), ('MajorVersion', '0'), ('MinorVersion', '3'), ('RevisionNumber', '67'), ('RevisionString', None), ('Status', 'Installed'), ('SubDeviceID', None), ('SubVendorID', None), ('Updateable', 'true'), ('VendorID', None), ('VersionString', '00.3D.67'), ('impactsTPMmeasurements', 'false')])])]))
+firmware_info (success, dict, {'Firmware': [{'BuildNumber': '0', 'Classifications': '10', 'ComponentID': '102573', 'ComponentType': 'FRMW', 'DeviceID': None, 'ElementName': 'Power Supply.Slot.1', 'FQDD': 'PSU.Slot.1', 'HashValue': None, 'IdentityInfoType': 'OrgID:ComponentType:ComponentID', 'IdentityInfoValue': 'DCIM:firmware:102573', 'InstallationDate': '2018-11-22T03:58:23Z', 'InstanceID': 'DCIM:INSTALLED#0x15__PSU.Slot.1', 'IsEntity': 'true', 'Key': 'DCIM:INSTALLED#0x15__PSU.Slot.1', 'MajorVersion': '0', 'MinorVersion': '3', 'RevisionNumber': '67', 'RevisionString': None, 'Status': 'Installed', 'SubDeviceID': None, 'SubVendorID': None, 'Updateable': 'true', 'VendorID': None, 'VersionString': '00.3D.67', 'impactsTPMmeasurements': 'false'}]})
Details of the firmware.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_license.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_license.rst
new file mode 100644
index 000000000..d4441a3ab
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_license.rst
@@ -0,0 +1,432 @@
+.. _idrac_license_module:
+
+
+idrac_license -- Configure iDRAC licenses
+=========================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module allows you to import, export, and delete licenses on iDRAC.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python \>= 3.9.6
+
+
+
+Parameters
+----------
+
+ license_id (optional, str, None)
+ Entitlement ID of the license that is to be imported, exported or deleted.
+
+ \ :emphasis:`license\_id`\ is required when \ :emphasis:`delete`\ is \ :literal:`true`\ or \ :emphasis:`export`\ is \ :literal:`true`\ .
+
+
+ delete (optional, bool, False)
+ Delete the license from the iDRAC.
+
+ When \ :emphasis:`delete`\ is \ :literal:`true`\ , then \ :emphasis:`license\_id`\ is required.
+
+ \ :emphasis:`delete`\ is mutually exclusive with \ :emphasis:`export`\ and \ :emphasis:`import`\ .
+
+
+ export (optional, bool, False)
+ Export the license from the iDRAC.
+
+ When \ :emphasis:`export`\ is \ :literal:`true`\ , \ :emphasis:`license\_id`\ and \ :emphasis:`share\_parameters`\ are required.
+
+ \ :emphasis:`export`\ is mutually exclusive with \ :emphasis:`delete`\ and \ :emphasis:`import`\ .
+
+
+ import (optional, bool, False)
+ Import the license from the iDRAC.
+
+ When \ :emphasis:`import`\ is \ :literal:`true`\ , \ :emphasis:`share\_parameters`\ is required.
+
+ \ :emphasis:`import`\ is mutually exclusive with \ :emphasis:`delete`\ and \ :emphasis:`export`\ .
+
+
+ share_parameters (optional, dict, None)
+ Parameters that are required for the import and export operation of a license.
+
+ \ :emphasis:`share\_parameters`\ is required when \ :emphasis:`export`\ or \ :emphasis:`import`\ is \ :literal:`true`\ .
+
+
+ share_type (optional, str, local)
+ Share type of the network share.
+
+ \ :literal:`local`\ uses local path for \ :emphasis:`import`\ and \ :emphasis:`export`\ operation.
+
+ \ :literal:`nfs`\ uses NFS share for \ :emphasis:`import`\ and \ :emphasis:`export`\ operation.
+
+ \ :literal:`cifs`\ uses CIFS share for \ :emphasis:`import`\ and \ :emphasis:`export`\ operation.
+
+ \ :literal:`http`\ uses HTTP share for \ :emphasis:`import`\ and \ :emphasis:`export`\ operation.
+
+ \ :literal:`https`\ uses HTTPS share for \ :emphasis:`import`\ and \ :emphasis:`export`\ operation.
+
+
+ file_name (optional, str, None)
+ License file name for \ :emphasis:`import`\ and \ :emphasis:`export`\ operation.
+
+ \ :emphasis:`file\_name`\ is required when \ :emphasis:`import`\ is \ :literal:`true`\ .
+
+ For the \ :emphasis:`import`\ operation, when \ :emphasis:`share\_type`\ is \ :literal:`local`\ , the supported extensions for \ :emphasis:`file\_name`\ are '.txt' and '.xml'. For other share types, the supported extension is '.xml'
+
+
+ ip_address (optional, str, None)
+ IP address of the network share.
+
+ \ :emphasis:`ip\_address`\ is required when \ :emphasis:`share\_type`\ is \ :literal:`nfs`\ , \ :literal:`cifs`\ , \ :literal:`http`\ or \ :literal:`https`\ .
+
+
+ share_name (optional, str, None)
+ Network share or local path of the license file.
+
+
+ workgroup (optional, str, None)
+ Workgroup of the network share.
+
+ \ :emphasis:`workgroup`\ is applicable only when \ :emphasis:`share\_type`\ is \ :literal:`cifs`\ .
+
+
+ username (optional, str, None)
+ Username of the network share.
+
+ \ :emphasis:`username`\ is required when \ :emphasis:`share\_type`\ is \ :literal:`cifs`\ .
+
+
+ password (optional, str, None)
+ Password of the network share.
+
+ \ :emphasis:`password`\ is required when \ :emphasis:`share\_type`\ is \ :literal:`cifs`\ .
+
+
+ ignore_certificate_warning (optional, str, off)
+ Ignores the certificate warning while connecting to Share and is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`https`\ .
+
+ \ :literal:`off`\ ignores the certificate warning.
+
+ \ :literal:`on`\ does not ignore the certificate warning.
+
+
+ proxy_support (optional, str, off)
+ Specifies if proxy is to be used or not.
+
+ \ :literal:`off`\ does not use proxy settings.
+
+ \ :literal:`default\_proxy`\ uses the default proxy settings.
+
+ \ :literal:`parameters\_proxy`\ uses the specified proxy settings. \ :emphasis:`proxy\_server`\ is required when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+ \ :emphasis:`proxy\_support`\ is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`http`\ or \ :literal:`https`\ .
+
+
+ proxy_type (optional, str, http)
+ The proxy type of the proxy server.
+
+ \ :literal:`http`\ to select HTTP proxy.
+
+ \ :literal:`socks`\ to select SOCKS proxy.
+
+ \ :emphasis:`proxy\_type`\ is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`http`\ or \ :literal:`https`\ and when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+
+ proxy_server (optional, str, None)
+ The IP address of the proxy server.
+
+ \ :emphasis:`proxy\_server`\ is required when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+ \ :emphasis:`proxy\_server`\ is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`http`\ or \ :literal:`https`\ and when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+
+ proxy_port (optional, int, 80)
+ The port of the proxy server.
+
+ \ :emphasis:`proxy\_port`\ is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`http`\ or \ :literal:`https`\ and when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+
+ proxy_username (optional, str, None)
+ The username of the proxy server.
+
+ \ :emphasis:`proxy\_username`\ is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`http`\ or \ :literal:`https`\ and when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+
+ proxy_password (optional, str, None)
+ The password of the proxy server.
+
+ \ :emphasis:`proxy\_password`\ is only applicable when \ :emphasis:`share\_type`\ is \ :literal:`http`\ or \ :literal:`https`\ and when \ :emphasis:`proxy\_support`\ is \ :literal:`parameters\_proxy`\ .
+
+
+
+ resource_id (optional, str, None)
+ Id of the resource.
+
+ If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+
+
+ idrac_ip (True, str, None)
+ iDRAC IP Address.
+
+
+ idrac_user (True, str, None)
+ iDRAC username.
+
+
+ idrac_password (True, str, None)
+ iDRAC user password.
+
+
+ idrac_port (optional, int, 443)
+ iDRAC port.
+
+
+ validate_certs (optional, bool, True)
+ If \ :literal:`false`\ , the SSL certificates will not be validated.
+
+ Configure \ :literal:`false`\ only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version \ :literal:`5.0.0`\ , the \ :emphasis:`validate\_certs`\ is \ :literal:`false`\ by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports only iDRAC9 and above.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module does not support \ :literal:`check\_mode`\ .
+ - When \ :emphasis:`share\_type`\ is \ :literal:`local`\ for \ :emphasis:`import`\ and \ :emphasis:`export`\ operations, job\_details are not displayed.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Export a license from iDRAC to local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "local"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+
+ - name: Export a license from iDRAC to NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "nfs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+
+ - name: Export a license from iDRAC to CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "cifs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ workgroup: "workgroup"
+
+ - name: Export a license from iDRAC to HTTP share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "http"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_type: socks
+ proxy_server: "192.168.0.2"
+ proxy_port: 1080
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+ - name: Export a license from iDRAC to HTTPS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "https"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ignore_certificate_warning: "on"
+
+ - name: Import a license to iDRAC from local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: local
+ share_name: "/path/to/share"
+
+ - name: Import a license to iDRAC from NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: nfs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+
+ - name: Import a license to iDRAC from CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: cifs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+
+ - name: Import a license to iDRAC from HTTP share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: http
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+
+ - name: Import a license to iDRAC from HTTPS share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: https
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_server: "192.168.0.2"
+ proxy_port: 808
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+ - name: Delete a License from iDRAC
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENCE_123"
+ delete: true
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully exported the license.)
+ Status of the license operation.
+
+
+job_details (For import and export operations, dict, {'ActualRunningStartTime': '2024-01-09T05:16:19', 'ActualRunningStopTime': '2024-01-09T05:16:19', 'CompletionTime': '2024-01-09T05:16:19', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_XXXXXXXXX', 'JobState': 'Completed', 'JobType': 'LicenseExport', 'Message': 'The command was successful.', 'MessageArgs': [], 'MessageId': 'LIC900', 'Name': 'Export: License', 'PercentComplete': 100, 'StartTime': '2024-01-09T05:16:19', 'TargetSettingsURI': None})
+ Returns the output for status of the job.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.8.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'Base.1.8.AccessDenied', 'Message': 'The authentication credentials included with this request are missing or invalid.', 'MessageArgs': [], 'RelatedProperties': [], 'Severity': 'Critical', 'Resolution': 'Attempt to ensure that the URI is correct and that the service has the appropriate credentials.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Rajshekar P(@rajshekarp87)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst
index 6b4cfd4ed..8cf0e133b 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_job_status_info.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -49,11 +49,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -71,7 +71,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip*.
- This module supports ``check_mode``.
@@ -86,11 +87,11 @@ Examples
---
- name: Show status of a Lifecycle Control job
dellemc.openmanage.idrac_lifecycle_controller_job_status_info:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "JID_1234567890"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_1234567890"
@@ -101,11 +102,11 @@ msg (always, str, Successfully fetched the job info.)
Overall status of the job facts operation.
-job_info (success, dict, AnsibleMapping([('ElapsedTimeSinceCompletion', '8742'), ('InstanceID', 'JID_844222910040'), ('JobStartTime', 'NA'), ('JobStatus', 'Completed'), ('JobUntilTime', 'NA'), ('Message', 'Job completed successfully.'), ('MessageArguments', 'NA'), ('MessageID', 'RED001'), ('Name', 'update:DCIM:INSTALLED#iDRAC.Embedded.1-1#IDRACinfo'), ('PercentComplete', '100'), ('Status', 'Success')]))
+job_info (success, dict, {'ElapsedTimeSinceCompletion': '8742', 'InstanceID': 'JID_844222910040', 'JobStartTime': 'NA', 'JobStatus': 'Completed', 'JobUntilTime': 'NA', 'Message': 'Job completed successfully.', 'MessageArguments': 'NA', 'MessageID': 'RED001', 'Name': 'update:DCIM:INSTALLED#iDRAC.Embedded.1-1#IDRACinfo', 'PercentComplete': '100', 'Status': 'Success'})
Displays the status of a Lifecycle Controller job.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst
index 79bb43b2a..385894eb6 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_jobs.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -51,11 +51,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -73,7 +73,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip*.
- This module does not support ``check_mode``.
@@ -110,11 +111,11 @@ msg (always, str, Successfully deleted the job.)
Status of the delete operation.
-status (success, dict, AnsibleMapping([('Message', 'The specified job was deleted'), ('MessageID', 'SUP020'), ('ReturnValue', '0')]))
+status (success, dict, {'Message': 'The specified job was deleted', 'MessageID': 'SUP020', 'ReturnValue': '0'})
Details of the delete operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst
index f2d20a24b..1b414e76e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_logs.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -63,11 +63,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -87,7 +87,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- Exporting data to a local share is supported only on iDRAC9-based PowerEdge Servers and later.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip*.
- This module does not support ``check_mode``.
@@ -135,11 +136,11 @@ msg (always, str, Successfully exported the lifecycle controller logs.)
Status of the export lifecycle controller logs job.
-lc_logs_status (success, dict, AnsibleMapping([('ElapsedTimeSinceCompletion', '0'), ('InstanceID', 'JID_274774785395'), ('JobStartTime', 'NA'), ('JobStatus', 'Completed'), ('JobUntilTime', 'NA'), ('Message', 'LCL Export was successful'), ('MessageArguments', 'NA'), ('MessageID', 'LC022'), ('Name', 'LC Export'), ('PercentComplete', '100'), ('Status', 'Success'), ('file', '192.168.0.0:/nfsfileshare/190.168.0.1_20210728_133437_LC_Log.log'), ('retval', True)]))
+lc_logs_status (success, dict, {'ElapsedTimeSinceCompletion': '0', 'InstanceID': 'JID_274774785395', 'JobStartTime': 'NA', 'JobStatus': 'Completed', 'JobUntilTime': 'NA', 'Message': 'LCL Export was successful', 'MessageArguments': 'NA', 'MessageID': 'LC022', 'Name': 'LC Export', 'PercentComplete': '100', 'Status': 'Success', 'file': '192.168.0.0:/nfsfileshare/190.168.0.1_20210728_133437_LC_Log.log', 'retval': True})
Status of the export operation along with job details and file path.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst
index 9757ab8e0..07ffe9446 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_lifecycle_controller_status_info.rst
@@ -12,7 +12,7 @@ idrac_lifecycle_controller_status_info -- Get the status of the Lifecycle Contro
Synopsis
--------
-This module shows the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+This module shows the status of the Lifecycle Controller on a Dell PowerEdge server.
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -45,11 +45,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -67,7 +67,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses for *idrac_ip*.
- This module supports ``check_mode``.
@@ -96,11 +97,11 @@ msg (always, str, Successfully fetched the lifecycle controller status.)
Overall status of fetching lifecycle controller status.
-lc_status_info (success, dict, AnsibleMapping([('msg', AnsibleMapping([('LCReady', True), ('LCStatus', 'Ready')]))]))
- Displays the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+lc_status_info (success, dict, {'msg': {'LCReady': True, 'LCStatus': 'Ready'}})
+ Displays the status of the Lifecycle Controller on a Dell PowerEdge server.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
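A minimal playbook sketch, assuming only the parameters documented above (the address, credentials, and CA path are placeholders), showing how the returned ``lc_status_info`` could be inspected:

.. code-block:: yaml+jinja

    ---
    - name: Fetch Lifecycle Controller status (illustrative sketch)
      dellemc.openmanage.idrac_lifecycle_controller_status_info:
        idrac_ip: "192.168.0.1"
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"
      register: lc_status

    - name: Report whether the Lifecycle Controller is ready
      ansible.builtin.debug:
        msg: "LCStatus is {{ lc_status.lc_status_info.msg.LCStatus }}"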
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst
index d565eae7f..c7bb593ae 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_network.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -161,11 +161,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -184,7 +184,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -199,9 +200,9 @@ Examples
---
- name: Configure iDRAC network settings
dellemc.openmanage.idrac_network:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
register_idrac_on_dns: Enabled
dns_idrac_name: None
@@ -236,11 +237,11 @@ msg (always, str, Successfully configured the idrac network settings.)
Successfully configured the idrac network settings.
-network_status (success, dict, AnsibleMapping([('@odata.context', '/redfish/v1/$metadata#DellJob.DellJob'), ('@odata.id', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_856418531008'), ('@odata.type', '#DellJob.v1_0_2.DellJob'), ('CompletionTime', '2020-03-31T03:04:15'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_856418531008'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+network_status (success, dict, {'@odata.context': '/redfish/v1/$metadata#DellJob.DellJob', '@odata.id': '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_856418531008', '@odata.type': '#DellJob.v1_0_2.DellJob', 'CompletionTime': '2020-03-31T03:04:15', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_856418531008', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Status of the Network settings operation job.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_network_attributes.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_network_attributes.rst
new file mode 100644
index 000000000..fa0e89cae
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_network_attributes.rst
@@ -0,0 +1,340 @@
+.. _idrac_network_attributes_module:
+
+
+idrac_network_attributes -- Configures the iDRAC network attributes
+===================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module allows you to configure the port and partition network attributes on the network interface cards.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ network_adapter_id (True, str, None)
+ FQDD of the network adapter device that represents the physical network adapter capable of connecting to a computer network.
+
+ An example of FQDD of the network adapter is 'NIC.Mezzanine.1A'.
+
+
+ network_device_function_id (True, str, None)
+ FQDD of the network adapter device function that represents a logical interface exposed by the network adapter.
+
+ An example of FQDD of the network adapter device function is 'NIC.Mezzanine.1A-1-1'.
+
+
+ network_attributes (optional, dict, None)
+ Dictionary of network attributes and values. To view the list of attributes and their structure, see the API https://*idrac_ip*/redfish/v1/Systems/System.Embedded.1/NetworkAdapters/<network_id>/NetworkDeviceFunctions/ <network_port_id>/Settings and https://<idrac_ip>/redfish/v1/Schemas/NetworkDeviceFunction.v1_8_0.json.
+
+ *network_attributes* is mutually exclusive with *oem_network_attributes*.
+
+
+ oem_network_attributes (optional, dict, None)
+ The attributes must be part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in the Attribute Registry for iDRAC9 and newer versions, see https://*idrac_ip*/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/<network_id>/NetworkDeviceFunctions/ <network_port_id>/Oem/Dell/DellNetworkAttributes/<network_port_id> and https://*idrac_ip*/redfish/v1/Registries/NetworkAttributesRegistry_<network_port_id>/ NetworkAttributesRegistry_network_port_id.json.
+
+ For iDRAC8 based servers, derive the network attribute name from Server Configuration Profile.
+
+ *oem_network_attributes* is mutually exclusive with *network_attributes*.
+
+
+ resource_id (optional, str, None)
+ Id of the resource.
+
+ If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+
+
+ clear_pending (optional, bool, False)
+ This parameter allows you to clear all the pending OEM network attributes changes.
+
+ ``false`` does not perform any operation.
+
+ ``true`` discards any pending changes to network attributes, or, if a job is in the scheduled state, removes the job.
+
+ The *apply_time* value is ignored and has no impact on the *clear_pending* operation.
+
+ This operation is not supported for iDRAC8.
+
+
+ apply_time (True, str, None)
+ Apply time of the *network_attributes* and *oem_network_attributes*.
+
+ This is applicable only to *network_attributes* and *oem_network_attributes*.
+
+ ``Immediate`` allows the user to immediately reboot the host and apply the changes. This is applicable only for *oem_network_attributes*, and *job_wait* is applicable.
+
+ ``OnReset`` allows the user to apply the changes on the next reboot of the host server.
+
+ ``AtMaintenanceWindowStart`` allows the user to apply at the start of a maintenance window as specified in *maintenance_window*. A reboot job is scheduled.
+
+ ``InMaintenanceWindowOnReset`` allows to apply after a manual reset but within the maintenance window as specified in *maintenance_window*.
+
+ This is not applicable for iDRAC8; the value is ignored and has no impact when configuring *oem_network_attributes*.
+
+
+ maintenance_window (optional, dict, None)
+ This option allows you to schedule the maintenance window.
+
+ This is required when *apply_time* is ``AtMaintenanceWindowStart`` or ``InMaintenanceWindowOnReset``.
+
+
+ start_time (True, str, None)
+ The start time for the maintenance window to be scheduled.
+
+ The format is YYYY-MM-DDThh:mm:ss<offset>.
+
+ <offset> is the time offset from UTC of the current time zone set in iDRAC, in the format: +05:30 for IST.
+
+
+ duration (True, int, None)
+ The duration in seconds for the maintenance window.
+
+
+
+ job_wait (optional, bool, True)
+ Provides the option to wait for job completion.
+
+ This is applicable when *apply_time* is ``Immediate`` for *oem_network_attributes*.
+
+
+ job_wait_timeout (optional, int, 1200)
+ The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
+
+ This option is applicable when *job_wait* is ``true``.
+
+
+ idrac_ip (True, str, None)
+ iDRAC IP Address.
+
+
+ idrac_user (True, str, None)
+ iDRAC username.
+
+
+ idrac_password (True, str, None)
+ iDRAC user password.
+
+
+ idrac_port (optional, int, 443)
+ iDRAC port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Configure OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: "NIC.Integrated.1"
+ network_port_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+
+ - name: Configure OEM network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: OnReset
+
+ - name: Configure OEM network attributes to apply at maintenance window
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+ - name: Clearing the pending attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+
+ - name: Clearing the OEM pending attributes and apply the OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+
+ - name: Configure OEM network attributes and wait for the job
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ LnkSpeed: "10MbpsHalf"
+ WakeOnLan: "Enabled"
+ VLanMode: "Enabled"
+ job_wait: true
+ job_wait_timeout: 2000
+
+ - name: Configure redfish network attributes to update fiber channel on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ apply_time: OnReset
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+
+ - name: Configure redfish network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+ apply_time: OnReset
+
+ - name: Configure redfish network attributes of iSCSI to apply at maintenance window start
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ iSCSIBoot:
+ InitiatorIPAddress: 1.0.0.1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+ - name: Configure redfish network attributes to apply at maintenance window on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_id: NIC.Integrated.1
+ network_port_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: false
+ VLANId: 1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+
+
+Return Values
+-------------
+
+msg (when network attributes is applied, str, Successfully updated the network attributes.)
+ Status of the attribute update operation.
+
+
+invalid_attributes (On invalid attributes or values, dict, {'IscsiInitiatorIpAddr': 'Attribute is not valid.', 'IscsiInitiatorSubnet': 'Attribute is not valid.'})
+ Dictionary of invalid attributes provided that cannot be applied.
+
+
+job_status (always, dict, {'ActualRunningStartTime': None, 'ActualRunningStopTime': None, 'CompletionTime': None, 'Description': 'Job Instance', 'EndTime': 'TIME_NA', 'Id': 'JID_XXXXXXXXX', 'JobState': 'Scheduled', 'JobType': 'NICConfiguration', 'Message': 'Task successfully scheduled.', 'MessageArgs': [], 'MessageId': 'JCP001', 'Name': 'Configure: NIC.Integrated.1-1-1', 'PercentComplete': 0, 'StartTime': '2023-08-07T06:21:24', 'TargetSettingsURI': None})
+ Returns the output for status of the job.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Abhishek Sinha (@ABHISHEK-SINHA10)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst
index b3a18dce8..c5b526a87 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_os_deployment.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -65,11 +65,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -87,7 +87,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module does not support ``check_mode``.
@@ -107,7 +108,7 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.0:/nfsfileshare"
- iso_image: "unattended_os_image.iso"
+ iso_image: "unattended_os_image.iso"
expose_duration: 180
@@ -119,7 +120,7 @@ msg (on error, str, Failed to boot to network iso)
Over all device information status.
-boot_status (always, dict, AnsibleMapping([('DeleteOnCompletion', 'false'), ('InstanceID', 'DCIM_OSDConcreteJob:1'), ('JobName', 'BootToNetworkISO'), ('JobStatus', 'Success'), ('Message', 'The command was successful.'), ('MessageID', 'OSD1'), ('Name', 'BootToNetworkISO'), ('Status', 'Success'), ('file', '192.168.0.0:/nfsfileshare/unattended_os_image.iso'), ('retval', True)]))
+boot_status (always, dict, {'DeleteOnCompletion': 'false', 'InstanceID': 'DCIM_OSDConcreteJob:1', 'JobName': 'BootToNetworkISO', 'JobStatus': 'Success', 'Message': 'The command was successful.', 'MessageID': 'OSD1', 'Name': 'BootToNetworkISO', 'Status': 'Success', 'file': '192.168.0.0:/nfsfileshare/unattended_os_image.iso', 'retval': True})
Details of the boot to network ISO image operation.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst
index 64f8f4ea7..c3592acc6 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_redfish_storage_controller.rst
@@ -20,14 +20,14 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python >= 3.9.6
Parameters
----------
- command (optional, str, AssignSpare)
+ command (optional, str, None)
These actions may require a system reset, depending on the capabilities of the controller.
``ResetConfig`` - Deletes all the virtual disks and unassigns all hot spares on physical disks. *controller_id* is required for this operation.
@@ -58,6 +58,8 @@ Parameters
``LockVirtualDisk`` - To encrypt the virtual disk. *volume_id* is required for this operation.
+ ``OnlineCapacityExpansion`` - To expand the size of a virtual disk. *volume_id* and either *target* or *size* are required for this operation.
+
target (optional, list, None)
Fully Qualified Device Descriptor (FQDD) of the target physical drive.
@@ -66,6 +68,8 @@ Parameters
If *volume_id* is not specified or empty, this physical drive will be assigned as a global hot spare when *command* is ``AssignSpare``.
+ When *command* is ``OnlineCapacityExpansion``, then *target* is mutually exclusive with *size*.
+
Notes: Global or Dedicated hot spare can be assigned only once for a physical disk, Re-assign cannot be done when *command* is ``AssignSpare``.
@@ -84,6 +88,8 @@ Parameters
This option is mandatory when *command* is ``ResetConfig``, ``SetControllerKey``, ``RemoveControllerKey``, ``ReKey``, or ``EnableControllerEncryption``.
+ This option is mandatory for *attributes*.
+
key (optional, str, None)
A new security key passphrase that the encryption-capable controller uses to create the encryption key. The controller uses the encryption key to lock or unlock access to the Self-Encrypting Drive (SED). Only one encryption key can be created for each controller.
@@ -119,14 +125,73 @@ Parameters
``LKM`` to choose mode as local key mode.
+ size (optional, int, None)
+ Capacity of the virtual disk to be expanded in MB.
+
+ Check mode and idempotency are not supported for *size*.
+
+ The Online Capacity Expansion size must be at least 100 MB greater than the current size.
+
+ When *command* is ``OnlineCapacityExpansion``, then *size* is mutually exclusive with *target*.
+
+
+ attributes (optional, dict, None)
+ Dictionary of controller attributes and value pair.
+
+ This feature is only supported for iDRAC9 with firmware version 6.00.00.00 and above.
+
+ *controller_id* is required for this operation.
+
+ *apply_time* and *maintenance_window* are applicable for *attributes*.
+
+ *attributes* is mutually exclusive with *command*.
+
+ Use https://*idrac_ip*/redfish/v1/Schemas/DellOemStorageController.json to view the attributes.
+
+
+ apply_time (optional, str, Immediate)
+ Apply time of the *attributes*.
+
+ This is applicable only to *attributes*.
+
+ ``Immediate`` Allows the user to immediately reboot the host and apply the changes. *job_wait* is applicable.
+
+ ``OnReset`` Allows the user to apply the changes on the next reboot of the host server.
+
+ ``AtMaintenanceWindowStart`` Allows the user to apply at the start of a maintenance window as specified in *maintenance_window*.
+
+ ``InMaintenanceWindowOnReset`` Allows to apply after a manual reset but within the maintenance window as specified in *maintenance_window*.
+
+
+ maintenance_window (optional, dict, None)
+ Option to schedule the maintenance window.
+
+ This is required when *apply_time* is ``AtMaintenanceWindowStart`` or ``InMaintenanceWindowOnReset``.
+
+
+ start_time (True, str, None)
+ The start time for the maintenance window to be scheduled.
+
+ The format is YYYY-MM-DDThh:mm:ss<offset>.
+
+ <offset> is the time offset from UTC of the current time zone set in iDRAC, in the format: +05:30 for IST.
+
+
+ duration (optional, int, 900)
+ The duration in seconds for the maintenance window.
+
+
+
job_wait (optional, bool, False)
Provides the option if the module has to wait for the job to be completed.
+ This is applicable for *attributes* when *apply_time* is ``Immediate``.
+
job_wait_timeout (optional, int, 120)
The maximum wait time of job completion in seconds before the job tracking is stopped.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
baseuri (True, str, None)
@@ -142,11 +207,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -165,7 +230,8 @@ Notes
.. note::
- Run this module from a system that has direct access to Dell iDRAC.
- - This module always reports as changes found when ``ReKey``, ``BlinkTarget``, and ``UnBlinkTarget``.
+ - This module is supported on iDRAC9.
+ - This module always reports as changes found when *command* is ``ReKey``, ``BlinkTarget``, and ``UnBlinkTarget``.
- This module supports ``check_mode``.
@@ -385,6 +451,60 @@ Examples
tags:
- lock
+ - name: Online Capacity Expansion of a volume using target
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ target:
+ - "Disk.Bay.2:Enclosure.Internal.0-0:RAID.Integrated.1-1"
+ tags:
+ - oce_target
+
+ - name: Online Capacity Expansion of a volume using size
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ size: 362785
+ tags:
+ - oce_size
+
+ - name: Set controller attributes.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ ControllerMode: "HBA"
+ apply_time: "OnReset"
+ tags:
+ - controller-attribute
+
+ - name: Configure controller attributes at Maintenance window
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ CheckConsistencyMode: Normal
+ CopybackMode: "Off"
+ LoadBalanceMode: Disabled
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 1200
+
Return Values
@@ -394,15 +514,15 @@ msg (always, str, Successfully submitted the job that performs the AssignSpare o
Overall status of the storage controller configuration operation.
-task (success, dict, AnsibleMapping([('id', 'JID_XXXXXXXXXXXXX'), ('uri', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX')]))
+task (success, dict, {'id': 'JID_XXXXXXXXXXXXX', 'uri': '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX'})
ID and URI resource of the job created.
-status (always, dict, AnsibleMapping([('ActualRunningStartTime', '2022-02-09T04:42:41'), ('ActualRunningStopTime', '2022-02-09T04:44:00'), ('CompletionTime', '2022-02-09T04:44:00'), ('Description', 'Job Instance'), ('EndTime', 'TIME_NA'), ('Id', 'JID_444033604418'), ('JobState', 'Completed'), ('JobType', 'RealTimeNoRebootConfiguration'), ('Message', 'Job completed successfully.'), ('MessageArgs', []), ('MessageId', 'PR19'), ('Name', 'Configure: RAID.Integrated.1-1'), ('PercentComplete', 100), ('StartTime', '2022-02-09T04:42:40'), ('TargetSettingsURI', None)]))
+status (always, dict, {'ActualRunningStartTime': '2022-02-09T04:42:41', 'ActualRunningStopTime': '2022-02-09T04:44:00', 'CompletionTime': '2022-02-09T04:44:00', 'Description': 'Job Instance', 'EndTime': 'TIME_NA', 'Id': 'JID_444033604418', 'JobState': 'Completed', 'JobType': 'RealTimeNoRebootConfiguration', 'Message': 'Job completed successfully.', 'MessageArgs': [], 'MessageId': 'PR19', 'Name': 'Configure: RAID.Integrated.1-1', 'PercentComplete': 100, 'StartTime': '2022-02-09T04:42:40', 'TargetSettingsURI': None})
status of the submitted job.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to run the method because the requested HTTP method is not allowed.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'iDRAC.1.6.SYS402'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'Enter a valid HTTP method and retry the operation. For information about valid methods, see the Redfish Users Guide available on the support site.'), ('Severity', 'Informational')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to run the method because the requested HTTP method is not allowed.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'iDRAC.1.6.SYS402', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'Enter a valid HTTP method and retry the operation. For information about valid methods, see the Redfish Users Guide available on the support site.', 'Severity': 'Informational'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information'}})
Details of a http error.
@@ -422,4 +542,5 @@ Authors
- Jagadeesh N V (@jagadeeshnv)
- Felix Stephen (@felixs88)
- Husniya Hameed (@husniya_hameed)
+- Abhishek Sinha (@Abhishek-Dell)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst
index 867287018..e0efdc9f9 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_reset.rst
@@ -23,7 +23,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -47,11 +47,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -69,7 +69,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -99,11 +100,11 @@ msg (always, str, Successfully performed iDRAC reset.)
Status of the iDRAC reset operation.
-reset_status (always, dict, AnsibleMapping([('idracreset', AnsibleMapping([('Data', AnsibleMapping([('StatusCode', 204)])), ('Message', 'none'), ('Status', 'Success'), ('StatusCode', 204), ('retval', True)]))]))
+reset_status (always, dict, {'idracreset': {'Data': {'StatusCode': 204}, 'Message': 'none', 'Status': 'Success', 'StatusCode': 204, 'retval': True}})
Details of iDRAC reset operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
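A minimal sketch, assuming only the parameters documented above (address and credentials are placeholders), of triggering an iDRAC reset and checking the returned ``reset_status``:

.. code-block:: yaml+jinja

    ---
    - name: Reset iDRAC (illustrative sketch)
      dellemc.openmanage.idrac_reset:
        idrac_ip: "192.168.0.1"
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"
      register: reset_result

    - name: Show the reset status returned by the module
      ansible.builtin.debug:
        msg: "{{ reset_result.reset_status.idracreset.Status }}"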
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst
index f013c2297..4de82dd84 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_server_config_profile.rst
@@ -12,7 +12,7 @@ idrac_server_config_profile -- Export or Import iDRAC Server Configuration Profi
Synopsis
--------
-Export the Server Configuration Profile (SCP) from the iDRAC or import from a network share (CIFS, NFS, HTTP, HTTPS) or a local file.
+Export the Server Configuration Profile (SCP) from the iDRAC or import from a network share (CIFS, NFS, HTTP, HTTPS) or a local path.
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python >= 3.9.14
@@ -39,11 +39,13 @@ Parameters
Whether to wait for job completion or not.
- share_name (True, str, None)
+ share_name (optional, str, None)
Network share or local path.
CIFS, NFS, HTTP, and HTTPS network share types are supported.
+ *share_name* is mutually exclusive with *import_buffer*.
+
share_user (optional, str, None)
Network share user in the format 'user@domain' or 'domain\\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share.
@@ -63,16 +65,34 @@ Parameters
*export_format* is used if the valid extension file is not provided for ``import``.
- scp_components (optional, str, ALL)
- If ``ALL``, this module exports or imports all components configurations from SCP file.
+ scp_components (optional, list, ALL)
+ If ``ALL``, this option exports or imports all components configurations from the SCP file.
+
+ If ``IDRAC``, this option exports or imports iDRAC configuration from the SCP file.
+
+ If ``BIOS``, this option exports or imports BIOS configuration from the SCP file.
+
+ If ``NIC``, this option exports or imports NIC configuration from the SCP file.
+
+ If ``RAID``, this option exports or imports RAID configuration from the SCP file.
+
+ If ``FC``, this option exports or imports FiberChannel configurations from the SCP file.
+
+ If ``InfiniBand``, this option exports or imports InfiniBand configuration from the SCP file.
+
+ If ``SupportAssist``, this option exports or imports SupportAssist configuration from the SCP file.
+
+ If ``EventFilters``, this option exports or imports EventFilters configuration from the SCP file.
+
+ If ``System``, this option exports or imports System configuration from the SCP file.
- If ``IDRAC``, this module exports or imports iDRAC configuration from SCP file.
+ If ``LifecycleController``, this option exports or imports LifecycleController configuration from the SCP file.
- If ``BIOS``, this module exports or imports BIOS configuration from SCP file.
+ If ``AHCI``, this option exports or imports AHCI configuration from the SCP file.
- If ``NIC``, this module exports or imports NIC configuration from SCP file.
+ If ``PCIeSSD``, this option exports or imports PCIeSSD configuration from the SCP file.
- If ``RAID``, this module exports or imports RAID configuration from SCP file.
+ When *command* is ``export`` or ``import``, *target* with multiple components is supported only on iDRAC9 with firmware 6.10.00.00 and above.
shutdown_type (optional, str, Graceful)
@@ -98,7 +118,83 @@ Parameters
export_use (optional, str, Default)
- Specify the type of server configuration profile (SCP) to be exported. This option is applicable for ``export`` command.
+ Specify the type of Server Configuration Profile (SCP) to be exported.
+
+ This option is applicable when *command* is ``export``.
+
+ ``Default`` Creates a non-destructive snapshot of the configuration.
+
+ ``Replace`` Replaces a server with another or restores the servers settings to a known baseline.
+
+ ``Clone`` Clones settings from one server to another server with the identical hardware setup. All settings except I/O identity are updated (e.g. will reset RAID). The settings in this export will be destructive when uploaded to another system.
+
+
+ ignore_certificate_warning (optional, str, ignore)
+ If ``ignore``, it ignores the certificate warnings.
+
+ If ``showerror``, it shows the certificate warnings.
+
+ *ignore_certificate_warning* is considered only when *share_name* is of type HTTPS and is supported only on iDRAC9.
+
+
+ include_in_export (optional, str, default)
+ This option is applicable when *command* is ``export``.
+
+ If ``default``, it exports the default Server Configuration Profile.
+
+ If ``readonly``, it exports the SCP with readonly attributes.
+
+ If ``passwordhashvalues``, it exports the SCP with password hash values.
+
+ If ``customtelemetry``, it exports the SCP with custom telemetry attributes; this is supported only on iDRAC9.
+
+
+ import_buffer (optional, str, None)
+ Used to import the buffer input of XML or JSON into the iDRAC.
+
+ This option is applicable when *command* is ``import`` or ``preview``.
+
+ *import_buffer* is mutually exclusive with *share_name*.
+
+
+ proxy_support (optional, bool, False)
+ Proxy to be enabled or disabled.
+
+ *proxy_support* is considered only when *share_name* is of type HTTP or HTTPS and is supported only on iDRAC9.
+
+
+ proxy_type (optional, str, http)
+ ``http`` to select HTTP type proxy.
+
+ ``socks4`` to select SOCKS4 type proxy.
+
+ *proxy_type* is considered only when *share_name* is of type HTTP or HTTPS and is supported only on iDRAC9.
+
+
+ proxy_server (optional, str, None)
+ *proxy_server* is required when *share_name* is of type HTTPS or HTTP and *proxy_support* is ``true``.
+
+ *proxy_server* is considered only when *share_name* is of type HTTP or HTTPS and is supported only on iDRAC9.
+
+
+ proxy_port (optional, str, 80)
+ Proxy port to authenticate.
+
+ *proxy_port* is required when *share_name* is of type HTTPS or HTTP and *proxy_support* is ``true``.
+
+ *proxy_port* is considered only when *share_name* is of type HTTP or HTTPS and is supported only on iDRAC9.
+
+
+ proxy_username (optional, str, None)
+ Proxy username to authenticate.
+
+ *proxy_username* is considered only when *share_name* is of type HTTP or HTTPS and is supported only on iDRAC9.
+
+
+ proxy_password (optional, str, None)
+ Proxy password to authenticate.
+
+ *proxy_password* is considered only when *share_name* is of type HTTP or HTTPS and is supported only on iDRAC9.
idrac_ip (True, str, None)
@@ -118,11 +214,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -141,9 +237,11 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- This module supports ``check_mode``.
- - To import Server Configuration Profile (SCP) on the iDRAC7 and iDRAC8-based servers, the servers must have iDRAC Enterprise license or later.
+ - To import Server Configuration Profile (SCP) on the iDRAC8-based servers, the servers must have iDRAC Enterprise license or later.
+ - For ``import`` operation, ``check_mode`` is supported only when *target* is ``ALL``.
+ - This module supports IPv4 and IPv6 addresses.
@@ -162,11 +260,12 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
- scp_components: IDRAC
+ scp_components:
+ - IDRAC
scp_file: example_file
export_format: JSON
export_use: Clone
- job_wait: True
+ job_wait: true
- name: Import SCP with IDRAC components in JSON format from a local path
dellemc.openmanage.idrac_server_config_profile:
@@ -176,11 +275,12 @@ Examples
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
command: import
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.json
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: False
+ job_wait: false
- name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name
dellemc.openmanage.idrac_server_config_profile:
@@ -189,10 +289,11 @@ Examples
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
- scp_components: "BIOS"
+ scp_components:
+ - BIOS
export_format: XML
export_use: Default
- job_wait: True
+ job_wait: true
- name: Import SCP with BIOS components in XML format from a NFS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -202,11 +303,12 @@ Examples
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
command: import
- scp_components: "BIOS"
+ scp_components:
+ - BIOS
scp_file: 192.168.0.1_20210618_162856.xml
shutdown_type: NoReboot
end_host_power_state: "Off"
- job_wait: False
+ job_wait: false
- name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name
dellemc.openmanage.idrac_server_config_profile:
@@ -217,12 +319,12 @@ Examples
share_name: "\\\\192.168.0.2\\share"
share_user: share_username@domain
share_password: share_password
- share_mnt: /mnt/cifs
scp_file: example_file.xml
- scp_components: "RAID"
+ scp_components:
+ - RAID
export_format: XML
export_use: Default
- job_wait: True
+ job_wait: true
- name: Import SCP with RAID components in XML format from a CIFS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -233,13 +335,13 @@ Examples
share_name: "\\\\192.168.0.2\\share"
share_user: share_username
share_password: share_password
- share_mnt: /mnt/cifs
command: import
- scp_components: "RAID"
+ scp_components:
+ - RAID
scp_file: example_file.xml
shutdown_type: Forced
end_host_power_state: "On"
- job_wait: True
+ job_wait: true
- name: Export SCP with ALL components in JSON format to a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
@@ -251,9 +353,10 @@ Examples
share_user: share_username
share_password: share_password
scp_file: example_file.json
- scp_components: ALL
+ scp_components:
+ - ALL
export_format: JSON
- job_wait: False
+ job_wait: false
- name: Import SCP with ALL components in JSON format from a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
@@ -268,7 +371,7 @@ Examples
scp_file: example_file.json
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: True
+ job_wait: true
- name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name
dellemc.openmanage.idrac_server_config_profile:
@@ -279,10 +382,11 @@ Examples
share_name: "https://192.168.0.4/share"
share_user: share_username
share_password: share_password
- scp_components: ALL
+ scp_components:
+ - ALL
export_format: XML
export_use: Replace
- job_wait: True
+ job_wait: true
- name: Import SCP with ALL components in XML format from a HTTPS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -297,9 +401,9 @@ Examples
scp_file: 192.168.0.1_20160618_164647.xml
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: False
+ job_wait: false
- - name: Preview SCP with ALL components in XML format from a CIFS share path
+ - name: Preview SCP with IDRAC components in XML format from a CIFS share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -309,11 +413,12 @@ Examples
share_user: share_username
share_password: share_password
command: preview
- scp_components: "ALL"
+ scp_components:
+ - ALL
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
- - name: Preview SCP with ALL components in JSON format from a NFS share path
+ - name: Preview SCP with IDRAC components in JSON format from a NFS share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -321,11 +426,12 @@ Examples
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
command: preview
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
- - name: Preview SCP with ALL components in XML format from a HTTP share path
+ - name: Preview SCP with IDRAC components in XML format from a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -335,11 +441,12 @@ Examples
share_user: share_username
share_password: share_password
command: preview
- scp_components: "ALL"
+ scp_components:
+ - ALL
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
- - name: Preview SCP with ALL components in XML format from a local path
+ - name: Preview SCP with IDRAC components in XML format from a local path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -347,9 +454,72 @@ Examples
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
command: preview
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.json
- job_wait: False
+ job_wait: false
+
+ - name: Import SCP with IDRAC components in XML format from the XML content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+
+ - name: Export SCP with ALL components in XML format using HTTP proxy.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ scp_components:
+ - ALL
+ share_name: "http://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.5
+ proxy_port: 8080
+ proxy_username: proxy_username
+ proxy_password: proxy_password
+ proxy_type: http
+ include_in_export: passwordhashvalues
+ job_wait: true
+
+ - name: Import SCP with IDRAC and BIOS components in XML format using SOCKS4 proxy
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ - BIOS
+ share_name: "https://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.6
+ proxy_port: 8080
+ proxy_type: socks4
+ scp_file: filename.xml
+ job_wait: true
+
+ - name: Import SCP with IDRAC components in JSON format from the JSON content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "{\"SystemConfiguration\": {\"Components\": [{\"FQDD\": \"iDRAC.Embedded.1\",\"Attributes\":
+ [{\"Name\": \"SNMP.1#AgentCommunity\",\"Value\": \"public1\"}]}]}}"
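None of the examples above exercise the *ignore_certificate_warning* option documented for HTTPS shares; here is a hedged sketch (share address and credentials are placeholders) that exports an SCP over HTTPS and surfaces certificate warnings instead of ignoring them:

.. code-block:: yaml+jinja

    - name: Export SCP over HTTPS and show certificate warnings (illustrative sketch)
      dellemc.openmanage.idrac_server_config_profile:
        idrac_ip: "{{ idrac_ip }}"
        idrac_user: "{{ idrac_user }}"
        idrac_password: "{{ idrac_password }}"
        ca_path: "/path/to/ca_cert.pem"
        share_name: "https://192.168.0.4/share"
        share_user: share_username
        share_password: share_password
        scp_components:
          - ALL
        export_format: XML
        ignore_certificate_warning: showerror
        job_wait: true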
@@ -360,11 +530,11 @@ msg (always, str, Successfully imported the Server Configuration Profile)
Status of the import or export SCP job.
-scp_status (success, dict, AnsibleMapping([('Id', 'JID_XXXXXXXXX'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'XXX123'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+scp_status (success, dict, {'Id': 'JID_XXXXXXXXX', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageId': 'XXX123', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
SCP operation job and progress details from the iDRAC.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -383,4 +553,6 @@ Authors
- Jagadeesh N V(@jagadeeshnv)
- Felix Stephen (@felixs88)
+- Jennifer John (@Jennifer-John)
+- Shivam Sharma (@ShivamSh3)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst
index 31d69bd37..24a3ac09e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_syslog.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -49,11 +49,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -88,7 +88,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -103,27 +104,27 @@ Examples
---
- name: Enable iDRAC syslog
dellemc.openmanage.idrac_syslog:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- share_password: "share_user_pwd"
- share_user: "share_user_name"
- share_mnt: "/mnt/share"
- syslog: "Enabled"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Enabled"
- name: Disable iDRAC syslog
dellemc.openmanage.idrac_syslog:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- share_password: "share_user_pwd"
- share_user: "share_user_name"
- share_mnt: "/mnt/share"
- syslog: "Disabled"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Disabled"
@@ -134,11 +135,11 @@ msg (always, str, Successfully fetch the syslogs.)
Overall status of the syslog export operation.
-syslog_status (success, dict, AnsibleMapping([('@odata.context', '/redfish/v1/$metadata#DellJob.DellJob'), ('@odata.id', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485'), ('@odata.type', '#DellJob.v1_0_2.DellJob'), ('CompletionTime', '2020-03-27T02:27:45'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_852940632485'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+syslog_status (success, dict, {'@odata.context': '/redfish/v1/$metadata#DellJob.DellJob', '@odata.id': '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_852940632485', '@odata.type': '#DellJob.v1_0_2.DellJob', 'CompletionTime': '2020-03-27T02:27:45', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_852940632485', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Job details of the syslog operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst
index 9d0bade41..4c2d4fa8e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_system_info.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -45,11 +45,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -67,7 +67,8 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -96,11 +97,11 @@ msg (always, str, Successfully fetched the system inventory details.)
Overall system inventory information status.
-system_info (success, dict, AnsibleMapping([('BIOS', [AnsibleMapping([('BIOSReleaseDate', '11/26/2019'), ('FQDD', 'BIOS.Setup.1-1'), ('InstanceID', 'DCIM:INSTALLED#741__BIOS.Setup.1-1'), ('Key', 'DCIM:INSTALLED#741__BIOS.Setup.1-1'), ('SMBIOSPresent', 'True'), ('VersionString', '2.4.8')])])]))
+system_info (success, dict, {'BIOS': [{'BIOSReleaseDate': '11/26/2019', 'FQDD': 'BIOS.Setup.1-1', 'InstanceID': 'DCIM:INSTALLED#741__BIOS.Setup.1-1', 'Key': 'DCIM:INSTALLED#741__BIOS.Setup.1-1', 'SMBIOSPresent': 'True', 'VersionString': '2.4.8'}]})
Details of the PowerEdge Server System Inventory.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
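The note added above documents IPv6 support for *idrac_ip*, but the examples in this file only show IPv4 addresses. A minimal sketch of an IPv6 invocation, using the documentation-prefix address 2001:db8::10 and placeholder credentials (all values are illustrative):

.. code-block:: yaml+jinja

    ---
    - name: Fetch system inventory over IPv6 (illustrative address and credentials)
      dellemc.openmanage.idrac_system_info:
        idrac_ip: "2001:db8::10"            # documentation-prefix IPv6 address, not a real device
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"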
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst
index d05e94fe5..10d7447f4 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_timezone_ntp.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- omsdk >= 1.2.488
-- python >= 3.8.6
+- python >= 3.9.6
@@ -89,11 +89,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -112,7 +112,8 @@ Notes
.. note::
- This module requires 'Administrator' privilege for *idrac_user*.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for *idrac_ip*.
- This module supports ``check_mode``.
@@ -127,9 +128,9 @@ Examples
---
- name: Configure time zone and NTP on iDRAC
dellemc.openmanage.idrac_timezone_ntp:
- idrac_ip: "190.168.0.1"
+ idrac_ip: "190.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
setup_idrac_timezone: "UTC"
enable_ntp: Enabled
@@ -146,11 +147,11 @@ msg (always, str, Successfully configured the iDRAC time settings.)
Overall status of the timezone and ntp configuration.
-timezone_ntp_status (success, dict, AnsibleMapping([('@odata.context', '/redfish/v1/$metadata#DellJob.DellJob'), ('@odata.id', '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_861801613971'), ('@odata.type', '#DellJob.v1_0_0.DellJob'), ('CompletionTime', '2020-04-06T19:06:01'), ('Description', 'Job Instance'), ('EndTime', None), ('Id', 'JID_861801613971'), ('JobState', 'Completed'), ('JobType', 'ImportConfiguration'), ('Message', 'Successfully imported and applied Server Configuration Profile.'), ('MessageArgs', []), ('MessageId', 'SYS053'), ('Name', 'Import Configuration'), ('PercentComplete', 100), ('StartTime', 'TIME_NOW'), ('Status', 'Success'), ('TargetSettingsURI', None), ('retval', True)]))
+timezone_ntp_status (success, dict, {'@odata.context': '/redfish/v1/$metadata#DellJob.DellJob', '@odata.id': '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_861801613971', '@odata.type': '#DellJob.v1_0_0.DellJob', 'CompletionTime': '2020-04-06T19:06:01', 'Description': 'Job Instance', 'EndTime': None, 'Id': 'JID_861801613971', 'JobState': 'Completed', 'JobType': 'ImportConfiguration', 'Message': 'Successfully imported and applied Server Configuration Profile.', 'MessageArgs': [], 'MessageId': 'SYS053', 'Name': 'Import Configuration', 'PercentComplete': 100, 'StartTime': 'TIME_NOW', 'Status': 'Success', 'TargetSettingsURI': None, 'retval': True})
Job details of the time zone setting operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst
index e404582b8..9d92e0d0e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_user.rst
@@ -38,8 +38,6 @@ Parameters
Select ``absent`` to remove a user account.
- Ensure Lifecycle Controller is available because the user operation uses the capabilities of Lifecycle Controller.
-
user_name (True, str, None)
Provide the *user_name* of the account to be created, deleted or modified.
@@ -66,6 +64,12 @@ Parameters
A user with ``None``, no privileges assigned.
+  Will be ignored if the *custom_privilege* parameter is provided.
+
+
+ custom_privilege (optional, int, None)
+ The privilege level assigned to the user.
+
ipmi_lan_privilege (optional, str, None)
The Intelligent Platform Management Interface LAN privilege level assigned to the user.
@@ -126,11 +130,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -148,7 +152,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- This module supports ``check_mode``.
@@ -208,11 +212,11 @@ msg (always, str, Successfully created user account details.)
Status of the iDRAC user configuration.
-status (success, dict, AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Successfully Completed Request'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'Base.1.5.Success'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'None'), ('Severity', 'OK')]), AnsibleMapping([('Message', 'The operation successfully completed.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'IDRAC.2.1.SYS413'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'No response action is required.'), ('Severity', 'Informational')])])]))
+status (success, dict, {'@Message.ExtendedInfo': [{'Message': 'Successfully Completed Request', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'Base.1.5.Success', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'None', 'Severity': 'OK'}, {'Message': 'The operation successfully completed.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'IDRAC.2.1.SYS413', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'No response action is required.', 'Severity': 'Informational'}]})
Configures the iDRAC users attributes.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
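The hunk above adds the *custom_privilege* parameter to idrac_user, but none of the examples use it. A minimal sketch of modifying an existing account, assuming the account already exists and using an illustrative numeric value (the valid range is not shown in this excerpt):

.. code-block:: yaml+jinja

    ---
    - name: Assign a custom privilege level to an existing iDRAC user (illustrative value)
      dellemc.openmanage.idrac_user:
        idrac_ip: "192.168.0.1"
        idrac_user: "idrac_user"
        idrac_password: "idrac_password"
        ca_path: "/path/to/ca_cert.pem"
        state: present
        user_name: "support_user"
        custom_privilege: 511               # placeholder value; when set, the privilege option is ignored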
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_user_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_user_info.rst
new file mode 100644
index 000000000..85e84ff65
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_user_info.rst
@@ -0,0 +1,148 @@
+.. _idrac_user_info_module:
+
+
+idrac_user_info -- Retrieve details of all users or a specific user on iDRAC.
+=============================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the list and basic details of all users or details of a specific user on iDRAC.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.8.6
+
+
+
+Parameters
+----------
+
+ user_id (optional, int, None)
+ Sequential user ID numbers; values from 1 to 16 are supported.
+
+ *user_id* is mutually exclusive with *username*
+
+
+ username (optional, str, None)
+ Username of the account that is created in iDRAC local users.
+
+ *username* is mutually exclusive with *user_id*
+
+
+ idrac_ip (True, str, None)
+ iDRAC IP Address.
+
+
+ idrac_user (True, str, None)
+ iDRAC username.
+
+
+ idrac_password (True, str, None)
+ iDRAC user password.
+
+
+ idrac_port (optional, int, 443)
+ iDRAC port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module on a system that has direct access to Dell iDRAC.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve basic details of all user accounts.
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+
+ - name: Retrieve user details using user_id
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ user_id: 1
+
+ - name: Retrieve user details using username
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ username: user_name
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved the user information.)
+ Status of user information retrieval.
+
+
+user_info (success, list, [{'Description': 'User Account', 'Enabled': False, 'Id': '1', 'Locked': False, 'Name': 'User Account', 'Password': None, 'RoleId': 'None', 'UserName': ''}])
+ Information about the user.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Husniya Hameed(@husniya_hameed)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst b/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst
index 7210dd952..479ba05bb 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/idrac_virtual_media.rst
@@ -32,9 +32,9 @@ Parameters
insert (True, bool, None)
- ``True`` connects the remote image file.
+ ``true`` connects the remote image file.
- ``False`` ejects the remote image file if connected.
+ ``false`` ejects the remote image file if connected.
image (optional, path, None)
@@ -42,13 +42,13 @@ Parameters
The file name with .img extension is redirected as a virtual floppy and a file name with .iso extension is redirected as a virtual CDROM.
- This option is required when *insert* is ``True``.
+ This option is required when *insert* is ``true``.
The following are the examples of the share location: CIFS share: //192.168.0.1/file_path/image_name.iso, NFS share: 192.168.0.2:/file_path/image_name.img, HTTP share: http://192.168.0.3/file_path/image_name.iso, HTTPS share: https://192.168.0.4/file_path/image_name.img
- CIFS share is not supported by iDRAC7 and iDRAC8.
+ CIFS share is not supported by iDRAC8.
- HTTPS share with credentials is not supported by iDRAC7 and iDRAC8.
+ HTTPS share with credentials is not supported by iDRAC8.
index (optional, int, None)
@@ -70,12 +70,12 @@ Parameters
media_type (optional, str, None)
- Type of the image file. This is applicable when *insert* is ``True``.
+ Type of the image file. This is applicable when *insert* is ``true``.
force (optional, bool, False)
- ``True`` ejects the image file if already connected and inserts the file provided in *image*. This is applicable when *insert* is ``True``.
+ ``true`` ejects the image file if already connected and inserts the file provided in *image*. This is applicable when *insert* is ``true``.
resource_id (optional, str, None)
@@ -99,11 +99,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -207,7 +207,7 @@ Examples
ca_path: "/path/to/ca_cert.pem"
force: true
virtual_media:
- insert: false
+ insert: false
- name: Insertion and ejection of image file in single task.
dellemc.openmanage.idrac_virtual_media:
@@ -234,7 +234,7 @@ msg (success, str, Successfully performed the virtual media operation.)
Successfully performed the virtual media operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
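The hunks above clarify that *image* and *media_type* apply only when *insert* is ``true``. A minimal sketch of a single insertion from an NFS share, with placeholder addresses and an assumed *media_type* value (the allowed choices are not shown in this excerpt):

.. code-block:: yaml+jinja

    ---
    - name: Insert an ISO image from an NFS share (illustrative share and credentials)
      dellemc.openmanage.idrac_virtual_media:
        idrac_ip: "192.168.0.1"
        idrac_user: "user_name"
        idrac_password: "user_password"
        ca_path: "/path/to/ca_cert.pem"
        virtual_media:
          - insert: true
            image: "192.168.0.2:/file_path/image_name.iso"   # NFS share format listed in the parameter description
            media_type: CD                                   # assumed choice; only relevant when insert is true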
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst
index fa9c6b8c7..19092497d 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_active_directory.rst
@@ -94,25 +94,25 @@ Parameters
If test fails, module will error out.
- If ``yes``, *domain_username* and *domain_password* has to be provided.
+    If ``true``, *domain_username* and *domain_password* have to be provided.
domain_password (optional, str, None)
Provide the domain password.
- This is applicable when *test_connection* is ``yes``.
+ This is applicable when *test_connection* is ``true``.
domain_username (optional, str, None)
Provide the domain username either in the UPN (username@domain) or NetBIOS (domain\\username) format.
- This is applicable when *test_connection* is ``yes``.
+ This is applicable when *test_connection* is ``true``.
validate_certificate (optional, bool, False)
Enables validation of SSL certificate of the domain controller.
- The module will always report change when this is ``yes``.
+ The module will always report change when this is ``true``.
certificate_file (optional, path, None)
@@ -120,7 +120,7 @@ Parameters
The certificate should be a Root CA Certificate encoded in Base64 format.
- This is applicable when *validate_certificate* is ``yes``.
+ This is applicable when *validate_certificate* is ``true``.
hostname (True, str, None)
@@ -140,11 +140,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -162,7 +162,7 @@ Notes
-----
.. note::
- - The module will always report change when *validate_certificate* is ``yes``.
+ - The module will always report change when *validate_certificate* is ``true``.
- Run this module from a system that has direct access to OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -186,7 +186,7 @@ Examples
domain_server:
- domainname.com
group_domain: domainname.com
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
@@ -201,7 +201,7 @@ Examples
domain_server:
- 192.68.20.181
group_domain: domainname.com
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
- name: Modify domain controller IP address, network_timeout and group_domain
@@ -233,10 +233,10 @@ Examples
password: "password"
ca_path: "/path/to/ca_cert.pem"
name: my_ad2
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies.rst
new file mode 100644
index 000000000..0c14c7d9d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies.rst
@@ -0,0 +1,447 @@
+.. _ome_alert_policies_module:
+
+
+ome_alert_policies -- Manage OME alert policies.
+================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module allows you to create, modify, or delete alert policies on OpenManage Enterprise or OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python \>= 3.9.6
+
+
+
+Parameters
+----------
+
+ name (True, list, None)
+ Name of an alert policy or a list of alert policies.
+
+    More than one policy name is applicable when \ :emphasis:`state`\  is \ :literal:`absent`\ , or when \ :emphasis:`state`\  is \ :literal:`present`\  with only \ :emphasis:`enable`\  provided.
+
+
+ state (optional, str, present)
+ \ :literal:`present`\ allows you to create an alert policy or update if the policy name already exists.
+
+ \ :literal:`absent`\ allows you to delete an alert policy.
+
+
+ enable (optional, bool, None)
+ \ :literal:`true`\ allows you to enable an alert policy.
+
+ \ :literal:`false`\ allows you to disable an alert policy.
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ .
+
+
+ new_name (optional, str, None)
+ New name for the alert policy.
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ , and an alert policy exists.
+
+
+ description (optional, str, None)
+ Description for the alert policy.
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+
+ device_service_tag (optional, list, None)
+ List of device service tags on which the alert policy will be applicable.
+
+ This option is mutually exclusive with \ :emphasis:`device\_group`\ , \ :emphasis:`specific\_undiscovered\_devices`\ , \ :emphasis:`any\_undiscovered\_devices`\ and \ :emphasis:`all\_devices`\ .
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+
+ device_group (optional, list, None)
+ List of device group names on which the alert policy is applicable.
+
+ This option is mutually exclusive with \ :emphasis:`device\_service\_tag`\ , \ :emphasis:`specific\_undiscovered\_devices`\ , \ :emphasis:`any\_undiscovered\_devices`\ and \ :emphasis:`all\_devices`\ .
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+
+ specific_undiscovered_devices (optional, list, None)
+ List of undiscovered IPs, hostnames, or range of IPs of devices on which the alert policy is applicable.
+
+ This option is mutually exclusive with \ :emphasis:`device\_service\_tag`\ , \ :emphasis:`device\_group`\ , \ :emphasis:`any\_undiscovered\_devices`\ and \ :emphasis:`all\_devices`\ .
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+ Examples of valid IP range format:
+
+ 10.35.0.0
+
+ 10.36.0.0-10.36.0.255
+
+ 10.37.0.0/24
+
+ 2607:f2b1:f083:135::5500/118
+
+ 2607:f2b1:f083:135::a500-2607:f2b1:f083:135::a600
+
+ hostname.domain.com
+
+ Examples of invalid IP range format:
+
+ 10.35.0.\*
+
+ 10.36.0.0-255
+
+ 10.35.0.0/255.255.255.0
+
+ These values will not be validated.
+
+
+ any_undiscovered_devices (optional, bool, None)
+ This option indicates whether the alert policy is applicable to any undiscovered devices or not.
+
+ This option is mutually exclusive with \ :emphasis:`device\_service\_tag`\ , \ :emphasis:`specific\_undiscovered\_devices`\ , \ :emphasis:`device\_group`\ and \ :emphasis:`all\_devices`\ .
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ .
+
+
+ all_devices (optional, bool, None)
+ This option indicates whether the alert policy is applicable to all the discovered and undiscovered devices or not.
+
+ This option is mutually exclusive with \ :emphasis:`device\_service\_tag`\ , \ :emphasis:`specific\_undiscovered\_devices`\ , \ :emphasis:`any\_undiscovered\_devices`\ and \ :emphasis:`device\_group`\ .
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ .
+
+
+ category (optional, list, None)
+ Category of the alerts received.
+
+ This is mutually exclusive with the \ :emphasis:`message\_ids`\ , \ :emphasis:`message\_file`\ .
+
+ This is fetched from the \ :ref:`dellemc.openmanage.ome\_alert\_policies\_category\_info <ansible_collections.dellemc.openmanage.ome_alert_policies_category_info_module>`\ .
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ .
+
+
+ catalog_name (True, str, None)
+ Name of the catalog.
+
+
+ catalog_category (optional, list, None)
+ Category of the catalog.
+
+
+ category_name (optional, str, None)
+ Name of the category.
+
+
+ sub_category_names (optional, list, None)
+ List of sub-categories.
+
+
+
+
+ message_ids (optional, list, None)
+ List of Message ids
+
+ This is mutually exclusive with the \ :emphasis:`category`\ , \ :emphasis:`message\_file`\
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+ This is fetched from the \ :ref:`dellemc.openmanage.ome\_alert\_policies\_message\_id\_info <ansible_collections.dellemc.openmanage.ome_alert_policies_message_id_info_module>`\ .
+
+
+ message_file (optional, path, None)
+ Local path of a CSV formatted file with message IDs
+
+ This is mutually exclusive with the \ :emphasis:`category`\ , \ :emphasis:`message\_ids`\
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+ This is fetched from the \ :ref:`dellemc.openmanage.ome\_alert\_policies\_message\_id\_info <ansible_collections.dellemc.openmanage.ome_alert_policies_message_id_info_module>`\ .
+
+
+ date_and_time (optional, dict, None)
+ Specifies the schedule for when the alert policy is applicable.
+
+ \ :emphasis:`date\_and\_time`\ is mandatory for creating a policy and optional when updating a policy.
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ .
+
+
+ date_from (True, str, None)
+ Start date in the format YYYY-MM-DD.
+
+    This parameter must be provided in quotes.
+
+
+ date_to (optional, str, None)
+ End date in the format YYYY-MM-DD.
+
+    This parameter must be provided in quotes.
+
+
+ time_from (optional, str, None)
+ Interval start time in the format HH:MM
+
+    This parameter must be provided in quotes.
+
+ This is mandatory when \ :emphasis:`time\_interval`\ is \ :literal:`true`\ .
+
+
+ time_to (optional, str, None)
+ Interval end time in the format HH:MM
+
+    This parameter must be provided in quotes.
+
+ This is mandatory when \ :emphasis:`time\_interval`\ is \ :literal:`true`\
+
+
+ days (optional, list, None)
+ Required days of the week on which alert policy operation must be scheduled.
+
+
+ time_interval (optional, bool, None)
+ Enable the time interval for which alert policy must be scheduled.
+
+
+
+ severity (optional, list, None)
+ Severity of the alert policy.
+
+ This is mandatory for creating a policy and optional for updating a policy.
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\ .
+
+
+ actions (optional, list, None)
+ Actions to be triggered for the alert policy.
+
+ This parameter is case-sensitive.
+
+ This is mandatory for creating a policy and optional for updating a policy.
+
+ This is applicable only when \ :emphasis:`state`\ is \ :literal:`present`\
+
+
+ action_name (True, str, None)
+ Name of the action.
+
+ This is fetched from the \ :ref:`dellemc.openmanage.ome\_alert\_policies\_action\_info <ansible_collections.dellemc.openmanage.ome_alert_policies_action_info_module>`\ .
+
+ This is mandatory for creating a policy and optional for updating a policy.
+
+ This parameter is case-sensitive.
+
+
+ parameters (optional, list, [])
+ Predefined parameters required to set for \ :emphasis:`action\_name`\ .
+
+
+ name (optional, str, None)
+ Name of the predefined parameter.
+
+ This is fetched from the \ :ref:`dellemc.openmanage.ome\_alert\_policies\_action\_info <ansible_collections.dellemc.openmanage.ome_alert_policies_action_info_module>`\ .
+
+
+ value (optional, str, None)
+ Value of the predefined parameter.
+
+ These values will not be validated.
+
+
+
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If \ :literal:`False`\ , the SSL certificates will not be validated.
+
+ Configure \ :literal:`False`\ only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version \ :literal:`5.0.0`\ , the \ :emphasis:`validate\_certs`\ is \ :literal:`False`\ by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports \ :literal:`check\_mode`\ .
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: "Create an alert policy"
+    dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Alert Policy One"
+ device_service_tag:
+ - ABCD123
+ - SVC7845
+ category:
+ - catalog_name: Application
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - Generic
+ - Devices
+ - catalog_name: iDRAC
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - BIOS Management
+ - iDRAC Service Module
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ severity:
+ - unknown
+ - critical
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ - name: "traphostname.domain.com:162"
+ value: true
+ tags: create_alert_policy
+
+ - name: "Update an alert Policy"
+    dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ new_name: "Update Policy Name"
+ device_group: "Group Name"
+ message_ids:
+ - AMP400
+ - CTL201
+ - BIOS101
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ time_interval: true
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ tags: update_alert_policy
+
+ - name: "Enable an alert policy"
+    dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Policy Name"
+ enable: true
+ tags: enable_alert_policy
+
+ - name: "Disable multiple alert policies"
+    dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name 1"
+ - "Policy Name 2"
+ enable: false
+ tags: disable_alert_policy
+
+ - name: "Delete an alert policy"
+    dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name"
+ state: absent
+ tags: delete_alert_policy
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully created the alert policy.)
+ Status of the alert policies operation.
+
+
+status (when state is present, dict, {'Id': 12345, 'Name': 'Policy', 'Description': 'Details of the Policy', 'Enabled': True, 'DefaultPolicy': False, 'Editable': True, 'Visible': True, 'PolicyData': {'Catalogs': [{'CatalogName': 'iDRAC', 'Categories': [4], 'SubCategories': [41]}, {'CatalogName': 'Application', 'Categories': [0], 'SubCategories': [0]}], 'Severities': [16, 1, 2, 4, 8], 'Devices': [10086, 10088], 'DeviceTypes': [1000, 2000], 'Groups': [], 'Schedule': {'StartTime': '2023-06-06 15:02:46.000', 'EndTime': '2023-06-06 18:02:46.000', 'CronString': '* * * ? * * *'}, 'Actions': [{'Id': 8, 'Name': 'Email', 'ParameterDetails': [{'Id': 1, 'Name': 'subject', 'Value': 'Device Name: $name, Device IP Address: $ip, Severity: $severity', 'Type': 'string', 'TypeParams': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 1, 'Name': 'to', 'Value': 'test@org.com', 'Type': 'string', 'TypeParams': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 1, 'Name': 'from', 'Value': 'abc@corp.com', 'Type': 'string', 'TypeParams': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 1, 'Name': 'message', 'Value': 'Event occurred for Device Name: $name, Device IP Address: $ip', 'Type': 'string', 'TypeParams': [{'Name': 'maxLength', 'Value': '255'}]}]}], 'UndiscoveredTargets': [], 'State': True, 'Owner': 10069}})
+ The policy which was created or modified.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CMON7011', 'RelatedProperties': [], 'Message': 'Unable to create or modify the alert policy because an invalid value [To Email] is entered for the action Email.', 'MessageArgs': ['[To Email]', 'Email'], 'Severity': 'Warning', 'Resolution': 'Enter a valid value for the action identified in the message and retry the operation.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Jagadeesh N V(@jagadeeshnv)
+
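None of the examples above exercise *specific_undiscovered_devices*. A minimal sketch of pointing an existing policy at undiscovered targets, assuming the policy "Alert Policy One" already exists (so the create-time mandatory options can be omitted) and using the valid range formats from the parameter description:

.. code-block:: yaml+jinja

    ---
    - name: Target an existing alert policy at undiscovered devices (illustrative targets)
      dellemc.openmanage.ome_alert_policies:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        name: "Alert Policy One"
        specific_undiscovered_devices:
          - 10.36.0.0-10.36.0.255          # IP range format from the parameter description
          - hostname.domain.com            # hostname format from the parameter description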
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_actions_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_actions_info.rst
new file mode 100644
index 000000000..a8138a0e6
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_actions_info.rst
@@ -0,0 +1,121 @@
+.. _ome_alert_policies_actions_info_module:
+
+
+ome_alert_policies_actions_info -- Get information on actions of alert policies.
+================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the information on actions of alert policies for OpenManage Enterprise and OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Get action details of all alert policies.
+ dellemc.openmanage.ome_alert_policies_actions_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+
+
+Return Values
+-------------
+
+actions (success, list, [{'Name': 'Email', 'Description': 'Email', 'Disabled': False, 'ParameterDetails': [{'Id': 1, 'Name': 'subject', 'Value': 'Device Name: $name, Device IP Address: $ip, Severity: $severity', 'Type': 'string', 'TemplateParameterTypeDetails': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 2, 'Name': 'to', 'Value': '', 'Type': 'string', 'TemplateParameterTypeDetails': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 3, 'Name': 'from', 'Value': 'admin1@dell.com', 'Type': 'string', 'TemplateParameterTypeDetails': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 4, 'Name': 'message', 'Value': 'Event occurred for Device Name: $name, Device IP Address: $ip, Service Tag: $identifier, UTC Time: $time, Severity: $severity, Message ID: $messageId, $message', 'Type': 'string', 'TemplateParameterTypeDetails': [{'Name': 'maxLength', 'Value': '255'}]}, {'Id': 60, 'Name': 'Trap', 'Description': 'Trap', 'Disabled': False, 'ParameterDetails': [{'Id': 1, 'Name': 'localhost:162', 'Value': 'true', 'Type': 'boolean', 'TemplateParameterTypeDetails': []}]}, {'Id': 90, 'Name': 'Syslog', 'Description': 'Syslog', 'Disabled': False, 'ParameterDetails': [{'Id': 1, 'Name': 'localhost.scomdev.com:555', 'Value': 'true', 'Type': 'boolean', 'TemplateParameterTypeDetails': []}, {'Id': 2, 'Name': 'localhost.scomdev.com:555', 'Value': 'true', 'Type': 'boolean', 'TemplateParameterTypeDetails': []}]}, {'Id': 100, 'Name': 'Ignore', 'Description': 'Ignore', 'Disabled': False, 'ParameterDetails': []}, {'Id': 70, 'Name': 'SMS', 'Description': 'SMS', 'Disabled': False, 'ParameterDetails': [{'Id': 1, 'Name': 'to', 'Value': '', 'Type': 'string', 'TemplateParameterTypeDetails': [{'Name': 'maxLength', 'Value': '255'}]}]}, {'Id': 110, 'Name': 'PowerControl', 'Description': 'Power Control Action Template', 'Disabled': False, 'ParameterDetails': [{'Id': 1, 'Name': 'powercontrolaction', 'Value': 'poweroff', 'Type': 'singleSelect', 'TemplateParameterTypeDetails': [{'Name': 'option', 'Value': 'powercycle'}, {'Name': 'option', 'Value': 'poweroff'}, {'Name': 'option', 'Value': 'poweron'}, {'Name': 'option', 'Value': 'gracefulshutdown'}]}]}, {'Id': 111, 'Name': 'RemoteCommand', 'Description': 'RemoteCommand', 'Disabled': True, 'ParameterDetails': [{'Id': 1, 'Name': 'remotecommandaction', 'Value': None, 'Type': 'singleSelect', 'TemplateParameterTypeDetails': []}]}, {'Id': 112, 'Name': 'Mobile', 'Description': 'Mobile', 'Disabled': False, 'ParameterDetails': []}]}])
+ Returns the alert policies action information collected from the Device.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+msg (always, str, Successfully retrieved alert policies actions information.)
+ Status of the alert policies actions fetch operation.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Kritika Bhateja (@Kritika-Bhateja-03)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_category_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_category_info.rst
new file mode 100644
index 000000000..20edbe90b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_category_info.rst
@@ -0,0 +1,121 @@
+.. _ome_alert_policies_category_info_module:
+
+
+ome_alert_policies_category_info -- Retrieves information of all OME alert policy categories.
+=============================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module allows you to retrieve all the alert policy categories for OpenManage Enterprise and OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve information about all the OME alert policy categories
+ dellemc.openmanage.ome_alert_policies_category_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved alert policies category information.)
+ Status of the alert policies category fetch operation.
+
+
+categories (always, list, [{'CategoriesDetails': [{'CatalogName': 'Application', 'Id': 5, 'Name': 'Configuration', 'SubCategoryDetails': [{'Description': 'Application', 'Id': 85, 'Name': 'Application'}, {'Description': 'Users', 'Id': 35, 'Name': 'Users'}]}, {'CatalogName': 'Application', 'Id': 7, 'Name': 'Miscellaneous', 'SubCategoryDetails': [{'Description': 'Miscellaneous', 'Id': 20, 'Name': 'Miscellaneous'}]}, {'CatalogName': 'Application', 'Id': 2, 'Name': 'Storage', 'SubCategoryDetails': [{'Description': 'Devices', 'Id': 90, 'Name': 'Devices'}]}, {'CatalogName': 'Application', 'Id': 3, 'Name': 'Updates', 'SubCategoryDetails': [{'Description': 'Application', 'Id': 85, 'Name': 'Application'}, {'Description': 'Firmware', 'Id': 112, 'Name': 'Firmware'}]}], 'IsBuiltIn': True, 'Name': 'Application'}, {'CategoriesDetails': [{'CatalogName': 'Dell Storage', 'Id': 2, 'Name': 'Storage', 'SubCategoryDetails': [{'Description': 'Other', 'Id': 7700, 'Name': 'Other'}]}, {'CatalogName': 'Dell Storage', 'Id': 1, 'Name': 'System Health', 'SubCategoryDetails': [{'Description': 'Other', 'Id': 7700, 'Name': 'Other'}, {'Description': 'Storage', 'Id': 18, 'Name': 'Storage'}]}], 'IsBuiltIn': True, 'Name': 'Dell Storage'}, {'CategoriesDetails': [{'CatalogName': 'iDRAC', 'Id': 4, 'Name': 'Audit', 'SubCategoryDetails': [{'Description': 'Auto System Reset', 'Id': 41, 'Name': 'Auto System Reset'}, {'Description': 'UEFI Event', 'Id': 55, 'Name': 'UEFI Event'}, {'Description': 'User Tracking', 'Id': 56, 'Name': 'User Tracking'}]}, {'CatalogName': 'iDRAC', 'Id': 5, 'Name': 'Configuration', 'SubCategoryDetails': [{'Description': 'Auto-Discovery', 'Id': 49, 'Name': 'Auto-Discovery'}, {'Description': 'vFlash Event', 'Id': 66, 'Name': 'vFlash Event'}, {'Description': 'Virtual Console', 'Id': 7, 'Name': 'Virtual Console'}]}, {'CatalogName': 'iDRAC', 'Id': 2, 'Name': 'Storage', 'SubCategoryDetails': [{'Description': 'Battery Event', 'Id': 108, 'Name': 'Battery Event'}, {'Description': 'Virtual Disk', 'Id': 46, 'Name': 'Virtual Disk'}]}, {'CatalogName': 'iDRAC', 'Id': 1, 'Name': 'System Health', 'SubCategoryDetails': [{'Description': 'Amperage', 'Id': 67, 'Name': 'Amperage'}, {'Description': 'Auto System Reset', 'Id': 41, 'Name': 'Auto System Reset'}, {'Description': 'Voltage', 'Id': 40, 'Name': 'Voltage'}]}, {'CatalogName': 'iDRAC', 'Id': 6, 'Name': 'Work Notes', 'SubCategoryDetails': [{'Description': 'BIOS Management', 'Id': 54, 'Name': 'BIOS Management'}]}], 'IsBuiltIn': True, 'Name': 'iDRAC'}])
+ Information about the alert categories.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1234', 'RelatedProperties': [], 'Message': 'Unable to complete the request because the resource URI does not exist or is not implemented.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Jagadeesh N V(@jagadeeshnv)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_info.rst
new file mode 100644
index 000000000..8dbe503d8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_info.rst
@@ -0,0 +1,137 @@
+.. _ome_alert_policies_info_module:
+
+
+ome_alert_policies_info -- Retrieves information of one or more OME alert policies.
+===================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the information of alert policies for OpenManage Enterprise and OpenManage Enterprise Modular.
+
+It returns information about a specific OME alert policy when the policy name is provided.
+
+It returns all the OME alert policies with their information when the policy name is not provided.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ policy_name (optional, str, None)
+ Name of the policy.
+
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve information about all OME alert policies.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+ - name: Retrieve information about a specific OME alert policy using the policy name.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ policy_name: "Mobile Push Notification - Critical Alerts"
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved all the OME alert policies information.)
+ Status of the alert policies info fetch operation.
+
+
+policies (success, list, [{'Id': 10006, 'Name': 'Mobile Push Notification - Critical Alerts', 'Description': 'This policy is applicable to critical alerts. Associated actions will be taken when a critical alert is received.', 'Enabled': True, 'DefaultPolicy': True, 'PolicyData': {'Catalogs': [], 'Severities': [16], 'MessageIds': [], 'Devices': [], 'DeviceTypes': [], 'Groups': [], 'AllTargets': False, 'Schedule': {'StartTime': None, 'EndTime': None, 'CronString': None, 'Interval': False}, 'Actions': [{'Id': 5, 'Name': 'Mobile', 'ParameterDetails': [], 'TemplateId': 112}], 'UndiscoveredTargets': []}, 'State': True, 'Visible': True, 'Owner': None}])
+ Retrieve information about all the OME alert policies.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Abhishek Sinha(@ABHISHEK-SINHA10)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_message_id_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_message_id_info.rst
new file mode 100644
index 000000000..1be11ecbb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_alert_policies_message_id_info.rst
@@ -0,0 +1,121 @@
+.. _ome_alert_policies_message_id_info_module:
+
+
+ome_alert_policies_message_id_info -- Get message ID information of alert policies.
+===================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the message ID information of alert policies for OpenManage Enterprise and OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports ``check_mode``.
+ - This module supports IPv4 and IPv6 addresses.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Get message ID details of all alert policies
+ dellemc.openmanage.ome_alert_policies_message_id_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved alert policies message ids information.)
+ Status of the alert policies message ids fetch operation.
+
+
+message_ids (success, dict, [{'Category': 'System Health', 'DetailedDescription': 'The current sensor identified in the message has failed. This condition can cause system performance issues and degradation in the monitoring capability of the system.', 'Message': 'The ${0} sensor has failed, and the last recorded value by the sensor was ${1} A.', 'MessageId': 'AMP400', 'Prefix': 'AMP', 'RecommendedAction': 'Check the Embedded System Management (ESM) Log for any sensor related faults. If there is a failed sensor, replace the system board. For more information, contact your service provider.', 'SequenceNo': 400, 'Severity': 'Critical', 'SubCategory': 'Amperage'}, {'Category': 'System Health', 'DetailedDescription': 'The current sensor identified in the message has failed. This condition can cause system performance issues and degradation in the monitoring capability of the system.', 'Message': 'Unable to read the ${0} sensor value.', 'MessageId': 'AMP401', 'Prefix': 'AMP', 'RecommendedAction': 'Check the Embedded System Management (ESM) Log for any sensor related faults. If there is a failed sensor, replace the system board. For more information, contact your service provider.', 'SequenceNo': 401, 'Severity': 'Warning', 'SubCategory': 'Amperage'}])
+ Details of the message ids.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Shivam Sharma (@ShivamSh3)
+
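The message IDs returned by this module are the values that the *message_ids* option of ome_alert_policies expects. A minimal sketch of capturing the result for later use, assuming the registered variable exposes the ``message_ids`` return key documented above:

.. code-block:: yaml+jinja

    ---
    - name: Get message ID details of all alert policies
      dellemc.openmanage.ome_alert_policies_message_id_info:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
      register: policy_message_ids

    - name: Show the retrieved message IDs
      ansible.builtin.debug:
        var: policy_message_ids.message_ids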
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst
index 2b34f1cdd..ce2b05be0 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_smtp.rst
@@ -42,9 +42,9 @@ Parameters
enable_authentication (True, bool, None)
Enable or disable authentication to access the SMTP server.
- The *credentials* are mandatory if *enable_authentication* is ``True``.
+ The *credentials* are mandatory if *enable_authentication* is ``true``.
- The module will always report change when this is ``True``.
+ The module will always report change when this is ``true``.
credentials (optional, dict, None)
@@ -77,11 +77,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -99,8 +99,8 @@ Notes
-----
.. note::
- - The module will always report change when *enable_authentication* is ``True``.
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - The module will always report change when *enable_authentication* is ``true``.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- This module support ``check_mode``.
@@ -146,11 +146,11 @@ msg (always, str, Successfully updated the SMTP settings.)
Overall status of the SMTP settings update.
-smtp_details (success, dict, AnsibleMapping([('DestinationAddress', 'localhost'), ('PortNumber', 25), ('UseCredentials', True), ('UseSSL', False), ('Credential', AnsibleMapping([('User', 'admin'), ('Password', None)]))]))
+smtp_details (success, dict, {'DestinationAddress': 'localhost', 'PortNumber': 25, 'UseCredentials': True, 'UseSSL': False, 'Credential': {'User': 'admin', 'Password': None}})
returned when SMTP settings are updated successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CAPP1106'), ('RelatedProperties', []), ('Message', 'Unable to update the SMTP settings because the entered credential is invalid or empty.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Either enter valid credentials or disable the Use Credentials option and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CAPP1106', 'RelatedProperties': [], 'Message': 'Unable to update the SMTP settings because the entered credential is invalid or empty.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Either enter valid credentials or disable the Use Credentials option and retry the operation.'}]}})
Details of the HTTP Error.
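
The *enable_authentication* and *credentials* dependency described above is easiest to see in a playbook task. The sketch below is illustrative only: the destination_address, port_number, and use_ssl option names and the credentials sub-keys are assumptions inferred from the returned smtp_details, so verify them against the module's parameter table.

- name: Configure SMTP alert forwarding with authentication
  dellemc.openmanage.ome_application_alerts_smtp:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    destination_address: "localhost"   # assumed option name, mirrors DestinationAddress
    port_number: 25                    # assumed option name, mirrors PortNumber
    use_ssl: false                     # assumed option name, mirrors UseSSL
    enable_authentication: true        # makes credentials mandatory
    credentials:
      username: "admin"                # assumed sub-key names
      password: "password"

Because the module always reports a change when *enable_authentication* is ``true``, a task like this will never report ``ok`` on repeated runs.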
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst
index d741e1671..7fddd6e13 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_alerts_syslog.rst
@@ -42,7 +42,7 @@ Parameters
destination_address (optional, str, None)
The IP address, FQDN or hostname of the syslog server.
- This is required if *enabled* is ``True``.
+ This is required if *enabled* is ``true``.
port_number (optional, int, None)
@@ -67,11 +67,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -89,7 +89,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -142,11 +142,11 @@ msg (always, str, Successfully updated the syslog forwarding settings.)
Overall status of the syslog forwarding operation.
-syslog_details (on success, list, [AnsibleMapping([('DestinationAddress', '192.168.10.43'), ('Enabled', False), ('Id', 1), ('PortNumber', 514)]), AnsibleMapping([('DestinationAddress', '192.168.10.46'), ('Enabled', True), ('Id', 2), ('PortNumber', 514)]), AnsibleMapping([('DestinationAddress', '192.168.10.44'), ('Enabled', True), ('Id', 3), ('PortNumber', 514)]), AnsibleMapping([('DestinationAddress', '192.168.10.42'), ('Enabled', True), ('Id', 4), ('PortNumber', 515)])])
+syslog_details (on success, list, [{'DestinationAddress': '192.168.10.43', 'Enabled': False, 'Id': 1, 'PortNumber': 514}, {'DestinationAddress': '192.168.10.46', 'Enabled': True, 'Id': 2, 'PortNumber': 514}, {'DestinationAddress': '192.168.10.44', 'Enabled': True, 'Id': 3, 'PortNumber': 514}, {'DestinationAddress': '192.168.10.42', 'Enabled': True, 'Id': 4, 'PortNumber': 515}])
Syslog forwarding settings list applied.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CAPP1108'), ('RelatedProperties', []), ('Message', 'Unable to update the Syslog settings because the request contains an invalid number of configurations. The request must contain no more than 4 configurations but contains 5.'), ('MessageArgs', ['4', '5']), ('Severity', 'Warning'), ('Resolution', 'Enter only the required number of configurations as identified in the message and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CAPP1108', 'RelatedProperties': [], 'Message': 'Unable to update the Syslog settings because the request contains an invalid number of configurations. The request must contain no more than 4 configurations but contains 5.', 'MessageArgs': ['4', '5'], 'Severity': 'Warning', 'Resolution': 'Enter only the required number of configurations as identified in the message and retry the operation.'}]}})
Details of the HTTP Error.
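
To make the *enabled* and *destination_address* dependency concrete, here is a hedged sketch of enabling one forwarding destination; the syslog_servers list wrapper and the id key are assumptions based on the syslog_details output above.

- name: Enable syslog forwarding to a single destination
  dellemc.openmanage.ome_application_alerts_syslog:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    syslog_servers:                          # assumed wrapper for per-server settings
      - id: 1                                # assumed key, mirrors Id in syslog_details
        enabled: true                        # destination_address is required when true
        destination_address: "192.168.10.43"
        port_number: 514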
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst
index e4fbec1cc..d3c8a2a0d 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_certificate.rst
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python >= 3.9.6
@@ -59,6 +59,12 @@ Parameters
Email associated with the issuer. This option is applicable for ``generate_csr``.
+ subject_alternative_names (optional, str, None)
+ Subject alternative names required for certificate signing request generation.
+
+ Supports up to 4 comma-separated values, specified in order as primary, secondary, tertiary, and quaternary.
+
+
upload_file (optional, str, None)
Local path of the certificate file to be uploaded. This option is applicable for ``upload``. Once the certificate is uploaded, OpenManage Enterprise cannot be accessed for a few seconds.
@@ -80,11 +86,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -130,6 +136,22 @@ Examples
country: "US"
email: "support@dell.com"
+ - name: Generate a certificate signing request with subject alternative names
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ distinguished_name: "hostname.com"
+ subject_alternative_names: "hostname1.chassis.com,hostname2.chassis.com"
+ department_name: "Remote Access Group"
+ business_name: "Dell Inc."
+ locality: "Round Rock"
+ country_state: "Texas"
+ country: "US"
+ email: "support@dell.com"
+
- name: Upload the certificate
dellemc.openmanage.ome_application_certificate:
hostname: "192.168.0.1"
@@ -148,11 +170,11 @@ msg (always, str, Successfully generated certificate signing request.)
Overall status of the certificate signing request.
-csr_status (on success, dict, AnsibleMapping([('CertificateData', '-----BEGIN CERTIFICATE REQUEST-----GHFSUEKLELE af3u4h2rkdkfjasczjfefhkrr/frjrfrjfrxnvzklf/nbcvxmzvndlskmcvbmzkdk kafhaksksvklhfdjtrhhffgeth/tashdrfstkm@kdjFGD/sdlefrujjfvvsfeikdf yeufghdkatbavfdomehtdnske/tahndfavdtdfgeikjlagmdfbandfvfcrfgdtwxc qwgfrteyupojmnsbajdkdbfs/ujdfgthedsygtamnsuhakmanfuarweyuiwruefjr etwuwurefefgfgurkjkdmbvfmvfvfk==-----END CERTIFICATE REQUEST-----')]))
+csr_status (on success, dict, {'CertificateData': '-----BEGIN CERTIFICATE REQUEST-----GHFSUEKLELE af3u4h2rkdkfjasczjfefhkrr/frjrfrjfrxnvzklf/nbcvxmzvndlskmcvbmzkdk kafhaksksvklhfdjtrhhffgeth/tashdrfstkm@kdjFGD/sdlefrujjfvvsfeikdf yeufghdkatbavfdomehtdnske/tahndfavdtdfgeikjlagmdfbandfvfcrfgdtwxc qwgfrteyupojmnsbajdkdbfs/ujdfgthedsygtamnsuhakmanfuarweyuiwruefjr etwuwurefefgfgurkjkdmbvfmvfvfk==-----END CERTIFICATE REQUEST-----'})
Details of the generated certificate.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CSEC9002'), ('RelatedProperties', []), ('Message', 'Unable to upload the certificate because the certificate file provided is invalid.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Make sure the CA certificate and private key are correct and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CSEC9002', 'RelatedProperties': [], 'Message': 'Unable to upload the certificate because the certificate file provided is invalid.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Make sure the CA certificate and private key are correct and retry the operation.'}]}})
Details of the HTTP error.
@@ -170,4 +192,6 @@ Authors
~~~~~~~
- Felix Stephen (@felixs88)
+- Kritika Bhateja (@Kritika-Bhateja-03)
+- Jennifer John (@Jennifer-John)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst
index b30d66523..79ce3f7d4 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_console_preferences.rst
@@ -166,11 +166,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -218,7 +218,7 @@ Examples
common_mac_addresses: "::"
server_initiated_discovery:
device_discovery_approval_policy: Automatic
- set_trap_destination: True
+ set_trap_destination: true
mx7000_onboarding_preferences: all
builtin_appliance_share:
share_options: CIFS
@@ -273,7 +273,7 @@ Examples
ca_path: "/path/to/ca_cert.pem"
server_initiated_discovery:
device_discovery_approval_policy: Automatic
- set_trap_destination: True
+ set_trap_destination: true
mx7000_onboarding_preferences: chassis
email_sender_settings: "admin@dell.com"
trap_forwarding_format: Original
@@ -288,11 +288,11 @@ msg (always, str, Successfully update the console preferences.)
Overall status of the console preferences.
-console_preferences (on success, list, [AnsibleMapping([('Name', 'DEVICE_PREFERRED_NAME'), ('DefaultValue', 'SLOT_NAME'), ('Value', 'PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME'), ('DataType', 'java.lang.String'), ('GroupName', 'DISCOVERY_SETTING')]), AnsibleMapping([('Name', 'INVALID_DEVICE_HOSTNAME'), ('DefaultValue', ''), ('Value', 'localhost,localhost.localdomain,not defined,pv132t,pv136t,default,dell,idrac-'), ('DataType', 'java.lang.String'), ('GroupName', 'DISCOVERY_SETTING')]), AnsibleMapping([('Name', 'COMMON_MAC_ADDRESSES'), ('DefaultValue', ''), ('Value', '00:53:45:00:00:00,33:50:6F:45:30:30,50:50:54:50:30:30,00:00:FF:FF:FF:FF,20:41:53:59:4E:FF,00:00:00:00:00:00,20:41:53:59:4e:ff,00:00:00:00:00:00'), ('DataType', 'java.lang.String'), ('GroupName', 'DISCOVERY_SETTING')]), AnsibleMapping([('Name', 'SHARE_TYPE'), ('DefaultValue', 'CIFS'), ('Value', 'CIFS'), ('DataType', 'java.lang.String'), ('GroupName', 'BUILT_IN_APPLIANCE_SHARE_SETTINGS')]), AnsibleMapping([('Name', 'TRAP_FORWARDING_SETTING'), ('DefaultValue', 'AsIs'), ('Value', 'Normalized'), ('DataType', 'java.lang.String'), ('GroupName', '')]), AnsibleMapping([('Name', 'DATA_PURGE_INTERVAL'), ('DefaultValue', '365'), ('Value', '3650000'), ('DataType', 'java.lang.Integer'), ('GroupName', '')]), AnsibleMapping([('Name', 'CONSOLE_CONNECTION_SETTING'), ('DefaultValue', 'last_known'), ('Value', 'last_known'), ('DataType', 'java.lang.String'), ('GroupName', 'CONSOLE_CONNECTION_SETTING')]), AnsibleMapping([('Name', 'MIN_PROTOCOL_VERSION'), ('DefaultValue', 'V2'), ('Value', 'V1'), ('DataType', 'java.lang.String'), ('GroupName', 'CIFS_PROTOCOL_SETTINGS')]), AnsibleMapping([('Name', 'ALERT_ACKNOWLEDGEMENT_VIEW'), ('DefaultValue', '2000'), ('Value', '2000'), ('DataType', 'java.lang.Integer'), ('GroupName', '')]), AnsibleMapping([('Name', 'AUTO_CONSOLE_UPDATE_AFTER_DOWNLOAD'), ('DefaultValue', 'false'), ('Value', 'false'), ('DataType', 'java.lang.Boolean'), ('GroupName', 'CONSOLE_UPDATE_SETTING_GROUP')]), AnsibleMapping([('Name', 'NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION'), ('DefaultValue', 'false'), ('Value', 'false'), ('DataType', 'java.lang.Boolean'), ('GroupName', '')]), AnsibleMapping([('Name', 'REPORTS_MAX_RESULTS_LIMIT'), ('DefaultValue', '0'), ('Value', '2000000000000000000000000'), ('DataType', 'java.lang.Integer'), ('GroupName', '')]), AnsibleMapping([('Name', 'EMAIL_SENDER'), ('DefaultValue', 'omcadmin@dell.com'), ('Value', 'admin1@dell.com@dell.com@dell.com'), ('DataType', 'java.lang.String'), ('GroupName', '')]), AnsibleMapping([('Name', 'MX7000_ONBOARDING_PREF'), ('DefaultValue', 'all'), ('Value', 'test_chassis'), ('DataType', 'java.lang.String'), ('GroupName', '')]), AnsibleMapping([('Name', 'DISCOVERY_APPROVAL_POLICY'), ('DefaultValue', 'Automatic'), ('Value', 'Automatic_test'), ('DataType', 'java.lang.String'), ('GroupName', '')])])
+console_preferences (on success, list, [{'Name': 'DEVICE_PREFERRED_NAME', 'DefaultValue': 'SLOT_NAME', 'Value': 'PREFER_DNS,PREFER_IDRAC_SYSTEM_HOSTNAME', 'DataType': 'java.lang.String', 'GroupName': 'DISCOVERY_SETTING'}, {'Name': 'INVALID_DEVICE_HOSTNAME', 'DefaultValue': '', 'Value': 'localhost,localhost.localdomain,not defined,pv132t,pv136t,default,dell,idrac-', 'DataType': 'java.lang.String', 'GroupName': 'DISCOVERY_SETTING'}, {'Name': 'COMMON_MAC_ADDRESSES', 'DefaultValue': '', 'Value': '00:53:45:00:00:00,33:50:6F:45:30:30,50:50:54:50:30:30,00:00:FF:FF:FF:FF,20:41:53:59:4E:FF,00:00:00:00:00:00,20:41:53:59:4e:ff,00:00:00:00:00:00', 'DataType': 'java.lang.String', 'GroupName': 'DISCOVERY_SETTING'}, {'Name': 'SHARE_TYPE', 'DefaultValue': 'CIFS', 'Value': 'CIFS', 'DataType': 'java.lang.String', 'GroupName': 'BUILT_IN_APPLIANCE_SHARE_SETTINGS'}, {'Name': 'TRAP_FORWARDING_SETTING', 'DefaultValue': 'AsIs', 'Value': 'Normalized', 'DataType': 'java.lang.String', 'GroupName': ''}, {'Name': 'DATA_PURGE_INTERVAL', 'DefaultValue': '365', 'Value': '3650000', 'DataType': 'java.lang.Integer', 'GroupName': ''}, {'Name': 'CONSOLE_CONNECTION_SETTING', 'DefaultValue': 'last_known', 'Value': 'last_known', 'DataType': 'java.lang.String', 'GroupName': 'CONSOLE_CONNECTION_SETTING'}, {'Name': 'MIN_PROTOCOL_VERSION', 'DefaultValue': 'V2', 'Value': 'V1', 'DataType': 'java.lang.String', 'GroupName': 'CIFS_PROTOCOL_SETTINGS'}, {'Name': 'ALERT_ACKNOWLEDGEMENT_VIEW', 'DefaultValue': '2000', 'Value': '2000', 'DataType': 'java.lang.Integer', 'GroupName': ''}, {'Name': 'AUTO_CONSOLE_UPDATE_AFTER_DOWNLOAD', 'DefaultValue': 'false', 'Value': 'false', 'DataType': 'java.lang.Boolean', 'GroupName': 'CONSOLE_UPDATE_SETTING_GROUP'}, {'Name': 'NODE_INITIATED_DISCOVERY_SET_TRAP_DESTINATION', 'DefaultValue': 'false', 'Value': 'false', 'DataType': 'java.lang.Boolean', 'GroupName': ''}, {'Name': 'REPORTS_MAX_RESULTS_LIMIT', 'DefaultValue': '0', 'Value': '2000000000000000000000000', 'DataType': 'java.lang.Integer', 'GroupName': ''}, {'Name': 'EMAIL_SENDER', 'DefaultValue': 'omcadmin@dell.com', 'Value': 'admin1@dell.com@dell.com@dell.com', 'DataType': 'java.lang.String', 'GroupName': ''}, {'Name': 'MX7000_ONBOARDING_PREF', 'DefaultValue': 'all', 'Value': 'test_chassis', 'DataType': 'java.lang.String', 'GroupName': ''}, {'Name': 'DISCOVERY_APPROVAL_POLICY', 'DefaultValue': 'Automatic', 'Value': 'Automatic_test', 'DataType': 'java.lang.String', 'GroupName': ''}])
Details of the console preferences.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1006'), ('RelatedProperties', []), ('Message', 'Unable to complete the request because the resource URI does not exist or is not implemented.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Enter a valid URI and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1006', 'RelatedProperties': [], 'Message': 'Unable to complete the request because the resource URI does not exist or is not implemented.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Enter a valid URI and retry the operation.'}]}})
Details of the HTTP error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst
index c3e3228b8..c5931f2c6 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_address.rst
@@ -214,11 +214,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -365,15 +365,15 @@ msg (always, str, Successfully updated network address configuration)
Overall status of the network address configuration change.
-network_configuration (on success, dict, AnsibleMapping([('Delay', 0), ('DnsConfiguration', AnsibleMapping([('DnsDomainName', ''), ('DnsName', 'MX-SVCTAG'), ('RegisterWithDNS', False), ('UseDHCPForDNSDomainName', True)])), ('EnableNIC', True), ('InterfaceName', 'eth0'), ('PrimaryInterface', True), ('Ipv4Configuration', AnsibleMapping([('Enable', True), ('EnableDHCP', False), ('StaticAlternateDNSServer', ''), ('StaticGateway', '192.168.0.2'), ('StaticIPAddress', '192.168.0.3'), ('StaticPreferredDNSServer', '192.168.0.4'), ('StaticSubnetMask', '255.255.254.0'), ('UseDHCPForDNSServerNames', False)])), ('Ipv6Configuration', AnsibleMapping([('Enable', True), ('EnableAutoConfiguration', True), ('StaticAlternateDNSServer', ''), ('StaticGateway', ''), ('StaticIPAddress', ''), ('StaticPreferredDNSServer', ''), ('StaticPrefixLength', 0), ('UseDHCPForDNSServerNames', True)])), ('ManagementVLAN', AnsibleMapping([('EnableVLAN', False), ('Id', 1)]))]))
+network_configuration (on success, dict, {'Delay': 0, 'DnsConfiguration': {'DnsDomainName': '', 'DnsName': 'MX-SVCTAG', 'RegisterWithDNS': False, 'UseDHCPForDNSDomainName': True}, 'EnableNIC': True, 'InterfaceName': 'eth0', 'PrimaryInterface': True, 'Ipv4Configuration': {'Enable': True, 'EnableDHCP': False, 'StaticAlternateDNSServer': '', 'StaticGateway': '192.168.0.2', 'StaticIPAddress': '192.168.0.3', 'StaticPreferredDNSServer': '192.168.0.4', 'StaticSubnetMask': '255.255.254.0', 'UseDHCPForDNSServerNames': False}, 'Ipv6Configuration': {'Enable': True, 'EnableAutoConfiguration': True, 'StaticAlternateDNSServer': '', 'StaticGateway': '', 'StaticIPAddress': '', 'StaticPreferredDNSServer': '', 'StaticPrefixLength': 0, 'UseDHCPForDNSServerNames': True}, 'ManagementVLAN': {'EnableVLAN': False, 'Id': 1}})
Updated application network address configuration.
-job_info (on success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'system'), ('Editable', True), ('EndTime', None), ('Id', 14902), ('JobDescription', 'Generic OME runtime task'), ('JobName', 'OMERealtime_Task'), ('JobStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 207), ('Internal', True), ('Name', 'OMERealtime_Task')])), ('LastRun', None), ('LastRunStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('NextRun', None), ('Params', [AnsibleMapping([('JobId', 14902), ('Key', 'Nmcli_Update'), ('Value', '{"interfaceName":"eth0","profileName":"eth0","enableNIC":true, "ipv4Configuration":{"enable":true,"enableDHCP":true,"staticIPAddress":"", "staticSubnetMask":"","staticGateway":"","useDHCPForDNSServerNames":true, "staticPreferredDNSServer":"","staticAlternateDNSServer":""}, "ipv6Configuration":{"enable":false,"enableAutoConfiguration":true,"staticIPAddress":"", "staticPrefixLength":0,"staticGateway":"","useDHCPForDNSServerNames":false, "staticPreferredDNSServer":"","staticAlternateDNSServer":""}, "managementVLAN":{"enableVLAN":false,"id":0},"dnsConfiguration":{"registerWithDNS":false, "dnsName":"","useDHCPForDNSDomainName":false,"dnsDomainName":"","fqdndomainName":"", "ipv4CurrentPreferredDNSServer":"","ipv4CurrentAlternateDNSServer":"", "ipv6CurrentPreferredDNSServer":"","ipv6CurrentAlternateDNSServer":""}, "currentSettings":{"ipv4Address":[],"ipv4Gateway":"","ipv4Dns":[],"ipv4Domain":"", "ipv6Address":[],"ipv6LinkLocalAddress":"","ipv6Gateway":"","ipv6Dns":[], "ipv6Domain":""},"delay":0,"primaryInterface":true,"modifiedConfigs":{}}')])]), ('Schedule', 'startnow'), ('StartTime', None), ('State', 'Enabled'), ('Targets', []), ('UpdatedBy', None), ('Visible', True)]))
+job_info (on success, dict, {'Builtin': False, 'CreatedBy': 'system', 'Editable': True, 'EndTime': None, 'Id': 14902, 'JobDescription': 'Generic OME runtime task', 'JobName': 'OMERealtime_Task', 'JobStatus': {'Id': 2080, 'Name': 'New'}, 'JobType': {'Id': 207, 'Internal': True, 'Name': 'OMERealtime_Task'}, 'LastRun': None, 'LastRunStatus': {'Id': 2080, 'Name': 'New'}, 'NextRun': None, 'Params': [{'JobId': 14902, 'Key': 'Nmcli_Update', 'Value': '{"interfaceName":"eth0","profileName":"eth0","enableNIC":true, "ipv4Configuration":{"enable":true,"enableDHCP":true,"staticIPAddress":"", "staticSubnetMask":"","staticGateway":"","useDHCPForDNSServerNames":true, "staticPreferredDNSServer":"","staticAlternateDNSServer":""}, "ipv6Configuration":{"enable":false,"enableAutoConfiguration":true,"staticIPAddress":"", "staticPrefixLength":0,"staticGateway":"","useDHCPForDNSServerNames":false, "staticPreferredDNSServer":"","staticAlternateDNSServer":""}, "managementVLAN":{"enableVLAN":false,"id":0},"dnsConfiguration":{"registerWithDNS":false, "dnsName":"","useDHCPForDNSDomainName":false,"dnsDomainName":"","fqdndomainName":"", "ipv4CurrentPreferredDNSServer":"","ipv4CurrentAlternateDNSServer":"", "ipv6CurrentPreferredDNSServer":"","ipv6CurrentAlternateDNSServer":""}, "currentSettings":{"ipv4Address":[],"ipv4Gateway":"","ipv4Dns":[],"ipv4Domain":"", "ipv6Address":[],"ipv6LinkLocalAddress":"","ipv6Gateway":"","ipv6Dns":[], "ipv6Domain":""},"delay":0,"primaryInterface":true,"modifiedConfigs":{}}'}], 'Schedule': 'startnow', 'StartTime': None, 'State': 'Enabled', 'Targets': [], 'UpdatedBy': None, 'Visible': True})
Details of the update job when the OME version is >= 3.3.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to update the address configuration because a dependent field is missing for Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration .'), ('MessageArgs', ['Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration']), ('MessageId', 'CAPP1304'), ('RelatedProperties', []), ('Resolution', 'Make sure that all dependent fields contain valid content and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on HTTP error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to update the address configuration because a dependent field is missing for Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration .', 'MessageArgs': ['Use DHCP for DNS Domain Name, Enable DHCP for ipv4 or Enable Autoconfig for ipv6 settings for valid configuration'], 'MessageId': 'CAPP1304', 'RelatedProperties': [], 'Resolution': 'Make sure that all dependent fields contain valid content and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the HTTP error.
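
The returned network_configuration above hints at the shape of the module input. The following static IPv4 sketch is assumption-heavy: enable_nic, interface_name, and the nested ipv4_configuration keys are inferred from the returned field names, not confirmed by this diff.

- name: Assign a static IPv4 address to the primary interface
  dellemc.openmanage.ome_application_network_address:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    enable_nic: true                         # assumed option, mirrors EnableNIC
    interface_name: "eth0"                   # assumed option, mirrors InterfaceName
    ipv4_configuration:                      # assumed nested option, mirrors Ipv4Configuration
      enable: true
      enable_dhcp: false
      static_ip_address: "192.168.0.3"
      static_subnet_mask: "255.255.254.0"
      static_gateway: "192.168.0.2"
      static_preferred_dns_server: "192.168.0.4"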
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst
index 2c5d1bd04..ed48d90f6 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_proxy.rst
@@ -82,11 +82,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -104,7 +104,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support ``check_mode``.
@@ -158,11 +158,11 @@ msg (always, str, Successfully updated network proxy configuration.)
Overall status of the network proxy configuration change.
-proxy_configuration (success, dict, AnsibleMapping([('EnableAuthentication', True), ('EnableProxy', True), ('IpAddress', '192.168.0.2'), ('Password', None), ('PortNumber', 444), ('Username', 'root')]))
+proxy_configuration (success, dict, {'EnableAuthentication': True, 'EnableProxy': True, 'IpAddress': '192.168.0.2', 'Password': None, 'PortNumber': 444, 'Username': 'root'})
Updated application network proxy configuration.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because the input value for PortNumber is missing or an invalid value is entered.'), ('MessageArgs', ['PortNumber']), ('MessageId', 'CGEN6002'), ('RelatedProperties', []), ('Resolution', 'Enter a valid value and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on HTTP error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the request because the input value for PortNumber is missing or an invalid value is entered.', 'MessageArgs': ['PortNumber'], 'MessageId': 'CGEN6002', 'RelatedProperties': [], 'Resolution': 'Enter a valid value and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the HTTP error.
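
A short sketch of a proxy update matching the proxy_configuration keys shown above; enable_proxy, ip_address, proxy_port, proxy_username, and proxy_password are assumed option names derived from those return fields.

- name: Update the appliance proxy settings
  dellemc.openmanage.ome_application_network_proxy:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    enable_proxy: true                       # assumed option, mirrors EnableProxy
    ip_address: "192.168.0.2"                # assumed option, mirrors IpAddress
    proxy_port: 444                          # assumed option, mirrors PortNumber
    enable_authentication: true              # assumed option, mirrors EnableAuthentication
    proxy_username: "root"                   # assumed option, mirrors Username
    proxy_password: "password"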
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst
index ab6302099..1cc02239c 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_settings.rst
@@ -109,11 +109,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -131,7 +131,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- To configure other network settings such as network address, web server, and so on, refer to the respective OpenManage Enterprise application network setting modules.
- This module supports ``check_mode``.
@@ -213,11 +213,11 @@ msg (always, str, Successfully updated the session timeout settings.)
Overall status of the Session timeout settings.
-session_inactivity_setting (success, dict, [AnsibleMapping([('SessionType', 'API'), ('MaxSessions', 32), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 86400000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 100), ('MaxSessionsConfigurable', True), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'GUI'), ('MaxSessions', 6), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 7200000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 6), ('MaxSessionsConfigurable', True), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'SSH'), ('MaxSessions', 4), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 10800000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 4), ('MaxSessionsConfigurable', True), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'Serial'), ('MaxSessions', 1), ('SessionTimeout', 99600), ('MinSessionTimeout', 60000), ('MaxSessionTimeout', 86400000), ('MinSessionsAllowed', 1), ('MaxSessionsAllowed', 1), ('MaxSessionsConfigurable', False), ('SessionTimeoutConfigurable', True)]), AnsibleMapping([('SessionType', 'UniversalTimeout'), ('MaxSessions', 0), ('SessionTimeout', -1), ('MinSessionTimeout', -1), ('MaxSessionTimeout', 86400000), ('MinSessionsAllowed', 0), ('MaxSessionsAllowed', 0), ('MaxSessionsConfigurable', False), ('SessionTimeoutConfigurable', True)])])
+session_inactivity_setting (success, dict, [{'SessionType': 'API', 'MaxSessions': 32, 'SessionTimeout': 99600, 'MinSessionTimeout': 60000, 'MaxSessionTimeout': 86400000, 'MinSessionsAllowed': 1, 'MaxSessionsAllowed': 100, 'MaxSessionsConfigurable': True, 'SessionTimeoutConfigurable': True}, {'SessionType': 'GUI', 'MaxSessions': 6, 'SessionTimeout': 99600, 'MinSessionTimeout': 60000, 'MaxSessionTimeout': 7200000, 'MinSessionsAllowed': 1, 'MaxSessionsAllowed': 6, 'MaxSessionsConfigurable': True, 'SessionTimeoutConfigurable': True}, {'SessionType': 'SSH', 'MaxSessions': 4, 'SessionTimeout': 99600, 'MinSessionTimeout': 60000, 'MaxSessionTimeout': 10800000, 'MinSessionsAllowed': 1, 'MaxSessionsAllowed': 4, 'MaxSessionsConfigurable': True, 'SessionTimeoutConfigurable': True}, {'SessionType': 'Serial', 'MaxSessions': 1, 'SessionTimeout': 99600, 'MinSessionTimeout': 60000, 'MaxSessionTimeout': 86400000, 'MinSessionsAllowed': 1, 'MaxSessionsAllowed': 1, 'MaxSessionsConfigurable': False, 'SessionTimeoutConfigurable': True}, {'SessionType': 'UniversalTimeout', 'MaxSessions': 0, 'SessionTimeout': -1, 'MinSessionTimeout': -1, 'MaxSessionTimeout': 86400000, 'MinSessionsAllowed': 0, 'MaxSessionsAllowed': 0, 'MaxSessionsConfigurable': False, 'SessionTimeoutConfigurable': True}])
Returned when session inactivity timeout settings are updated successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CUSR1233'), ('RelatedProperties', []), ('Message', 'The number of allowed concurrent sessions for API must be between 1 and 100 sessions.'), ('MessageArgs', ['API', '1', '100']), ('Severity', 'Critical'), ('Resolution', 'Enter values in the correct range and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CUSR1233', 'RelatedProperties': [], 'Message': 'The number of allowed concurrent sessions for API must be between 1 and 100 sessions.', 'MessageArgs': ['API', '1', '100'], 'Severity': 'Critical', 'Resolution': 'Enter values in the correct range and retry the operation.'}]}})
Details of the HTTP Error.
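
The session_inactivity_setting list above (API, GUI, SSH, Serial, UniversalTimeout) maps onto a single settings update. The sketch below is speculative: the session_inactivity_timeout wrapper, its child option names, and the units of the timeout values are assumptions, so confirm them before use.

- name: Update session inactivity limits
  dellemc.openmanage.ome_application_network_settings:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    session_inactivity_timeout:              # assumed wrapper option
      api_timeout: 30                        # assumed option names and units
      api_sessions: 32
      gui_timeout: 30
      gui_sessions: 6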
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst
index 6c884a154..e0f939646 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_time.rst
@@ -82,11 +82,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -104,7 +104,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -148,11 +148,11 @@ msg (always, str, Successfully configured network time.)
Overall status of the network time configuration change.
-proxy_configuration (success, dict, AnsibleMapping([('EnableNTP', False), ('JobId', None), ('PrimaryNTPAddress', None), ('SecondaryNTPAddress1', None), ('SecondaryNTPAddress2', None), ('SystemTime', None), ('TimeSource', 'Local Clock'), ('TimeZone', 'TZ_ID_1'), ('TimeZoneIdLinux', None), ('TimeZoneIdWindows', None), ('UtcTime', None)]))
+proxy_configuration (success, dict, {'EnableNTP': False, 'JobId': None, 'PrimaryNTPAddress': None, 'SecondaryNTPAddress1': None, 'SecondaryNTPAddress2': None, 'SystemTime': None, 'TimeSource': 'Local Clock', 'TimeZone': 'TZ_ID_1', 'TimeZoneIdLinux': None, 'TimeZoneIdWindows': None, 'UtcTime': None})
Updated application network time configuration.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because the input value for SystemTime is missing or an invalid value is entered.'), ('MessageArgs', ['SystemTime']), ('MessageId', 'CGEN6002'), ('RelatedProperties', []), ('Resolution', 'Enter a valid value and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on HTTP error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the request because the input value for SystemTime is missing or an invalid value is entered.', 'MessageArgs': ['SystemTime'], 'MessageId': 'CGEN6002', 'RelatedProperties': [], 'Resolution': 'Enter a valid value and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the HTTP error.
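
A hedged NTP sketch matching the EnableNTP, PrimaryNTPAddress, and TimeZone fields returned above; the lower-case option names are assumptions mirroring those fields.

- name: Enable NTP-based time synchronization
  dellemc.openmanage.ome_application_network_time:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    enable_ntp: true                         # assumed option, mirrors EnableNTP
    time_zone: "TZ_ID_1"                     # assumed option, mirrors TimeZone
    primary_ntp_address: "192.168.0.5"       # assumed option, mirrors PrimaryNTPAddress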
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst
index 9add772b4..64adb1bf5 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_network_webserver.rst
@@ -56,11 +56,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -78,7 +78,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -125,11 +125,11 @@ msg (always, str, Successfully updated network web server configuration.)
Overall status of the network web server configuration change.
-webserver_configuration (success, dict, AnsibleMapping([('TimeOut', 20), ('PortNumber', 443), ('EnableWebServer', True)]))
+webserver_configuration (success, dict, {'TimeOut': 20, 'PortNumber': 443, 'EnableWebServer': True})
Updated application network web server configuration.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because the input value for PortNumber is missing or an invalid value is entered.'), ('MessageArgs', ['PortNumber']), ('MessageId', 'CGEN6002'), ('RelatedProperties', []), ('Resolution', 'Enter a valid value and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on HTTP error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the request because the input value for PortNumber is missing or an invalid value is entered.', 'MessageArgs': ['PortNumber'], 'MessageId': 'CGEN6002', 'RelatedProperties': [], 'Resolution': 'Enter a valid value and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the HTTP error.
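
The webserver_configuration keys above (PortNumber, TimeOut) suggest the corresponding inputs. In the sketch below, webserver_port and webserver_timeout are assumed option names, and the timeout unit is not confirmed by this diff.

- name: Change the web server port and session timeout
  dellemc.openmanage.ome_application_network_webserver:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    webserver_port: 443                      # assumed option, mirrors PortNumber
    webserver_timeout: 20                    # assumed option, mirrors TimeOut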
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst
index f99ca189f..f7ca82d94 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_application_security_settings.rst
@@ -78,7 +78,7 @@ Parameters
job_wait_timeout (optional, int, 120)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
fips_mode_enable (optional, bool, None)
@@ -112,11 +112,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -134,7 +134,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -193,7 +193,7 @@ Examples
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
- fips_mode_enable: yes
+ fips_mode_enable: true
@@ -208,7 +208,7 @@ job_id (When security configuration properties are provided, int, 10123)
Job ID of the security configuration task.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to process the request because the domain information cannot be retrieved.'), ('MessageArgs', []), ('MessageId', 'CGEN8007'), ('RelatedProperties', []), ('Resolution', 'Verify the status of the database and domain configuration, and then retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to process the request because the domain information cannot be retrieved.', 'MessageArgs': [], 'MessageId': 'CGEN8007', 'RelatedProperties': [], 'Resolution': 'Verify the status of the database and domain configuration, and then retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of http error.
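
Since *job_wait_timeout* applies only while *job_wait* is ``true``, a typical call pairs the two explicitly. In this sketch, *fips_mode_enable*, *job_wait*, and *job_wait_timeout* come from this document, while the login_lockout_policy option and its child names are assumptions.

- name: Configure a login lockout policy and wait for the job
  dellemc.openmanage.ome_application_security_settings:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    login_lockout_policy:                    # assumed option and child names
      by_user_name: true
      by_ip_address: true
      lockout_fail_count: 3
      lockout_penalty_time: 600
    job_wait: true                           # job_wait_timeout is honoured only when true
    job_wait_timeout: 120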
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst
index 60463fe06..bc05c9616 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_chassis_slots.rst
@@ -91,11 +91,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -114,7 +114,7 @@ Notes
.. note::
- This module initiates the refresh inventory task. It may take a minute for new names to be reflected. If the refresh task takes longer than 300 seconds, it times out.
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -192,7 +192,7 @@ msg (always, str, Successfully renamed the slot(s).)
Overall status of the slot rename operation.
-slot_info (if at least one slot renamed, list, [AnsibleMapping([('ChassisId', 10053), ('ChassisServiceTag', 'ABCD123'), ('DeviceName', ''), ('DeviceType', 1000), ('JobId', 15746), ('SlotId', '10072'), ('SlotName', 'slot_op2'), ('SlotNumber', '6'), ('SlotType', 2000)]), AnsibleMapping([('ChassisId', 10053), ('ChassisName', 'MX-ABCD123'), ('ChassisServiceTag', 'ABCD123'), ('DeviceType', '3000'), ('JobId', 15747), ('SlotId', '10070'), ('SlotName', 'slot_op2'), ('SlotNumber', '4'), ('SlotType', '2000')]), AnsibleMapping([('ChassisId', '10053'), ('ChassisName', 'MX-PQRS123'), ('ChassisServiceTag', 'PQRS123'), ('DeviceId', '10054'), ('DeviceServiceTag', 'XYZ5678'), ('DeviceType', '1000'), ('JobId', 15761), ('SlotId', '10067'), ('SlotName', 'a1'), ('SlotNumber', '1'), ('SlotType', '2000')])])
+slot_info (if at least one slot renamed, list, [{'ChassisId': 10053, 'ChassisServiceTag': 'ABCD123', 'DeviceName': '', 'DeviceType': 1000, 'JobId': 15746, 'SlotId': '10072', 'SlotName': 'slot_op2', 'SlotNumber': '6', 'SlotType': 2000}, {'ChassisId': 10053, 'ChassisName': 'MX-ABCD123', 'ChassisServiceTag': 'ABCD123', 'DeviceType': '3000', 'JobId': 15747, 'SlotId': '10070', 'SlotName': 'slot_op2', 'SlotNumber': '4', 'SlotType': '2000'}, {'ChassisId': '10053', 'ChassisName': 'MX-PQRS123', 'ChassisServiceTag': 'PQRS123', 'DeviceId': '10054', 'DeviceServiceTag': 'XYZ5678', 'DeviceType': '1000', 'JobId': 15761, 'SlotId': '10067', 'SlotName': 'a1', 'SlotNumber': '1', 'SlotType': '2000'}])
Information of the slots that are renamed successfully.
The ``DeviceServiceTag`` and ``DeviceId`` options are available only if *device_options* is used.
@@ -200,7 +200,7 @@ slot_info (if at least one slot renamed, list, [AnsibleMapping([('ChassisId', 10
``NOTE`` Only the slots which were renamed are listed.
-rename_failed_slots (if at least one slot renaming fails, list, [AnsibleMapping([('ChassisId', '12345'), ('ChassisName', 'MX-ABCD123'), ('ChassisServiceTag', 'ABCD123'), ('DeviceType', '4000'), ('JobId', 1234), ('JobStatus', 'Aborted'), ('SlotId', '10061'), ('SlotName', 'c2'), ('SlotNumber', '1'), ('SlotType', '4000')]), AnsibleMapping([('ChassisId', '10053'), ('ChassisName', 'MX-PQRS123'), ('ChassisServiceTag', 'PQRS123'), ('DeviceType', '1000'), ('JobId', 0), ('JobStatus', 'HTTP Error 400: Bad Request'), ('SlotId', '10069'), ('SlotName', 'b2'), ('SlotNumber', '3'), ('SlotType', '2000')])])
+rename_failed_slots (if at least one slot renaming fails, list, [{'ChassisId': '12345', 'ChassisName': 'MX-ABCD123', 'ChassisServiceTag': 'ABCD123', 'DeviceType': '4000', 'JobId': 1234, 'JobStatus': 'Aborted', 'SlotId': '10061', 'SlotName': 'c2', 'SlotNumber': '1', 'SlotType': '4000'}, {'ChassisId': '10053', 'ChassisName': 'MX-PQRS123', 'ChassisServiceTag': 'PQRS123', 'DeviceType': '1000', 'JobId': 0, 'JobStatus': 'HTTP Error 400: Bad Request', 'SlotId': '10069', 'SlotName': 'b2', 'SlotNumber': '3', 'SlotType': '2000'}])
Information of the valid slots that are not renamed.
``JobStatus`` is shown if the rename job fails.
@@ -208,7 +208,7 @@ rename_failed_slots (if at least one slot renaming fails, list, [AnsibleMapping(
``NOTE`` Only slots which were not renamed are listed.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1014'), ('RelatedProperties', []), ('Message', 'Unable to complete the operation because an invalid value is entered for the property Invalid json type: STRING for Edm.Int64 property: Id .'), ('MessageArgs', ['Invalid json type: STRING for Edm.Int64 property: Id']), ('Severity', 'Critical'), ('Resolution', "Enter a valid value for the property and retry the operation. For more information about valid values, see the OpenManage Enterprise-Modular User's Guide available on the support site.")])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1014', 'RelatedProperties': [], 'Message': 'Unable to complete the operation because an invalid value is entered for the property Invalid json type: STRING for Edm.Int64 property: Id .', 'MessageArgs': ['Invalid json type: STRING for Edm.Int64 property: Id'], 'Severity': 'Critical', 'Resolution': "Enter a valid value for the property and retry the operation. For more information about valid values, see the OpenManage Enterprise-Modular User's Guide available on the support site."}]}})
Details of the HTTP Error.
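
A rename sketch matching the slot_info fields above; the slot_options wrapper and its chassis_service_tag, slots, slot_number, and slot_name keys are assumptions mirroring those return fields (*device_options* is the alternative referenced in this document).

- name: Rename a chassis slot by slot number
  dellemc.openmanage.ome_chassis_slots:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    slot_options:                            # assumed wrapper option
      - chassis_service_tag: "ABCD123"       # assumed key, mirrors ChassisServiceTag
        slots:
          - slot_number: 6                   # assumed key, mirrors SlotNumber
            slot_name: "slot_op2"            # assumed key, mirrors SlotName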
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst
index d4d2c53a7..4b1ba6c0f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_baseline.rst
@@ -102,7 +102,7 @@ Parameters
job_wait_timeout (optional, int, 10800)
The maximum wait time of *job_wait* in seconds. The job will only be tracked for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
hostname (True, str, None)
@@ -122,11 +122,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -268,7 +268,7 @@ incompatible_devices (when I(device_service_tags) or I(device_ids) contains inco
Details of the devices which cannot be used to perform baseline compliance operations.
-compliance_status (when I(command) is C(create) or C(modify), dict, AnsibleMapping([('Id', 13), ('Name', 'baseline1'), ('Description', None), ('TemplateId', 102), ('TemplateName', 'one'), ('TemplateType', 2), ('TaskId', 26584), ('PercentageComplete', '100'), ('TaskStatus', 2070), ('LastRun', '2021-02-27 13:15:13.751'), ('BaselineTargets', [AnsibleMapping([('Id', 1111), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('ConfigComplianceSummary', AnsibleMapping([('ComplianceStatus', 'OK'), ('NumberOfCritical', 0), ('NumberOfWarning', 0), ('NumberOfNormal', 0), ('NumberOfIncomplete', 0)]))]))
+compliance_status (when I(command) is C(create) or C(modify), dict, {'Id': 13, 'Name': 'baseline1', 'Description': None, 'TemplateId': 102, 'TemplateName': 'one', 'TemplateType': 2, 'TaskId': 26584, 'PercentageComplete': '100', 'TaskStatus': 2070, 'LastRun': '2021-02-27 13:15:13.751', 'BaselineTargets': [{'Id': 1111, 'Type': {'Id': 1000, 'Name': 'DEVICE'}}], 'ConfigComplianceSummary': {'ComplianceStatus': 'OK', 'NumberOfCritical': 0, 'NumberOfWarning': 0, 'NumberOfNormal': 0, 'NumberOfIncomplete': 0}})
Status of compliance baseline operation.
@@ -276,7 +276,7 @@ job_id (when I(command) is C(remediate), int, 14123)
Task ID created when *command* is ``remediate``.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -294,4 +294,5 @@ Authors
~~~~~~~
- Sajna Shetty(@Sajna-Shetty)
+- Abhishek Sinha(@Abhishek-Dell)
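
A remediation sketch tying together *command*, *device_service_tags*, and the *job_wait*/*job_wait_timeout* pairing described above; the names option for selecting the baseline is an assumption.

- name: Remediate devices against a compliance baseline
  dellemc.openmanage.ome_configuration_compliance_baseline:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    ca_path: "/path/to/ca_cert.pem"
    command: "remediate"
    names:                                   # assumed option for the baseline name
      - "baseline1"
    device_service_tags:
      - "ABCD123"
    job_wait: true                           # job_wait_timeout applies only while true
    job_wait_timeout: 10800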
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst
index feeadd160..a04e3404d 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_configuration_compliance_info.rst
@@ -60,11 +60,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -82,7 +82,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -130,11 +130,11 @@ msg (on error, str, Unable to complete the operation because the entered target
Overall compliance report status.
-compliance_info (success, dict, [AnsibleMapping([('ComplianceAttributeGroups', [AnsibleMapping([('Attributes', []), ('ComplianceReason', 'One or more attributes on the target device(s) does not match the compliance template.'), ('ComplianceStatus', 2), ('ComplianceSubAttributeGroups', [AnsibleMapping([('Attributes', [AnsibleMapping([('AttributeId', 75369), ('ComplianceReason', 'Attribute has different value from template'), ('ComplianceStatus', 3), ('CustomId', 0), ('Description', None), ('DisplayName', 'Workload Profile'), ('ExpectedValue', 'HpcProfile'), ('Value', 'NotAvailable')])]), ('ComplianceReason', 'One or more attributes on the target device(s) does not match the compliance template.'), ('ComplianceStatus', 2), ('ComplianceSubAttributeGroups', []), ('DisplayName', 'System Profile Settings'), ('GroupNameId', 1)])]), ('DisplayName', 'BIOS'), ('GroupNameId', 1)])]), ('ComplianceStatus', 'NONCOMPLIANT'), ('DeviceName', 'WIN-PLOV8MPIP40'), ('DeviceType', 1000), ('Id', 25011), ('InventoryTime', '2021-03-18 00:01:57.809771'), ('Model', 'PowerEdge R7525'), ('ServiceTag', 'JHMBX53')])])
+compliance_info (success, dict, [{'ComplianceAttributeGroups': [{'Attributes': [], 'ComplianceReason': 'One or more attributes on the target device(s) does not match the compliance template.', 'ComplianceStatus': 2, 'ComplianceSubAttributeGroups': [{'Attributes': [{'AttributeId': 75369, 'ComplianceReason': 'Attribute has different value from template', 'ComplianceStatus': 3, 'CustomId': 0, 'Description': None, 'DisplayName': 'Workload Profile', 'ExpectedValue': 'HpcProfile', 'Value': 'NotAvailable'}], 'ComplianceReason': 'One or more attributes on the target device(s) does not match the compliance template.', 'ComplianceStatus': 2, 'ComplianceSubAttributeGroups': [], 'DisplayName': 'System Profile Settings', 'GroupNameId': 1}], 'DisplayName': 'BIOS', 'GroupNameId': 1}], 'ComplianceStatus': 'NONCOMPLIANT', 'DeviceName': 'WIN-PLOV8MPIP40', 'DeviceType': 1000, 'Id': 25011, 'InventoryTime': '2021-03-18 00:01:57.809771', 'Model': 'PowerEdge R7525', 'ServiceTag': 'JHMBX53'}])
Returns the compliance report information.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -152,4 +152,5 @@ Authors
~~~~~~~
- Felix Stephen A (@felixs88)
+- Kritika Bhateja (@Kritika-Bhateja)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst
index e3a32f824..c111d5f2f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_group.rst
@@ -101,11 +101,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -288,7 +288,6 @@ Examples
-
Return Values
-------------
@@ -304,7 +303,7 @@ ip_addresses_added (success, list, 21078)
IP Addresses which are added to the device group.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
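For quick reference, a minimal task sketch for this module based on the return values above; the hostname and credentials are placeholders, and the ``name`` and ``ip_addresses`` option names are assumptions that do not appear in the hunks shown here.

  - name: Add devices to a device group by IP address
    dellemc.openmanage.ome_device_group:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      name: "Storage Services"     # assumed option name for the target group
      ip_addresses:                # assumed option name; compare ip_addresses_added above
        - 192.168.0.40
        - 192.168.0.50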
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst
index 1896725d1..747bc2adc 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_info.rst
@@ -73,11 +73,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -95,7 +95,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -178,7 +178,6 @@ Examples
-
Return Values
-------------
@@ -186,7 +185,7 @@ msg (on error, str, Failed to fetch the device information)
Over all device information status.
-device_info (success, dict, AnsibleMapping([('value', [AnsibleMapping([('Actions', None), ('AssetTag', None), ('ChassisServiceTag', None), ('ConnectionState', True), ('DeviceManagement', [AnsibleMapping([('DnsName', 'dnsname.host.com'), ('InstrumentationName', 'MX-12345'), ('MacAddress', '11:10:11:10:11:10'), ('ManagementId', 12345), ('ManagementProfile', [AnsibleMapping([('HasCreds', 0), ('ManagementId', 12345), ('ManagementProfileId', 12345), ('ManagementURL', 'https://192.168.0.1:443'), ('Status', 1000), ('StatusDateTime', '2019-01-21 06:30:08.501')])]), ('ManagementType', 2), ('NetworkAddress', '192.168.0.1')])]), ('DeviceName', 'MX-0003I'), ('DeviceServiceTag', 'MXL1234'), ('DeviceSubscription', None), ('LastInventoryTime', '2019-01-21 06:30:08.501'), ('LastStatusTime', '2019-01-21 06:30:02.492'), ('ManagedState', 3000), ('Model', 'PowerEdge MX7000'), ('PowerState', 17), ('SlotConfiguration', AnsibleMapping()), ('Status', 4000), ('SystemId', 2031), ('Type', 2000)])])]))
+device_info (success, dict, {'value': [{'Actions': None, 'AssetTag': None, 'ChassisServiceTag': None, 'ConnectionState': True, 'DeviceManagement': [{'DnsName': 'dnsname.host.com', 'InstrumentationName': 'MX-12345', 'MacAddress': '11:10:11:10:11:10', 'ManagementId': 12345, 'ManagementProfile': [{'HasCreds': 0, 'ManagementId': 12345, 'ManagementProfileId': 12345, 'ManagementURL': 'https://192.168.0.1:443', 'Status': 1000, 'StatusDateTime': '2019-01-21 06:30:08.501'}], 'ManagementType': 2, 'NetworkAddress': '192.168.0.1'}], 'DeviceName': 'MX-0003I', 'DeviceServiceTag': 'MXL1234', 'DeviceSubscription': None, 'LastInventoryTime': '2019-01-21 06:30:08.501', 'LastStatusTime': '2019-01-21 06:30:02.492', 'ManagedState': 3000, 'Model': 'PowerEdge MX7000', 'PowerState': 17, 'SlotConfiguration': {}, 'Status': 4000, 'SystemId': 2031, 'Type': 2000}]})
Returns the information collected from the Device.
@@ -203,5 +202,6 @@ Status
Authors
~~~~~~~
-- Sajna Shetty(@Sajna-Shetty)
+- Sajna Shetty (@Sajna-Shetty)
+- Felix Stephen (@felixs88)
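As a quick orientation, a minimal task sketch for this module with placeholder connection details; run without filters, it returns the inventory structure shown in device_info above.

  - name: Fetch information about all discovered devices
    dellemc.openmanage.ome_device_info:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
    register: device_facts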
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst
index 0e9e2b7bf..a5846243e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_local_access_configuration.rst
@@ -58,21 +58,23 @@ Parameters
enable_chassis_power_button (True, bool, None)
Enables or disables the chassis power button.
- If ``False``, the chassis cannot be turn on or turn off using the power button.
+ If ``false``, the chassis cannot be turned on or turned off using the power button.
enable_lcd_override_pin (optional, bool, None)
Enables or disables the LCD override pin.
- This is required when *enable_chassis_power_button* is ``False``.
+ This is required when *enable_chassis_power_button* is ``false``.
- disabled_button_lcd_override_pin (optional, int, None)
+ disabled_button_lcd_override_pin (optional, str, None)
The six digit LCD override pin to change the power state of the chassis.
- This is required when *enable_lcd_override_pin* is ``True``.
+ This is required when *enable_lcd_override_pin* is ``true``.
- The module will always report change when *disabled_button_lcd_override_pin* is ``True``.
+ The module will always report change when *disabled_button_lcd_override_pin* is ``true``.
+
+ The value must be specified in quotes. For example, "001100".
@@ -101,7 +103,7 @@ Parameters
The range is 120 to 3600 in seconds, or 2 to 60 in minutes.
- This option is required when *enable_inactivity_timeout* is ``True``.
+ This option is required when *enable_inactivity_timeout* is ``true``.
timeout_limit_unit (optional, str, None)
@@ -111,7 +113,7 @@ Parameters
``MINUTES`` to set *timeout_limit* in minutes.
- This option is required when *enable_inactivity_timeout* is ``True``.
+ This option is required when *enable_inactivity_timeout* is ``true``.
enable_read_authentication (optional, bool, None)
@@ -177,11 +179,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -201,7 +203,7 @@ Notes
.. note::
- Run this module from a system that has direct access to OpenManage Enterprise Modular.
- This module supports ``check_mode``.
- - The module will always report change when *enable_chassis_power_button* is ``True``.
+ - The module will always report change when *enable_chassis_power_button* is ``true``.
@@ -225,7 +227,7 @@ Examples
chassis_power_button:
enable_chassis_power_button: false
enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
+ disabled_button_lcd_override_pin: "123456"
- name: Configure Quick sync and LCD settings of the chassis using device service tag.
dellemc.openmanage.ome_device_local_access_configuration:
@@ -257,7 +259,7 @@ Examples
chassis_power_button:
enable_chassis_power_button: false
enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
+ disabled_button_lcd_override_pin: "123456"
quick_sync:
quick_sync_access: READ_WRITE
enable_read_authentication: true
@@ -279,11 +281,11 @@ msg (always, str, Successfully updated the local access settings.)
Overall status of the device local access settings.
-location_details (success, dict, AnsibleMapping([('SettingType', 'LocalAccessConfiguration'), ('EnableChassisDirect', False), ('EnableChassisPowerButton', False), ('EnableKvmAccess', True), ('EnableLcdOverridePin', False), ('LcdAccess', 'VIEW_ONLY'), ('LcdCustomString', 'LCD Text'), ('LcdLanguage', 'en'), ('LcdOverridePin', ''), ('LcdPinLength', None), ('LcdPresence', 'Present'), ('LedPresence', None), ('QuickSync', AnsibleMapping([('EnableInactivityTimeout', True), ('EnableQuickSyncWifi', False), ('EnableReadAuthentication', False), ('QuickSyncAccess', 'READ_ONLY'), ('QuickSyncHardware', 'Present'), ('TimeoutLimit', 7), ('TimeoutLimitUnit', 'MINUTES')]))]))
+location_details (success, dict, {'SettingType': 'LocalAccessConfiguration', 'EnableChassisDirect': False, 'EnableChassisPowerButton': False, 'EnableKvmAccess': True, 'EnableLcdOverridePin': False, 'LcdAccess': 'VIEW_ONLY', 'LcdCustomString': 'LCD Text', 'LcdLanguage': 'en', 'LcdOverridePin': '', 'LcdPinLength': None, 'LcdPresence': 'Present', 'LedPresence': None, 'QuickSync': {'EnableInactivityTimeout': True, 'EnableQuickSyncWifi': False, 'EnableReadAuthentication': False, 'QuickSyncAccess': 'READ_ONLY', 'QuickSyncHardware': 'Present', 'TimeoutLimit': 7, 'TimeoutLimitUnit': 'MINUTES'}})
returned when local access settings are updated successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -301,4 +303,5 @@ Authors
~~~~~~~
- Felix Stephen (@felixs88)
+- Shivam Sharma (@ShivamSh3)
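A short sketch focused on the Quick Sync inactivity timeout options documented above; the hostname and credentials are placeholders, and the timeout value sits inside the documented 2 to 60 minute range.

  - name: Configure Quick Sync inactivity timeout in minutes
    dellemc.openmanage.ome_device_local_access_configuration:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      quick_sync:
        quick_sync_access: READ_WRITE
        enable_read_authentication: true
        enable_inactivity_timeout: true
        timeout_limit: 10
        timeout_limit_unit: MINUTES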
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst
index 3d61a4f4e..de96c2c7e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_location.rst
@@ -84,11 +84,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -106,7 +106,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -169,11 +169,11 @@ msg (always, str, Successfully updated the location settings.)
Overall status of the device location settings.
-location_details (success, dict, AnsibleMapping([('Aisle', 'aisle 1'), ('DataCenter', 'data center 1'), ('Location', 'location 1'), ('RackName', 'rack 1'), ('RackSlot', 2), ('Room', 'room 1'), ('SettingType', 'Location')]))
+location_details (success, dict, {'Aisle': 'aisle 1', 'DataCenter': 'data center 1', 'Location': 'location 1', 'RackName': 'rack 1', 'RackSlot': 2, 'Room': 'room 1', 'SettingType': 'Location'})
returned when location settings are updated successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
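A task sketch mirroring the fields in location_details above; the lower-case option names are assumptions, since only the returned keys appear in the hunks shown here.

  - name: Update chassis location settings
    dellemc.openmanage.ome_device_location:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      data_center: "data center 1"   # assumed option names mirroring the returned keys
      room: "room 1"
      aisle: "aisle 1"
      rack_name: "rack 1"
      rack_slot: 2
      location: "location 1"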
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst
index a60b09262..3939b6e38 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_mgmt_network.rst
@@ -64,7 +64,7 @@ Parameters
enable_dhcp (optional, bool, None)
Enable or disable the automatic request to obtain an IPv4 address from the IPv4 Dynamic Host Configuration Protocol (DHCP) server.
- ``NOTE`` If this option is ``True``, the values provided for *static_ip_address*, *static_subnet_mask*, and *static_gateway* are not applied for these fields. However, the module may report changes.
+ ``NOTE`` If this option is ``true``, the values provided for *static_ip_address*, *static_subnet_mask*, and *static_gateway* are not applied for these fields. However, the module may report changes.
static_ip_address (optional, str, None)
@@ -90,7 +90,7 @@ Parameters
This option is applicable when *enable_dhcp* is true.
- ``NOTE`` If this option is ``True``, the values provided for *static_preferred_dns_server* and *static_alternate_dns_server* are not applied for these fields. However, the module may report changes.
+ ``NOTE`` If this option is ``true``, the values provided for *static_preferred_dns_server* and *static_alternate_dns_server* are not applied for these fields. However, the module may report changes.
static_preferred_dns_server (optional, str, None)
@@ -121,7 +121,7 @@ Parameters
If *enable_auto_configuration* is ``true``, OpenManage Enterprise Modular retrieves IP configuration (IPv6 address, prefix, and gateway address) from a DHCPv6 server on the existing network.
- ``NOTE`` If this option is ``True``, the values provided for *static_ip_address*, *static_prefix_length*, and *static_gateway* are not applied for these fields. However, the module may report changes.
+ ``NOTE`` If this option is ``true``, the values provided for *static_ip_address*, *static_prefix_length*, and *static_gateway* are not applied for these fields. However, the module may report changes.
static_ip_address (optional, str, None)
@@ -147,7 +147,7 @@ Parameters
This option is applicable when *enable_auto_configuration* is true
- ``NOTE`` If this option is ``True``, the values provided for *static_preferred_dns_server* and *static_alternate_dns_server* are not applied for these fields. However, the module may report changes.
+ ``NOTE`` If this option is ``true``, the values provided for *static_preferred_dns_server* and *static_alternate_dns_server* are not applied for these fields. However, the module may report changes.
static_preferred_dns_server (optional, str, None)
@@ -269,11 +269,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -291,7 +291,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -334,7 +334,7 @@ Examples
use_dhcp_for_dns_domain_name: false
dns_name: "MX-SVCTAG"
dns_domain_name: "dnslocaldomain"
- auto_negotiation: no
+ auto_negotiation: false
network_speed: 100_MB
- name: Network settings for server
@@ -392,7 +392,7 @@ Examples
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
- device_id : 12345
+ device_id: 12345
management_vlan:
enable_vlan: true
vlan_id: 2345
@@ -408,7 +408,7 @@ msg (always, str, Successfully applied the network settings.)
Overall status of the network config operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1004'), ('RelatedProperties', []), ('Message', 'Unable to complete the request because IPV4 Settings Capability is not Supported does not exist or is not applicable for the resource URI.'), ('MessageArgs', ['IPV4 Settings Capability is not Supported']), ('Severity', 'Critical'), ('Resolution', "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties.")])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1004', 'RelatedProperties': [], 'Message': 'Unable to complete the request because IPV4 Settings Capability is not Supported does not exist or is not applicable for the resource URI.', 'MessageArgs': ['IPV4 Settings Capability is not Supported'], 'Severity': 'Critical', 'Resolution': "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."}]}})
Details of the HTTP Error.
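A sketch of a static IPv4 assignment built from the options documented above; the ipv4_configuration grouping key is an assumption (only its child options appear in the hunks here), and the addresses are placeholders.

  - name: Assign a static IPv4 address to the device management port
    dellemc.openmanage.ome_device_mgmt_network:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      device_id: 12345
      ipv4_configuration:            # assumed grouping key for the IPv4 options above
        enable_dhcp: false
        static_ip_address: 192.168.0.2
        static_subnet_mask: 255.255.255.0
        static_gateway: 192.168.0.1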
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst
index 9d57373c2..f4f84f340 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_network_services.rst
@@ -113,11 +113,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -135,7 +135,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -204,11 +204,11 @@ msg (always, str, Successfully updated the network services settings.)
Overall status of the network services settings.
-network_services_details (success, dict, AnsibleMapping([('EnableRemoteRacadm', True), ('SettingType', 'NetworkServices'), ('SnmpConfiguration', AnsibleMapping([('PortNumber', 161), ('SnmpEnabled', True), ('SnmpV1V2Credential', AnsibleMapping([('CommunityName', 'public')]))])), ('SshConfiguration', AnsibleMapping([('IdleTimeout', 60), ('MaxAuthRetries', 3), ('MaxSessions', 1), ('PortNumber', 22), ('SshEnabled', False)]))]))
+network_services_details (success, dict, {'EnableRemoteRacadm': True, 'SettingType': 'NetworkServices', 'SnmpConfiguration': {'PortNumber': 161, 'SnmpEnabled': True, 'SnmpV1V2Credential': {'CommunityName': 'public'}}, 'SshConfiguration': {'IdleTimeout': 60, 'MaxAuthRetries': 3, 'MaxSessions': 1, 'PortNumber': 22, 'SshEnabled': False}})
returned when network services settings are updated successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CAPP1042'), ('RelatedProperties', []), ('Message', 'Unable to update the network configuration because the SNMP PortNumber is already in use.'), ('MessageArgs', ['SNMP PortNumber']), ('Severity', 'Informational'), ('Resolution', 'Enter a different port number and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CAPP1042', 'RelatedProperties': [], 'Message': 'Unable to update the network configuration because the SNMP PortNumber is already in use.', 'MessageArgs': ['SNMP PortNumber'], 'Severity': 'Informational', 'Resolution': 'Enter a different port number and retry the operation.'}]}})
Details of the HTTP Error.
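A sketch based on the network_services_details payload above; the snmp_settings and ssh_settings option names and their children are assumptions, since only the returned keys appear in the hunks here.

  - name: Update SNMP and SSH service settings
    dellemc.openmanage.ome_device_network_services:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      snmp_settings:                 # assumed option names mirroring SnmpConfiguration
        enabled: true
        port_number: 161
        community_name: public
      ssh_settings:                  # assumed option names mirroring SshConfiguration
        enabled: false
        port_number: 22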
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst
index 46f75bb27..11ff3c2c4 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_power_settings.rst
@@ -107,11 +107,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -129,7 +129,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -183,11 +183,11 @@ msg (always, str, Successfully updated the power settings.)
Overall status of the device power settings.
-power_details (success, dict, AnsibleMapping([('EnableHotSpare', True), ('EnablePowerCapSettings', True), ('MaxPowerCap', '3424'), ('MinPowerCap', '3291'), ('PowerCap', '3425'), ('PrimaryGrid', 'GRID_1'), ('RedundancyPolicy', 'NO_REDUNDANCY'), ('SettingType', 'Power')]))
+power_details (success, dict, {'EnableHotSpare': True, 'EnablePowerCapSettings': True, 'MaxPowerCap': '3424', 'MinPowerCap': '3291', 'PowerCap': '3425', 'PrimaryGrid': 'GRID_1', 'RedundancyPolicy': 'NO_REDUNDANCY', 'SettingType': 'Power'})
returned when power settings are updated successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
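A sketch mirroring the power_details payload above; the power_configuration grouping and its child names are assumptions, and the cap value must fall between the reported MinPowerCap and MaxPowerCap.

  - name: Configure a power cap on the chassis
    dellemc.openmanage.ome_device_power_settings:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      power_configuration:           # assumed grouping mirroring EnablePowerCapSettings/PowerCap
        enable_power_cap: true
        power_cap: 3400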
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst
index 0f32a4138..185331335 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_device_quick_deploy.rst
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python >= 3.9.6
@@ -58,7 +58,7 @@ Parameters
job_wait_timeout (optional, int, 120)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
quick_deploy_options (True, dict, None)
@@ -78,7 +78,7 @@ Parameters
ipv4_network_type (optional, str, None)
IPv4 network type.
- *ipv4_network_type* is required if *ipv4_enabled* is ``True``.
+ *ipv4_network_type* is required if *ipv4_enabled* is ``true``.
``Static`` to configure the static IP settings.
@@ -104,7 +104,7 @@ Parameters
ipv6_network_type (optional, str, None)
IPv6 network type.
- *ipv6_network_type* is required if *ipv6_enabled* is ``True``.
+ *ipv6_network_type* is required if *ipv6_enabled* is ``true``.
``Static`` to configure the static IP settings.
@@ -162,11 +162,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -209,11 +209,11 @@ Examples
ca_path: "/path/to/ca_cert.pem"
quick_deploy_options:
password: "password"
- ipv4_enabled: True
+ ipv4_enabled: true
ipv4_network_type: Static
ipv4_subnet_mask: 255.255.255.0
ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
+ ipv6_enabled: true
ipv6_network_type: Static
ipv6_prefix_length: 1
ipv6_gateway: "::"
@@ -237,11 +237,11 @@ Examples
ca_path: "/path/to/ca_cert.pem"
quick_deploy_options:
password: "password"
- ipv4_enabled: True
+ ipv4_enabled: true
ipv4_network_type: Static
ipv4_subnet_mask: 255.255.255.0
ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
+ ipv6_enabled: true
ipv6_network_type: Static
ipv6_prefix_length: 1
ipv6_gateway: "::"
@@ -268,11 +268,11 @@ job_id (when quick deploy job is submitted., int, 1234)
The job ID of the submitted quick deploy job.
-quick_deploy_settings (success, dict, AnsibleMapping([('DeviceId', 25011), ('SettingType', 'ServerQuickDeploy'), ('ProtocolTypeV4', True), ('NetworkTypeV4', 'Static'), ('IpV4Gateway', '192.168.0.1'), ('IpV4SubnetMask', '255.255.255.0'), ('ProtocolTypeV6', True), ('NetworkTypeV6', 'Static'), ('PrefixLength', '2'), ('IpV6Gateway', '::'), ('slots', [AnsibleMapping([('DeviceId', 25011), ('DeviceCapabilities', [18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 41, 8, 7, 4, 3, 2, 1, 31, 30]), ('DeviceIPV4Address', '192.168.0.2'), ('DeviceIPV6Address', '::'), ('Dhcpipv4', 'Disabled'), ('Dhcpipv6', 'Disabled'), ('Ipv4Enabled', 'Enabled'), ('Ipv6Enabled', 'Enabled'), ('Model', 'PowerEdge MX840c'), ('SlotIPV4Address', '192.168.0.2'), ('SlotIPV6Address', '::'), ('SlotId', 1), ('SlotSelected', True), ('SlotSettingsApplied', True), ('SlotType', '2000'), ('Type', '1000'), ('VlanId', '1')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 2), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 3), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 4), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 5), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 6), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 7), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')]), AnsibleMapping([('DeviceId', 0), ('Model', ''), ('SlotIPV4Address', '0.0.0.0'), ('SlotIPV6Address', '::'), ('SlotId', 8), ('SlotSelected', False), ('SlotSettingsApplied', False), ('SlotType', '2000'), ('Type', '0')])])]))
+quick_deploy_settings (success, dict, {'DeviceId': 25011, 'SettingType': 'ServerQuickDeploy', 'ProtocolTypeV4': True, 'NetworkTypeV4': 'Static', 'IpV4Gateway': '192.168.0.1', 'IpV4SubnetMask': '255.255.255.0', 'ProtocolTypeV6': True, 'NetworkTypeV6': 'Static', 'PrefixLength': '2', 'IpV6Gateway': '::', 'slots': [{'DeviceId': 25011, 'DeviceCapabilities': [18, 17, 16, 15, 14, 13, 12, 11, 10, 9, 41, 8, 7, 4, 3, 2, 1, 31, 30], 'DeviceIPV4Address': '192.168.0.2', 'DeviceIPV6Address': '::', 'Dhcpipv4': 'Disabled', 'Dhcpipv6': 'Disabled', 'Ipv4Enabled': 'Enabled', 'Ipv6Enabled': 'Enabled', 'Model': 'PowerEdge MX840c', 'SlotIPV4Address': '192.168.0.2', 'SlotIPV6Address': '::', 'SlotId': 1, 'SlotSelected': True, 'SlotSettingsApplied': True, 'SlotType': '2000', 'Type': '1000', 'VlanId': '1'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 2, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 3, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 4, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 5, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 6, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 7, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}, {'DeviceId': 0, 'Model': '', 'SlotIPV4Address': '0.0.0.0', 'SlotIPV6Address': '::', 'SlotId': 8, 'SlotSelected': False, 'SlotSettingsApplied': False, 'SlotType': '2000', 'Type': '0'}]})
returned when quick deploy settings are deployed successfully.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -290,4 +290,5 @@ Authors
~~~~~~~
- Felix Stephen (@felixs88)
+- Shivam Sharma (@ShivamSh3)
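A smaller IPv4-only variant of the examples above with an explicit job tracking timeout; the device_id and setup_type option names are assumptions mirroring the DeviceId and SettingType keys in the return payload, and the addresses are placeholders.

  - name: Configure server Quick Deploy IPv4 settings only
    dellemc.openmanage.ome_device_quick_deploy:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      device_id: 25011               # assumed option name; mirrors DeviceId above
      setup_type: ServerQuickDeploy  # assumed option name; mirrors SettingType above
      job_wait_timeout: 600
      quick_deploy_options:
        password: "password"
        ipv4_enabled: true
        ipv4_network_type: Static
        ipv4_subnet_mask: 255.255.255.0
        ipv4_gateway: 192.168.0.1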
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst
index 66f4f27f3..756adde24 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_devices.rst
@@ -42,7 +42,7 @@ Parameters
state (optional, str, present)
``present`` Allows to perform the *device_action* on the target devices.
- ``absent`` Removes the device from OpenManage Enterprise. No job is triggered. *job_wait*, *job_schedule*, *job_name*, and *job_description* are not applicable to this operation.
+ ``absent`` Removes the device from OpenManage Enterprise. Job is not triggered. *job_wait*, *job_schedule*, *job_name*, and *job_description* are not applicable to this operation.
device_action (optional, str, refresh_inventory)
@@ -66,7 +66,7 @@ Parameters
job_wait_timeout (optional, int, 1200)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
job_schedule (optional, str, startnow)
@@ -98,11 +98,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
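A sketch of an inventory refresh using the job options documented above; the device_service_tags option name is an assumption, as the targeting options fall outside the hunks shown here.

  - name: Refresh inventory on selected devices and wait for the job
    dellemc.openmanage.ome_devices:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      device_service_tags:           # assumed option name for device targeting
        - MXL1234
      device_action: refresh_inventory
      job_wait: true
      job_wait_timeout: 1200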
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst
index 7a7d231f8..3469c4f4a 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_diagnostics.rst
@@ -156,11 +156,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -258,11 +258,11 @@ msg (always, str, Export log job completed successfully.)
Overall status of the export log.
-jog_status (success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'root'), ('Editable', True), ('EndTime', 'None'), ('Id', 12778), ('JobDescription', 'Export device log'), ('JobName', 'Export Log'), ('JobStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 18), ('Internal', False), ('Name', 'DebugLogs_Task')])), ('LastRun', '2021-07-06 10:52:50.519'), ('LastRunStatus', AnsibleMapping([('Id', 2060), ('Name', 'Completed')])), ('NextRun', 'None'), ('Schedule', 'startnow'), ('StartTime', 'None'), ('State', 'Enabled'), ('UpdatedBy', 'None'), ('UserGenerated', True), ('Visible', True), ('Params', [AnsibleMapping([('JobId', 12778), ('Key', 'maskSensitiveInfo'), ('Value', 'FALSE')]), AnsibleMapping([('JobId', 12778), ('Key', 'password'), ('Value', 'tY86w7q92u0QzvykuF0gQQ')]), AnsibleMapping([('JobId', 12778), ('Key', 'userName'), ('Value', 'administrator')]), AnsibleMapping([('JobId', 12778), ('Key', 'shareName'), ('Value', 'iso')]), AnsibleMapping([('JobId', 12778), ('Key', 'OPERATION_NAME'), ('Value', 'EXTRACT_LOGS')]), AnsibleMapping([('JobId', 12778), ('Key', 'shareType'), ('Value', 'CIFS')]), AnsibleMapping([('JobId', 12778), ('Key', 'shareAddress'), ('Value', '100.96.32.142')])]), ('Targets', [AnsibleMapping([('Data', ''), ('Id', 10053), ('JobId', 12778), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])])]))
+jog_status (success, dict, {'Builtin': False, 'CreatedBy': 'root', 'Editable': True, 'EndTime': 'None', 'Id': 12778, 'JobDescription': 'Export device log', 'JobName': 'Export Log', 'JobStatus': {'Id': 2080, 'Name': 'New'}, 'JobType': {'Id': 18, 'Internal': False, 'Name': 'DebugLogs_Task'}, 'LastRun': '2021-07-06 10:52:50.519', 'LastRunStatus': {'Id': 2060, 'Name': 'Completed'}, 'NextRun': 'None', 'Schedule': 'startnow', 'StartTime': 'None', 'State': 'Enabled', 'UpdatedBy': 'None', 'UserGenerated': True, 'Visible': True, 'Params': [{'JobId': 12778, 'Key': 'maskSensitiveInfo', 'Value': 'FALSE'}, {'JobId': 12778, 'Key': 'password', 'Value': 'tY86w7q92u0QzvykuF0gQQ'}, {'JobId': 12778, 'Key': 'userName', 'Value': 'administrator'}, {'JobId': 12778, 'Key': 'shareName', 'Value': 'iso'}, {'JobId': 12778, 'Key': 'OPERATION_NAME', 'Value': 'EXTRACT_LOGS'}, {'JobId': 12778, 'Key': 'shareType', 'Value': 'CIFS'}, {'JobId': 12778, 'Key': 'shareAddress', 'Value': '100.96.32.142'}], 'Targets': [{'Data': '', 'Id': 10053, 'JobId': 12778, 'TargetType': {'Id': 1000, 'Name': 'DEVICE'}}]})
Details of the export log operation status.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
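A sketch of an export-log task mirroring the job Params in the return payload above; the lower-case option names and the log_type value are assumptions, and the share details are placeholders.

  - name: Export device logs to a CIFS share
    dellemc.openmanage.ome_diagnostics:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      log_type: application          # assumed option name and value; the job above runs EXTRACT_LOGS
      share_type: CIFS               # mirrors shareType in the job Params
      share_address: "100.96.32.142" # mirrors shareAddress
      share_name: "iso"              # mirrors shareName
      share_user: "administrator"    # mirrors userName
      share_password: "share_password"
      mask_sensitive_info: false     # mirrors maskSensitiveInfo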
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst
index 79f68dd8f..e290629b1 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_discovery.rst
@@ -88,17 +88,17 @@ Parameters
job_wait_timeout (optional, int, 10800)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
ignore_partial_failure (optional, bool, False)
Provides the option to ignore partial failures. Partial failures occur when there is a combination of both discovered and undiscovered IPs.
- If ``False``, then the partial failure is not ignored, and the module will error out.
+ If ``false``, then the partial failure is not ignored, and the module will error out.
- If ``True``, then the partial failure is ignored.
+ If ``true``, then the partial failure is ignored.
- This option is only applicable if *job_wait* is ``True``.
+ This option is only applicable if *job_wait* is ``true``.
discovery_config_targets (optional, list, None)
@@ -424,11 +424,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -446,7 +446,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support ``check_mode``.
- If *state* is ``present``, then Idempotency is not supported.
@@ -578,9 +578,9 @@ Examples
password: ipmi_pwd
schedule: RunLater
cron: "0 0 9 ? * MON,WED,FRI *"
- ignore_partial_failure: True
- trap_destination: True
- community_string: True
+ ignore_partial_failure: true
+ trap_destination: true
+ community_string: true
email_recipient: test_email@company.com
- name: Discover servers with ca check enabled
@@ -598,7 +598,7 @@ Examples
wsman:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}"
- name: Discover chassis with ca check enabled data
@@ -616,7 +616,7 @@ Examples
redfish:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "-----BEGIN CERTIFICATE-----\r\n
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
@@ -633,7 +633,7 @@ msg (always, str, Successfully deleted 1 discovery job(s).)
Overall status of the discovery operation.
-discovery_status (when I(state) is C(present), dict, AnsibleMapping([('Completed', ['192.168.24.17', '192.168.24.20', '192.168.24.22']), ('Failed', ['192.168.24.15', '192.168.24.16', '192.168.24.18', '192.168.24.19', '192.168.24.21', 'host123']), ('DiscoveredDevicesByType', [AnsibleMapping([('Count', 3), ('DeviceType', 'SERVER')])]), ('DiscoveryConfigDiscoveredDeviceCount', 3), ('DiscoveryConfigEmailRecipient', 'myemail@dell.com'), ('DiscoveryConfigExpectedDeviceCount', 9), ('DiscoveryConfigGroupId', 125), ('JobDescription', 'D1'), ('JobEnabled', True), ('JobEndTime', '2021-01-01 06:27:29.99'), ('JobId', 12666), ('JobName', 'D1'), ('JobNextRun', None), ('JobProgress', '100'), ('JobSchedule', 'startnow'), ('JobStartTime', '2021-01-01 06:24:10.071'), ('JobStatusId', 2090), ('LastUpdateTime', '2021-01-01 06:27:30.001'), ('UpdatedBy', 'admin')]))
+discovery_status (when I(state) is C(present), dict, {'Completed': ['192.168.24.17', '192.168.24.20', '192.168.24.22'], 'Failed': ['192.168.24.15', '192.168.24.16', '192.168.24.18', '192.168.24.19', '192.168.24.21', 'host123'], 'DiscoveredDevicesByType': [{'Count': 3, 'DeviceType': 'SERVER'}], 'DiscoveryConfigDiscoveredDeviceCount': 3, 'DiscoveryConfigEmailRecipient': 'myemail@dell.com', 'DiscoveryConfigExpectedDeviceCount': 9, 'DiscoveryConfigGroupId': 125, 'JobDescription': 'D1', 'JobEnabled': True, 'JobEndTime': '2021-01-01 06:27:29.99', 'JobId': 12666, 'JobName': 'D1', 'JobNextRun': None, 'JobProgress': '100', 'JobSchedule': 'startnow', 'JobStartTime': '2021-01-01 06:24:10.071', 'JobStatusId': 2090, 'LastUpdateTime': '2021-01-01 06:27:30.001', 'UpdatedBy': 'admin'})
Details of the discovery job created or modified.
If *job_wait* is true, Completed and Failed IPs are also listed.
@@ -643,7 +643,11 @@ discovery_ids (when discoveries with duplicate name exist for I(state) is C(pres
IDs of the discoveries with duplicate names.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+job_detailed_status (All time., list, [{'ElapsedTime': '00:00:00', 'EndTime': None, 'ExecutionHistoryId': 564873, 'Id': 656893, 'IdBaseEntity': 0, 'JobStatus': {'Id': 2050, 'Name': 'Running'}, 'Key': '192.96.24.1', 'Progress': '0', 'StartTime': '2023-07-04 06:23:54.008', 'Value': 'Running\nDiscovery of target 192.96.24.1 started.\nDiscovery target resolved to IP 192.96.24.1 .'}])
+ Detailed last execution history of a job.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -662,4 +666,5 @@ Authors
- Jagadeesh N V (@jagadeeshnv)
- Sajna Shetty (@Sajna-Shetty)
+- Abhishek Sinha (@Abhishek-Dell)
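A sketch combining the job-control options documented above; discovery_config_targets is documented here, but its network_address_detail and device_types child names, and the discovery_job_name option, are assumptions, and the address range is a placeholder.

  - name: Discover servers and tolerate partially failed IP ranges
    dellemc.openmanage.ome_discovery:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      discovery_job_name: "Discover_servers"   # assumed option name
      discovery_config_targets:
        - network_address_detail:              # assumed child option names
            - 192.168.24.1-192.168.24.255
          device_types:
            - SERVER
          wsman:
            username: user
            password: password
      job_wait: true
      job_wait_timeout: 3600
      ignore_partial_failure: true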
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst
index 29a8b20c6..a9d971a78 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_domain_user_groups.rst
@@ -1,8 +1,8 @@
.. _ome_domain_user_groups_module:
-ome_domain_user_groups -- Create, modify, or delete an Active Directory user group on OpenManage Enterprise and OpenManage Enterprise Modular
-=============================================================================================================================================
+ome_domain_user_groups -- Create, modify, or delete an Active Directory/LDAP user group on OpenManage Enterprise and OpenManage Enterprise Modular
+==================================================================================================================================================
.. contents::
:local:
@@ -12,7 +12,7 @@ ome_domain_user_groups -- Create, modify, or delete an Active Directory user gro
Synopsis
--------
-This module allows to create, modify, or delete an Active Directory user group on OpenManage Enterprise and OpenManage Enterprise Modular.
+This module allows you to create, modify, or delete an Active Directory/LDAP user group on OpenManage Enterprise and OpenManage Enterprise Modular.
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python >= 3.9.6
@@ -28,13 +28,13 @@ Parameters
----------
state (optional, str, present)
- ``present`` imports or modifies the Active Directory user group.
+ ``present`` imports or modifies the Active Directory/LDAP user group.
- ``absent`` deletes an existing Active Directory user group.
+ ``absent`` deletes an existing Active Directory/LDAP user group.
group_name (True, str, None)
- The desired Active Directory user group name to be imported or removed.
+ The desired Active Directory/LDAP user group name to be imported or removed.
Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator.
@@ -42,7 +42,7 @@ Parameters
role (optional, str, None)
- The desired roles and privilege for the imported Active Directory user group.
+ The desired roles and privilege for the imported Active Directory/LDAP user group.
OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER, FABRIC MANAGER, VIEWER.
@@ -52,25 +52,29 @@ Parameters
directory_name (optional, str, None)
- The directory name set while adding the Active Directory.
+ The directory name set while adding the Active Directory/LDAP.
*directory_name* is mutually exclusive with *directory_id*.
+ directory_type (optional, str, AD)
+ Type of the account.
+
+
directory_id (optional, int, None)
- The ID of the Active Directory.
+ The ID of the Active Directory/LDAP.
*directory_id* is mutually exclusive with *directory_name*.
domain_username (optional, str, None)
- Active directory domain username.
+ Active Directory/LDAP domain username.
Example: username@domain or domain\username.
domain_password (optional, str, None)
- Active directory domain password.
+ Active Directory/LDAP domain password.
hostname (True, str, None)
@@ -90,11 +94,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -157,20 +161,43 @@ Examples
state: absent
group_name: administrators
+ - name: Import LDAP directory group.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ directory_type: LDAP
+ state: present
+ group_name: account operators
+ directory_name: directory_name
+ role: administrator
+ domain_username: username@domain
+ domain_password: domain_password
+
+ - name: Remove LDAP directory group.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ group_name: account operators
+
Return Values
-------------
-msg (always, str, Successfully imported the active directory user group.)
- Overall status of the Active Directory user group operation.
+msg (always, str, Successfully imported the Active Directory/LDAP user group.)
+ Overall status of the Active Directory/LDAP user group operation.
-domain_user_status (When I(state) is C(present)., dict, AnsibleMapping([('Description', None), ('DirectoryServiceId', 16097), ('Enabled', True), ('Id', '16617'), ('IsBuiltin', False), ('IsVisible', True), ('Locked', False), ('Name', 'Account Operators'), ('ObjectGuid', 'a491859c-031e-42a3-ae5e-0ab148ecf1d6'), ('ObjectSid', None), ('Oem', None), ('Password', None), ('PlainTextPassword', None), ('RoleId', '16'), ('UserName', 'Account Operators'), ('UserTypeId', 2)]))
+domain_user_status (When I(state) is C(present)., dict, {'Description': None, 'DirectoryServiceId': 16097, 'Enabled': True, 'Id': '16617', 'IsBuiltin': False, 'IsVisible': True, 'Locked': False, 'Name': 'Account Operators', 'ObjectGuid': 'a491859c-031e-42a3-ae5e-0ab148ecf1d6', 'ObjectSid': None, 'Oem': None, 'Password': None, 'PlainTextPassword': None, 'RoleId': '16', 'UserName': 'Account Operators', 'UserTypeId': 2})
Details of the domain user operation, when *state* is ``present``.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -188,4 +215,5 @@ Authors
~~~~~~~
- Felix Stephen (@felixs88)
+- Abhishek Sinha (@Abhishek-Dell)
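A complementary sketch to the LDAP examples above: importing an Active Directory group by directory ID instead of directory name, since the two options are documented as mutually exclusive; the credentials are placeholders and the ID value mirrors DirectoryServiceId in the return payload.

  - name: Import an Active Directory group using the directory ID
    dellemc.openmanage.ome_domain_user_groups:
      hostname: "192.168.0.1"
      username: "username"
      password: "password"
      ca_path: "/path/to/ca_cert.pem"
      state: present
      directory_type: AD
      directory_id: 16097
      group_name: "Account Operators"
      role: administrator
      domain_username: username@domain
      domain_password: domain_password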
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst
index 4dcc4ae3b..f9870d4f9 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware.rst
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python >= 3.9.6
@@ -61,7 +61,7 @@ Parameters
This option is mutually exclusive with *dup_file* and *device_group_names*.
- components (optional, list, None)
+ components (optional, list, [])
List of components to be updated.
If not provided, all components applicable are considered.
@@ -89,7 +89,7 @@ Parameters
This option is mutually exclusive with *id*.
- components (optional, list, None)
+ components (optional, list, [])
The target components to be updated. If not specified, all applicable device components are considered.
@@ -102,6 +102,18 @@ Parameters
if ``RebootNow`` will apply the firmware updates immediately.
+ reboot_type (optional, str, GracefulRebootForce)
+ This option provides the choices to reboot the server immediately after the firmware update.
+
+ This is applicable when *schedule* is ``RebootNow``.
+
+ ``GracefulRebootForce`` performs a graceful reboot with forced shutdown.
+
+ ``GracefulReboot`` performs a graceful reboot without forced shutdown.
+
+ ``PowerCycle`` performs a power cycle for a hard reset on the device.
+
+
hostname (True, str, None)
OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
@@ -119,11 +131,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
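Every module in this collection exposes the same *validate_certs* and *ca_path* pair, so a minimal sketch of the two usual TLS setups may help; the hostname, credentials, CA bundle path, and baseline name below are placeholders rather than values taken from this change.

    - name: Update firmware, validating the appliance certificate against a private CA bundle
      dellemc.openmanage.ome_firmware:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"   # CA bundle used to validate the appliance certificate
        baseline_name: baseline_devices

    - name: Update firmware on a personally controlled site that uses a self-signed certificate
      dellemc.openmanage.ome_firmware:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        validate_certs: false             # SSL certificates are not validated
        baseline_name: baseline_devices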
@@ -141,7 +153,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -249,7 +261,7 @@ Examples
devices:
- id: 12345
components:
- - Lifecycle Controller
+ - Lifecycle Controller
- id: 12346
components:
- Enterprise UEFI Diagnostics
@@ -287,6 +299,17 @@ Examples
components:
- iDRAC with Lifecycle Controller
+ - name: "Update firmware using baseline name and components and perform Powercycle."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ components:
+ - BIOS
+ reboot_type: PowerCycle
+
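The example above exercises ``PowerCycle`` only; for the other *reboot_type* choices described earlier, a comparable sketch (same placeholder credentials) would be:

    - name: Update firmware using baseline name and perform a graceful reboot without forced shutdown
      dellemc.openmanage.ome_firmware:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        baseline_name: baseline_devices
        schedule: RebootNow            # reboot_type is applicable only when schedule is RebootNow
        reboot_type: GracefulReboot    # or GracefulRebootForce for a graceful reboot with forced shutdown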
Return Values
@@ -296,11 +319,11 @@ msg (always, str, Successfully submitted the firmware update job.)
Overall firmware update status.
-update_status (success, dict, AnsibleMapping([('LastRun', 'None'), ('CreatedBy', 'user'), ('Schedule', 'startnow'), ('LastRunStatus', AnsibleMapping([('Id', 1111), ('Name', 'NotRun')])), ('Builtin', False), ('Editable', True), ('NextRun', 'None'), ('JobStatus', AnsibleMapping([('Id', 1111), ('Name', 'New')])), ('JobName', 'Firmware Update Task'), ('Visible', True), ('State', 'Enabled'), ('JobDescription', 'dup test'), ('Params', [AnsibleMapping([('Value', 'true'), ('Key', 'signVerify'), ('JobId', 11111)]), AnsibleMapping([('Value', 'false'), ('Key', 'stagingValue'), ('JobId', 11112)]), AnsibleMapping([('Value', 'false'), ('Key', 'complianceUpdate'), ('JobId', 11113)]), AnsibleMapping([('Value', 'INSTALL_FIRMWARE'), ('Key', 'operationName'), ('JobId', 11114)])]), ('Targets', [AnsibleMapping([('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')])), ('Data', 'DCIM:INSTALLED#701__NIC.Mezzanine.1A-1-1=1234567654321'), ('Id', 11115), ('JobId', 11116)])]), ('StartTime', 'None'), ('UpdatedBy', 'None'), ('EndTime', 'None'), ('Id', 11117), ('JobType', AnsibleMapping([('Internal', False), ('Id', 5), ('Name', 'Update_Task')]))]))
+update_status (success, dict, {'LastRun': 'None', 'CreatedBy': 'user', 'Schedule': 'startnow', 'LastRunStatus': {'Id': 1111, 'Name': 'NotRun'}, 'Builtin': False, 'Editable': True, 'NextRun': 'None', 'JobStatus': {'Id': 1111, 'Name': 'New'}, 'JobName': 'Firmware Update Task', 'Visible': True, 'State': 'Enabled', 'JobDescription': 'dup test', 'Params': [{'Value': 'true', 'Key': 'signVerify', 'JobId': 11111}, {'Value': 'false', 'Key': 'stagingValue', 'JobId': 11112}, {'Value': 'false', 'Key': 'complianceUpdate', 'JobId': 11113}, {'Value': 'INSTALL_FIRMWARE', 'Key': 'operationName', 'JobId': 11114}], 'Targets': [{'TargetType': {'Id': 1000, 'Name': 'DEVICE'}, 'Data': 'DCIM:INSTALLED#701__NIC.Mezzanine.1A-1-1=1234567654321', 'Id': 11115, 'JobId': 11116}], 'StartTime': 'None', 'UpdatedBy': 'None', 'EndTime': 'None', 'Id': 11117, 'JobType': {'Internal': False, 'Id': 5, 'Name': 'Update_Task'}})
The firmware update job and progress details from the OME.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -319,4 +342,5 @@ Authors
- Felix Stephen (@felixs88)
- Jagadeesh N V (@jagadeeshnv)
+- Abhishek Sinha (@ABHISHEK-SINHA10)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst
index 673804ea8..971551b21 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline.rst
@@ -60,13 +60,13 @@ Parameters
downgrade_enabled (optional, bool, None)
Indicates whether firmware downgrade is allowed for the devices in the baseline.
- This value will be set to ``True`` by default, if not provided during baseline creation.
+ This value will be set to ``true`` by default, if not provided during baseline creation.
is_64_bit (optional, bool, None)
Indicates if the repository contains 64-bit DUPs.
- This value will be set to ``True`` by default, if not provided during baseline creation.
+ This value will be set to ``true`` by default, if not provided during baseline creation.
device_ids (optional, list, None)
@@ -96,7 +96,11 @@ Parameters
job_wait_timeout (optional, int, 600)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
+
+
+ filter_no_reboot_required (optional, bool, None)
+  Selecting only the components with no reboot required allows you to create a firmware/driver baseline that consists only of those components of the target devices that do not require a reboot.
hostname (True, str, None)
@@ -116,11 +120,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -138,7 +142,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- *device_group_names* option is not applicable for OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -165,6 +169,20 @@ Examples
- 1010
- 2020
+ - name: Create baseline for device IDs with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_ids:
+ - 1010
+ - 2020
+
- name: Create baseline for servicetags
dellemc.openmanage.ome_firmware_baseline:
hostname: "192.168.0.1"
@@ -178,6 +196,20 @@ Examples
- "SVCTAG1"
- "SVCTAG2"
+ - name: Create baseline for servicetags with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+
- name: Create baseline for device groups without job tracking
dellemc.openmanage.ome_firmware_baseline:
hostname: "192.168.0.1"
@@ -190,7 +222,7 @@ Examples
device_group_names:
- "Group1"
- "Group2"
- job_wait: no
+ job_wait: false
- name: Modify an existing baseline
dellemc.openmanage.ome_firmware_baseline:
@@ -206,8 +238,18 @@ Examples
- "Group3"
- "Group4"
- "Group5"
- downgrade_enabled: no
- is_64_bit: yes
+ downgrade_enabled: false
+ is_64_bit: true
+
+ - name: Modify no reboot filter in existing baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "existing_baseline_name"
+ new_baseline_name: "new_baseline_name"
+ filter_no_reboot_required: true
- name: Delete a baseline
dellemc.openmanage.ome_firmware_baseline:
@@ -227,7 +269,7 @@ msg (always, str, Successfully created the firmware baseline.)
Overall status of the firmware baseline operation.
-baseline_status (success, dict, AnsibleMapping([('CatalogId', 123), ('Description', 'BASELINE DESCRIPTION'), ('DeviceComplianceReports', []), ('DowngradeEnabled', True), ('Id', 23), ('Is64Bit', True), ('Name', 'my_baseline'), ('RepositoryId', 123), ('RepositoryName', 'catalog123'), ('RepositoryType', 'HTTP'), ('Targets', [AnsibleMapping([('Id', 10083), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))]), AnsibleMapping([('Id', 10076), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('TaskId', 11235), ('TaskStatusId', 2060)]))
+baseline_status (success, dict, {'CatalogId': 123, 'Description': 'BASELINE DESCRIPTION', 'DeviceComplianceReports': [], 'DowngradeEnabled': True, 'FilterNoRebootRequired': True, 'Id': 23, 'Is64Bit': True, 'Name': 'my_baseline', 'RepositoryId': 123, 'RepositoryName': 'catalog123', 'RepositoryType': 'HTTP', 'Targets': [{'Id': 10083, 'Type': {'Id': 1000, 'Name': 'DEVICE'}}, {'Id': 10076, 'Type': {'Id': 1000, 'Name': 'DEVICE'}}], 'TaskId': 11235, 'TaskStatusId': 2060})
Details of the baseline status.
@@ -239,7 +281,7 @@ baseline_id (When I(state) is C(absent), int, 10123)
ID of the deleted baseline.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to retrieve baseline list either because the device ID(s) entered are invalid'), ('Resolution', 'Make sure the entered device ID(s) are valid and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to retrieve baseline list either because the device ID(s) entered are invalid', 'Resolution': 'Make sure the entered device ID(s) are valid and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of http error.
@@ -257,4 +299,5 @@ Authors
~~~~~~~
- Jagadeesh N V(@jagadeeshnv)
+- Kritika Bhateja (@Kritika-Bhateja-03)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst
index 80b4c5077..a1c5ca246 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_compliance_info.rst
@@ -82,11 +82,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -104,7 +104,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -124,8 +124,8 @@ Examples
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_ids:
- - 11111
- - 22222
+ - 11111
+ - 22222
- name: Retrieves device based compliance report for specified service Tags
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -134,8 +134,8 @@ Examples
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_service_tags:
- - MXL1234
- - MXL4567
+ - MXL1234
+ - MXL4567
- name: Retrieves device based compliance report for specified group names
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -144,8 +144,8 @@ Examples
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_group_names:
- - "group1"
- - "group2"
+ - "group1"
+ - "group2"
- name: Retrieves device compliance report for a specified baseline
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -164,11 +164,11 @@ msg (on error, str, Failed to fetch the compliance baseline information.)
Overall baseline compliance report status.
-baseline_compliance_info (success, dict, [AnsibleMapping([('CatalogId', 53), ('ComplianceSummary', AnsibleMapping([('ComplianceStatus', 'CRITICAL'), ('NumberOfCritical', 2), ('NumberOfDowngrade', 0), ('NumberOfNormal', 0), ('NumberOfWarning', 0)])), ('Description', ''), ('DeviceComplianceReports', [AnsibleMapping([('ComplianceStatus', 'CRITICAL'), ('ComponentComplianceReports', [AnsibleMapping([('ComplianceDependencies', []), ('ComplianceStatus', 'DOWNGRADE'), ('Criticality', 'Ok'), ('CurrentVersion', 'OSC_1.1'), ('Id', 1258), ('ImpactAssessment', ''), ('Name', 'OS COLLECTOR 2.1'), ('Path', 'FOLDER04118304M/2/Diagnostics_Application_JCCH7_WN64_4.0_A00_01.EXE'), ('PrerequisiteInfo', ''), ('RebootRequired', False), ('SourceName', 'DCIM:INSTALLED#802__OSCollector.Embedded.1'), ('TargetIdentifier', '101734'), ('UniqueIdentifier', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'), ('UpdateAction', 'DOWNGRADE'), ('Uri', 'http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX'), ('Version', '4.0')]), AnsibleMapping([('ComplianceDependencies', []), ('ComplianceStatus', 'CRITICAL'), ('Criticality', 'Recommended'), ('CurrentVersion', 'DN02'), ('Id', 1259), ('ImpactAssessment', ''), ('Name', 'TOSHIBA AL14SE 1.8 TB 2.5 12Gb 10K 512n SAS HDD Drive'), ('Path', 'FOLDER04086111M/1/SAS-Drive_Firmware_VDGFM_WN64_DN03_A00.EXE'), ('PrerequisiteInfo', ''), ('RebootRequired', True), ('SourceName', 'DCIM:INSTALLED#304_C_Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1'), ('TargetIdentifier', '103730'), ('UniqueIdentifier', 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx'), ('UpdateAction', 'UPGRADE'), ('Uri', 'http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX'), ('Version', 'DN03')])]), ('DeviceId', 11603), ('DeviceModel', 'PowerEdge R630'), ('DeviceName', None), ('DeviceTypeId', 1000), ('DeviceTypeName', 'CPGCGS'), ('FirmwareStatus', 'Non-Compliant'), ('Id', 194), ('RebootRequired', True), ('ServiceTag', 'MXL1234')])]), ('DowngradeEnabled', True), ('Id', 53), ('Is64Bit', False), ('LastRun', '2019-09-27 05:08:16.301'), ('Name', 'baseline1'), ('RepositoryId', 43), ('RepositoryName', 'catalog2'), ('RepositoryType', 'CIFS'), ('Targets', [AnsibleMapping([('Id', 11603), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('TaskId', 11710), ('TaskStatusId', 0)])])
+baseline_compliance_info (success, dict, [{'CatalogId': 53, 'ComplianceSummary': {'ComplianceStatus': 'CRITICAL', 'NumberOfCritical': 2, 'NumberOfDowngrade': 0, 'NumberOfNormal': 0, 'NumberOfWarning': 0}, 'Description': '', 'DeviceComplianceReports': [{'ComplianceStatus': 'CRITICAL', 'ComponentComplianceReports': [{'ComplianceDependencies': [], 'ComplianceStatus': 'DOWNGRADE', 'Criticality': 'Ok', 'CurrentVersion': 'OSC_1.1', 'Id': 1258, 'ImpactAssessment': '', 'Name': 'OS COLLECTOR 2.1', 'Path': 'FOLDER04118304M/2/Diagnostics_Application_JCCH7_WN64_4.0_A00_01.EXE', 'PrerequisiteInfo': '', 'RebootRequired': False, 'SourceName': 'DCIM:INSTALLED#802__OSCollector.Embedded.1', 'TargetIdentifier': '101734', 'UniqueIdentifier': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'UpdateAction': 'DOWNGRADE', 'Uri': 'http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX', 'Version': '4.0'}, {'ComplianceDependencies': [], 'ComplianceStatus': 'CRITICAL', 'Criticality': 'Recommended', 'CurrentVersion': 'DN02', 'Id': 1259, 'ImpactAssessment': '', 'Name': 'TOSHIBA AL14SE 1.8 TB 2.5 12Gb 10K 512n SAS HDD Drive', 'Path': 'FOLDER04086111M/1/SAS-Drive_Firmware_VDGFM_WN64_DN03_A00.EXE', 'PrerequisiteInfo': '', 'RebootRequired': True, 'SourceName': 'DCIM:INSTALLED#304_C_Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1', 'TargetIdentifier': '103730', 'UniqueIdentifier': 'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx', 'UpdateAction': 'UPGRADE', 'Uri': 'http://www.dell.com/support/home/us/en/19/Drivers/DriversDetails?driverId=XXXXX', 'Version': 'DN03'}], 'DeviceId': 11603, 'DeviceModel': 'PowerEdge R630', 'DeviceName': None, 'DeviceTypeId': 1000, 'DeviceTypeName': 'CPGCGS', 'FirmwareStatus': 'Non-Compliant', 'Id': 194, 'RebootRequired': True, 'ServiceTag': 'MXL1234'}], 'DowngradeEnabled': True, 'Id': 53, 'Is64Bit': False, 'LastRun': '2019-09-27 05:08:16.301', 'Name': 'baseline1', 'RepositoryId': 43, 'RepositoryName': 'catalog2', 'RepositoryType': 'CIFS', 'Targets': [{'Id': 11603, 'Type': {'Id': 1000, 'Name': 'DEVICE'}}], 'TaskId': 11710, 'TaskStatusId': 0}])
Details of the baseline compliance report.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to retrieve baseline list either because the device ID(s) entered are invalid'), ('Resolution', 'Make sure the entered device ID(s) are valid and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to retrieve baseline list either because the device ID(s) entered are invalid', 'Resolution': 'Make sure the entered device ID(s) are valid and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of http error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst
index 8b03396fc..49ba4cd8f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_baseline_info.rst
@@ -48,11 +48,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -70,7 +70,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -107,7 +107,7 @@ msg (on error, str, Successfully fetched firmware baseline information.)
Overall baseline information.
-baseline_info (success, dict, AnsibleMapping([('@odata.id', '/api/UpdateService/Baselines(239)'), ('@odata.type', '#UpdateService.Baselines'), ('CatalogId', 22), ('ComplianceSummary', AnsibleMapping([('ComplianceStatus', 'CRITICAL'), ('NumberOfCritical', 1), ('NumberOfDowngrade', 0), ('NumberOfNormal', 0), ('NumberOfWarning', 0)])), ('Description', 'baseline_description'), ('DeviceComplianceReports@odata.navigationLink', '/api/UpdateService/Baselines(239)/DeviceComplianceReports'), ('DowngradeEnabled', True), ('Id', 239), ('Is64Bit', True), ('LastRun', '2020-05-22 16:42:40.307'), ('Name', 'baseline_name'), ('RepositoryId', 12), ('RepositoryName', 'HTTP DELL'), ('RepositoryType', 'DELL_ONLINE'), ('Targets', [AnsibleMapping([('Id', 10342), ('Type', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('TaskId', 41415), ('TaskStatusId', 2060)]))
+baseline_info (success, dict, {'@odata.id': '/api/UpdateService/Baselines(239)', '@odata.type': '#UpdateService.Baselines', 'CatalogId': 22, 'ComplianceSummary': {'ComplianceStatus': 'CRITICAL', 'NumberOfCritical': 1, 'NumberOfDowngrade': 0, 'NumberOfNormal': 0, 'NumberOfWarning': 0}, 'Description': 'baseline_description', 'DeviceComplianceReports@odata.navigationLink': '/api/UpdateService/Baselines(239)/DeviceComplianceReports', 'DowngradeEnabled': True, 'Id': 239, 'Is64Bit': True, 'LastRun': '2020-05-22 16:42:40.307', 'Name': 'baseline_name', 'RepositoryId': 12, 'RepositoryName': 'HTTP DELL', 'RepositoryType': 'DELL_ONLINE', 'Targets': [{'Id': 10342, 'Type': {'Id': 1000, 'Name': 'DEVICE'}}], 'TaskId': 41415, 'TaskStatusId': 2060})
Details of the baselines.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst
index 99983a769..2813e66a6 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_firmware_catalog.rst
@@ -104,7 +104,7 @@ Parameters
check_certificate (optional, bool, False)
- The certificate warnings are ignored when *repository_type* is HTTPS. If ``True``. If not, certificate warnings are not ignored.
+  If ``true``, the certificate warnings are ignored when *repository_type* is HTTPS. If not, certificate warnings are not ignored.
job_wait (optional, bool, True)
@@ -116,7 +116,7 @@ Parameters
job_wait_timeout (optional, int, 600)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
hostname (True, str, None)
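As a sketch of bounding how long the catalog job is tracked, the task below combines *job_wait* with *job_wait_timeout*; the timeout value and credentials are illustrative placeholders.

    - name: Create a catalog from the Dell online repository and track the job for at most 20 minutes
      dellemc.openmanage.ome_firmware_catalog:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        catalog_name: "catalog_name"
        repository_type: "DELL_ONLINE"
        job_wait: true             # default; the job is tracked only while this is true
        job_wait_timeout: 1200     # seconds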
@@ -136,11 +136,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -159,7 +159,8 @@ Notes
.. note::
- If *repository_password* is provided, then the module always reports the changed status.
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
- This module supports ``check_mode``.
@@ -184,7 +185,7 @@ Examples
source: "downloads.dell.com"
source_path: "catalog"
file_name: "catalog.gz"
- check_certificate: True
+ check_certificate: true
- name: Create a catalog from HTTP repository
dellemc.openmanage.ome_firmware_catalog:
@@ -237,7 +238,7 @@ Examples
catalog_name: "catalog_name"
catalog_description: "catalog_description"
repository_type: "DELL_ONLINE"
- check_certificate: True
+ check_certificate: true
- name: Modify a catalog using a repository from CIFS share
dellemc.openmanage.ome_firmware_catalog:
@@ -293,7 +294,7 @@ msg (always, str, Successfully triggered the job to create a catalog with Task I
Overall status of the firmware catalog operation.
-catalog_status (When I(state) is C(present), dict, AnsibleMapping([('AssociatedBaselines', []), ('BaseLocation', None), ('BundlesCount', 0), ('Filename', 'catalog.gz'), ('Id', 0), ('LastUpdated', None), ('ManifestIdentifier', None), ('ManifestVersion', None), ('NextUpdate', None), ('PredecessorIdentifier', None), ('ReleaseDate', None), ('ReleaseIdentifier', None), ('Repository', AnsibleMapping([('CheckCertificate', True), ('Description', 'HTTPS Desc'), ('DomainName', None), ('Id', None), ('Name', 'catalog4'), ('Password', None), ('RepositoryType', 'HTTPS'), ('Source', 'company.com'), ('Username', None)])), ('Schedule', None), ('SourcePath', 'catalog'), ('Status', None), ('TaskId', 10094)]))
+catalog_status (When I(state) is C(present), dict, {'AssociatedBaselines': [], 'BaseLocation': None, 'BundlesCount': 0, 'Filename': 'catalog.gz', 'Id': 0, 'LastUpdated': None, 'ManifestIdentifier': None, 'ManifestVersion': None, 'NextUpdate': None, 'PredecessorIdentifier': None, 'ReleaseDate': None, 'ReleaseIdentifier': None, 'Repository': {'CheckCertificate': True, 'Description': 'HTTPS Desc', 'DomainName': None, 'Id': None, 'Name': 'catalog4', 'Password': None, 'RepositoryType': 'HTTPS', 'Source': 'company.com', 'Username': None}, 'Schedule': None, 'SourcePath': 'catalog', 'Status': None, 'TaskId': 10094})
Details of the catalog operation.
@@ -305,11 +306,11 @@ catalog_id (When I(state) is C(absent), int, 10123)
IDs of the deleted catalog.
-associated_baselines (When I(state) is C(absent), list, [AnsibleMapping([('BaselineId', 24), ('BaselineName', 'new')]), AnsibleMapping([('BaselineId', 25), ('BaselineName', 'c7')]), AnsibleMapping([('BaselineId', 27), ('BaselineName', 'c4')])])
+associated_baselines (When I(state) is C(absent), list, [{'BaselineId': 24, 'BaselineName': 'new'}, {'BaselineId': 25, 'BaselineName': 'c7'}, {'BaselineId': 27, 'BaselineName': 'c4'}])
IDs of the baselines associated with catalog.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to create or update the catalog because a repository with the same name already exists.'), ('Resolution', 'Enter a different name and retry the operation.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to create or update the catalog because a repository with the same name already exists.', 'Resolution': 'Enter a different name and retry the operation.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the http error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst
index d5abeab16..4322d460c 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_groups.rst
@@ -100,11 +100,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -122,10 +122,10 @@ Notes
-----
.. note::
- - This module manages only static device groups on Dell EMC OpenManage Enterprise.
+ - This module manages only static device groups on Dell OpenManage Enterprise.
- If a device group with the name *parent_group_name* does not exist, a new device group with the same name is created.
- Make sure the entered parent group is not the descendant of the provided group.
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
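A minimal sketch of the parent-group behavior described in the note above; the group names are placeholders, and the *name* option is an assumption here alongside the documented *parent_group_name*.

    - name: Create a static group under a parent group (the parent is created if it does not exist)
      dellemc.openmanage.ome_groups:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        name: "group 1"                    # assumed option name for the group being created
        parent_group_name: "parent group"  # created automatically if it does not already exist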
@@ -187,7 +187,7 @@ msg (always, str, Successfully deleted the device group(s).)
Overall status of the device group operation.
-group_status (success, dict, AnsibleMapping([('Description', 'my group description'), ('Id', 12123), ('MembershipTypeId', 12), ('Name', 'group 1'), ('ParentId', 12345), ('TypeId', 3000), ('IdOwner', 30), ('CreatedBy', 'admin'), ('CreationTime', '2021-01-01 10:10:10.100'), ('DefinitionDescription', 'UserDefined'), ('DefinitionId', 400), ('GlobalStatus', 5000), ('HasAttributes', False), ('UpdatedBy', ''), ('UpdatedTime', '2021-01-01 11:11:10.100'), ('Visible', True)]))
+group_status (success, dict, {'Description': 'my group description', 'Id': 12123, 'MembershipTypeId': 12, 'Name': 'group 1', 'ParentId': 12345, 'TypeId': 3000, 'IdOwner': 30, 'CreatedBy': 'admin', 'CreationTime': '2021-01-01 10:10:10.100', 'DefinitionDescription': 'UserDefined', 'DefinitionId': 400, 'GlobalStatus': 5000, 'HasAttributes': False, 'UpdatedBy': '', 'UpdatedTime': '2021-01-01 11:11:10.100', 'Visible': True})
Details of the device group operation status.
@@ -199,7 +199,7 @@ invalid_groups (when I(state) is C(absent), list, [1234, 5678])
List of the invalid device group IDs or names.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGRP9013'), ('RelatedProperties', []), ('Message', 'Unable to update group 12345 with the provided parent 54321 because a group/parent relationship already exists.'), ('MessageArgs', ['12345', '54321']), ('Severity', 'Warning'), ('Resolution', 'Make sure the entered parent ID does not create a bidirectional relationship and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGRP9013', 'RelatedProperties': [], 'Message': 'Unable to update group 12345 with the provided parent 54321 because a group/parent relationship already exists.', 'MessageArgs': ['12345', '54321'], 'Severity': 'Warning', 'Resolution': 'Make sure the entered parent ID does not create a bidirectional relationship and retry the operation.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst
index 733c837c8..4df20d14f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_identity_pool.rst
@@ -160,11 +160,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -182,7 +182,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -205,25 +205,25 @@ Examples
pool_name: "pool1"
pool_description: "Identity pool with Ethernet, FCoE, iSCSI and FC settings"
ethernet_settings:
- starting_mac_address: "50:50:50:50:50:00"
- identity_count: 60
+ starting_mac_address: "50:50:50:50:50:00"
+ identity_count: 60
fcoe_settings:
- starting_mac_address: "70:70:70:70:70:00"
- identity_count: 75
+ starting_mac_address: "70:70:70:70:70:00"
+ identity_count: 75
iscsi_settings:
- starting_mac_address: "60:60:60:60:60:00"
- identity_count: 30
- initiator_config:
- iqn_prefix: "iqn.myprefix."
- initiator_ip_pool_settings:
- ip_range: "10.33.0.1-10.33.0.255"
- subnet_mask: "255.255.255.0"
- gateway: "192.168.4.1"
- primary_dns_server : "10.8.8.8"
- secondary_dns_server : "8.8.8.8"
+ starting_mac_address: "60:60:60:60:60:00"
+ identity_count: 30
+ initiator_config:
+ iqn_prefix: "iqn.myprefix."
+ initiator_ip_pool_settings:
+ ip_range: "10.33.0.1-10.33.0.255"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.4.1"
+ primary_dns_server: "10.8.8.8"
+ secondary_dns_server: "8.8.8.8"
fc_settings:
- starting_address: "30:30:30:30:30:00"
- identity_count: 45
+ starting_address: "30:30:30:30:30:00"
+ identity_count: 45
- name: Create an identity pool using only ethernet settings
dellemc.openmanage.ome_identity_pool:
@@ -234,8 +234,8 @@ Examples
pool_name: "pool2"
pool_description: "create identity pool with ethernet"
ethernet_settings:
- starting_mac_address: "aa-bb-cc-dd-ee-aa"
- identity_count: 80
+ starting_mac_address: "aa-bb-cc-dd-ee-aa"
+ identity_count: 80
- name: Modify an identity pool
dellemc.openmanage.ome_identity_pool:
@@ -247,11 +247,11 @@ Examples
new_pool_name: "pool3"
pool_description: "modifying identity pool with ethernet and fcoe settings"
ethernet_settings:
- starting_mac_address: "90-90-90-90-90-90"
- identity_count: 61
+ starting_mac_address: "90-90-90-90-90-90"
+ identity_count: 61
fcoe_settings:
- starting_mac_address: "aabb.ccdd.5050"
- identity_count: 77
+ starting_mac_address: "aabb.ccdd.5050"
+ identity_count: 77
- name: Modify an identity pool using iSCSI and FC settings
dellemc.openmanage.ome_identity_pool:
@@ -290,11 +290,11 @@ msg (always, str, Successfully created an identity pool.)
Overall status of the identity pool operation.
-pool_status (success, dict, AnsibleMapping([('Id', 29), ('IsSuccessful', True), ('Issues', [])]))
+pool_status (success, dict, {'Id': 29, 'IsSuccessful': True, 'Issues': []})
Details of the user operation, when *state* is ``present``.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to process the request because an error occurred: Ethernet-MAC Range overlap found (in this Identity Pool or in a different one) .'), ('MessageArgs', ['Ethernet-MAC Range overlap found (in this Identity Pool or in a different one)"']), ('MessageId', 'CGEN6001'), ('RelatedProperties', []), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.'), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on HTTP error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to process the request because an error occurred: Ethernet-MAC Range overlap found (in this Identity Pool or in a different one) .', 'MessageArgs': ['Ethernet-MAC Range overlap found (in this Identity Pool or in a different one)"'], 'MessageId': 'CGEN6001', 'RelatedProperties': [], 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.', 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst
index cd4170162..5c998cf44 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_job_info.rst
@@ -48,6 +48,16 @@ Parameters
+ fetch_execution_history (optional, bool, False)
+ Fetches the execution history of the job.
+
+  *fetch_execution_history* is only applicable when a valid *job_id* is given.
+
+ When ``true``, fetches all the execution history details.
+
+ When ``false``, fetches only the job info and last execution details.
+
+
hostname (True, str, None)
OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
@@ -65,11 +75,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -87,7 +97,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -126,6 +136,14 @@ Examples
skip: 1
filter: "JobType/Id eq 8"
+    - name: Get detailed job execution history with last execution detail for a job.
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: 12345
+ fetch_execution_history: true
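Because the histories come back under the *job_info* return value, a small follow-on sketch for inspecting them (``register`` and ``ansible.builtin.debug`` are standard Ansible; the variable name is arbitrary):

    - name: Get detailed execution history for a job and keep the result
      dellemc.openmanage.ome_job_info:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        job_id: 12345
        fetch_execution_history: true
      register: job_result

    - name: Show the execution histories returned for the job
      ansible.builtin.debug:
        var: job_result.job_info.value[0].ExecutionHistories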
@@ -136,7 +154,7 @@ msg (always, str, Successfully fetched the job info)
Overall status of the job facts operation.
-job_info (success, dict, AnsibleMapping([('value', [AnsibleMapping([('Builtin', False), ('CreatedBy', 'system'), ('Editable', True), ('EndTime', None), ('Id', 12345), ('JobDescription', 'Refresh Inventory for Device'), ('JobName', 'Refresh Inventory for Device'), ('JobStatus', AnsibleMapping([('Id', 2080), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 8), ('Internal', False), ('Name', 'Inventory_Task')])), ('LastRun', '2000-01-29 10:51:34.776'), ('LastRunStatus', AnsibleMapping([('Id', 2060), ('Name', 'Completed')])), ('NextRun', None), ('Params', []), ('Schedule', ''), ('StartTime', None), ('State', 'Enabled'), ('Targets', [AnsibleMapping([('Data', "''"), ('Id', 123123), ('JobId', 12345), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('UpdatedBy', None), ('Visible', True)])])]))
+job_info (success, dict, {'value': [{'Id': 10429, 'JobName': 'Discovery-201', 'JobDescription': 'Discovery-201', 'NextRun': None, 'LastRun': '2023-06-07 09:33:07.161', 'StartTime': None, 'EndTime': None, 'Schedule': 'startnow', 'State': 'Enabled', 'CreatedBy': 'admin', 'UpdatedBy': 'admin', 'Visible': True, 'Editable': True, 'Builtin': False, 'UserGenerated': True, 'Targets': [], 'Params': [], 'LastRunStatus': {'Id': 2070, 'Name': 'Failed'}, 'JobType': {'Id': 101, 'Name': 'Discovery_Task', 'Internal': False}, 'JobStatus': {'Id': 2080, 'Name': 'New'}, 'ExecutionHistories': [{'Id': 1243224, 'JobName': 'Discovery-201', 'Progress': '100', 'StartTime': '2023-06-07 09:33:07.148', 'EndTime': '2023-06-07 09:33:08.403', 'LastUpdateTime': '2023-06-07 09:33:08.447185', 'ExecutedBy': 'admin', 'JobId': 10429, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}, 'ExecutionHistoryDetails': [{'Id': 1288519, 'Progress': '100', 'StartTime': '2023-06-07 09:33:07.525', 'EndTime': '2023-06-07 09:33:08.189', 'ElapsedTime': '00:00:00', 'Key': '198.168.0.1', 'Value': 'Running\nDiscovery of target 198.168.0.1 started .\nDiscovery target resolved to IP 198.168.0.1 .\n: ========== EEMI Code: CGEN1009 ==========\nMessage: Unable to perform the requested action because the device management endpoint authentication over WSMAN, REDFISH failed. \nRecommended actions: Make sure the credentials associated with the device management endpoint are valid and retry the operation.\n=======================================\nTask Failed. Completed With Errors.', 'ExecutionHistoryId': 1243224, 'IdBaseEntity': 0, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}}, {'Id': 1288518, 'Progress': '100', 'StartTime': '2023-06-07 09:33:07.521', 'EndTime': '2023-06-07 09:33:08.313', 'ElapsedTime': '00:00:00', 'Key': '198.168.0.2', 'Value': 'Running\nDiscovery of target 198.168.0.2 started. \nDiscovery target resolved to IP 198.168.0.2 .\n: ========== EEMI Code: CGEN1009 ==========\nMessage: Unable to perform the requested action because the device management endpoint authentication over WSMAN, REDFISH failed. \nRecommended actions: Make sure the credentials associated with the device management endpoint are valid and retry the operation.\n=======================================\nTask Failed. Completed With Errors.', 'ExecutionHistoryId': 1243224, 'IdBaseEntity': 0, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}}]}, {'Id': 1243218, 'JobName': 'Discovery-201', 'Progress': '100', 'StartTime': '2023-06-07 09:30:55.064', 'EndTime': '2023-06-07 09:30:56.338', 'LastUpdateTime': '2023-06-07 09:30:56.365294', 'ExecutedBy': 'admin', 'JobId': 10429, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}, 'ExecutionHistoryDetails': [{'Id': 1288512, 'Progress': '100', 'StartTime': '2023-06-07 09:30:55.441', 'EndTime': '2023-06-07 09:30:56.085', 'ElapsedTime': '00:00:00', 'Key': '198.168.0.1', 'Value': 'Running\nDiscovery of target 198.168.0.1 started. \nDiscovery target resolved to IP 198.168.0.1 .\n: ========== EEMI Code: CGEN1009 ==========\nMessage: Unable to perform the requested action because the device management endpoint authentication over WSMAN, REDFISH failed. \nRecommended actions: Make sure the credentials associated with the device management endpoint are valid and retry the operation.\n=======================================\nTask Failed. Completed With Errors.', 'ExecutionHistoryId': 1243218, 'IdBaseEntity': 0, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}}, {'Id': 1288511, 'Progress': '100', 'StartTime': '2023-06-07 09:30:55.439', 'EndTime': '2023-06-07 09:30:56.21', 'ElapsedTime': '00:00:00', 'Key': '198.168.0.2', 'Value': 'Running\nDiscovery of target 198.168.0.2 started. \nDiscovery target resolved to IP 198.168.0.2 .\n: ========== EEMI Code: CGEN1009 ==========\nMessage: Unable to perform the requested action because the device management endpoint authentication over WSMAN, REDFISH failed. \nRecommended actions: Make sure the credentials associated with the device management endpoint are valid and retry the operation.\n=======================================\nTask Failed. Completed With Errors.', 'ExecutionHistoryId': 1243218, 'IdBaseEntity': 0, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}}]}], 'LastExecutionDetail': {'Id': 1288519, 'Progress': '100', 'StartTime': '2023-06-07 09:33:07.525', 'EndTime': '2023-06-07 09:33:08.189', 'ElapsedTime': None, 'Key': '198.168.0.1', 'Value': 'Running\nDiscovery of target 198.168.0.1 started. \nDiscovery target resolved to IP 198.168.0.1 .\n: ========== EEMI Code: CGEN1009 ==========\nMessage: Unable to perform the requested action because the device management endpoint authentication over WSMAN, REDFISH failed. \nRecommended actions: Make sure the credentials associated with the device management endpoint are valid and retry the operation. \n=======================================\nTask Failed. Completed With Errors.', 'ExecutionHistoryId': 1243224, 'IdBaseEntity': 0, 'JobStatus': {'Id': 2070, 'Name': 'Failed'}}}]})
Details of the OpenManage Enterprise jobs.
@@ -153,5 +171,6 @@ Status
Authors
~~~~~~~
-- Jagadeesh N V(@jagadeeshnv)
+- Jagadeesh N V (@jagadeeshnv)
+- Abhishek Sinha (@Abhishek-Dell)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst
index 798f41bc2..839554682 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_port_breakout.rst
@@ -56,11 +56,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -78,7 +78,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -118,11 +118,11 @@ msg (always, str, Port breakout configuration job submitted successfully.)
Overall status of the port configuration.
-breakout_status (success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'root'), ('Editable', True), ('EndTime', None), ('Id', 11111), ('JobDescription', ''), ('JobName', 'Breakout Port'), ('JobStatus', AnsibleMapping([('Id', 1112), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 3), ('Internal', False), ('Name', 'DeviceAction_Task')])), ('LastRun', None), ('LastRunStatus', AnsibleMapping([('Id', 1113), ('Name', 'NotRun')])), ('NextRun', None), ('Params', [AnsibleMapping([('JobId', 11111), ('Key', 'operationName'), ('Value', 'CONFIGURE_PORT_BREAK_OUT')]), AnsibleMapping([('JobId', 11111), ('Key', 'interfaceId'), ('Value', '2HB7NX2:phy-port1/1/11')]), AnsibleMapping([('JobId', 11111), ('Key', 'breakoutType'), ('Value', '1X40GE')])]), ('Schedule', 'startnow'), ('StartTime', None), ('State', 'Enabled'), ('Targets', [AnsibleMapping([('Data', ''), ('Id', 11112), ('JobId', 34206), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('UpdatedBy', None), ('UserGenerated', True), ('Visible', True)]))
+breakout_status (success, dict, {'Builtin': False, 'CreatedBy': 'root', 'Editable': True, 'EndTime': None, 'Id': 11111, 'JobDescription': '', 'JobName': 'Breakout Port', 'JobStatus': {'Id': 1112, 'Name': 'New'}, 'JobType': {'Id': 3, 'Internal': False, 'Name': 'DeviceAction_Task'}, 'LastRun': None, 'LastRunStatus': {'Id': 1113, 'Name': 'NotRun'}, 'NextRun': None, 'Params': [{'JobId': 11111, 'Key': 'operationName', 'Value': 'CONFIGURE_PORT_BREAK_OUT'}, {'JobId': 11111, 'Key': 'interfaceId', 'Value': '2HB7NX2:phy-port1/1/11'}, {'JobId': 11111, 'Key': 'breakoutType', 'Value': '1X40GE'}], 'Schedule': 'startnow', 'StartTime': None, 'State': 'Enabled', 'Targets': [{'Data': '', 'Id': 11112, 'JobId': 34206, 'TargetType': {'Id': 1000, 'Name': 'DEVICE'}}], 'UpdatedBy': None, 'UserGenerated': True, 'Visible': True})
Details of the OpenManage Enterprise jobs.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst
index e5dc4bdfa..0ab868cb9 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan.rst
@@ -84,11 +84,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -106,7 +106,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -181,11 +181,11 @@ msg (always, str, Successfully created the VLAN.)
Overall status of the VLAN operation.
-vlan_status (when I(state=present), dict, AnsibleMapping([('@odata.context', '/api/$metadata#NetworkConfigurationService.Network'), ('@odata.type', '#NetworkConfigurationService.Network'), ('@odata.id', '/api/NetworkConfigurationService/Networks(1234)'), ('Id', 1234), ('Name', 'vlan1'), ('Description', 'VLAN description'), ('VlanMaximum', 130), ('VlanMinimum', 140), ('Type', 1), ('CreatedBy', 'admin'), ('CreationTime', '2020-01-01 05:54:36.113'), ('UpdatedBy', None), ('UpdatedTime', '2020-01-01 05:54:36.113'), ('InternalRefNWUUId', '6d6effcc-eca4-44bd-be07-1234ab5cd67e')]))
+vlan_status (when I(state=present), dict, {'@odata.context': '/api/$metadata#NetworkConfigurationService.Network', '@odata.type': '#NetworkConfigurationService.Network', '@odata.id': '/api/NetworkConfigurationService/Networks(1234)', 'Id': 1234, 'Name': 'vlan1', 'Description': 'VLAN description', 'VlanMaximum': 130, 'VlanMinimum': 140, 'Type': 1, 'CreatedBy': 'admin', 'CreationTime': '2020-01-01 05:54:36.113', 'UpdatedBy': None, 'UpdatedTime': '2020-01-01 05:54:36.113', 'InternalRefNWUUId': '6d6effcc-eca4-44bd-be07-1234ab5cd67e'})
Details of the VLAN that is either created or modified.
-error_info (on HTTP error, dict, AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CTEM1043'), ('RelatedProperties', []), ('Message', 'Unable to create or update the network because the entered VLAN minimum 0 is not within a valid range ( 1 - 4000 or 4021 - 4094 ).'), ('MessageArgs', ['0', '1', '4000', '4021', '4094']), ('Severity', 'Warning'), ('Resolution', 'Enter a valid VLAN minimum as identified in the message and retry the operation.')])])]))
+error_info (on HTTP error, dict, {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CTEM1043', 'RelatedProperties': [], 'Message': 'Unable to create or update the network because the entered VLAN minimum 0 is not within a valid range ( 1 - 4000 or 4021 - 4094 ).', 'MessageArgs': ['0', '1', '4000', '4021', '4094'], 'Severity': 'Warning', 'Resolution': 'Enter a valid VLAN minimum as identified in the message and retry the operation.'}]})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst
index 266e67d90..af790fa30 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_network_vlan_info.rst
@@ -56,11 +56,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -78,7 +78,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -119,11 +119,11 @@ Examples
Return Values
-------------
-msg (success, str, AnsibleMapping([('msg', 'Successfully retrieved the network VLAN information.'), ('network_vlan_info', [AnsibleMapping([('CreatedBy', 'admin'), ('CreationTime', '2020-09-02 18:48:42.129'), ('Description', 'Description of Logical Network - 1'), ('Id', 20057), ('InternalRefNWUUId', '42b9903d-93f8-4184-adcf-0772e4492f71'), ('Name', 'Network VLAN - 1'), ('Type', AnsibleMapping([('Description', 'This is the network for general purpose traffic. QOS Priority : Bronze.'), ('Id', 1), ('Name', 'General Purpose (Bronze)'), ('NetworkTrafficType', 'Ethernet'), ('QosType', AnsibleMapping([('Id', 4), ('Name', 'Bronze')])), ('VendorCode', 'GeneralPurpose')])), ('UpdatedBy', None), ('UpdatedTime', '2020-09-02 18:48:42.129'), ('VlanMaximum', 111), ('VlanMinimum', 111)]), AnsibleMapping([('CreatedBy', 'admin'), ('CreationTime', '2020-09-02 18:49:11.507'), ('Description', 'Description of Logical Network - 2'), ('Id', 20058), ('InternalRefNWUUId', 'e46ccb3f-ef57-4617-ac76-46c56594005c'), ('Name', 'Network VLAN - 2'), ('Type', AnsibleMapping([('Description', 'This is the network for general purpose traffic. QOS Priority : Silver.'), ('Id', 2), ('Name', 'General Purpose (Silver)'), ('NetworkTrafficType', 'Ethernet'), ('QosType', AnsibleMapping([('Id', 3), ('Name', 'Silver')])), ('VendorCode', 'GeneralPurpose')])), ('UpdatedBy', None), ('UpdatedTime', '2020-09-02 18:49:11.507'), ('VlanMaximum', 112), ('VlanMinimum', 112)])])]))
+msg (success, str, {'msg': 'Successfully retrieved the network VLAN information.', 'network_vlan_info': [{'CreatedBy': 'admin', 'CreationTime': '2020-09-02 18:48:42.129', 'Description': 'Description of Logical Network - 1', 'Id': 20057, 'InternalRefNWUUId': '42b9903d-93f8-4184-adcf-0772e4492f71', 'Name': 'Network VLAN - 1', 'Type': {'Description': 'This is the network for general purpose traffic. QOS Priority : Bronze.', 'Id': 1, 'Name': 'General Purpose (Bronze)', 'NetworkTrafficType': 'Ethernet', 'QosType': {'Id': 4, 'Name': 'Bronze'}, 'VendorCode': 'GeneralPurpose'}, 'UpdatedBy': None, 'UpdatedTime': '2020-09-02 18:48:42.129', 'VlanMaximum': 111, 'VlanMinimum': 111}, {'CreatedBy': 'admin', 'CreationTime': '2020-09-02 18:49:11.507', 'Description': 'Description of Logical Network - 2', 'Id': 20058, 'InternalRefNWUUId': 'e46ccb3f-ef57-4617-ac76-46c56594005c', 'Name': 'Network VLAN - 2', 'Type': {'Description': 'This is the network for general purpose traffic. QOS Priority : Silver.', 'Id': 2, 'Name': 'General Purpose (Silver)', 'NetworkTrafficType': 'Ethernet', 'QosType': {'Id': 3, 'Name': 'Silver'}, 'VendorCode': 'GeneralPurpose'}, 'UpdatedBy': None, 'UpdatedTime': '2020-09-02 18:49:11.507', 'VlanMaximum': 112, 'VlanMinimum': 112}]})
Detailed information of the network VLAN(s).
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst
index 461f1ed57..a9046a98e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_powerstate.rst
@@ -60,11 +60,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -82,7 +82,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -146,7 +146,7 @@ msg (always, str, Power State operation job submitted successfully.)
Overall power state operation job status.
-job_status (success, dict, AnsibleMapping([('Builtin', False), ('CreatedBy', 'user'), ('Editable', True), ('EndTime', None), ('Id', 11111), ('JobDescription', 'DeviceAction_Task'), ('JobName', 'DeviceAction_Task_PowerState'), ('JobStatus', AnsibleMapping([('Id', 1111), ('Name', 'New')])), ('JobType', AnsibleMapping([('Id', 1), ('Internal', False), ('Name', 'DeviceAction_Task')])), ('LastRun', '2019-04-01 06:39:02.69'), ('LastRunStatus', AnsibleMapping([('Id', 1112), ('Name', 'Running')])), ('NextRun', None), ('Params', [AnsibleMapping([('JobId', 11111), ('Key', 'powerState'), ('Value', '2')]), AnsibleMapping([('JobId', 11111), ('Key', 'operationName'), ('Value', 'POWER_CONTROL')])]), ('Schedule', ''), ('StartTime', None), ('State', 'Enabled'), ('Targets', [AnsibleMapping([('Data', ''), ('Id', 11112), ('JobId', 11111), ('TargetType', AnsibleMapping([('Id', 1000), ('Name', 'DEVICE')]))])]), ('UpdatedBy', None), ('Visible', True)]))
+job_status (success, dict, {'Builtin': False, 'CreatedBy': 'user', 'Editable': True, 'EndTime': None, 'Id': 11111, 'JobDescription': 'DeviceAction_Task', 'JobName': 'DeviceAction_Task_PowerState', 'JobStatus': {'Id': 1111, 'Name': 'New'}, 'JobType': {'Id': 1, 'Internal': False, 'Name': 'DeviceAction_Task'}, 'LastRun': '2019-04-01 06:39:02.69', 'LastRunStatus': {'Id': 1112, 'Name': 'Running'}, 'NextRun': None, 'Params': [{'JobId': 11111, 'Key': 'powerState', 'Value': '2'}, {'JobId': 11111, 'Key': 'operationName', 'Value': 'POWER_CONTROL'}], 'Schedule': '', 'StartTime': None, 'State': 'Enabled', 'Targets': [{'Data': '', 'Id': 11112, 'JobId': 11111, 'TargetType': {'Id': 1000, 'Name': 'DEVICE'}}], 'UpdatedBy': None, 'Visible': True})
Power state operation job and progress details from the OME.
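
Editor's note: the ``job_status`` value above is the raw job record returned by OME, so the job ID and current state can be read directly from a registered result. A minimal sketch follows; the connection details, ``device_service_tag``, and ``power_state`` values are illustrative placeholders, and only the ``job_status`` keys are taken from the documented sample.

.. code-block:: yaml+jinja

    - name: Submit a power-on request and capture the job details
      dellemc.openmanage.ome_powerstate:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        device_service_tag: "SVCTAG1"
        power_state: "on"
      register: power_result

    - name: Report the submitted job and its current state
      ansible.builtin.debug:
        msg: "Job {{ power_result.job_status.Id }} is {{ power_result.job_status.JobStatus.Name }}"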
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst
index e0b5f0eeb..8faf8b8fe 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_profile.rst
@@ -214,11 +214,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -236,7 +236,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
- ``assign`` operation on an already assigned profile will not redeploy.
@@ -271,7 +271,7 @@ Examples
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "path/to/my_iso.iso"
@@ -288,7 +288,7 @@ Examples
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: CIFS
share_ip: "192.168.0.2"
share_user: "username"
@@ -308,7 +308,7 @@ Examples
new_name: "modified profile"
description: "new description"
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.3"
iso_path: "path/to/my_iso.iso"
@@ -344,7 +344,7 @@ Examples
ca_path: "/path/to/ca_cert.pem"
command: "delete"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00002')
- name: Delete profiles using profile list filter
@@ -369,7 +369,7 @@ Examples
name: "Profile 00001"
device_id: 12456
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "path/to/my_iso.iso"
@@ -383,10 +383,10 @@ Examples
ShutdownType: 0
TimeToWaitBeforeShutdown: 300
EndHostPowerState: 1
- StrictCheckingVlan: True
+ StrictCheckingVlan: true
Schedule:
- RunNow: True
- RunLater: False
+ RunNow: true
+ RunLater: false
- name: Unassign a profile using profile name
dellemc.openmanage.ome_profile:
@@ -405,7 +405,7 @@ Examples
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00003')
- name: Unassign profiles using profile list filter
@@ -449,7 +449,7 @@ job_id (when I(command) is C(assign), C(migrate) or C(unassign), int, 14123)
``assign`` and ``unassign`` operations do not trigger a task if a profile is auto-deployed.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
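
Editor's note: ``error_info`` is only populated when the request fails, so it is typically inspected from a rescue path. The following is a minimal sketch, not part of the module documentation; it assumes the failed result (including ``error_info``) is captured with ``register``, and the task options are illustrative placeholders based on the assign example above.

.. code-block:: yaml+jinja

    - name: Assign a profile and surface the OME error details on failure
      block:
        - name: Assign a profile using profile name
          dellemc.openmanage.ome_profile:
            hostname: "192.168.0.1"
            username: "username"
            password: "password"
            ca_path: "/path/to/ca_cert.pem"
            command: "assign"
            name: "Profile 00001"
            device_id: 12456
          register: assign_result
      rescue:
        - name: Print the extended error messages returned by OME
          ansible.builtin.debug:
            msg: "{{ assign_result.error_info.error['@Message.ExtendedInfo'] | map(attribute='Message') | list }}"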
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_profile_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_profile_info.rst
new file mode 100644
index 000000000..697a8c05f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_profile_info.rst
@@ -0,0 +1,196 @@
+.. _ome_profile_info_module:
+
+
+ome_profile_info -- Retrieve profiles with attribute details
+============================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves profiles with attribute details on OpenManage Enterprise or OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ profile_id (optional, int, None)
+ Id of the profile.
+
+ This is mutually exclusive with *profile_name*, *system_query_options*, *template_id*, and *template_name*.
+
+
+ profile_name (optional, str, None)
+ Name of the profile.
+
+ This is mutually exclusive with *template_id*, *profile_id*, *system_query_options*, and *template_name*.
+
+
+ template_id (optional, int, None)
+ Provide the ID of the template to retrieve the list of profile(s) linked to it.
+
+ This is mutually exclusive with *profile_name*, *profile_id*, *system_query_options*, and *template_name*.
+
+
+ template_name (optional, str, None)
+ Provide the name of the template to retrieve the list of profile(s) linked to it.
+
+ This is mutually exclusive with *profile_name*, *profile_id*, *template_id*, and *system_query_options*.
+
+
+ system_query_options (optional, dict, None)
+ Option for providing supported odata filters.
+
+ The profile list can be fetched and sorted based on ProfileName, TemplateName, TargetTypeId, TargetName, ChassisName, ProfileState, LastRunStatus, or ProfileModified.
+
+ This is mutually exclusive with *profile_name*, *profile_id*, *template_id*, and *template_name*.
+
+ ``Note`` If *profile_name*, *profile_id*, *template_id*, or *template_name* option is not provided, the module retrieves all the profiles.
+
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve all profiles
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+ - name: Retrieve profile using the name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_name: eprof 00001
+
+ - name: Retrieve profile using the id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_id: 10129
+
+ - name: Retrieve the profiles using the template name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: t2
+
+ - name: Retrieve the profiles using the template id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 11
+
+ - name: Retrieve the profiles based on the odata filters
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ filter: TemplateName eq 'mytemplate'
+ orderby: ProfileState
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved the profile information.)
+ Status of profile information retrieval.
+
+
+profile_info (success, list, [{'Id': 71460, 'ProfileName': 'Profile 00001', 'ProfileDescription': 'from source template: (Template)', 'TemplateId': 8, 'TemplateName': 'Template', 'DataSchemaId': 8, 'TargetId': 0, 'TargetName': None, 'TargetTypeId': 0, 'DeviceIdInSlot': 0, 'ChassisId': 0, 'ChassisName': None, 'GroupId': 0, 'GroupName': None, 'NetworkBootToIso': None, 'ProfileState': 0, 'DeploymentTaskId': 0, 'LastRunStatus': 2200, 'ProfileModified': 0, 'CreatedBy': 'admin', 'EditedBy': None, 'CreatedDate': '2019-09-26 13:56:41.924966', 'LastEditDate': '2020-12-11 08:27:20.500564', 'LastDeployDate': '', 'AttributeIdMap': {'4965': {'Value': 'hostname', 'IsReadOnly': False, 'IsIgnored': True}, '4963': {'Value': 'second floor', 'IsReadOnly': False, 'IsIgnored': True}, '4960': {'Value': '10A', 'IsReadOnly': False, 'IsIgnored': True}, '4959': {'Value': 'OMAMDEV', 'IsReadOnly': False, 'IsIgnored': True}, '4957': {'Value': 'Dell LAB', 'IsReadOnly': False, 'IsIgnored': True}, '4958': {'Value': None, 'IsReadOnly': False, 'IsIgnored': True}, '4066': {'Value': None, 'IsReadOnly': False, 'IsIgnored': True}, '4231': {'Value': '1', 'IsReadOnly': False, 'IsIgnored': False}, '4229': {'Value': 'Disabled', 'IsReadOnly': False, 'IsIgnored': False}}, 'AttributeDetails': {'System': {'Server Operating System': {'ServerOS 1 Server Host Name': 4965}, 'Server Topology': {'ServerTopology 1 Room Name': 4963, 'ServerTopology 1 Rack Slot': 4960, 'ServerTopology 1 Rack Name': 4959, 'ServerTopology 1 Data Center Name': 4957, 'ServerTopology 1 Aisle Name': 4958}}, 'iDRAC': {'Active Directory': {'ActiveDirectory 1 Active Directory RAC Name': 4066}, 'NIC Information': {'NIC 1 VLAN ID': 4231, 'NIC 1 Enable VLAN': 4229}}}}])
+ Information about the profile.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Jagadeesh N V(@jagadeeshnv)
+
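
Editor's note: once the result is registered, the documented ``profile_info`` list can be post-processed with standard Jinja2 filters. A minimal sketch, reusing the placeholder connection details from the examples above (the ``template_name`` value is illustrative):

.. code-block:: yaml+jinja

    - name: Retrieve the profiles linked to a template
      dellemc.openmanage.ome_profile_info:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        template_name: "Template"
      register: profile_facts

    - name: List the names of the retrieved profiles
      ansible.builtin.debug:
        msg: "{{ profile_facts.profile_info | map(attribute='ProfileName') | list }}"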
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst
index 3531cb240..95cd95bd5 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profile_info.rst
@@ -56,11 +56,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -120,11 +120,11 @@ msg (on success, str, Successfully retrieved the server interface profile inform
Overall status of the server interface profile information.
-server_profiles (success, list, [AnsibleMapping([('BondingTechnology', 'LACP'), ('Id', '6KZK6K2'), ('ServerInterfaceProfile', [AnsibleMapping([('FabricId', '1ea6bf64-3cf0-4e06-a136-5046d874d1e7'), ('Id', 'NIC.Mezzanine.1A-1-1'), ('NativeVLAN', 0), ('Networks', [AnsibleMapping([('CreatedBy', 'system'), ('CreationTime', '2018-11-27 10:22:14.140'), ('Description', 'VLAN 1'), ('Id', 10001), ('InternalRefNWUUId', 'add035b9-a971-400d-a3fa-bb365df1d476'), ('Name"', 'VLAN 1'), ('Type', 2), ('UpdatedBy', None), ('UpdatedTime', '2018-11-27 10:22:14.140'), ('VlanMaximum', 1), ('VlanMinimum', 1)])]), ('NicBonded', True), ('OnboardedPort', '59HW8X2:ethernet1/1/1')]), AnsibleMapping([('FabricId', '3ea6be04-5cf0-4e05-a136-5046d874d1e6'), ('Id', 'NIC.Mezzanine.1A-2-1'), ('NativeVLAN', 0), ('Networks', [AnsibleMapping([('CreatedBy', 'system'), ('CreationTime', '2018-09-25 14:46:12.374'), ('Description', None), ('Id', 10155), ('InternalRefNWUUId', 'f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d'), ('Name', 'jagvlan'), ('Type', 1), ('UpdatedBy', None), ('UpdatedTime', '2018-09-25 14:46:12.374'), ('VlanMaximum', 143), ('VlanMinimum', 143)])]), ('NicBonded', False), ('OnboardedPort', '6H7J6Z2:ethernet1/1/1')])])])])
+server_profiles (success, list, [{'BondingTechnology': 'LACP', 'Id': '6KZK6K2', 'ServerInterfaceProfile': [{'FabricId': '1ea6bf64-3cf0-4e06-a136-5046d874d1e7', 'Id': 'NIC.Mezzanine.1A-1-1', 'NativeVLAN': 0, 'Networks': [{'CreatedBy': 'system', 'CreationTime': '2018-11-27 10:22:14.140', 'Description': 'VLAN 1', 'Id': 10001, 'InternalRefNWUUId': 'add035b9-a971-400d-a3fa-bb365df1d476', 'Name"': 'VLAN 1', 'Type': 2, 'UpdatedBy': None, 'UpdatedTime': '2018-11-27 10:22:14.140', 'VlanMaximum': 1, 'VlanMinimum': 1}], 'NicBonded': True, 'OnboardedPort': '59HW8X2:ethernet1/1/1'}, {'FabricId': '3ea6be04-5cf0-4e05-a136-5046d874d1e6', 'Id': 'NIC.Mezzanine.1A-2-1', 'NativeVLAN': 0, 'Networks': [{'CreatedBy': 'system', 'CreationTime': '2018-09-25 14:46:12.374', 'Description': None, 'Id': 10155, 'InternalRefNWUUId': 'f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d', 'Name': 'jagvlan', 'Type': 1, 'UpdatedBy': None, 'UpdatedTime': '2018-09-25 14:46:12.374', 'VlanMaximum': 143, 'VlanMinimum': 143}], 'NicBonded': False, 'OnboardedPort': '6H7J6Z2:ethernet1/1/1'}]}])
Returns the collected server interface profile information.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst
index c4f9f0f40..f70a50125 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_server_interface_profiles.rst
@@ -104,7 +104,7 @@ Parameters
job_wait_timeout (optional, int, 120)
The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
- This option is applicable when *job_wait* is ``True``.
+ This option is applicable when *job_wait* is ``true``.
hostname (True, str, None)
@@ -124,11 +124,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -147,7 +147,7 @@ Notes
.. note::
- This module supports ``check_mode``.
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
@@ -171,13 +171,13 @@ Examples
nic_teaming: LACP
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan1
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
@@ -195,13 +195,13 @@ Examples
nic_teaming: NoTeaming
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan2
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
@@ -220,7 +220,7 @@ job_id (on applying the Interface profiles, int, 14123)
Job ID of the task to apply the server interface profiles.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst
index 1e6ddda5f..69e22ab98 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric.rst
@@ -92,11 +92,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -114,7 +114,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -174,11 +174,11 @@ fabric_id (success, str, 1312cceb-c3dd-4348-95c1-d8541a17d776)
Returns the ID when a fabric is created, modified or deleted.
-additional_info (when I(state=present) and additional information present in response., dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('RelatedProperties', []), ('Message', 'Fabric update is successful. The OverrideLLDPConfiguration attribute is not provided in the payload, so it preserves the previous value.'), ('MessageArgs', []), ('Severity', 'Informational'), ('Resolution', 'Please update the Fabric with the OverrideLLDPConfiguration as Disabled or Enabled if necessary.')])])]))]))
+additional_info (when I(state=present) and additional information present in response., dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'RelatedProperties': [], 'Message': 'Fabric update is successful. The OverrideLLDPConfiguration attribute is not provided in the payload, so it preserves the previous value.', 'MessageArgs': [], 'Severity': 'Informational', 'Resolution': 'Please update the Fabric with the OverrideLLDPConfiguration as Disabled or Enabled if necessary.'}]}})
Additional details of the fabric operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('RelatedProperties', []), ('Message', 'Unable to perform operation, because the fabric manager was not reachable.'), ('MessageArgs', []), ('Severity', 'Warning'), ('Resolution', 'Make sure of the following and retry the operation: 1) There is at least one advanced I/O Module in power-on mode. For example, MX9116n Ethernet Switch and MX5108n Ethernet Switch. However, if an advanced I/O Module is available in the power-on mode, make sure that the network profile is not set when the fabric manager is in the switch-over mode. 2) If the issue persists, wait for few minutes and retry the operation.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'RelatedProperties': [], 'Message': 'Unable to perform operation, because the fabric manager was not reachable.', 'MessageArgs': [], 'Severity': 'Warning', 'Resolution': 'Make sure of the following and retry the operation: 1) There is at least one advanced I/O Module in power-on mode. For example, MX9116n Ethernet Switch and MX5108n Ethernet Switch. However, if an advanced I/O Module is available in the power-on mode, make sure that the network profile is not set when the fabric manager is in the switch-over mode. 2) If the issue persists, wait for few minutes and retry the operation.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_info.rst
new file mode 100644
index 000000000..ccad0973a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_info.rst
@@ -0,0 +1,148 @@
+.. _ome_smart_fabric_info_module:
+
+
+ome_smart_fabric_info -- Retrieves the information of smart fabrics inventoried by OpenManage Enterprise Modular
+================================================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the list of smart fabrics in the inventory of OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ fabric_id (optional, str, None)
+ Unique Id of the fabric.
+
+ *fabric_id* is mutually exclusive with *fabric_name*.
+
+
+ fabric_name (optional, str, None)
+ Name of the fabric.
+
+ *fabric_name* is mutually exclusive with *fabric_id*.
+
+
+ hostname (True, str, None)
+ OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve details of all smart fabrics
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+ - name: Retrieve details of a specific smart fabric identified by its fabric ID
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+
+ - name: Retrieve details of a specific smart fabric identified by its fabric name
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved the smart fabric information.)
+ Status of smart fabric information retrieval.
+
+
+smart_fabric_info (success, list, [{'Description': 'Fabric f1', 'FabricDesign': [{'Actions': {'#NetworkService.GetApplicableNodes': {'target': "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/FabricDesign/NetworkService.GetApplicableNodes"}, 'Oem': {}}, 'FabricDesignNode': [{'ChassisName': 'Chassis-X', 'NodeName': 'Switch-B', 'Slot': 'Slot-A2', 'Type': 'WeaverSwitch'}, {'ChassisName': 'Chassis-X', 'NodeName': 'Switch-A', 'Slot': 'Slot-A1', 'Type': 'WeaverSwitch'}], 'Name': '2xMX9116n_Fabric_Switching_Engines_in_same_chassis', 'NetworkLink': [{'DestinationInterface': 'ethernet1/1/38', 'DestinationNode': 'Switch-B', 'SourceInterface': 'ethernet1/1/38', 'SourceNode': 'Switch-A'}, {'DestinationInterface': 'ethernet1/1/37', 'DestinationNode': 'Switch-B', 'SourceInterface': 'ethernet1/1/37', 'SourceNode': 'Switch-A'}, {'DestinationInterface': 'ethernet1/1/39', 'DestinationNode': 'Switch-B', 'SourceInterface': 'ethernet1/1/39', 'SourceNode': 'Switch-A'}, {'DestinationInterface': 'ethernet1/1/40', 'DestinationNode': 'Switch-B', 'SourceInterface': 'ethernet1/1/40', 'SourceNode': 'Switch-A'}]}], 'FabricDesignMapping': [{'DesignNode': 'Switch-A', 'PhysicalNode': 'NODEID1'}, {'DesignNode': 'Switch-B', 'PhysicalNode': 'NODEID2'}], 'Health': {'Issues': [{'Category': 'Audit', 'DetailedDescription': 'The SmartFabric is not healthy because the interface for an uplink mentioned in the message is not in operational status.', 'Message': 'The SmartFabric is not healthy because the interface JRWSV43:ethernet1/1/35 for uplink 1ad54420-b145-49a1-9779-21a579ef6f2d is not in operational status.', 'MessageArgs': [], 'MessageId': 'NFAB0016', 'Resolution': 'Make sure that all the uplink interfaces are in operational status.', 'Severity': 'Warning', 'TimeStamp': '2019-09-25T11:50:06Z'}, {'Category': 'Audit', 'DetailedDescription': 'The SmartFabric is not healthy because one or more VLTi links are not connected.', 'Message': 'The SmartFabric is not healthy because all InterSwitch Links are not connected.', 'MessageArgs': [], 'MessageId': 'NFAB0017', 'Resolution': 'Make sure that the VLTi cables for all ISLs are connected and operational as per the selected fabric design.', 'Severity': 'Warning', 'TimeStamp': '2019-09-25T11:50:06Z'}, {'Category': 'Audit', 'DetailedDescription': 'The SmartFabric is not healthy because the interface for an uplink mentioned in the message is not in operational status.', 'Message': 'The SmartFabric is not healthy because the interface 6H7J6Z2:ethernet1/1/35 for uplink 1ad54420-b145-49a1-9779-21a579ef6f2d is not in operational status.', 'MessageArgs': [], 'MessageId': 'NFAB0016', 'Resolution': 'Make sure that all the uplink interfaces are in operational status.', 'Severity': 'Warning', 'TimeStamp': '2019-09-25T11:50:06Z'}, {'Category': 'Audit', 'DetailedDescription': 'The SmartFabric is not healthy because one or more of the uplink interfaces are not bonded.', 'Message': 'The SmartFabric is not healthy because the uplink 1ad54420-b145-49a1-9779-21a579ef6f2d interface 6H7J6Z2:ethernet1/1/35 is not bonded to the other interfaces in the uplink.', 'MessageArgs': [], 'MessageId': 'NFAB0019', 'Resolution': 'Make sure that the Link Aggregation Control Protocol (LACP) is enabled on all ports on the remote switch to which the uplink ports from the fabric are connected.', 'Severity': 'Warning', 'TimeStamp': '2019-09-25T11:50:06Z'}, {'Category': 'Audit', 'DetailedDescription': 'The SmartFabric is not healthy because one or more of the uplink interfaces are not bonded.', 'Message': 'The SmartFabric is not healthy because the uplink 1ad54420-b145-49a1-9779-21a579ef6f2d interface JRWSV43:ethernet1/1/35 is not bonded to the other interfaces in the uplink.', 'MessageArgs': [], 'MessageId': 'NFAB0019', 'Resolution': 'Make sure that the Link Aggregation Control Protocol (LACP) is enabled on all ports on the remote switch to which the uplink ports from the fabric are connected.', 'Severity': 'Warning', 'TimeStamp': '2019-09-25T11:50:06Z'}], 'Status': '4000'}, 'Id': '61c20a59-9ed5-4ae5-b850-5e5acf42d2f2', 'LifeCycleStatus': [{'Activity': 'Create', 'Status': '2060'}], 'Multicast': [{'FloodRestrict': True, 'IgmpVersion': '3', 'MldVersion': '2'}], 'Name': 'f1', 'OverrideLLDPConfiguration': 'Disabled', 'ScaleVLANProfile': 'Enabled', 'Servers': [{'ChassisServiceTag': '6H5S6Z2', 'ConnectionState': True, 'ConnectionStateReason': 101, 'DeviceCapabilities': [1, 2, 3, 4, 7, 8, 9, 41, 10, 11, 12, 13, 14, 15, 208, 16, 17, 18, 212, 30, 31], 'DeviceManagement': [{'DnsName': 'iDRAC-6GZK6Z2', 'InstrumentationName': '', 'MacAddress': '4c:d9:8f:7a:7c:43', 'ManagementId': 135185, 'ManagementProfile': [{'AgentName': 'iDRAC', 'HasCreds': 0, 'ManagementId': 135185, 'ManagementProfileId': 135185, 'ManagementURL': 'https://[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]:443/', 'ProfileId': 'WSMAN_OOB', 'Status': 1000, 'StatusDateTime': '2019-10-29 09:30:38.552', 'Version': '3.20.21.20'}], 'ManagementType': 2, 'NetworkAddress': '100.96.24.28'}, {'DnsName': 'iDRAC-6GZK6Z2', 'InstrumentationName': '', 'MacAddress': '4c:d9:8f:7a:7c:43', 'ManagementId': 135186, 'ManagementProfile': [{'AgentName': 'iDRAC', 'HasCreds': 0, 'ManagementId': 135186, 'ManagementProfileId': 135186, 'ManagementURL': 'https://[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]:443/', 'ProfileId': 'WSMAN_OOB', 'Status': 1000, 'StatusDateTime': '2019-10-29 09:30:38.552', 'Version': '3.20.21.20'}], 'ManagementType': 2, 'NetworkAddress': '[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]'}], 'DeviceName': 'MX-6H5S6Z2:Sled-1', 'DeviceServiceTag': '6GZK6Z2', 'Enabled': True, 'Id': 10071, 'Identifier': '6GZK6Z2', 'LastInventoryTime': '2019-10-29 09:30:38.552', 'LastStatusTime': '2019-10-29 09:41:51.051', 'ManagedState': 3000, 'Model': 'PowerEdge MX840c', 'PowerState': 17, 'SlotConfiguration': {'ChassisId': '10072', 'ChassisName': 'MX-6H5S6Z2', 'ChassisServiceTag': '6H5S6Z2', 'DeviceType': '1000', 'SledBlockPowerOn': 'None blocking', 'SlotId': '10084', 'SlotName': 'Sled-1', 'SlotNumber': '1', 'SlotType': '2000'}, 'Status': 1000, 'SystemId': 1894, 'Type': 1000}], 'Summary': {'NodeCount': 2, 'ServerCount': 1, 'UplinkCount': 1}, 'Switches': [{'ChassisServiceTag': '6H5S6Z2', 'ConnectionState': True, 'ConnectionStateReason': 101, 'DeviceCapabilities': [1, 2, 3, 5, 7, 8, 9, 207, 18, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622], 'DeviceManagement': [{'DnsName': '', 'InstrumentationName': 'MX9116n Fabric Engine', 'MacAddress': '20:04:0F:4F:4E:04', 'ManagementId': 135181, 'ManagementProfile': [{'HasCreds': 0, 'ManagementId': 135181, 'ManagementProfileId': 135181, 'ManagementURL': '', 'ProfileId': '', 'Status': 1000, 'StatusDateTime': '2019-10-29 09:30:36.273'}], 'ManagementType': 2, 'NetworkAddress': '100.96.24.36'}, {'DnsName': '', 'InstrumentationName': 'MX9116n Fabric Engine', 'MacAddress': '20:04:0F:4F:4E:04', 'ManagementId': 135182, 'ManagementProfile': [{'HasCreds': 0, 'ManagementId': 135182, 'ManagementProfileId': 135182, 'ManagementURL': '', 'ProfileId': '', 'Status': 1000, 'StatusDateTime': '2019-10-29 09:30:36.273'}], 'ManagementType': 2, 'NetworkAddress': ''}], 'DeviceName': 'MX-6H5S6Z2:IOM-A2', 'DeviceServiceTag': '6H7J6Z2', 'Enabled': True, 'Id': 10074, 'Identifier': '6H7J6Z2', 'LastInventoryTime': '2019-10-29 09:30:36.332', 'LastStatusTime': '2019-10-29 09:31:00.931', 'ManagedState': 3000, 'Model': 'MX9116n Fabric Engine', 'PowerState': 17, 'SlotConfiguration': {'ChassisId': '10072', 'ChassisName': 'MX-6H5S6Z2', 'ChassisServiceTag': '6H5S6Z2', 'DeviceType': '4000', 'SledBlockPowerOn': 'null', 'SlotId': '10079', 'SlotName': 'IOM-A2', 'SlotNumber': '2', 'SlotType': '4000'}, 'Status': 1000, 'SystemId': 2031, 'Type': 4000}, {'ChassisServiceTag': '6H5S6Z2', 'ConnectionState': True, 'ConnectionStateReason': 101, 'DeviceCapabilities': [1, 2, 3, 5, 7, 8, 9, 207, 18, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615, 616, 617, 618, 619, 620, 621, 622], 'DeviceManagement': [{'DnsName': '', 'InstrumentationName': 'MX9116n Fabric Engine', 'MacAddress': 'E8:B5:D0:52:61:46', 'ManagementId': 135183, 'ManagementProfile': [{'HasCreds': 0, 'ManagementId': 135183, 'ManagementProfileId': 135183, 'ManagementURL': '', 'ProfileId': '', 'Status': 1000, 'StatusDateTime': '2019-10-29 09:30:37.115'}], 'ManagementType': 2, 'NetworkAddress': '100.96.24.37'}, {'DnsName': '', 'InstrumentationName': 'MX9116n Fabric Engine', 'MacAddress': 'E8:B5:D0:52:61:46', 'ManagementId': 135184, 'ManagementProfile': [{'HasCreds': 0, 'ManagementId': 135184, 'ManagementProfileId': 135184, 'ManagementURL': '', 'ProfileId': '', 'Status': 1000, 'StatusDateTime': '2019-10-29 09:30:37.115'}], 'ManagementType': 2, 'NetworkAddress': ''}], 'DeviceName': 'MX-6H5S6Z2:IOM-A1', 'DeviceServiceTag': 'JRWSV43', 'Enabled': True, 'Id': 20881, 'Identifier': 'JRWSV43', 'LastInventoryTime': '2019-10-29 09:30:37.172', 'LastStatusTime': '2019-10-29 09:31:00.244', 'ManagedState': 3000, 'Model': 'MX9116n Fabric Engine', 'PowerState': 17, 'SlotConfiguration': {'ChassisId': '10072', 'ChassisName': 'MX-6H5S6Z2', 'ChassisServiceTag': '6H5S6Z2', 'DeviceType': '4000', 'SledBlockPowerOn': 'null', 'SlotId': '10078', 'SlotName': 'IOM-A1', 'SlotNumber': '1', 'SlotType': '4000'}, 'Status': 1000, 'SystemId': 2031, 'Type': 4000}], 'Uplinks': [{'Id': '1ad54420-b145-49a1-9779-21a579ef6f2d', 'MediaType': 'Ethernet', 'Name': 'u1', 'NativeVLAN': 1, 'Summary': {'NetworkCount': 1, 'PortCount': 2}, 'UfdEnable': 'Disabled'}]}])
+ Returns the information about the smart fabric.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1006', 'RelatedProperties': [], 'Message': 'Unable to complete the request because the resource URI does not exist or is not implemented.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Kritika Bhateja(@Kritka-Bhateja)
+
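
Editor's note: the documented ``Health.Issues`` entries in ``smart_fabric_info`` make it straightforward to report fabric health from a playbook. A minimal sketch, reusing the placeholder connection details and the fabric name ``f1`` from the examples above:

.. code-block:: yaml+jinja

    - name: Retrieve a fabric by name and capture the result
      dellemc.openmanage.ome_smart_fabric_info:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        fabric_name: "f1"
      register: fabric_facts

    - name: Report the health messages of the first fabric entry
      ansible.builtin.debug:
        msg: "{{ fabric_facts.smart_fabric_info[0].Health.Issues | map(attribute='Message') | list }}"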
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst
index 83ddfcdca..fa691dd07 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink.rst
@@ -108,11 +108,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -130,7 +130,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports ``check_mode``.
@@ -266,11 +266,11 @@ uplink_id (when I(state=present), str, ddc3d260-fd71-46a1-97f9-708e12345678)
Returns the ID when an uplink is created or modified.
-additional_info (when I(state=present) and additional information present in response., dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to configure the Uplink Failure Detection mode on the uplink because the firmware version of the I/O Module running the Fabric Manager does not support the configuration feature.'), ('MessageArgs', []), ('MessageId', 'CDEV7151'), ('RelatedProperties', []), ('Resolution', "Update the firmware version of the I/O Module running the Fabric Manager and retry the operation. For information about the recommended I/O Module firmware versions, see the OpenManage Enterprise-Modular User's Guide available on the support site."), ('Severity', 'Informational')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+additional_info (when I(state=present) and additional information present in response., dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to configure the Uplink Failure Detection mode on the uplink because the firmware version of the I/O Module running the Fabric Manager does not support the configuration feature.', 'MessageArgs': [], 'MessageId': 'CDEV7151', 'RelatedProperties': [], 'Resolution': "Update the firmware version of the I/O Module running the Fabric Manager and retry the operation. For information about the recommended I/O Module firmware versions, see the OpenManage Enterprise-Modular User's Guide available on the support site.", 'Severity': 'Informational'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Additional details of the fabric operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'CGEN1006'), ('RelatedProperties', []), ('Message', 'Unable to complete the request because the resource URI does not exist or is not implemented.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties.")])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1006', 'RelatedProperties': [], 'Message': 'Unable to complete the request because the resource URI does not exist or is not implemented.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink_info.rst
new file mode 100644
index 000000000..96a111bcb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_smart_fabric_uplink_info.rst
@@ -0,0 +1,175 @@
+.. _ome_smart_fabric_uplink_info_module:
+
+
+ome_smart_fabric_uplink_info -- Retrieve details of fabric uplink on OpenManage Enterprise Modular.
+===================================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the details of a fabric uplink on OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ fabric_id (optional, str, None)
+ Unique id of the fabric.
+
+ *fabric_id* is mutually exclusive with *fabric_name*.
+
+
+ fabric_name (optional, str, None)
+ Unique name of the fabric.
+
+ *fabric_name* is mutually exclusive with *fabric_id*.
+
+
+ uplink_id (optional, str, None)
+ Unique id of the uplink.
+
+ *uplink_id* is mutually exclusive with *uplink_name*.
+
+ *fabric_id* or *fabric_name* is required along with *uplink_id*.
+
+
+ uplink_name (optional, str, None)
+ Unique name of the uplink.
+
+ *uplink_name* is mutually exclusive with *uplink_id*.
+
+ *fabric_id* or *fabric_name* is required along with *uplink_name*.
+
+
+ hostname (True, str, None)
+ OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve all fabric uplink information using fabric_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+
+ - name: Retrieve all fabric uplink information using fabric_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+
+ - name: Retrieve specific fabric information using uplink_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_id: "1ad54420-b145-49a1-9779-21a579ef6f2d"
+
+ - name: Retrieve specific fabric information using uplink_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_name: "u1"
+
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved the fabric uplink information.)
+ Status of fabric uplink information retrieval.
+
+
+uplink_info (on success, list, [{'Description': '', 'Id': '1ad54420-b145-49a1-9779-21a579ef6f2d', 'MediaType': 'Ethernet', 'Name': 'u1', 'NativeVLAN': 1, 'Networks': [{'CreatedBy': 'system', 'CreationTime': '2018-09-25 14:46:12.374', 'Description': None, 'Id': 10155, 'InternalRefNWUUId': 'f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d', 'Name': 'testvlan', 'Type': 1, 'UpdatedBy': 'root', 'UpdatedTime': '2019-06-27 15:06:22.836', 'VlanMaximum': 143, 'VlanMinimum': 143}], 'Ports': [{'AdminStatus': 'Enabled', 'BlinkStatus': 'OFF', 'ConfiguredSpeed': '0', 'CurrentSpeed': '0', 'Description': '', 'Id': 'SVCTAG1:ethernet1/1/35', 'MaxSpeed': '0', 'MediaType': 'Ethernet', 'Name': '', 'NodeServiceTag': 'SVCTAG1', 'OpticsType': 'NotPresent', 'PortNumber': 'ethernet1/1/35', 'Role': 'Uplink', 'Status': 'Down', 'Type': 'PhysicalEthernet'}, {'AdminStatus': 'Enabled', 'BlinkStatus': 'OFF', 'ConfiguredSpeed': '0', 'CurrentSpeed': '0', 'Description': '', 'Id': 'SVCTAG1:ethernet1/1/35', 'MaxSpeed': '0', 'MediaType': 'Ethernet', 'Name': '', 'NodeServiceTag': 'SVCTAG1', 'OpticsType': 'NotPresent', 'PortNumber': 'ethernet1/1/35', 'Role': 'Uplink', 'Status': 'Down', 'Type': 'PhysicalEthernet'}], 'Summary': {'NetworkCount': 1, 'PortCount': 2}, 'UfdEnable': 'Disabled'}])
+ Information about the fabric uplink.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'CGEN1006', 'RelatedProperties': [], 'Message': 'Unable to complete the request because the resource URI does not exist or is not implemented.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Husniya Hameed(@husniya_hameed)
+
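
Editor's note: the documented ``Ports`` entries in ``uplink_info`` can be filtered to spot uplink ports that are not operational. A minimal sketch, reusing the placeholder connection details and the fabric and uplink names from the examples above; the ``Status`` value ``Down`` comes from the sample return data:

.. code-block:: yaml+jinja

    - name: Retrieve a specific uplink and capture the result
      dellemc.openmanage.ome_smart_fabric_uplink_info:
        hostname: "192.168.0.1"
        username: "username"
        password: "password"
        ca_path: "/path/to/ca_cert.pem"
        fabric_name: "f1"
        uplink_name: "u1"
      register: uplink_facts

    - name: List the uplink ports that are reported as down
      ansible.builtin.debug:
        msg: "{{ uplink_facts.uplink_info[0].Ports | selectattr('Status', 'equalto', 'Down') | map(attribute='Id') | list }}"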
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst
index 5b58dffc4..409d2f7e8 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template.rst
@@ -46,7 +46,7 @@ Parameters
template_id (optional, int, None)
ID of the existing template.
- This option is applicable when *command* is ``modify``, ``deploy``, ``delete`` and ``export``.
+ This option is applicable when *command* is ``modify``, ``deploy``, ``delete``, ``clone`` and ``export``.
This option is mutually exclusive with *template_name*.
@@ -54,7 +54,7 @@ Parameters
template_name (optional, str, None)
Name of the existing template.
- This option is applicable when *command* is ``modify``, ``deploy``, ``delete`` and ``export``.
+ This option is applicable when *command* is ``modify``, ``deploy``, ``delete``, ``clone`` and ``export``.
This option is mutually exclusive with *template_id*.
@@ -109,6 +109,18 @@ Parameters
Refer to the OpenManage Enterprise API Reference Guide for more details.
+ job_wait (optional, bool, True)
+ Provides the option to wait for job completion.
+
+ This option is applicable when *command* is ``create``, or ``deploy``.
+
+
+ job_wait_timeout (optional, int, 1200)
+ The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
+
+ This option is applicable when *job_wait* is ``true``.
+
+
hostname (True, str, None)
OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
@@ -126,11 +138,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -148,7 +160,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -215,7 +227,7 @@ Examples
- name: Deploy template on multiple devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -230,7 +242,7 @@ Examples
- name: Deploy template on groups
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -242,7 +254,7 @@ Examples
- name: Deploy template on multiple devices along with the attributes values to be modified on the target devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -263,18 +275,18 @@ Examples
# Service tags not allowed.
- DeviceId: 12765
Attributes:
- - Id : 15645
- Value : "0.0.0.0"
- IsIgnored : false
+ - Id: 15645
+ Value: "0.0.0.0"
+ IsIgnored: false
- DeviceId: 10173
Attributes:
- - Id : 18968,
- Value : "hostname-1"
- IsIgnored : false
+ - Id: 18968
+ Value: "hostname-1"
+ IsIgnored: false
- name: Deploy template and Operating System (OS) on multiple devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -308,7 +320,7 @@ Examples
- name: "Deploy template on multiple devices and changes the device-level attributes. After the template is deployed,
install OS using its image"
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -324,14 +336,14 @@ Examples
Attributes:
- DeviceId: 12765
Attributes:
- - Id : 15645
- Value : "0.0.0.0"
- IsIgnored : false
+ - Id: 15645
+ Value: "0.0.0.0"
+ IsIgnored: false
- DeviceId: 10173
Attributes:
- - Id : 18968,
- Value : "hostname-1"
- IsIgnored : false
+ - Id: 18968
+ Value: "hostname-1"
+ IsIgnored: false
NetworkBootIsoModel:
BootToNetwork: true
ShareType: "NFS"
@@ -488,6 +500,19 @@ Examples
Content: "{{ lookup('ansible.builtin.file', './test.xml') }}"
Type: 2
+ - name: Create a template from a reference device with Job wait as false
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25123
+ attributes:
+ Name: "New Template"
+ Description: "New Template description"
+ Fqdds: iDRAC,BIOS,
+ job_wait: false
+
Return Values
@@ -522,11 +547,11 @@ Content (success, when I(command) is C(export), str, <SystemConfiguration Model=
XML content of the exported template. This content can be written to a xml file.
-devices_assigned (I(command) is C(deploy), dict, AnsibleMapping([('10362', 28), ('10312', 23)]))
+devices_assigned (I(command) is C(deploy), dict, {'10362': 28, '10312': 23})
Mapping of devices with the templates already deployed on them.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
@@ -544,4 +569,6 @@ Authors
~~~~~~~
- Jagadeesh N V (@jagadeeshnv)
+- Husniya Hameed (@husniya_hameed)
+- Kritika Bhateja (@Kritika-Bhateja)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst
index 3210a8da8..64a63b477 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_identity_pool.rst
@@ -52,11 +52,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -74,7 +74,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -113,7 +113,7 @@ msg (always, str, Successfully attached identity pool to template.)
Overall identity pool status of the attach or detach operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.'), ('@Message.ExtendedInfo', [AnsibleMapping([('MessageId', 'GEN1234'), ('RelatedProperties', []), ('Message', 'Unable to process the request because an error occurred.'), ('MessageArgs', []), ('Severity', 'Critical'), ('Resolution', 'Retry the operation. If the issue persists, contact your system administrator.')])])]))]))
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst
index 72e0b6d96..d1da26467 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_info.rst
@@ -57,11 +57,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -79,7 +79,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -125,7 +125,7 @@ msg (on error, str, Failed to fetch the template facts)
Overall template facts status.
-template_info (success, dict, AnsibleMapping([('192.168.0.1', AnsibleMapping([('CreatedBy', 'system'), ('CreationTime', '1970-01-31 00:00:56.372144'), ('Description', 'Tune workload for Performance Optimized Virtualization'), ('HasIdentityAttributes', False), ('Id', 1), ('IdentityPoolId', 0), ('IsBuiltIn', True), ('IsPersistencePolicyValid', False), ('IsStatelessAvailable', False), ('LastUpdatedBy', None), ('LastUpdatedTime', '1970-01-31 00:00:56.372144'), ('Name', 'iDRAC Enable Performance Profile for Virtualization'), ('SourceDeviceId', 0), ('Status', 0), ('TaskId', 0), ('TypeId', 2), ('ViewTypeId', 4)]))]))
+template_info (success, dict, {'192.168.0.1': {'CreatedBy': 'system', 'CreationTime': '1970-01-31 00:00:56.372144', 'Description': 'Tune workload for Performance Optimized Virtualization', 'HasIdentityAttributes': False, 'Id': 1, 'IdentityPoolId': 0, 'IsBuiltIn': True, 'IsPersistencePolicyValid': False, 'IsStatelessAvailable': False, 'LastUpdatedBy': None, 'LastUpdatedTime': '1970-01-31 00:00:56.372144', 'Name': 'iDRAC Enable Performance Profile for Virtualization', 'SourceDeviceId': 0, 'Status': 0, 'TaskId': 0, 'TypeId': 2, 'ViewTypeId': 4}})
Details of the templates.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst
index 6d2752fca..89d908556 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan.rst
@@ -120,11 +120,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -142,7 +142,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -217,7 +217,7 @@ msg (always, str, Successfully applied the network settings to template.)
Overall status of the template vlan operation.
-error_info (on HTTP error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the request because TemplateId does not exist or is not applicable for the resource URI.'), ('MessageArgs', ['TemplateId']), ('MessageId', 'CGEN1004'), ('RelatedProperties', []), ('Resolution', "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties."), ('Severity', 'Critical')])]), ('code', 'Base.1.0.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on HTTP error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the request because TemplateId does not exist or is not applicable for the resource URI.', 'MessageArgs': ['TemplateId'], 'MessageId': 'CGEN1004', 'RelatedProperties': [], 'Resolution': "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide for more information about resource URI and its properties.", 'Severity': 'Critical'}], 'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of the HTTP Error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan_info.rst
new file mode 100644
index 000000000..6d5c9fad8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_template_network_vlan_info.rst
@@ -0,0 +1,150 @@
+.. _ome_template_network_vlan_info_module:
+
+
+ome_template_network_vlan_info -- Retrieves network configuration of template.
+==============================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module retrieves the network configuration of a template on OpenManage Enterprise or OpenManage Enterprise Modular.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ template_id (optional, int, None)
+ Id of the template.
+
+ This is mutually exclusive with *template_name*.
+
+
+ template_name (optional, str, None)
+ Name of the template.
+
+ This is mutually exclusive with *template_id*.
+
+ ``Note`` If the *template_id* or *template_name* option is not provided, the module retrieves network VLAN info of all templates.
+
+
+ hostname (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+
+
+ username (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular username.
+
+
+ password (True, str, None)
+ OpenManage Enterprise or OpenManage Enterprise Modular password.
+
+
+ port (optional, int, 443)
+ OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module on a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Retrieve network details of all templates.
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+ - name: Retrieve network details using template ID
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 1234
+
+ - name: Retrieve network details using template name
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template1
+
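+ # Illustrative follow-up, not part of the documented examples above: register
+ # the result so the returned vlan_info list can be reused by later tasks.
+ # The variable name is an assumption.
+ - name: Retrieve network details using template name and store the result
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template1
+ register: template_vlans
+
+ - name: Show the retrieved VLAN details
+ ansible.builtin.debug:
+ var: template_vlans.vlan_info
+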
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully retrieved the template network VLAN information.)
+ Status of template VLAN information retrieval.
+
+
+vlan_info (success, list, [{'TemplateId': 58, 'TemplateName': 't2', 'NicBondingTechnology': 'LACP', 'NicModel': {'NIC in Mezzanine 1B': {'1': {'Port': 1, 'Vlan Tagged': ['25367', '32656', '32658', '26898'], 'Vlan UnTagged': '21474', 'NICBondingEnabled': 'false'}, '2': {'Port': 2, 'Vlan Tagged': [], 'Vlan UnTagged': '32658', 'NIC Bonding Enabled': 'true'}}, 'NIC in Mezzanine 1A': {'1': {'Port': 1, 'Vlan Tagged': ['32656', '32658'], 'Vlan UnTagged': '25367', 'NIC Bonding Enabled': 'true'}, '2': {'Port': 2, 'Vlan Tagged': ['21474'], 'Vlan UnTagged': '32656', 'NIC Bonding Enabled': 'false'}}}}])
+ Information about the template network VLAN.
+
+
+error_info (on HTTP error, dict, {'error': {'code': 'Base.1.0.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.', '@Message.ExtendedInfo': [{'MessageId': 'GEN1234', 'RelatedProperties': [], 'Message': 'Unable to process the request because an error occurred.', 'MessageArgs': [], 'Severity': 'Critical', 'Resolution': 'Retry the operation. If the issue persists, contact your system administrator.'}]}})
+ Details of the HTTP Error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Jagadeesh N V (@jagadeeshnv)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst
index 4e46c91fc..908640f5e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_user.rst
@@ -47,7 +47,7 @@ Parameters
Either *user_id* or *name* is mandatory for ``absent`` operation.
- attributes (optional, dict, AnsibleMapping())
+ attributes (optional, dict, {})
Payload data for the user operations. It can take the following attributes for ``present``.
UserTypeId, DirectoryServiceId, Description, Name, Password, UserName, RoleId, Locked, Enabled.
@@ -74,11 +74,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -96,7 +96,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support ``check_mode``.
@@ -119,7 +119,7 @@ Examples
UserName: "user1"
Password: "UserPassword"
RoleId: "10"
- Enabled: True
+ Enabled: true
- name: Create user with all parameters
dellemc.openmanage.ome_user:
@@ -132,10 +132,10 @@ Examples
Description: "user2 description"
Password: "UserPassword"
RoleId: "10"
- Enabled: True
+ Enabled: true
DirectoryServiceId: 0
UserTypeId: 1
- Locked: False
+ Locked: false
Name: "user2"
- name: Modify existing user
@@ -148,7 +148,7 @@ Examples
attributes:
UserName: "user3"
RoleId: "10"
- Enabled: True
+ Enabled: true
Description: "Modify user Description"
- name: Delete existing user using id
@@ -178,7 +178,7 @@ msg (always, str, Successfully created a User)
Overall status of the user operation.
-user_status (When I(state) is C(present)., dict, AnsibleMapping([('Description', 'Test user creation'), ('DirectoryServiceId', 0), ('Enabled', True), ('Id', '61546'), ('IsBuiltin', False), ('Locked', False), ('Name', 'test'), ('Password', None), ('PlainTextPassword', None), ('RoleId', '10'), ('UserName', 'test'), ('UserTypeId', 1)]))
+user_status (When I(state) is C(present)., dict, {'Description': 'Test user creation', 'DirectoryServiceId': 0, 'Enabled': True, 'Id': '61546', 'IsBuiltin': False, 'Locked': False, 'Name': 'test', 'Password': None, 'PlainTextPassword': None, 'RoleId': '10', 'UserName': 'test', 'UserTypeId': 1})
Details of the user operation, when *state* is ``present``.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst b/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst
index 80e8250d1..ba0043f28 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/ome_user_info.rst
@@ -57,11 +57,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -79,7 +79,7 @@ Notes
-----
.. note::
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports ``check_mode``.
@@ -125,7 +125,7 @@ msg (on error, str, Unable to retrieve the account details.)
Over all status of fetching user facts.
-user_info (success, dict, AnsibleMapping([('192.168.0.1', AnsibleMapping([('Id', '1814'), ('UserTypeId', 1), ('DirectoryServiceId', 0), ('Description', 'user name description'), ('Name', 'user_name'), ('Password', None), ('UserName', 'user_name'), ('RoleId', '10'), ('Locked', False), ('IsBuiltin', True), ('Enabled', True)]))]))
+user_info (success, dict, {'192.168.0.1': {'Id': '1814', 'UserTypeId': 1, 'DirectoryServiceId': 0, 'Description': 'user name description', 'Name': 'user_name', 'Password': None, 'UserName': 'user_name', 'RoleId': '10', 'Locked': False, 'IsBuiltin': True, 'Enabled': True}})
Details of the user.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst
index d5fe2c96f..0ea5da0d6 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_event_subscription.rst
@@ -68,11 +68,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -146,11 +146,11 @@ msg (always, str, Successfully added the subscription.)
Overall status of the task.
-status (on adding subscription successfully, dict, AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'The resource has been created successfully'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'Base.1.7.Created'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'None'), ('Severity', 'OK')]), AnsibleMapping([('Message', 'A new resource is successfully created.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'IDRAC.2.2.SYS414'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'No response action is required.'), ('Severity', 'Informational')])]), ('Actions', AnsibleMapping([('#EventDestination.ResumeSubscription', AnsibleMapping([('target', '/redfish/v1/EventService/Subscriptions/5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a/Actions/EventDestination.ResumeSubscription')]))])), ('Context', 'RedfishEvent'), ('DeliveryRetryPolicy', 'RetryForever'), ('Description', 'Event Subscription Details'), ('Destination', 'https://192.168.1.100:8188'), ('EventFormatType', 'Event'), ('EventTypes', ['Alert']), ('EventTypes@odata.count', 1), ('HttpHeaders', []), ('HttpHeaders@odata.count', 0), ('Id', '5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a'), ('MetricReportDefinitions', []), ('MetricReportDefinitions@odata.count', 0), ('Name', 'EventSubscription 5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a'), ('OriginResources', []), ('OriginResources@odata.count', 0), ('Protocol', 'Redfish'), ('Status', AnsibleMapping([('Health', 'OK'), ('HealthRollup', 'OK'), ('State', 'Enabled')])), ('SubscriptionType', 'RedfishEvent')]))
+status (on adding subscription successfully, dict, {'@Message.ExtendedInfo': [{'Message': 'The resource has been created successfully', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'Base.1.7.Created', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'None', 'Severity': 'OK'}, {'Message': 'A new resource is successfully created.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'IDRAC.2.2.SYS414', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'No response action is required.', 'Severity': 'Informational'}], 'Actions': {'#EventDestination.ResumeSubscription': {'target': '/redfish/v1/EventService/Subscriptions/5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a/Actions/EventDestination.ResumeSubscription'}}, 'Context': 'RedfishEvent', 'DeliveryRetryPolicy': 'RetryForever', 'Description': 'Event Subscription Details', 'Destination': 'https://192.168.1.100:8188', 'EventFormatType': 'Event', 'EventTypes': ['Alert'], 'EventTypes@odata.count': 1, 'HttpHeaders': [], 'HttpHeaders@odata.count': 0, 'Id': '5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a', 'MetricReportDefinitions': [], 'MetricReportDefinitions@odata.count': 0, 'Name': 'EventSubscription 5d432f36-81f4-11eb-9dc0-2cea7ff7ff9a', 'OriginResources': [], 'OriginResources@odata.count': 0, 'Protocol': 'Redfish', 'Status': {'Health': 'OK', 'HealthRollup': 'OK', 'State': 'Enabled'}, 'SubscriptionType': 'RedfishEvent'})
Returns subscription object created
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the operation because the JSON data format entered is invalid.'), ('Resolution', 'Do the following and the retry the operation: 1) Enter the correct JSON data format and retry the operation. 2) Make sure that no syntax error is present in JSON data format. 3) Make sure that a duplicate key is not present in JSON data format.'), ('Severity', 'Critical')]), AnsibleMapping([('Message', 'The request body submitted was malformed JSON and could not be parsed by the receiving service.'), ('Resolution', 'Ensure that the request body is valid JSON and resubmit the request.'), ('Severity', 'Critical')])]), ('code', 'Base.1.2.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the operation because the JSON data format entered is invalid.', 'Resolution': 'Do the following and the retry the operation: 1) Enter the correct JSON data format and retry the operation. 2) Make sure that no syntax error is present in JSON data format. 3) Make sure that a duplicate key is not present in JSON data format.', 'Severity': 'Critical'}, {'Message': 'The request body submitted was malformed JSON and could not be parsed by the receiving service.', 'Resolution': 'Ensure that the request body is valid JSON and resubmit the request.', 'Severity': 'Critical'}], 'code': 'Base.1.2.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of http error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst
index d1225dd7f..3a3471c1f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware.rst
@@ -40,6 +40,18 @@ Parameters
Protocol used to transfer the firmware image file. Applicable for URI based update.
+ job_wait (optional, bool, True)
+ Provides the option to wait for job completion.
+
+
+ job_wait_timeout (optional, int, 3600)
+ The maximum wait time of *job_wait* in seconds. The job is tracked only for this duration.
+
+ This option is applicable when *job_wait* is ``true``.
+
+ Note: If a firmware update requires a reboot, the job is scheduled and the module waits for the number of seconds specified in *job_wait_timeout*. To reduce the wait time, either set *job_wait_timeout* to a lower value or set *job_wait* to false and retrigger the update.
+
+
baseuri (True, str, None)
IP address of the target out-of-band controller. For example- <ipaddress>:<port>.
@@ -53,11 +65,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -76,6 +88,8 @@ Notes
.. note::
- Run this module from a system that has direct access to Redfish APIs.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports only iDRAC9 and above.
- This module does not support ``check_mode``.
@@ -97,6 +111,17 @@ Examples
image_uri: "http://192.168.0.2/firmware_repo/component.exe"
transfer_protocol: "HTTP"
+ - name: Update the firmware from a single executable file available in an HTTP protocol with job_wait
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+ transfer_protocol: "HTTP"
+ job_wait: true
+ job_wait_timeout: 600
+
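+ # Illustrative addition, not part of the documented examples: as described in
+ # the job_wait_timeout note, an update that needs a reboot can be triggered
+ # without waiting by setting job_wait to false. Values are placeholders.
+ - name: Update the firmware without waiting when a reboot is required
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+ transfer_protocol: "HTTP"
+ job_wait: false
+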
- name: Update the firmware from a single executable file available in a local path
dellemc.openmanage.redfish_firmware:
baseuri: "192.168.0.1"
@@ -110,15 +135,15 @@ Examples
Return Values
-------------
-msg (always, str, Successfully submitted the firmware update task.)
+msg (always, str, Successfully updated the firmware.)
Overall status of the firmware update task.
-task (success, dict, AnsibleMapping([('id', 'JID_XXXXXXXXXXXX'), ('uri', '/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXX')]))
+task (success, dict, {'id': 'JID_XXXXXXXXXXXX', 'uri': '/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXX'})
Returns ID and URI of the created task.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the operation because the JSON data format entered is invalid.'), ('Resolution', 'Do the following and the retry the operation: 1) Enter the correct JSON data format and retry the operation. 2) Make sure that no syntax error is present in JSON data format. 3) Make sure that a duplicate key is not present in JSON data format.'), ('Severity', 'Critical')]), AnsibleMapping([('Message', 'The request body submitted was malformed JSON and could not be parsed by the receiving service.'), ('Resolution', 'Ensure that the request body is valid JSON and resubmit the request.'), ('Severity', 'Critical')])]), ('code', 'Base.1.2.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information.')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the operation because the JSON data format entered is invalid.', 'Resolution': 'Do the following and the retry the operation: 1) Enter the correct JSON data format and retry the operation. 2) Make sure that no syntax error is present in JSON data format. 3) Make sure that a duplicate key is not present in JSON data format.', 'Severity': 'Critical'}, {'Message': 'The request body submitted was malformed JSON and could not be parsed by the receiving service.', 'Resolution': 'Ensure that the request body is valid JSON and resubmit the request.', 'Severity': 'Critical'}], 'code': 'Base.1.2.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information.'}})
Details of http error.
@@ -136,4 +161,8 @@ Authors
~~~~~~~
- Felix Stephen (@felixs88)
+- Husniya Hameed (@husniya_hameed)
+- Shivam Sharma (@Shivam-Sharma)
+- Kritika Bhateja (@Kritika_Bhateja)
+- Abhishek Sinha (@ABHISHEK-SINHA10)
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware_rollback.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware_rollback.rst
new file mode 100644
index 000000000..466239e40
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_firmware_rollback.rst
@@ -0,0 +1,163 @@
+.. _redfish_firmware_rollback_module:
+
+
+redfish_firmware_rollback -- To perform a component firmware rollback using component name
+==========================================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+This module allows you to roll back the firmware of different server components.
+
+Depending on the component, the firmware update is applied after an automatic or manual reboot.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- python >= 3.9.6
+
+
+
+Parameters
+----------
+
+ name (True, str, None)
+ The name or regular expression of the component to match. The match is case-sensitive.
+
+
+ reboot (optional, bool, True)
+ Reboot the server to apply the previous version of the firmware.
+
+ ``true`` reboots the server to rollback the firmware to the available version.
+
+ ``false`` schedules the rollback of firmware until the next restart.
+
+ When *reboot* is ``false``, some components update immediately and the server may reboot. In such cases, the module waits until the server is accessible.
+
+
+ reboot_timeout (optional, int, 900)
+ Wait time in seconds. The module waits for this duration till the server reboots.
+
+
+ baseuri (True, str, None)
+ IP address of the target out-of-band controller. For example- <ipaddress>:<port>.
+
+
+ username (True, str, None)
+ Username of the target out-of-band controller.
+
+
+ password (True, str, None)
+ Password of the target out-of-band controller.
+
+
+ validate_certs (optional, bool, True)
+ If ``false``, the SSL certificates will not be validated.
+
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
+
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
+
+
+ ca_path (optional, path, None)
+ The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+
+
+ timeout (optional, int, 30)
+ The socket level timeout in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - Run this module from a system that has direct access to Redfish APIs.
+ - For components that do not require a reboot, firmware rollback proceeds irrespective of whether *reboot* is ``true`` or ``false``.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports ``check_mode``.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ ---
+ - name: Rollback a BIOS component firmware
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "BIOS"
+
+ - name: Rollback all NIC cards with a name starting from 'Broadcom Gigabit'.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Broadcom Gigabit Ethernet.*"
+
+ - name: Rollback all the component firmware except BIOS component.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "(?!BIOS).*"
+
+ - name: Rollback all the available firmware component.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: ".*"
+
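+ # Illustrative addition, not part of the documented examples: schedule the
+ # rollback for the next restart by setting reboot to false, as described in
+ # the reboot parameter above. Values are placeholders.
+ - name: Rollback the BIOS firmware without an immediate reboot
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "BIOS"
+ reboot: false
+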
+
+
+Return Values
+-------------
+
+msg (always, str, Successfully completed the job for firmware rollback.)
+ Overall firmware rollback status.
+
+
+status (success, list, [{'ActualRunningStartTime': '2023-08-04T12:26:55', 'ActualRunningStopTime': '2023-08-04T12:32:35', 'CompletionTime': '2023-08-04T12:32:35', 'Description': 'Job Instance', 'EndTime': 'TIME_NA', 'Id': 'JID_911698303631', 'JobState': 'Completed', 'JobType': 'FirmwareUpdate', 'Message': 'Job completed successfully.', 'MessageArgs': [], 'MessageId': 'PR19', 'Name': 'Firmware Rollback: Firmware', 'PercentComplete': 100, 'StartTime': '2023-08-04T12:23:50', 'TargetSettingsURI': None}])
+ Firmware rollback job and progress details from the iDRAC.
+
+
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'InstanceID value provided for the update operation is invalid', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'IDRAC.2.8.SUP024', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'Enumerate inventory, copy the InstanceID value and provide that value for the update operation.', 'Severity': 'Warning'}], 'code': 'Base.1.12.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information'}})
+ Details of the HTTP error.
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Felix Stephen (@felixs88)
+
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst
index fb05fe3e3..fa8ac069e 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_powerstate.rst
@@ -72,11 +72,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If ``false``, the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure ``false`` only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version ``5.0.0``, the *validate_certs* is ``false`` by default.
ca_path (optional, path, None)
@@ -133,7 +133,7 @@ msg (always, str, Successfully performed the reset type operation 'On'.)
Overall status of the reset operation.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to complete the operation because the resource /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset entered in not found.'), ('MessageArgs', ['/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset']), ('MessageArgs@odata.count', 1), ('MessageId', 'IDRAC.2.1.SYS403'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'Enter the correct resource and retry the operation. For information about valid resource, see the Redfish Users Guide available on the support site.'), ('Severity', 'Critical')])]), ('code', 'Base.1.5.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to complete the operation because the resource /redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset entered in not found.', 'MessageArgs': ['/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset'], 'MessageArgs@odata.count': 1, 'MessageId': 'IDRAC.2.1.SYS403', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'Enter the correct resource and retry the operation. For information about valid resource, see the Redfish Users Guide available on the support site.', 'Severity': 'Critical'}], 'code': 'Base.1.5.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information'}})
Details of the HTTP error.
diff --git a/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst b/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst
index d0dfe4b15..f6e5f577f 100644
--- a/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst
+++ b/ansible_collections/dellemc/openmanage/docs/modules/redfish_storage_volume.rst
@@ -20,7 +20,7 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- python >= 3.8.6
+- python \>= 3.9.6
@@ -32,7 +32,7 @@ Parameters
For example- RAID.Slot.1-1.
- This option is mandatory when *state* is ``present`` while creating a volume.
+ This option is mandatory when \ :emphasis:`state`\ is \ :literal:`present`\ while creating a volume.
volume_id (optional, str, None)
@@ -42,41 +42,43 @@ Parameters
This option is mandatory in the following scenarios,
- *state* is ``present``, when updating a volume.
+ \ :emphasis:`state`\ is \ :literal:`present`\ , when updating a volume.
- *state* is ``absent``, when deleting a volume.
+ \ :emphasis:`state`\ is \ :literal:`absent`\ , when deleting a volume.
- *command* is ``initialize``, when initializing a volume.
+ \ :emphasis:`command`\ is \ :literal:`initialize`\ , when initializing a volume.
state (optional, str, None)
- ``present`` creates a storage volume for the specified I (controller_id), or modifies the storage volume for the specified I (volume_id). "Note: Modification of an existing volume properties depends on drive and controller capabilities".
+ \ :literal:`present`\ creates a storage volume for the specified \ :emphasis:`controller\_id`\ , or modifies the storage volume for the specified \ :emphasis:`volume\_id`\ . "Note: Modification of an existing volume's properties depends on drive and controller capabilities".
- ``absent`` deletes the volume for the specified *volume_id*.
+ \ :literal:`absent`\ deletes the volume for the specified \ :emphasis:`volume\_id`\ .
command (optional, str, None)
- ``initialize`` initializes an existing storage volume for a specified *volume_id*.
+ \ :literal:`initialize`\ initializes an existing storage volume for a specified \ :emphasis:`volume\_id`\ .
volume_type (optional, str, None)
One of the following volume types must be selected to create a volume.
- ``Mirrored`` The volume is a mirrored device.
+ \ :literal:`NonRedundant`\ The volume is a non-redundant storage device.
- ``NonRedundant`` The volume is a non-redundant storage device.
+ \ :literal:`Mirrored`\ The volume is a mirrored device.
- ``SpannedMirrors`` The volume is a spanned set of mirrored devices.
+ \ :literal:`StripedWithParity`\ The volume is a device which uses parity to retain redundant information.
- ``SpannedStripesWithParity`` The volume is a spanned set of devices which uses parity to retain redundant information.
+ \ :literal:`SpannedMirrors`\ The volume is a spanned set of mirrored devices.
- ``StripedWithParity`` The volume is a device which uses parity to retain redundant information.
+ \ :literal:`SpannedStripesWithParity`\ The volume is a spanned set of devices which uses parity to retain redundant information.
+
+ \ :emphasis:`volume\_type`\ is mutually exclusive with \ :emphasis:`raid\_type`\ .
name (optional, str, None)
Name of the volume to be created.
- Only applicable when *state* is ``present``.
+ Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ .
drives (optional, list, None)
@@ -84,57 +86,111 @@ Parameters
For example- Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1.
- Only applicable when *state* is ``present`` when creating a new volume.
+ Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ when creating a new volume.
block_size_bytes (optional, int, None)
- Block size in bytes.Only applicable when *state* is ``present``.
+ Block size in bytes. Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ .
capacity_bytes (optional, str, None)
Volume size in bytes.
- Only applicable when *state* is ``present``.
+ Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ .
optimum_io_size_bytes (optional, int, None)
- Stripe size value must be in multiples of 64 * 1024.
+ Stripe size value must be in multiples of 64 \* 1024.
- Only applicable when *state* is ``present``.
+ Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ .
encryption_types (optional, str, None)
The following encryption types can be selected.
- ``ControllerAssisted`` The volume is encrypted by the storage controller entity.
+ \ :literal:`ControllerAssisted`\ The volume is encrypted by the storage controller entity.
- ``NativeDriveEncryption`` The volume utilizes the native drive encryption capabilities of the drive hardware.
+ \ :literal:`NativeDriveEncryption`\ The volume utilizes the native drive encryption capabilities of the drive hardware.
- ``SoftwareAssisted`` The volume is encrypted by the software running on the system or the operating system.
+ \ :literal:`SoftwareAssisted`\ The volume is encrypted by the software running on the system or the operating system.
- Only applicable when *state* is ``present``.
+ Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ .
encrypted (optional, bool, None)
Indicates whether volume is currently utilizing encryption or not.
- Only applicable when *state* is ``present``.
+ Only applicable when \ :emphasis:`state`\ is \ :literal:`present`\ .
oem (optional, dict, None)
Includes OEM extended payloads.
- Only applicable when *state* is *present*.
+ Only applicable when \ :emphasis:`state`\ is \ :emphasis:`present`\ .
initialize_type (optional, str, Fast)
Initialization type of existing volume.
- Only applicable when *command* is ``initialize``.
+ Only applicable when \ :emphasis:`command`\ is \ :literal:`initialize`\ .
+
+
+ raid_type (optional, str, None)
+ \ :literal:`RAID0`\ to create a RAID0 type volume.
+
+ \ :literal:`RAID1`\ to create a RAID1 type volume.
+
+ \ :literal:`RAID5`\ to create a RAID5 type volume.
+
+ \ :literal:`RAID6`\ to create a RAID6 type volume.
+
+ \ :literal:`RAID10`\ to create a RAID10 type volume.
+
+ \ :literal:`RAID50`\ to create a RAID50 type volume.
+
+ \ :literal:`RAID60`\ to create a RAID60 type volume.
+
+ \ :emphasis:`raid\_type`\ is mutually exclusive with \ :emphasis:`volume\_type`\ .
+
+
+ apply_time (optional, str, None)
+ Apply time of the Volume configuration.
+
+ \ :literal:`Immediate`\ allows you to apply the volume configuration on the host server immediately and apply the changes. This is applicable for \ :emphasis:`job\_wait`\ .
+
+ \ :literal:`OnReset`\ allows you to apply the changes on the next reboot of the host server.
+
+ \ :emphasis:`apply\_time`\ has a default value based on the different types of the controller. For example, BOSS-S1 and BOSS-N1 controllers have a default value of \ :emphasis:`apply\_time`\ as \ :literal:`OnReset`\ , and PERC controllers have a default value of \ :emphasis:`apply\_time`\ as \ :literal:`Immediate`\ .
+
+
+ reboot_server (optional, bool, False)
+ Reboot the server to apply the changes.
+
+ \ :emphasis:`reboot\_server`\ is applicable only when \ :emphasis:`apply\_time`\ is \ :literal:`OnReset`\ or when the default value for the apply time of the controller is \ :literal:`OnReset`\ .
+
+
+ force_reboot (optional, bool, False)
+ Reboot the server forcefully to apply the changes when the normal reboot fails.
+
+ \ :emphasis:`force\_reboot`\ is applicable only when \ :emphasis:`reboot\_server`\ is \ :literal:`true`\ .
+
+
+ job_wait (optional, bool, False)
+ This parameter provides the option to wait for the job completion.
+
+ This is applicable when \ :emphasis:`apply\_time`\ is \ :literal:`Immediate`\ .
+
+ This is applicable when \ :emphasis:`apply\_time`\ is \ :literal:`OnReset`\ and \ :emphasis:`reboot\_server`\ is \ :literal:`true`\ .
+
+
+ job_wait_timeout (optional, int, 1200)
+ This parameter is the maximum wait time of \ :emphasis:`job\_wait`\ in seconds.
+
+ This option is applicable when \ :emphasis:`job\_wait`\ is \ :literal:`true`\ .
baseuri (True, str, None)
- IP address of the target out-of-band controller. For example- <ipaddress>:<port>.
+ IP address of the target out-of-band controller. For example- \<ipaddress\>:\<port\>.
username (True, str, None)
@@ -146,11 +202,11 @@ Parameters
validate_certs (optional, bool, True)
- If ``False``, the SSL certificates will not be validated.
+ If \ :literal:`false`\ , the SSL certificates will not be validated.
- Configure ``False`` only on personally controlled sites where self-signed certificates are used.
+ Configure \ :literal:`false`\ only on personally controlled sites where self-signed certificates are used.
- Prior to collection version ``5.0.0``, the *validate_certs* is ``False`` by default.
+ Prior to collection version \ :literal:`5.0.0`\ , the \ :emphasis:`validate\_certs`\ is \ :literal:`false`\ by default.
ca_path (optional, path, None)
@@ -169,8 +225,9 @@ Notes
.. note::
- Run this module from a system that has direct access to Redfish APIs.
- - This module supports ``check_mode``.
- - This module always reports changes when *name* and *volume_id* are not specified. Either *name* or *volume_id* is required to support ``check_mode``.
+ - This module supports \ :literal:`check\_mode`\ .
+ - This module always reports changes when \ :emphasis:`name`\ and \ :emphasis:`volume\_id`\ are not specified. Either \ :emphasis:`name`\ or \ :emphasis:`volume\_id`\ is required to support \ :literal:`check\_mode`\ .
+ - This module supports IPv4 and IPv6 addresses.
@@ -211,7 +268,48 @@ Examples
controller_id: "RAID.Slot.1-1"
volume_type: "NonRedundant"
drives:
- - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+
+ - name: Create a RAID0 on PERC controller on reset
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+
+ - name: Create a RAID0 on BOSS controller with restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+ reboot_server: true
+
+ - name: Create a RAID0 on BOSS controller with force restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ reboot_server: true
+ force_reboot: true
- name: Modify a volume's encryption type settings
dellemc.openmanage.redfish_storage_volume:
@@ -243,6 +341,38 @@ Examples
volume_id: "Disk.Virtual.6:RAID.Slot.1-1"
initialize_type: "Slow"
+ - name: Create a RAID6 volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID6"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+
+ - name: Create a RAID60 volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID60"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-5
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-6
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-7
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-8
+
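+ # Illustrative addition, not part of the documented examples: combine the new
+ # apply_time, job_wait, and job_wait_timeout options to apply the volume
+ # configuration immediately and wait for the job. Values are placeholders.
+ - name: Create a RAID1 volume and wait for the configuration job to finish
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID1"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: Immediate
+ job_wait: true
+ job_wait_timeout: 1800
+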
Return Values
@@ -252,11 +382,11 @@ msg (always, str, Successfully submitted create volume task.)
Overall status of the storage configuration operation.
-task (success, dict, AnsibleMapping([('id', 'JID_XXXXXXXXXXXXX'), ('uri', '/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXXX')]))
+task (success, dict, {'id': 'JID_XXXXXXXXXXXXX', 'uri': '/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX'})
Returns ID and URI of the created task.
-error_info (on http error, dict, AnsibleMapping([('error', AnsibleMapping([('@Message.ExtendedInfo', [AnsibleMapping([('Message', 'Unable to perform configuration operations because a configuration job for the device already exists.'), ('MessageArgs', []), ('MessageArgs@odata.count', 0), ('MessageId', 'IDRAC.1.6.STOR023'), ('RelatedProperties', []), ('RelatedProperties@odata.count', 0), ('Resolution', 'Wait for the current job for the device to complete or cancel the current job before attempting more configuration operations on the device.'), ('Severity', 'Informational')])]), ('code', 'Base.1.2.GeneralError'), ('message', 'A general error has occurred. See ExtendedInfo for more information')]))]))
+error_info (on http error, dict, {'error': {'@Message.ExtendedInfo': [{'Message': 'Unable to perform configuration operations because a configuration job for the device already exists.', 'MessageArgs': [], 'MessageArgs@odata.count': 0, 'MessageId': 'IDRAC.1.6.STOR023', 'RelatedProperties': [], 'RelatedProperties@odata.count': 0, 'Resolution': 'Wait for the current job for the device to complete or cancel the current job before attempting more configuration operations on the device.', 'Severity': 'Informational'}], 'code': 'Base.1.2.GeneralError', 'message': 'A general error has occurred. See ExtendedInfo for more information'}})
Details of a http error.
@@ -274,4 +404,5 @@ Authors
~~~~~~~
- Sajna Shetty(@Sajna-Shetty)
+- Kritika Bhateja(@Kritika-Bhateja-03)
diff --git a/ansible_collections/dellemc/openmanage/meta/execution-environment.yml b/ansible_collections/dellemc/openmanage/meta/execution-environment.yml
index 5aa14625e..7fcb9819a 100644
--- a/ansible_collections/dellemc/openmanage/meta/execution-environment.yml
+++ b/ansible_collections/dellemc/openmanage/meta/execution-environment.yml
@@ -1,5 +1,6 @@
---
-version: 1
+version: 3
dependencies:
galaxy: requirements.yml
python: requirements.txt
+ system: bindep.txt
diff --git a/ansible_collections/dellemc/openmanage/meta/runtime.yml b/ansible_collections/dellemc/openmanage/meta/runtime.yml
index d550a6d38..8255bdc82 100644
--- a/ansible_collections/dellemc/openmanage/meta/runtime.yml
+++ b/ansible_collections/dellemc/openmanage/meta/runtime.yml
@@ -1,15 +1,15 @@
---
-requires_ansible: '>=2.10.0'
+requires_ansible: ">=2.14.0"
plugin_routing:
modules:
dellemc_get_firmware_inventory:
- deprecation:
- removal_date: "2023-01-15"
- warning_text: dellemc_get_firmware_inventory will be removed in a future release of this collection. Use dellemc.openmanage.idrac_firmware_info instead.
+ tombstone:
+ removal_version: 8.0.0
+ warning_text: Use dellemc.openmanage.idrac_firmware_info instead.
dellemc_get_system_inventory:
- deprecation:
- removal_date: "2023-01-15"
- warning_text: dellemc_get_system_inventory will be removed in a future release of this collection. Use dellemc.openmanage.idrac_system_info instead.
+ tombstone:
+ removal_version: 8.0.0
+ warning_text: Use dellemc.openmanage.idrac_system_info instead.
dellemc_configure_idrac_eventing:
deprecation:
removal_date: "2024-07-31"
@@ -33,4 +33,4 @@ plugin_routing:
idrac_timezone_ntp:
deprecation:
removal_date: "2024-07-31"
- warning_text: idrac_timezone_ntp will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead. \ No newline at end of file
+ warning_text: idrac_timezone_ntp will be removed in a future release of this collection. Use dellemc.openmanage.idrac_attributes instead.
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml
index d81640658..d3d561ca4 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/dellemc_idrac_storage_volume.yml
@@ -1,32 +1,29 @@
---
-- hosts: idrac
- connection: local
- name: iDRAC storage volume configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Storage volume configuration on iDRAC.
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Create single volume.
- dellemc_idrac_storage_volume:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "create"
- controller_id: "RAID.Slot.1-1"
- volumes:
- - drives:
- location: [5]
- tags:
+ - name: Create single volume.
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "create"
+ controller_id: "RAID.Slot.1-1"
+ volumes:
+ - drives:
+ location: [5]
+ delegate_to: localhost
+ tags:
- create_single_volume
-
- - name: Create multiple volume.
- dellemc_idrac_storage_volume:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+
+ - name: Create multiple volume.
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
raid_reset_config: "True"
state: "create"
@@ -44,67 +41,74 @@
volumes:
- name: "volume_1"
drives:
- id: ["Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1",
- "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1"]
+ id:
+ [
+ "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1",
+ "Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1",
+ ]
- name: "volume_2"
volume_type: "RAID 5"
span_length: 3
- span_depth: 1
+ span_depth: 1
drives:
- location: [7, 3, 5]
+ location: [7, 3, 5]
disk_cache_policy: "Disabled"
write_cache_policy: "WriteBack"
read_cache_policy: "NoReadAhead"
stripe_size: 131072
capacity: "200"
raid_init_operation: "None"
- tags:
- - create_multiple_volume
-
- - name: Delete single volume.
- dellemc_idrac_storage_volume:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "delete"
- volumes:
- - name: "volume_1"
- tags:
+ delegate_to: localhost
+ tags:
+ - create_multiple_volume
+
+ - name: Delete single volume.
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "delete"
+ volumes:
+ - name: "volume_1"
+ delegate_to: localhost
+ tags:
- delete_single_volume
-
- - name: Delete multiple volume.
- dellemc_idrac_storage_volume:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "delete"
- volumes:
- - name: "volume_1"
- - name: "volume_2"
- tags:
+ - name: Delete multiple volume.
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "delete"
+ volumes:
+ - name: "volume_1"
+ - name: "volume_2"
+ delegate_to: localhost
+ tags:
- delete_multiple_volume
-
- - name: View specific volume details.
- dellemc_idrac_storage_volume:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "view"
- controller_id: "RAID.Slot.1-1"
- volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
- tags:
+
+ - name: View specific volume details.
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "view"
+ controller_id: "RAID.Slot.1-1"
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+ delegate_to: localhost
+ tags:
- view_specific_volume
-
- - name: View all volume details.
- dellemc_idrac_storage_volume:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "view"
- tags:
- - view_all_volume \ No newline at end of file
+
+ - name: View all volume details.
+ dellemc.openmanage.dellemc_idrac_storage_volume:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "view"
+ delegate_to: localhost
+ tags:
+ - view_all_volume
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml
index c712288e7..8f4a25086 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_eventing.yml
@@ -1,62 +1,58 @@
---
-- hosts: idrac
- connection: local
- name: Configure the iDRAC eventing attributes
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the iDRAC eventing attributes
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Setup iDRAC SMTP
- dellemc_configure_idrac_eventing:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- smtp_ip_address: "0.0.0.0"
- authentication: "Enabled"
- username: "test"
- password: "test"
-
- tags:
- - idrac_smtp
-
- - name: Setup iDRAC SNMP Trap
- dellemc_configure_idrac_eventing:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- snmp_trap_state: "Enabled"
- destination_number: "2"
- snmp_v3_username: "None"
- destination: "1.1.1.1"
-
- tags:
- - idrac_snmptrap
-
- - name: Setup iDRAC Email Alerts
- dellemc_configure_idrac_eventing:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- email_alert_state: "Disabled"
- address: "test@test.com"
- alert_number: "1"
- custom_message: "test"
-
- tags:
- - idrac_email_alerts
-
- - name: Setup iDRAC Alerts
- dellemc_configure_idrac_eventing:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- enable_alerts: "Disabled"
-
- tags:
- - idrac_alerts \ No newline at end of file
+ - name: Setup iDRAC SMTP
+ dellemc.openmanage.dellemc_configure_idrac_eventing:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ smtp_ip_address: "0.0.0.0"
+ authentication: "Enabled"
+ username: "test"
+ password: "test"
+ delegate_to: localhost
+ tags:
+ - idrac_smtp
+
+ - name: Setup iDRAC SNMP Trap
+ dellemc.openmanage.dellemc_configure_idrac_eventing:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ snmp_trap_state: "Enabled"
+ destination_number: "2"
+ snmp_v3_username: "None"
+ destination: "1.1.1.1"
+ delegate_to: localhost
+ tags:
+ - idrac_snmptrap
+
+ - name: Setup iDRAC Email Alerts
+ dellemc.openmanage.dellemc_configure_idrac_eventing:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ email_alert_state: "Disabled"
+ address: "test@test.com"
+ alert_number: "1"
+ custom_message: "test"
+ delegate_to: localhost
+ tags:
+ - idrac_email_alerts
+
+ - name: Setup iDRAC Alerts
+ dellemc.openmanage.dellemc_configure_idrac_eventing:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_alerts: "Disabled"
+ delegate_to: localhost
+ tags:
+ - idrac_alerts
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml
index e0d4bbe87..de80ffe89 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_configure_idrac_services.yml
@@ -1,46 +1,44 @@
---
-- hosts: idrac
- connection: local
- name: Configure the iDRAC services attributes
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the iDRAC services attributes
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Setup iDRAC Webserver
- dellemc_configure_idrac_services:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- ssl_encryption: "T_168_Bit_or_higher"
- tls_protocol: "TLS_1_0_and_Higher"
-
- tags:
- - idrac_webserver
+ - name: Setup iDRAC Webserver
+ dellemc.openmanage.dellemc_configure_idrac_services:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ssl_encryption: "T_168_Bit_or_higher"
+ tls_protocol: "TLS_1_0_and_Higher"
+ delegate_to: localhost
+ tags:
+ - idrac_webserver
- - name: Setup iDRAC SNMP
- dellemc_configure_idrac_services:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- snmp_enable: "Enabled"
- snmp_protocol: "All"
+ - name: Setup iDRAC SNMP
+ dellemc.openmanage.dellemc_configure_idrac_services:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ snmp_enable: "Enabled"
+ snmp_protocol: "All"
+ delegate_to: localhost
- tags:
- - idrac_snmp
+ tags:
+ - idrac_snmp
- - name: Setup iDRAC SNMP settings
- dellemc_configure_idrac_services:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- ipmi_lan:
- community_name: public
- alert_port: 161
- trap_format: SNMPv3
- tags:
- - idrac-snmp-settings
+ - name: Setup iDRAC SNMP settings
+ dellemc.openmanage.dellemc_configure_idrac_services:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ipmi_lan:
+ community_name: public
+ alert_port: 161
+ trap_format: SNMPv3
+ delegate_to: localhost
+ tags:
+ - idrac-snmp-settings
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml
deleted file mode 100644
index ac4736c5f..000000000
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_firmware_inventory.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: idrac
- connection: local
- name: Get Installed Firmware Inventory
- gather_facts: False
-
- collections:
- - dellemc.openmanage
-
- tasks:
- - name: Get Installed Firmware Inventory
- dellemc_get_firmware_inventory:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml
deleted file mode 100644
index 085b14bfe..000000000
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_get_system_inventory.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: idrac
- connection: local
- name: Get system inventory
- gather_facts: False
-
- collections:
- - dellemc.openmanage
-
- tasks:
- - name: Get system inventory
- dellemc_get_system_inventory:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml
index 51a06ad1e..cf3c7eb9f 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_idrac_lc_attributes.yml
@@ -1,17 +1,14 @@
---
-- hosts: idrac
- connection: local
- name: Configure iDRAC CSIOR Setting
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure iDRAC CSIOR Setting
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Configure iDRAC CSIOR Setting
- dellemc_idrac_lc_attributes:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- csior: "Enabled"
+ - name: Configure iDRAC CSIOR Setting
+ dellemc.openmanage.dellemc_idrac_lc_attributes:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ csior: "Enabled"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml
index 61260e3eb..9a480db0b 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/dellemc_system_lockdown_mode.yml
@@ -1,17 +1,14 @@
---
-- hosts: idrac
- connection: local
- name: Configure System lockdown mode
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure System lockdown mode
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Configure System lockdown mode
- dellemc_system_lockdown_mode:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- lockdown_mode: "Disabled" \ No newline at end of file
+ - name: Configure System lockdown mode
+ dellemc.openmanage.dellemc_system_lockdown_mode:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ lockdown_mode: "Disabled"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml
index 9ee11728a..0a9b35723 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_network.yml
@@ -1,75 +1,71 @@
---
-- hosts: idrac
- connection: local
- name: Configure the iDRAC network attributes
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the iDRAC network attributes
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Register iDRAC on DNS
- idrac_network:
- idrac_ip: "{{idrac_ip}}"
- idrac_user: "{{idrac_user}}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- register_idrac_on_dns: "Enabled"
- dns_idrac_name: "idrac-3CZWCK2"
- auto_config: "Enabled"
- static_dns: "dell.com"
-
- tags:
- - dns_register
-
- - name: Setup VLAN attributes
- idrac_network:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- setup_idrac_nic_vlan: "Enabled"
-
- tags:
- - setup_vlan
-
- - name: Setup iDRAC NIC
- idrac_network:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- enable_nic: "Enabled"
- nic_selection: "Dedicated"
- failover_network: "T_None"
- auto_detect: "Disabled"
- auto_negotiation: "Enabled"
- network_speed: "T_1000"
- duplex_mode: "Full"
-
- tags:
- - idrac_nic
+ - name: Register iDRAC on DNS
+ dellemc.openmanage.idrac_network:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ register_idrac_on_dns: "Enabled"
+ dns_idrac_name: "idrac-3CZWCK2"
+ auto_config: "Enabled"
+ static_dns: "dell.com"
+ delegate_to: localhost
+ tags:
+ - dns_register
- - name: Setup iDRAC IPv4
- idrac_network:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- enable_dhcp: "Enabled"
- dns_from_dhcp: "Enabled"
- enable_ipv4: "Enabled"
+ - name: Setup VLAN attributes
+ dellemc.openmanage.idrac_network:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ setup_idrac_nic_vlan: "Enabled"
+ delegate_to: localhost
+ tags:
+ - setup_vlan
- tags:
- - idrac_ipv4
+ - name: Setup iDRAC NIC
+ dellemc.openmanage.idrac_network:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_nic: "Enabled"
+ nic_selection: "Dedicated"
+ failover_network: "T_None"
+ auto_detect: "Disabled"
+ auto_negotiation: "Enabled"
+ network_speed: "T_1000"
+ duplex_mode: "Full"
+ delegate_to: localhost
+ tags:
+ - idrac_nic
- - name: Setup iDRAC Static IPv4
- idrac_network:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- dns_from_dhcp: "Disabled"
+ - name: Setup iDRAC IPv4
+ dellemc.openmanage.idrac_network:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_dhcp: "Enabled"
+ dns_from_dhcp: "Enabled"
+ enable_ipv4: "Enabled"
+ delegate_to: localhost
+ tags:
+ - idrac_ipv4
- tags:
- - idrac_staticipv4 \ No newline at end of file
+ - name: Setup iDRAC Static IPv4
+ dellemc.openmanage.idrac_network:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ dns_from_dhcp: "Disabled"
+ delegate_to: localhost
+ tags:
+ - idrac_staticipv4
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml
index c5fe77911..df3f3ebfb 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/deprecated/idrac_timezone_ntp.yml
@@ -1,24 +1,20 @@
---
-- hosts: idrac
- connection: local
- name: Configure the iDRAC timezone attributes
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the iDRAC timezone attributes
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Setup iDRAC Timezone
- idrac_timezone_ntp:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- setup_idrac_timezone: "Singapore"
- enable_ntp: "Disabled"
- ntp_server_1: "100.100.25.1"
- ntp_server_2: "100.100.26.2"
- ntp_server_3: "100.100.27.3"
-
- tags:
- - idrac_timezone \ No newline at end of file
+ - name: Setup iDRAC Timezone
+ dellemc.openmanage.idrac_timezone_ntp:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ setup_idrac_timezone: "Singapore"
+ enable_ntp: "Disabled"
+ ntp_server_1: "100.100.25.1"
+ ntp_server_2: "100.100.26.2"
+ ntp_server_3: "100.100.27.3"
+ delegate_to: localhost
+ tags:
+ - idrac_timezone
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml
index 9a3621761..a19f085a4 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_attributes.yml
@@ -1,25 +1,22 @@
---
-- hosts: idrac
- connection: local
- name: Dell OpenManage Ansible iDRAC Certificates management.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible iDRAC Attributes management.
+ hosts: idrac
+ gather_facts: false
tasks:
- name: Update iDRAC attributes
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
idrac_attributes:
SNMP.1.AgentCommunity: Enabled
tags: idrac
+ delegate_to: localhost
- name: Update System attributes
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -27,9 +24,10 @@
system_attributes:
ThermalSettings.1.ThermalProfile: Sound Cap
tags: system
+ delegate_to: localhost
- name: Update Lifecycle Controller attributes
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -37,9 +35,10 @@
lifecycle_controller_attributes:
LCAttributes.1.AutoUpdate: Enabled
tags: lc
+ delegate_to: localhost
- name: Configure the iDRAC attributes for email alert settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -49,9 +48,10 @@
EmailAlert.1.Enable: Enabled
EmailAlert.1.Address: test@test.com
tags: email-alerts
+ delegate_to: localhost
- name: Configure the iDRAC attributes for SNMP alert settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -61,9 +61,10 @@
SNMPAlert.1.State: Enabled
SNMPAlert.1.SNMPv3Username: username
tags: snmp-alerts
+ delegate_to: localhost
- name: Configure the iDRAC attributes for SMTP alert settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -75,9 +76,10 @@
RemoteHosts.1.SMTPUserName: username
RemoteHosts.1.SMTPPassword: password
tags: smtp-alerts
+ delegate_to: localhost
- name: Configure the iDRAC attributes for webserver settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -86,9 +88,10 @@
WebServer.1.SSLEncryptionBitLength: 128-Bit or higher
WebServer.1.TLSProtocol: TLS 1.1 and Higher
tags: webserver-settings
+ delegate_to: localhost
- name: Configure the iDRAC attributes for SNMP settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -100,9 +103,10 @@
SNMP.1.AlertPort: 162
SNMP.1.AgentCommunity: public
tags: snmp-settings
+ delegate_to: localhost
- name: Configure the iDRAC LC attributes for collecting system inventory.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
idrac_password: "{{ idrac_password }}"
@@ -110,9 +114,10 @@
lifecycle_controller_attributes:
LCAttributes.1.CollectSystemInventoryOnRestart: Enabled
tags: collect-inventory
+ delegate_to: localhost
- name: Configure the iDRAC system attributes for LCD settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "192.168.0.1"
idrac_user: "user_name"
idrac_password: "user_password"
@@ -123,20 +128,22 @@
LCD.1.FrontPanelLocking: Full-Access
LCD.1.UserDefinedString: custom lcd string
tags: lcd-config
+ delegate_to: localhost
- name: Configure the iDRAC attributes for Timezone settings.
- idrac_attributes:
+ dellemc.openmanage.idrac_attributes:
idrac_ip: "192.168.0.1"
idrac_user: "user_name"
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
idrac_attributes:
- Time.1.TimeZone: CST6CDT
+ Time.1.Timezone: CST6CDT
NTPConfigGroup.1.NTPEnable: Enabled
NTPConfigGroup.1.NTP1: 192.168.0.5
NTPConfigGroup.1.NTP2: 192.168.0.6
NTPConfigGroup.1.NTP3: 192.168.0.7
tags: timezone-settings
+ delegate_to: localhost
- name: Configure all attributes
dellemc.openmanage.idrac_attributes:
@@ -153,3 +160,4 @@
lifecycle_controller_attributes:
LCAttributes.1.AutoUpdate: Disabled
tags: all-attributes
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml
index a541dce7a..feb8eb7e7 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_bios.yml
@@ -1,115 +1,119 @@
---
-- hosts: idrac
- connection: local
- name: Configure Boot Mode Setting
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure Boot Mode Setting
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Configure Bios Generic Attributes
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- attributes:
- BootMode: "Bios"
- OneTimeBootMode: "Enabled"
- BootSeqRetry: "Enabled"
- tags:
+ - name: Configure Bios Generic Attributes
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ tags:
- bootconfig
-
- - name: Configure PXE Generic Attributes
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- attributes:
- PxeDev1EnDis: "Enabled"
- PxeDev1Protocol: "IPV4"
- PxeDev1VlanEnDis: "Enabled"
- PxeDev1VlanId: x
- PxeDev1Interface: "NIC.Embedded.x-x-x"
- PxeDev1VlanPriority: x
- tags:
+ delegate_to: localhost
+
+ - name: Configure PXE Generic Attributes
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ PxeDev1EnDis: "Enabled"
+ PxeDev1Protocol: "IPV4"
+ PxeDev1VlanEnDis: "Enabled"
+ PxeDev1VlanId: x
+ PxeDev1Interface: "NIC.Embedded.x-x-x"
+ PxeDev1VlanPriority: x
+ tags:
- pxeconfig
+ delegate_to: localhost
- - name: Configure attributes of the BIOS at Maintenance window
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- apply_time: AtMaintenanceWindowStart
- maintenance_window:
- start_time: "2022-09-30T05:15:40-05:00"
- duration: 600
- attributes:
- BootMode: "Bios"
- OneTimeBootMode: "Enabled"
- BootSeqRetry: "Enabled"
- tags:
+ - name: Configure attributes of the BIOS at Maintenance window
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ tags:
- at_maintenance_start
+ delegate_to: localhost
- - name: Clear pending BIOS attributes
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- clear_pending: yes
- tags:
- - clear_pending
+ - name: Clear pending BIOS attributes
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ clear_pending: true
+ tags:
+ - clear_pending
+ delegate_to: localhost
- - name: Reset BIOS attributes to default settings.
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_pwd }}"
- ca_path: "/path/to/ca_cert.pem"
- reset_bios: yes
- tags:
- - reset_bios
+ - name: Reset BIOS attributes to default settings.
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_pwd }}"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_bios: true
+ tags:
+ - reset_bios
+ delegate_to: localhost
- - name: Configure Boot Sources
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_sources:
- - Name: "NIC.Integrated.x-x-x"
- Enabled: true
- Index: 1
- - Name: "NIC.Integrated.x-x-x"
- Enabled: true
- Index: 0
- tags:
+ - name: Configure Boot Sources
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name: "NIC.Integrated.x-x-x"
+ Enabled: true
+ Index: 1
+ - Name: "NIC.Integrated.x-x-x"
+ Enabled: true
+ Index: 0
+ tags:
- boot_sources
+ delegate_to: localhost
- - name: Configure Boot Sources - Enabled
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_sources:
- - Name: "HardDisk.List.1-1"
- Enabled: true
- tags:
+ - name: Configure Boot Sources - Enabled
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name: "HardDisk.List.1-1"
+ Enabled: true
+ tags:
- boot_sources_enabled
+ delegate_to: localhost
- - name: Configure Boot Sources - Index
- idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_sources:
- - Name: "NIC.Integrated.x-x-x"
- Index: 1
- tags:
- - boot_sources_index \ No newline at end of file
+ - name: Configure Boot Sources - Index
+ dellemc.openmanage.idrac_bios:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_sources:
+ - Name: "NIC.Integrated.x-x-x"
+ Index: 1
+ tags:
+ - boot_sources_index
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml
index 22afb949a..f6f3ac2b1 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot.yml
@@ -1,69 +1,69 @@
---
-- hosts: idrac
- connection: local
- name: Configure the boot order settings
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the boot order settings
+ hosts: idrac
+ gather_facts: false
tasks:
+ - name: Configure the system boot options settings.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ - boot_option_reference: NIC.PxeDevice.2-1
+ enabled: true
+ tags: boot-option
+ delegate_to: localhost
- - name: Configure the system boot options settings.
- idrac_boot:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_options:
- - display_name: Hard drive C
- enabled: true
- - boot_option_reference: NIC.PxeDevice.2-1
- enabled: true
- tags: boot-option
-
- - name: Configure the boot order settings.
- idrac_boot:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_order:
- - Boot0001
- - Boot0002
- - Boot0004
- - Boot0003
- tags: boot-order
+ - name: Configure the boot order settings.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_order:
+ - Boot0001
+ - Boot0002
+ - Boot0004
+ - Boot0003
+ tags: boot-order
+ delegate_to: localhost
- - name: Configure the boot source override mode.
- idrac_boot:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_source_override_mode: legacy
- boot_source_override_target: cd
- boot_source_override_enabled: once
- tags: boot-mode
+ - name: Configure the boot source override mode.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_source_override_mode: legacy
+ boot_source_override_target: cd
+ boot_source_override_enabled: once
+ tags: boot-mode
+ delegate_to: localhost
- - name: Configure the UEFI target settings.
- idrac_boot:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_source_override_mode: uefi
- boot_source_override_target: uefi_target
- uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)"
- tags: uefi-target
+ - name: Configure the UEFI target settings.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_source_override_mode: uefi
+ boot_source_override_target: uefi_target
+ uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)"
+ tags: uefi-target
+ delegate_to: localhost
- - name: Configure the boot source override mode as pxe.
- idrac_boot:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- boot_source_override_mode: legacy
- boot_source_override_target: pxe
- boot_source_override_enabled: continuous
- tags: pxe-boot-mode
+ - name: Configure the boot source override mode as pxe.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ boot_source_override_mode: legacy
+ boot_source_override_target: pxe
+ boot_source_override_enabled: continuous
+ tags: pxe-boot-mode
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml
index aa6d43ed5..fcf13d9be 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_boot_virtual_media_workflow.yml
@@ -1,7 +1,6 @@
---
-- hosts: idrac
- connection: local
- name: Dell OpenManage Ansible iDRAC boot operations.
+- name: Dell OpenManage Ansible iDRAC boot operations.
+ hosts: idrac
vars:
ansible_python_interpreter: /usr/bin/python3
virtual_media_uri: "/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia"
@@ -9,48 +8,48 @@
nfs_dir: "192.168.0.1:/nfsshare"
iso_file: "boot_image.iso"
ca_path: "/path/to/ca_cert.pem"
- boot_source_mode: "legacy" #other options are UEFI
+ boot_source_mode: "legacy" # other options are UEFI
- gather_facts: False
+ gather_facts: false
tasks:
+ # Mount the ISO image as a virtual media CD.
+ - name: "Insert virtual media"
+ ansible.builtin.uri:
+ url: "https://{{ idrac_ip }}{{ virtual_media_uri }}"
+ user: "{{ idrac_user }}"
+ password: "{{ idrac_password }}"
+ method: "POST"
+ body_format: json
+ body:
+ Image: "{{ file_location }}"
+ Inserted: true
+ WriteProtected: true
+ use_proxy: true
+ status_code: 204
+ return_content: false
+ ca_path: "{{ ca_path }}"
+ force_basic_auth: true
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ tags:
+ - virtual_media
+ - vm_boot
+ delegate_to: localhost
-# Mount the ISO image as a virtual media CD.
- - name: "Insert virtual media"
- ansible.builtin.uri:
- url: "https://{{ idrac_ip }}{{ virtual_media_uri }}"
- user: "{{ idrac_user }}"
- password: "{{ idrac_password }}"
- method: "POST"
- body_format: json
- body:
- Image: "{{ file_location }}"
- Inserted: true
- WriteProtected: true
- use_proxy: yes
- status_code: 204
- return_content: no
- ca_path: "{{ ca_path }}"
- force_basic_auth: yes
- headers:
- Content-Type: "application/json"
- Accept: "application/json"
- tags:
- - virtual_media
- - vm_boot
-
-# One-time boot with virtual media.
- - name: Boot once from mounted CD.
- dellemc.openmanage.idrac_boot:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "{{ ca_path }}"
- boot_source_override_mode: "{{ boot_source_mode }}"
- boot_source_override_target: cd
- boot_source_override_enabled: once
- tags:
- - boot_cd
- - vm_boot
-
+ # One-time boot with virtual media.
+ - name: Boot once from mounted CD.
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "{{ ca_path }}"
+ boot_source_override_mode: "{{ boot_source_mode }}"
+ boot_source_override_target: cd
+ boot_source_override_enabled: once
+ tags:
+ - boot_cd
+ - vm_boot
+ delegate_to: localhost
# Eject the virtual media after boot.
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml
index 801f12ed5..c32ac21bd 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_certificates.yml
@@ -1,21 +1,17 @@
---
-- hosts: idrac
- connection: local
- name: Dell OpenManage Ansible iDRAC Certificates management.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible iDRAC Certificates management.
+ hosts: idrac
+ gather_facts: false
tasks:
- name: Generate https signing request
- idrac_certificates:
+ dellemc.openmanage.idrac_certificates:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
- command: "generate_csr"
- certificate_type: "HTTPS"
+ command: "generate_csr"
+ certificate_type: "HTTPS"
certificate_path: "/home/omam/mycert_dir"
cert_params:
common_name: "sample.domain.com"
@@ -27,43 +23,72 @@
organization_name: "OrgName"
subject_alt_name:
- 192.198.2.1
+ delegate_to: localhost
- name: Import a SSL certificate.
- idrac_certificates:
+ dellemc.openmanage.idrac_certificates:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
command: "import"
certificate_type: "HTTPS"
certificate_path: "/path/to/cert.pem"
+ delegate_to: localhost
+
+ - name: Import an HTTPS certificate along with its private key.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+ ssl_key: "/path/to/private_key.pem"
+ delegate_to: localhost
- name: Export a SSL certificate.
- idrac_certificates:
+ dellemc.openmanage.idrac_certificates:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
command: "export"
certificate_type: "HTTPS"
certificate_path: "/home/omam/mycert_dir"
+ delegate_to: localhost
- name: Import a CSC certificate.
- idrac_certificates:
+ dellemc.openmanage.idrac_certificates:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
command: "import"
certificate_type: "CSC"
- certificate_file: "/path/to/cert.pem"
+ certificate_path: "/path/to/cert.pem"
+ delegate_to: localhost
+
+ - name: Import a custom certificate with a passphrase.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "/path/to/idrac_cert.p12"
+ passphrase: "cert_passphrase"
+ reset: false
+ delegate_to: localhost
- name: Export a Client trust certificate.
- idrac_certificates:
+ dellemc.openmanage.idrac_certificates:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
command: "export"
certificate_type: "CLIENT_TRUST_CERTIFICATE"
- certificate_path: "/home/omam/mycert_dir" \ No newline at end of file
+ certificate_path: "/home/omam/mycert_dir"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml
index c1a2c891e..dbb3e315f 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware.yml
@@ -1,69 +1,88 @@
---
-- hosts: idrac
- connection: local
- name: Update Firmware Inventory
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Update Firmware Inventory
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Update firmware from repository on a HTTP/HTTP/FTP repository
- idrac_firmware:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "https://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+  - name: Update firmware from repository on an HTTP/HTTPS/FTP repository
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ delegate_to: localhost
+
+  - name: Update firmware from repository on an HTTP/HTTPS/FTP repository via proxy
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: ParametersProxy
+ proxy_server: 192.168.1.10
+ proxy_type: HTTP
+ proxy_port: 80
+ proxy_uname: "proxy_user"
+ proxy_passwd: "proxy_pwd"
+ delegate_to: localhost
- - name: Update firmware from repository on a internally hosted HTTP repository.
- idrac_firmware:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password}}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "http://192.168.0.1/path_to_folder/"
- reboot: True
- job_wait: True
- apply_update: True
- catalog_file_name: "Catalog.xml"
+  - name: Update firmware from repository on an internally hosted HTTP repository.
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "http://192.168.0.1/path_to_folder/"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+ delegate_to: localhost
- - name: Update firmware from repository on a NFS Share
- idrac_firmware:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password}}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.1:/complete_share_path"
- reboot: True
- job_wait: True
- apply_update: True
- catalog_file_name: "Catalog.xml"
+  - name: Update firmware from repository on an NFS share
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.1:/complete_share_path"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+ delegate_to: localhost
- - name: Update firmware from repository on a CIFS Share
- idrac_firmware:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password}}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "\\\\192.168.0.1\\share_path"
- share_user: "{{ share_user }}"
- share_password: "{{ share_password }}"
- share_mnt: "/mnt/cifs_share"
- reboot: False
- job_wait: True
- catalog_file_name: "Catalog.xml"
+  - name: Update firmware from repository on a CIFS share
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.1\\share_path"
+ share_user: "{{ share_user }}"
+ share_password: "{{ share_password }}"
+ share_mnt: "/mnt/cifs_share"
+ reboot: false
+ job_wait: true
+ catalog_file_name: "Catalog.xml"
+ delegate_to: localhost
- - name: Firmware compliance report using HTTPS repository.
- idrac_firmare:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "https://downloads.dell.com"
- reboot: False
- job_wait: True
- apply_update: False
+ - name: Firmware compliance report using HTTPS repository.
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: false
+ job_wait: true
+ apply_update: false
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml
index aaca53a58..b19baec42 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_firmware_info.yml
@@ -1,16 +1,13 @@
---
-- hosts: idrac
- connection: local
- name: Get Installed Firmware Inventory
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Get Installed Firmware Inventory
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Get Installed Firmware Inventory.
- idrac_firmware_info:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
+ - name: Get Installed Firmware Inventory.
+ dellemc.openmanage.idrac_firmware_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_license.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_license.yml
new file mode 100644
index 000000000..2304fa5b6
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_license.yml
@@ -0,0 +1,183 @@
+---
+- name: Dell OpenManage Ansible iDRAC License Management.
+ hosts: idrac
+ gather_facts: false
+
+ tasks:
+ - name: Export a license from iDRAC to local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "local"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ delegate_to: localhost
+
+ - name: Export a license from iDRAC to NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "nfs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ delegate_to: localhost
+
+ - name: Export a license from iDRAC to CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "cifs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ workgroup: "workgroup"
+ delegate_to: localhost
+
+ - name: Export a license from iDRAC to HTTP share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "http"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_type: socks
+ proxy_server: "192.168.0.2"
+ proxy_port: 1080
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+ delegate_to: localhost
+
+ - name: Export a license from iDRAC to HTTPS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "https"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ignore_certificate_warning: "on"
+ delegate_to: localhost
+
+ - name: Import a license to iDRAC from local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: local
+ share_name: "/path/to/share"
+ delegate_to: localhost
+
+ - name: Import a license to iDRAC from NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: nfs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ delegate_to: localhost
+
+ - name: Import a license to iDRAC from CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: cifs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+ delegate_to: localhost
+
+ - name: Import a license to iDRAC from HTTP share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: http
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+ delegate_to: localhost
+
+ - name: Import a license to iDRAC from HTTPS share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: https
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_server: "192.168.0.2"
+ proxy_port: 808
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+ delegate_to: localhost
+
+ - name: Delete a License from iDRAC
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENCE_123"
+ delete: true
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml
index 9f0f61deb..a1a95f19e 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_job_status_info.yml
@@ -1,17 +1,14 @@
---
-- hosts: idrac
- connection: local
- name: Get LC job Status
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Get LC job Status
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Get LC job Status
- idrac_lifecycle_controller_job_status_info:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "JID_844222910040"
+ - name: Get LC job Status
+ dellemc.openmanage.idrac_lifecycle_controller_job_status_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_844222910040"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml
index 495e84a66..ca78d8f91 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_jobs.yml
@@ -1,28 +1,26 @@
---
-- hosts: idrac
- connection: local
- name: Delete LC job
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Delete LC job
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Delete LC job Queue
- idrac_lifecycle_controller_jobs:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- tags:
- - delete_all_jobs
+ - name: Delete LC job Queue
+ dellemc.openmanage.idrac_lifecycle_controller_jobs:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ tags:
+ - delete_all_jobs
+ delegate_to: localhost
- - name: Delete a LC job
- idrac_lifecycle_controller_jobs:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "JID_123456789"
- tags:
- - delete_job \ No newline at end of file
+ - name: Delete a LC job
+ dellemc.openmanage.idrac_lifecycle_controller_jobs:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_123456789"
+ tags:
+ - delete_job
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml
index 99c9d0cef..8cda54fe6 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_logs.yml
@@ -1,18 +1,15 @@
---
-- hosts: idrac
- connection: local
- name: Export Lifecycle Controller Logs
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Export Lifecycle Controller Logs
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Export Lifecycle Controller Logs
- idrac_lifecycle_controller_logs:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "{{ playbook_dir }}"
- job_wait: "True" \ No newline at end of file
+ - name: Export Lifecycle Controller Logs
+ dellemc.openmanage.idrac_lifecycle_controller_logs:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "{{ playbook_dir }}"
+ job_wait: true
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml
index 1798ab99e..a68a37187 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_lifecycle_controller_status_info.yml
@@ -1,16 +1,13 @@
---
-- hosts: idrac
- connection: local
- name: Check LC Ready Status
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Check LC Ready Status
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Check LC Ready Status
- idrac_lifecycle_controller_status_info:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
+ - name: Check LC Ready Status
+ dellemc.openmanage.idrac_lifecycle_controller_status_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_network_attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_network_attributes.yml
new file mode 100644
index 000000000..006172964
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_network_attributes.yml
@@ -0,0 +1,139 @@
+---
+- name: Dell OpenManage Ansible iDRAC Network Attributes.
+ hosts: idrac
+ gather_facts: false
+
+ tasks:
+ - name: Configure OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: "NIC.Integrated.1"
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ delegate_to: localhost
+
+ - name: Configure OEM network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: OnReset
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ delegate_to: localhost
+
+    - name: Configure OEM network attributes to apply at maintenance window
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ delegate_to: localhost
+
+ - name: Clearing the OEM pending attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+ delegate_to: localhost
+
+ - name: Configure OEM network attributes and wait for the job
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ LnkSpeed: "10MbpsHalf"
+ WakeOnLan: "Enabled"
+ VLanMode: "Enabled"
+ job_wait: true
+ job_wait_timeout: 2000
+ delegate_to: localhost
+
+ - name: Configure redfish network attributes to update fiber channel on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: OnReset
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+ delegate_to: localhost
+
+ - name: Configure redfish network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+ apply_time: OnReset
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ delegate_to: localhost
+
+    - name: Configure redfish network attributes of iscsi to apply at maintenance window start
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ iSCSIBoot:
+ InitiatorIPAddress: 1.0.0.1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ delegate_to: localhost
+
+ - name: Configure Redfish network attributes of VLAN to apply at maintenance window start
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ network_adapter_id: NIC.Integrated.1
+ network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: false
+ VLANId: 1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml
index 3ad52adc1..fa5d3ef04 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_os_deployment.yml
@@ -1,22 +1,18 @@
----
-- hosts: idrac
- connection: local
+---
+- name: Booting to Network Operating System image
+ hosts: idrac
gather_facts: false
- name: Booting to Network Operating System image
- collections:
- - dellemc.openmanage
-
- tasks:
+ tasks:
- name: "Booting to Network Operating System image"
- idrac_os_deployment:
+ dellemc.openmanage.idrac_os_deployment:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
+ idrac_password: "{{ idrac_password }}"
ca_path: "/path/to/ca_cert.pem"
share_name: "{{ playbook_dir }}"
- iso_image: "uninterrupted_os_installation_image.iso."
+ iso_image: "uninterrupted_os_installation_image.iso."
expose_duration: 180
-
+ delegate_to: localhost
tags:
- - network_iso \ No newline at end of file
+ - network_iso
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml
index 2cb447883..bcf96ddd9 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller.yml
@@ -1,216 +1,290 @@
---
-- hosts: idrac
- connection: local
- name: Dell OpenManage Ansible iDRAC Redfish Storage Controller service.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible iDRAC Redfish Storage Controller service.
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Assign dedicated hot spare.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- volume_id:
- - "Disk.Virtual.0:RAID.Slot.1-1"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - assign_dedicated_hot_spare
-
- - name: Assign global hot spare.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - assign_global_hot_spare
-
- - name: Unassign hot spare
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- command: UnassignSpare
- tags:
- - un-assign-hot-spare
-
- - name: Set controller encryption key.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "SetControllerKey"
- controller_id: "RAID.Slot.1-1"
- key: "PassPhrase@123"
- key_id: "mykeyid123"
- tags:
- - set_controller_key
-
- - name: Rekey in LKM mode.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ReKey"
- controller_id: "RAID.Slot.1-1"
- key: "NewPassPhrase@123"
- key_id: "newkeyid123"
- old_key: "OldPassPhrase@123"
- tags:
- - rekey_lkm
-
- - name: Rekey in SEKM mode.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ReKey"
- controller_id: "RAID.Slot.1-1"
- mode: "SEKM"
- tags:
- - rekey_sekm
-
- - name: Remove controller key.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "RemoveControllerKey"
- controller_id: "RAID.Slot.1-1"
- tags:
- - remove_controller_key
-
- - name: Reset controller configuration.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ResetConfig"
- controller_id: "RAID.Slot.1-1"
- tags:
- - reset_config
-
- - name: Enable controller encryption
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "EnableControllerEncryption"
- controller_id: "RAID.Slot.1-1"
- mode: "LKM"
- key: "your_Key@123"
- key_id: "your_Keyid@123"
- tags:
- - enable-encrypt
-
- - name: Blink physical disk.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "BlinkTarget"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - blink-target
-
- - name: Blink virtual drive.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "BlinkTarget"
- volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
- tags:
- - blink-volume
-
- - name: Unblink physical disk.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "UnBlinkTarget"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - unblink-target
-
- - name: Unblink virtual drive.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "UnBlinkTarget"
- volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
- tags:
- - unblink-drive
-
- - name: Convert physical disk to RAID
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ConvertToRAID"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - convert-raid
-
- - name: Convert physical disk to non-RAID
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ConvertToNonRAID"
- target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - convert-non-raid
-
- - name: Change physical disk state to online.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ChangePDStateToOnline"
- target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - pd-state-online
-
- - name: Change physical disk state to offline.
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "ChangePDStateToOnline"
- target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
- tags:
- - pd-state-offline
-
- - name: Lock virtual drive
- idrac_redfish_storage_controller:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "LockVirtualDisk"
- volume_id: "Disk.Virtual.0:RAID.SL.3-1"
- tags:
- - lock
+ - name: Assign dedicated hot spare.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ command: "AssignSpare"
+ ca_path: "/path/to/ca_cert.pem"
+ volume_id:
+ - "Disk.Virtual.0:RAID.Slot.1-1"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - assign_dedicated_hot_spare
+ delegate_to: localhost
+
+ - name: Assign global hot spare.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ command: "AssignSpare"
+ ca_path: "/path/to/ca_cert.pem"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - assign_global_hot_spare
+ delegate_to: localhost
+
+ - name: Unassign hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ command: UnassignSpare
+ tags:
+ - un-assign-hot-spare
+ delegate_to: localhost
+
+ - name: Set controller encryption key.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "SetControllerKey"
+ controller_id: "RAID.Slot.1-1"
+ key: "PassPhrase@123"
+ key_id: "mykeyid123"
+ tags:
+ - set_controller_key
+ delegate_to: localhost
+
+ - name: Rekey in LKM mode.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ReKey"
+ controller_id: "RAID.Slot.1-1"
+ key: "NewPassPhrase@123"
+ key_id: "newkeyid123"
+ old_key: "OldPassPhrase@123"
+ tags:
+ - rekey_lkm
+ delegate_to: localhost
+
+ - name: Rekey in SEKM mode.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ReKey"
+ controller_id: "RAID.Slot.1-1"
+ mode: "SEKM"
+ tags:
+ - rekey_sekm
+ delegate_to: localhost
+
+ - name: Remove controller key.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "RemoveControllerKey"
+ controller_id: "RAID.Slot.1-1"
+ tags:
+ - remove_controller_key
+ delegate_to: localhost
+
+ - name: Reset controller configuration.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ResetConfig"
+ controller_id: "RAID.Slot.1-1"
+ tags:
+ - reset_config
+ delegate_to: localhost
+
+ - name: Enable controller encryption
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "EnableControllerEncryption"
+ controller_id: "RAID.Slot.1-1"
+ mode: "LKM"
+ key: "your_Key@123"
+ key_id: "your_Keyid@123"
+ tags:
+ - enable-encrypt
+ delegate_to: localhost
+
+ - name: Blink physical disk.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "BlinkTarget"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - blink-target
+ delegate_to: localhost
+
+ - name: Blink virtual drive.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "BlinkTarget"
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+ tags:
+ - blink-volume
+ delegate_to: localhost
+
+ - name: Unblink physical disk.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "UnBlinkTarget"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - unblink-target
+ delegate_to: localhost
+
+ - name: Unblink virtual drive.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "UnBlinkTarget"
+ volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
+ tags:
+ - unblink-drive
+ delegate_to: localhost
+
+ - name: Convert physical disk to RAID
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ConvertToRAID"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - convert-raid
+ delegate_to: localhost
+
+ - name: Convert physical disk to non-RAID
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ConvertToNonRAID"
+ target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - convert-non-raid
+ delegate_to: localhost
+
+ - name: Change physical disk state to online.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ChangePDStateToOnline"
+ target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - pd-state-online
+ delegate_to: localhost
+
+ - name: Change physical disk state to offline.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "ChangePDStateToOnline"
+ target: "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
+ tags:
+ - pd-state-offline
+ delegate_to: localhost
+
+ - name: Lock virtual drive
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "LockVirtualDisk"
+ volume_id: "Disk.Virtual.0:RAID.SL.3-1"
+ tags:
+ - lock
+ delegate_to: localhost
+
+ - name: Online Capacity Expansion of a volume using target
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ target:
+ - "Disk.Bay.3:Enclosure.Internal.0-0:RAID.Integrated.1-1"
+ tags:
+ - oce_target
+ delegate_to: localhost
+
+ - name: Online Capacity Expansion of a volume using size
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ size: 363786
+ tags:
+ - oce_size
+ delegate_to: localhost
+
+ - name: Set controller attributes.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ ControllerMode: "HBA"
+ apply_time: "OnReset"
+ tags:
+ - controller
+ delegate_to: localhost
+
+ - name: Configure Controller attributes at Maintenance window
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ CheckConsistencyMode: Normal
+ CopybackMode: "Off"
+ LoadBalanceMode: Disabled
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 1200
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml
index d61112f0f..1d8d2489e 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_redfish_storage_controller_job_tracking.yml
@@ -1,51 +1,51 @@
---
-- hosts: idrac
- connection: local
- name: iDRAC Redfish storage controller service with job tracking.
- gather_facts: False
+- name: Redfish storage controller service with job tracking for iDRAC.
+ hosts: idrac
+ gather_facts: false
vars:
- retries_count: 100
- polling_interval: 10
- all_ctrl_task_tags:
- - assign_dedicated_hot_spare
- - assign_global_hot_spare
- - set_controller_key
- - rekey_lkm
- - rekey_sekm
- - remove_controller_key
- - reset_config
+ retries_count: 100
+ polling_interval: 10
+ all_ctrl_task_tags:
+ - assign_dedicated_hot_spare
+ - assign_global_hot_spare
+ - set_controller_key
+ - rekey_lkm
+ - rekey_sekm
+ - remove_controller_key
+ - reset_config
- collections:
- - dellemc.openmanage
-
-# Use a single tag to run each task with job tracker
+ # Use a single tag to run each task with the job tracker
tasks:
- name: Assign dedicated hot spare.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
+ command: "AssignSpare"
volume_id:
- "Disk.Virtual.0:RAID.Slot.1-1"
target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
register: result
tags:
- assign_dedicated_hot_spare
+ delegate_to: localhost
- name: Assign global hot spare.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
+ command: "AssignSpare"
target: "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
register: result
tags:
- assign_global_hot_spare
+ delegate_to: localhost
- name: Set controller encryption key.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -57,9 +57,10 @@
register: result
tags:
- set_controller_key
+ delegate_to: localhost
- name: Rekey in LKM mode.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -72,9 +73,10 @@
register: result
tags:
- rekey_lkm
+ delegate_to: localhost
- name: Rekey in SEKM mode.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -85,9 +87,10 @@
register: result
tags:
- rekey_sekm
+ delegate_to: localhost
- name: Remove controller key.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -97,9 +100,10 @@
register: result
tags:
- remove_controller_key
+ delegate_to: localhost
- name: Reset controller configuration.
- idrac_redfish_storage_controller:
+ dellemc.openmanage.idrac_redfish_storage_controller:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -109,18 +113,19 @@
register: result
tags:
- reset_config
+ delegate_to: localhost
- - name: "iDRAC Job tracking"
- uri:
+ - name: Track the iDRAC job
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -129,10 +134,12 @@
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
tags: "{{ all_ctrl_task_tags }}"
+ delegate_to: localhost
- - name: "iDRAC job result."
- set_fact:
+ - name: Capture the iDRAC job result
+ ansible.builtin.set_fact:
job_details: "{{ result.json }}"
failed_when: result.json.Message == "Failed"
changed_when: result.json.Message != "Failed"
- tags: "{{ all_ctrl_task_tags }}" \ No newline at end of file
+ tags: "{{ all_ctrl_task_tags }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml
index 209befd22..69e4ac36f 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset.yml
@@ -1,19 +1,15 @@
---
-- hosts: idrac
- connection: local
- name: Reset iDRAC
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Reset iDRAC
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Reset iDRAC
- idrac_reset:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
-
- tags:
- - idrac_reset \ No newline at end of file
+ - name: Reset iDRAC
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ tags:
+ - idrac_reset
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml
index 534b22274..8c266c184 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_reset_result_tracking.yml
@@ -1,39 +1,38 @@
---
-- hosts: idrac
- connection: local
- name: Reset iDRAC
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Reset iDRAC
+ hosts: idrac
+ gather_facts: false
tasks:
- name: Reset iDRAC
- idrac_reset:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
+ dellemc.openmanage.idrac_reset:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
register: result
failed_when: result is changed
+ delegate_to: localhost
- name: Wait for port 443 to become open on the host
- wait_for:
- host: "{{idrac_ip}}"
- port: 443
- delay: 30
- connect_timeout: 5
- timeout: 500
+ ansible.builtin.wait_for:
+ host: "{{ idrac_ip }}"
+ port: 443
+ delay: 30
+ connect_timeout: 5
+ timeout: 500
register: result
failed_when: result.elapsed < 20
+ delegate_to: localhost
- name: Get LC status.
- idrac_lifecycle_controller_status_info:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
+ dellemc.openmanage.idrac_lifecycle_controller_status_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
register: result
until: result.msg.LCStatus == 'Ready' or result.msg.LCReady is true
retries: 30
delay: 10
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml
index 0d61f54c7..80136f456 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_server_config_profile.yml
@@ -1,220 +1,309 @@
---
-- hosts: idrac
- connection: local
- name: Server Configuration Profile
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Server Configuration Profile
+ hosts: idrac
+ gather_facts: false
tasks:
+ - name: Export SCP with IDRAC components in JSON format to a local path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/scp_folder"
+ scp_components:
+ - IDRAC
+ scp_file: example_file
+ export_format: JSON
+ export_use: Clone
+ job_wait: true
+ tags: export-scp-local
+ delegate_to: localhost
+
+ - name: Import SCP with IDRAC components in JSON format from a local path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/scp_folder"
+ command: import
+ scp_components:
+ - IDRAC
+ scp_file: example_file.json
+ shutdown_type: Graceful
+ end_host_power_state: "On"
+ job_wait: false
+ tags: import-scp-local
+ delegate_to: localhost
+
+ - name: Export SCP with BIOS components in XML format to an NFS share path with auto-generated file name
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ scp_components:
+ - BIOS
+ export_format: XML
+ export_use: Default
+ job_wait: true
+ tags: export-scp-nfs
+ delegate_to: localhost
+
+ - name: Import SCP with BIOS components in XML format from an NFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ command: import
+ scp_components:
+ - BIOS
+ scp_file: 192.168.0.1_20210618_162856.xml
+ shutdown_type: NoReboot
+ end_host_power_state: "Off"
+ job_wait: false
+ tags: import-scp-nfs
+ delegate_to: localhost
+
+ - name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: share_username@domain
+ share_password: share_password
+ scp_file: example_file.xml
+ scp_components:
+ - RAID
+ export_format: XML
+ export_use: Default
+ job_wait: true
+ tags: export-scp-cifs
+ delegate_to: localhost
+
+ - name: Import SCP with RAID components in XML format from a CIFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: share_username
+ share_password: share_password
+ command: import
+ scp_components:
+ - RAID
+ scp_file: example_file.xml
+ shutdown_type: Forced
+ end_host_power_state: "On"
+ job_wait: true
+ tags: import-scp-cifs
+ delegate_to: localhost
+
+ - name: Export SCP with ALL components in JSON format to an HTTP share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "http://192.168.0.3/share"
+ share_user: share_username
+ share_password: share_password
+ scp_file: example_file.json
+ scp_components:
+ - ALL
+ export_format: JSON
+ job_wait: false
+ tags: export-scp-http
+ delegate_to: localhost
+
+ - name: Import SCP with ALL components in JSON format from an HTTP share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ share_name: "http://192.168.0.3/share"
+ share_user: share_username
+ share_password: share_password
+ scp_file: example_file.json
+ shutdown_type: Graceful
+ end_host_power_state: "On"
+ job_wait: true
+ tags: import-scp-http
+ delegate_to: localhost
+
+ - name: Export SCP with ALL components in XML format to an HTTPS share path without SCP file name
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://192.168.0.4/share"
+ share_user: share_username
+ share_password: share_password
+ scp_components:
+ - ALL
+ export_format: XML
+ export_use: Replace
+ job_wait: true
+ tags: export-scp-https
+ delegate_to: localhost
+
+ - name: Import SCP with ALL components in XML format from an HTTPS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ share_name: "https://192.168.0.4/share"
+ share_user: share_username
+ share_password: share_password
+ scp_file: 192.168.0.1_20160618_164647.xml
+ shutdown_type: Graceful
+ end_host_power_state: "On"
+ job_wait: false
+ tags: import-scp-https
+ delegate_to: localhost
+
+ - name: Preview SCP with ALL components in XML format from a CIFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "\\\\192.168.0.2\\share"
+ share_user: share_username
+ share_password: share_password
+ command: preview
+ scp_components:
+ - ALL
+ scp_file: example_file.xml
+ job_wait: true
+ tags: preview-scp-cifs
+ delegate_to: localhost
+
+ - name: Preview SCP with IDRAC components in XML format from an NFS share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "192.168.0.2:/share"
+ command: preview
+ scp_components:
+ - IDRAC
+ scp_file: example_file.xml
+ job_wait: true
+ tags: preview-scp-nfs
+ delegate_to: localhost
+
+ - name: Preview SCP with ALL components in XML format from an HTTP share path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "http://192.168.0.1/http-share"
+ share_user: share_username
+ share_password: share_password
+ command: preview
+ scp_components:
+ - ALL
+ scp_file: example_file.xml
+ job_wait: true
+ tags: preview-scp-http
+ delegate_to: localhost
+
+ - name: Preview SCP with IDRAC components in JSON format from a local path
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "/scp_folder"
+ command: preview
+ scp_components:
+ - IDRAC
+ scp_file: example_file.json
+ job_wait: false
+ tags: preview-scp-local
+ delegate_to: localhost
+
+ - name: Import SCP with IDRAC components in XML format from the XML content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+ tags: import-buffer-xml
+ delegate_to: localhost
+
+ - name: Export SCP with ALL components in XML format using HTTP proxy.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ scp_components:
+ - ALL
+ share_name: "https://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.5
+ proxy_port: 8080
+ proxy_username: proxy_username
+ proxy_password: proxy_password
+ proxy_type: http
+ include_in_export: passwordhashvalues
+ job_wait: true
+ tags: export-http-proxy
+ delegate_to: localhost
+
+ - name: Import SCP with IDRAC and BIOS components in XML format using SOCKS4 proxy
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ - BIOS
+ share_name: "https://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.6
+ proxy_port: 8080
+ proxy_type: socks4
+ scp_file: filename.xml
+ job_wait: true
+ tags: import-scp-proxy
+ delegate_to: localhost
- - name: Export SCP with IDRAC components in JSON format to a local path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "/scp_folder"
- scp_components: IDRAC
- scp_file: example_file
- export_format: JSON
- export_use: Clone
- job_wait: True
- tags: export-scp-local
-
- - name: Import SCP with IDRAC components in JSON format from a local path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "/scp_folder"
- command: import
- scp_components: "IDRAC"
- scp_file: example_file.json
- shutdown_type: Graceful
- end_host_power_state: "On"
- job_wait: False
- tags: import-scp-local
-
- - name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- scp_components: "BIOS"
- export_format: XML
- export_use: Default
- job_wait: True
- tags: export-scp-nfs
-
- - name: Import SCP with BIOS components in XML format from a NFS share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- command: import
- scp_components: "BIOS"
- scp_file: 192.168.0.1_20210618_162856.xml
- shutdown_type: NoReboot
- end_host_power_state: "Off"
- job_wait: False
- tags: import-scp-nfs
-
- - name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "\\\\192.168.0.2\\share"
- share_user: share_username@domain
- share_password: share_password
- share_mnt: /mnt/cifs
- scp_file: example_file.xml
- scp_components: "RAID"
- export_format: XML
- export_use: Default
- job_wait: True
- tags: export-scp-cifs
-
- - name: Import SCP with RAID components in XML format from a CIFS share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "\\\\192.168.0.2\\share"
- share_user: share_username
- share_password: share_password
- share_mnt: /mnt/cifs
- command: import
- scp_components: "RAID"
- scp_file: example_file.xml
- shutdown_type: Forced
- end_host_power_state: "On"
- job_wait: True
- tags: import-scp-cifs
-
- - name: Export SCP with ALL components in JSON format to a HTTP share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "http://192.168.0.3/share"
- share_user: share_username
- share_password: share_password
- scp_file: example_file.json
- scp_components: ALL
- export_format: JSON
- job_wait: False
- tags: export-scp-http
-
- - name: Import SCP with ALL components in JSON format from a HTTP share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: import
- share_name: "http://192.168.0.3/share"
- share_user: share_username
- share_password: share_password
- scp_file: example_file.json
- shutdown_type: Graceful
- end_host_power_state: "On"
- job_wait: True
- tags: import-scp-http
-
- - name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "https://192.168.0.4/share"
- share_user: share_username
- share_password: share_password
- scp_components: ALL
- export_format: XML
- export_use: Replace
- job_wait: True
- tags: export-scp-https
-
- - name: Import SCP with ALL components in XML format from a HTTPS share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: import
- share_name: "https://192.168.0.4/share"
- share_user: share_username
- share_password: share_password
- scp_file: 192.168.0.1_20160618_164647.xml
- shutdown_type: Graceful
- end_host_power_state: "On"
- job_wait: False
- tags: import-scp-https
-
- - name: Preview SCP with ALL components in XML format from a CIFS share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "\\\\192.168.0.2\\share"
- share_user: share_username
- share_password: share_password
- command: preview
- scp_components: "ALL"
- scp_file: example_file.xml
- job_wait: True
- tags: preview-scp-cifs
-
- - name: Preview SCP with ALL components in JSON format from a NFS share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- command: preview
- scp_components: "IDRAC"
- scp_file: example_file.xml
- job_wait: True
- tags: preview-scp-nfs
-
- - name: Preview SCP with ALL components in XML format from a HTTP share path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "http://192.168.0.1/http-share"
- share_user: share_username
- share_password: share_password
- command: preview
- scp_components: "ALL"
- scp_file: example_file.xml
- job_wait: True
- tags: preview-scp-http
-
- - name: Preview SCP with ALL components in XML format from a local path
- dellemc.openmanage.idrac_server_config_profile:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "/scp_folder"
- command: preview
- scp_components: "IDRAC"
- scp_file: example_file.json
- job_wait: False
- tags: import-scp-local
+ - name: Import SCP with IDRAC components in JSON format from the JSON content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "{\"SystemConfiguration\": {\"Components\": [{\"FQDD\": \"iDRAC.Embedded.1\",\"Attributes\":
+ [{\"Name\": \"SNMP.1#AgentCommunity\",\"Value\": \"public1\"}]}]}}"
+ tags: import-scp-json
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml
index 9820b6b69..08e753af3 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_syslog.yml
@@ -1,18 +1,15 @@
---
-- hosts: idrac
- connection: local
- name: Configure iDRAC syslog attributes
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure iDRAC syslog attributes
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Configure iDRAC syslog attributes
- idrac_syslog:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- share_name: "{{ playbook_dir }}"
- syslog: "Disabled" \ No newline at end of file
+ - name: Configure iDRAC syslog attributes
+ dellemc.openmanage.idrac_syslog:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "{{ playbook_dir }}"
+ syslog: "Disabled"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml
index b2f1e1ecc..f4eafcf42 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_system_info.yml
@@ -1,16 +1,13 @@
---
-- hosts: idrac
- connection: local
- name: Get system inventory
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Get system inventory
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Get system inventory.
- idrac_system_info:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
+ - name: Get system inventory.
+ dellemc.openmanage.idrac_system_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml
index ab011e13a..847446f0d 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user.yml
@@ -1,71 +1,71 @@
---
-- hosts: idrac
- connection: local
- name: Configure the iDRAC users attributes
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the iDRAC user attributes
+ hosts: idrac
+ gather_facts: false
tasks:
- - name: Configure the create iDRAC users attributes
- idrac_user:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "present"
- user_name: "user_name"
- user_password: "user_password"
- privilege: "Administrator"
- ipmi_lan_privilege: "User"
- enable: "true"
- sol_enable: "true"
- protocol_enable: "true"
- authentication_protocol: "MD5"
- privacy_protocol: "DES"
- tags:
- - create-user
+ - name: Create an iDRAC user with the specified attributes
+ dellemc.openmanage.idrac_user:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ user_name: "user_name"
+ user_password: "user_password"
+ privilege: "Administrator"
+ ipmi_lan_privilege: "User"
+ enable: true
+ sol_enable: true
+ protocol_enable: true
+ authentication_protocol: "MD5"
+ privacy_protocol: "DES"
+ tags:
+ - create-user
+ delegate_to: localhost
- - name: Configure the modify iDRAC users attributes
- idrac_user:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- action: "present"
- user_name: "user_name"
- new_user_name: "new_user_name"
- privilege: "Administrator"
- ipmi_lan_privilege: "User"
- enable: "true"
- sol_enable: "true"
- protocol_enable: "true"
- authentication_protocol: "MD5"
- privacy_protocol: "DES"
- tags:
- - modify-user
+ - name: Modify the attributes of an existing iDRAC user
+ dellemc.openmanage.idrac_user:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ user_name: "user_name"
+ new_user_name: "new_user_name"
+ privilege: "Administrator"
+ ipmi_lan_privilege: "User"
+ enable: true
+ sol_enable: true
+ protocol_enable: true
+ authentication_protocol: "MD5"
+ privacy_protocol: "DES"
+ tags:
+ - modify-user
+ delegate_to: localhost
- - name: Configure the modify iDRAC username and password attributes.
- idrac_user:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- action: "present"
- user_name: "user_name"
- new_user_name: "new_user_name"
- user_password: "user_password"
- tags:
- - modify-username
+ - name: Modify the iDRAC username and password.
+ dellemc.openmanage.idrac_user:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ user_name: "user_name"
+ new_user_name: "new_user_name"
+ user_password: "user_password"
+ tags:
+ - modify-username
+ delegate_to: localhost
- - name: Configure the delete iDRAC users attributes
- idrac_user:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "absent"
- user_name: "user_name"
- tags:
- - remove-user
+ - name: Delete an iDRAC user
+ dellemc.openmanage.idrac_user:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ user_name: "user_name"
+ tags:
+ - remove-user
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user_info.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user_info.yml
new file mode 100644
index 000000000..e49cb0bfe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_user_info.yml
@@ -0,0 +1,31 @@
+---
+- name: Fetching iDRAC user facts.
+ hosts: idrac
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve basic details of all accounts.
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
+
+ - name: Retrieve user details of a specific user_id
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ user_id: 1
+ delegate_to: localhost
+
+ - name: Retrieve user details of a specific username
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ username: user_name
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml
index 9a2cc5209..7ed4d9ba4 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/idrac/idrac_virtual_media.yml
@@ -1,107 +1,109 @@
---
-- hosts: idrac
- connection: local
- name: Configure the boot order settings
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the boot order settings
+ hosts: idrac
+ gather_facts: false
tasks:
+ - name: Insert image file to Remote File Share 1 using CIFS share.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ virtual_media:
+ - insert: true
+ image: "//192.168.0.2/file_path/file.iso"
+ username: "username"
+ password: "password"
+ tags: insert-media-cifs
+ delegate_to: localhost
- - name: Insert image file to Remote File Share 1 using CIFS share.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- virtual_media:
- - insert: true
- image: "//192.168.0.2/file_path/file.iso"
- username: "username"
- password: "password"
- tags: insert-media-cifs
+ - name: Insert image file to Remote File Share 2 using NFS share.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ virtual_media:
+ - index: 2
+ insert: true
+ image: "192.168.0.4:/file_path/file.iso"
+ tags: insert-media-nfs
+ delegate_to: localhost
- - name: Insert image file to Remote File Share 2 using NFS share.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- virtual_media:
- - index: 2
- insert: true
- image: "192.168.0.4:/file_path/file.iso"
- tags: insert-media-nfs
+ - name: Insert image file to Remote File Share 1 and 2 using HTTP.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: true
+ image: "http://192.168.0.4/file_path/file.img"
+ - index: 2
+ insert: true
+ image: "http://192.168.0.4/file_path/file.img"
+ tags: insert-media-http
+ delegate_to: localhost
- - name: Insert image file to Remote File Share 1 and 2 using HTTP.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- force: true
- virtual_media:
- - index: 1
- insert: true
- image: "http://192.168.0.4/file_path/file.img"
- - index: 2
- insert: true
- image: "http://192.168.0.4/file_path/file.img"
- tags: insert-media-http
+ - name: Insert image file using HTTPS.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: true
+ image: "https://192.168.0.5/file_path/file.img"
+ username: username
+ password: password
+ tags: insert-media-https
+ delegate_to: localhost
- - name: Insert image file using HTTPS.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- force: true
- virtual_media:
- - index: 1
- insert: true
- image: "https://192.168.0.5/file_path/file.img"
- username: username
- password: password
- tags: insert-media-http
+ - name: Eject multiple virtual media.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ virtual_media:
+ - index: 1
+ insert: false
+ - index: 2
+ insert: false
+ tags: eject-media
+ delegate_to: localhost
- - name: Eject multiple virtual media.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- virtual_media:
- - index: 1
- insert: false
- - index: 2
+ - name: Ejection of image file from Remote File Share 1.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
insert: false
- tags: eject-media
+ tags: eject-media-rfs1
+ delegate_to: localhost
- - name: Ejection of image file from Remote File Share 1.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- force: true
- virtual_media:
- insert: false
- tags: eject-media-rfs1
-
- - name: Insertion and ejection of image file in single task.
- idrac_virtual_media:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_password }}"
- ca_path: "/path/to/ca_cert.pem"
- force: true
- virtual_media:
- - index: 1
- insert: true
- image: https://192.168.0.5/file/file.iso
- username: username
- password: password
- - index: 2
- insert: false
- tags: insert-eject-media
+ - name: Insertion and ejection of image file in a single task.
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ force: true
+ virtual_media:
+ - index: 1
+ insert: true
+ image: https://192.168.0.5/file/file.iso
+ username: username
+ password: password
+ - index: 2
+ insert: false
+ tags: insert-eject-media
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies.yml
new file mode 100644
index 000000000..b9fbcb12b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies.yml
@@ -0,0 +1,110 @@
+---
+- name: Dell OpenManage Ansible Alert policies.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: "Create an alert policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Alert Policy One"
+ device_service_tag:
+ - ABCD123
+ - SVC7845
+ category:
+ - catalog_name: Application
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - Generic
+ - Devices
+ - catalog_name: iDRAC
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - BIOS Management
+ - iDRAC Service Module
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ time_interval: true
+ severity:
+ - unknown
+ - critical
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ - name: "traphostname.domain.com:162"
+ value: true
+ tags: create_alert_policy
+ delegate_to: localhost
+
+ - name: "Update an alert Policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Existing Policy Name"
+ new_name: "Update Policy Name"
+ device_group: "Group Name"
+ message_ids:
+ - AMP400
+ - CTL201
+ - BIOS101
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ time_interval: true
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ tags: update_alert_policy
+ delegate_to: localhost
+
+ - name: "Enable an alert policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Policy Name"
+ enable: true
+ tags: enable_alert_policy
+ delegate_to: localhost
+
+ - name: "Disable multiple alert policies"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name 1"
+ - "Policy Name 2"
+ enable: false
+ tags: disable_alert_policy
+ delegate_to: localhost
+
+ - name: "Delete an alert policy"
+ dellemc.openmanage.ome_alert_policies:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name"
+ state: absent
+ tags: delete_alert_policy
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_actions_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_actions_info.yml
new file mode 100644
index 000000000..f5aec1d90
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_actions_info.yml
@@ -0,0 +1,13 @@
+---
+- name: Dell OpenManage Ansible Alert policies actions info.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Get action details of all alert policies.
+ dellemc.openmanage.ome_alert_policies_actions_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_category_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_category_info.yml
new file mode 100644
index 000000000..45eb038ef
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_category_info.yml
@@ -0,0 +1,13 @@
+---
+- name: Dell OpenManage Ansible Alert policy categories info
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve information about all the OME alert policy categories
+ dellemc.openmanage.ome_alert_policies_category_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_info.yml
new file mode 100644
index 000000000..9951b6b59
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_info.yml
@@ -0,0 +1,21 @@
+- name: Dell OpenManage Ansible Alert policy info
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve information about all the OME alert policies.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
+
+ - name: Retrieve information about a specific OME alert policy.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ policy_name: "Mobile Push Notification - Critical Alerts"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_message_id_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_message_id_info.yml
new file mode 100644
index 000000000..20d6e865a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/alert_policy/ome_alert_policies_message_id_info.yml
@@ -0,0 +1,13 @@
+---
+- name: Fetching message IDs of OME alert policies.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve message IDs of alert policies.
+ dellemc.openmanage.ome_alert_policies_message_id_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml
index f77eabdd0..3b2852bf5 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_smtp.yml
@@ -1,37 +1,36 @@
---
-- hosts: ome
- connection: local
- name: Configure the SMTP settings of OME and OME-M.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure the SMTP settings of OME and OME-M.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Update SMTP destination server configuration with authentication
- ome_application_alerts_smtp:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- destination_address: "localhost"
- port_number: 25
- use_ssl: true
- enable_authentication: true
- credentials:
- username: "username"
- password: "password"
- tags:
- - smtp_auth
- - name: Update SMTP destination server configuration without authentication
- ome_application_alerts_smtp:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- destination_address: "localhost"
- port_number: 25
- use_ssl: false
- enable_authentication: false
- tags:
- - smtp_no_auth \ No newline at end of file
+ - name: Update SMTP destination server configuration with authentication
+ dellemc.openmanage.ome_application_alerts_smtp:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ destination_address: "localhost"
+ port_number: 25
+ use_ssl: true
+ enable_authentication: true
+ credentials:
+ username: "username"
+ password: "password"
+ delegate_to: localhost
+ tags:
+ - smtp_auth
+
+ - name: Update SMTP destination server configuration without authentication
+ dellemc.openmanage.ome_application_alerts_smtp:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ destination_address: "localhost"
+ port_number: 25
+ use_ssl: false
+ enable_authentication: false
+ delegate_to: localhost
+ tags:
+ - smtp_no_auth
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml
index 9fce647e3..f83e4e29f 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_alerts_syslog.yml
@@ -1,40 +1,38 @@
---
-- hosts: ome
- connection: local
- name: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure syslog forwarding settings on OpenManage Enterprise and OpenManage Enterprise Modular
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Configure single server to forward syslog
- ome_application_alerts_syslog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- syslog_servers:
- - id: 1
- enabled: true
- destination_address: 192.168.0.2
- port_number: 514
+ - name: Configure a single server to forward syslog
+ dellemc.openmanage.ome_application_alerts_syslog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ syslog_servers:
+ - id: 1
+ enabled: true
+ destination_address: 192.168.0.2
+ port_number: 514
+ delegate_to: localhost
- - name: Configure multiple server to forward syslog
- ome_application_alerts_syslog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- syslog_servers:
- - id: 1
- port_number: 523
- - id: 2
- enabled: true
- destination_address: sysloghost1.lab.com
- - id: 3
- enabled: false
- - id: 4
- enabled: true
- destination_address: 192.168.0.4
- port_number: 514 \ No newline at end of file
+ - name: Configure multiple servers to forward syslog
+ dellemc.openmanage.ome_application_alerts_syslog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ syslog_servers:
+ - id: 1
+ port_number: 523
+ - id: 2
+ enabled: true
+ destination_address: sysloghost1.lab.com
+ - id: 3
+ enabled: false
+ - id: 4
+ enabled: true
+ destination_address: 192.168.0.4
+ port_number: 514
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml
index ab0fb9ebc..90e4cd8a7 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_certificate.yml
@@ -1,53 +1,54 @@
---
-- hosts: ome
- connection: local
- name: Dell OME Application Certificate Signing Request.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OME Application Certificate Signing Request.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: generate certificate signing request.
- ome_application_certificate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "generate_csr"
- distinguished_name: "hostname.com"
- department_name: "Remote Access Group"
- business_name: "Dell Inc."
- locality: "Round Rock"
- country_state: "Texas"
- country: "US"
- email: "support@dell.com"
- register: result
- tags:
- - generate
+ - name: Generate certificate signing request.
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ distinguished_name: "hostname.com"
+ department_name: "Remote Access Group"
+ business_name: "Dell Inc."
+ locality: "Round Rock"
+ country_state: "Texas"
+ country: "US"
+ email: "support@dell.com"
+ register: result
+ tags:
+ - generate
+ delegate_to: localhost
- - name: copy CSR data into a file.
- ansible.builtin.copy:
- content: "{{ result.csr_status.CertificateData }}"
- dest: "csr_data.txt"
- tags:
- - csr-data
+ - name: Copy CSR data into a file.
+ ansible.builtin.copy:
+ content: "{{ result.csr_status.CertificateData }}"
+ dest: "csr_data.txt"
+ mode: "0600"
+ tags:
+ - csr-data
+ delegate_to: localhost
- - name: upload the certificate.
- ome_application_certificate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- command: "upload"
- upload_file: "/path/certificate.cer"
- tags:
- - upload
+ - name: Upload the certificate.
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "upload"
+ upload_file: "/path/certificate.cer"
+ tags:
+ - upload
+ delegate_to: localhost
- - name: "once certificate uploaded, OME cannot be accessed for few seconds, hence wait for 10 seconds."
- wait_for:
- host: "{{ hostname }}"
- port: "{{ port }}"
- delay: 10
- tags:
- - upload
+ - name: Once the certificate is uploaded, OME cannot be accessed for a few seconds, hence wait for 10 seconds.
+ ansible.builtin.wait_for:
+ host: "{{ hostname }}"
+ port: "{{ port }}"
+ delay: 10
+ tags:
+ - upload
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml
index b0b29ae97..00a2ce003 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_console_preferences.yml
@@ -1,97 +1,98 @@
---
-- hosts: ome
- connection: local
- name: Dell OME Application Console Preferences.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OME Application Console Preferences.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Update Console preferences with all the settings.
- ome_application_console_preferences:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- report_row_limit: 123
- device_health:
- health_check_interval: 1
- health_check_interval_unit: "Hourly"
- health_and_power_state_on_connection_lost: "last_known"
- discovery_settings:
- general_device_naming: "DNS"
- server_device_naming: "IDRAC_HOSTNAME"
- invalid_device_hostname: "localhost"
- common_mac_addresses: "::"
- server_initiated_discovery:
- device_discovery_approval_policy: "Automatic"
- set_trap_destination: True
- mx7000_onboarding_preferences: "all"
- builtin_appliance_share:
- share_options: "CIFS"
- cifs_options: "V1"
- email_sender_settings: "admin@dell.com"
- trap_forwarding_format: "Original"
- metrics_collection_settings: 31
- tags:
- - all_settings
+ - name: Update Console preferences with all the settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ report_row_limit: 123
+ device_health:
+ health_check_interval: 1
+ health_check_interval_unit: "Hourly"
+ health_and_power_state_on_connection_lost: "last_known"
+ discovery_settings:
+ general_device_naming: "DNS"
+ server_device_naming: "IDRAC_HOSTNAME"
+ invalid_device_hostname: "localhost"
+ common_mac_addresses: "::"
+ server_initiated_discovery:
+ device_discovery_approval_policy: "Automatic"
+ set_trap_destination: true
+ mx7000_onboarding_preferences: "all"
+ builtin_appliance_share:
+ share_options: "CIFS"
+ cifs_options: "V1"
+ email_sender_settings: "admin@dell.com"
+ trap_forwarding_format: "Original"
+ metrics_collection_settings: 31
+ tags:
+ - all_settings
+ delegate_to: localhost
- - name: Update Console preferences with report and device health settings.
- ome_application_console_preferences:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- report_row_limit: 236
- device_health:
- health_check_interval: 10
- health_check_interval_unit: "Hourly"
- health_and_power_state_on_connection_lost: "last_known"
- tags:
- - valid_report_device
+ - name: Update Console preferences with report and device health settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ report_row_limit: 236
+ device_health:
+ health_check_interval: 10
+ health_check_interval_unit: "Hourly"
+ health_and_power_state_on_connection_lost: "last_known"
+ tags:
+ - valid_report_device
+ delegate_to: localhost
- - name: Update Console preferences with invalid device health settings.
- ome_application_console_preferences:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_health:
- health_check_interval: 65
- health_check_interval_unit: "Minutes"
- tags:
- - invalid_device
+ - name: Update Console preferences with invalid device health settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_health:
+ health_check_interval: 65
+ health_check_interval_unit: "Minutes"
+ tags:
+ - invalid_device
+ delegate_to: localhost
- - name: Update Console preferences with discovery and built in appliance share settings.
- ome_application_console_preferences:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- discovery_settings:
- general_device_naming: "DNS"
- server_device_naming: "IDRAC_SYSTEM_HOSTNAME"
- invalid_device_hostname: "localhost"
- common_mac_addresses: "00:53:45:00:00:00"
- builtin_appliance_share:
- share_options: "CIFS"
- cifs_options: "V1"
- tags:
- - valid_discovery
+ - name: Update Console preferences with discovery and built in appliance share settings.
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ discovery_settings:
+ general_device_naming: "DNS"
+ server_device_naming: "IDRAC_SYSTEM_HOSTNAME"
+ invalid_device_hostname: "localhost"
+ common_mac_addresses: "00:53:45:00:00:00"
+ builtin_appliance_share:
+ share_options: "CIFS"
+ cifs_options: "V1"
+ tags:
+ - valid_discovery
+ delegate_to: localhost
- - name: Update Console preferences with server initiated discovery, mx7000 onboarding preferences, email sender, trap forwarding format, and metrics collection settings.
- ome_application_console_preferences:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- server_initiated_discovery:
- device_discovery_approval_policy: "Automatic"
- set_trap_destination: True
- mx7000_onboarding_preferences: "chassis"
- email_sender_settings: "admin@dell.com"
- trap_forwarding_format: "Normalized"
- metrics_collection_settings: 361
- tags:
+ - name: Update Console preferences with various settings
+ dellemc.openmanage.ome_application_console_preferences:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ server_initiated_discovery:
+ device_discovery_approval_policy: "Automatic"
+ set_trap_destination: true
+ mx7000_onboarding_preferences: "chassis"
+ email_sender_settings: "admin@dell.com"
+ trap_forwarding_format: "Normalized"
+ metrics_collection_settings: 361
+ tags:
- valid_metrics
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml
index 3eff08bc3..22565bcc3 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address.yml
@@ -1,115 +1,117 @@
---
-- hosts: ome
- connection: local
- name: Dell OME Application network settings.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OME Application network settings.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: IPv4 network settings
- ome_application_network_address:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- ipv4_configuration:
- enable: true
- enable_dhcp: false
- static_ip_address: 192.168.0.2
- static_subnet_mask: 255.255.254.0
- static_gateway: 192.168.0.3
- use_dhcp_for_dns_server_names: false
- static_preferred_dns_server: 192.168.0.4
- static_alternate_dns_server: ""
- reboot_delay: 5
- tags:
- - ipv4_config
+ - name: IPv4 network settings
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv4_configuration:
+ enable: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: ""
+ reboot_delay: 5
+ tags:
+ - ipv4_config
+ delegate_to: localhost
- - name: IPv6 network settings
- ome_application_network_address:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- ipv6_configuration:
- enable: true
- enable_auto_configuration: true
- static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
- static_prefix_length: 10
- static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
- use_dhcp_for_dns_server_names: true
- static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
- static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
- reboot_delay: 10
- tags:
- - ipv6_config
+ - name: IPv6 network settings
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv6_configuration:
+ enable: true
+ enable_auto_configuration: true
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
+ use_dhcp_for_dns_server_names: true
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ reboot_delay: 10
+ tags:
+ - ipv6_config
+ delegate_to: localhost
- - name: Management vLAN settings for primary interface
- ome_application_network_address:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- management_vlan:
- enable_vlan: true
- vlan_id: 3344
- dns_configuration:
- register_with_dns: false
- reboot_delay: 1
- tags:
- - mgmt_vlan
+ - name: Management vLAN settings for primary interface
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ management_vlan:
+ enable_vlan: true
+ vlan_id: 3344
+ dns_configuration:
+ register_with_dns: false
+ reboot_delay: 1
+ tags:
+ - mgmt_vlan
+ delegate_to: localhost
- - name: DNS settings
- ome_application_network_address:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- ipv4_configuration:
- enable: true
- use_dhcp_for_dns_server_names: false
- static_preferred_dns_server: 192.168.0.4
- static_alternate_dns_server: 192.168.0.5
- dns_configuration:
- register_with_dns: true
- use_dhcp_for_dns_domain_name: false
- dns_name: "MX-SVCTAG"
- dns_domain_name: "localdomainname"
- reboot_delay: 1
- tags:
- - dns_config
+ - name: DNS settings
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv4_configuration:
+ enable: true
+ enable_dhcp: true
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: "MX-SVCTAG"
+ dns_domain_name: "localdomainname"
+ reboot_delay: 1
+ tags:
+ - dns_config
+ delegate_to: localhost
- - name: Complete network settings
- ome_application_network_address:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- ipv4_configuration:
- enable: true
- enable_dhcp: false
- static_ip_address: 192.168.0.2
- static_subnet_mask: 255.255.254.0
- static_gateway: 192.168.0.3
- use_dhcp_for_dns_server_names: false
- static_preferred_dns_server: 192.168.0.4
- static_alternate_dns_server: 192.168.0.5
- ipv6_configuration:
- enable: true
- enable_auto_configuration: true
- static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
- static_prefix_length: 10
- static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
- use_dhcp_for_dns_server_names: true
- static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
- static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
- dns_configuration:
- register_with_dns: true
- use_dhcp_for_dns_domain_name: false
- dns_name: "MX-SVCTAG"
- dns_domain_name: "localdomainname"
- reboot_delay: 1
- tags:
- - all_network_config \ No newline at end of file
+ - name: Complete network settings
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv4_configuration:
+ enable: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable: true
+ enable_auto_configuration: true
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
+ use_dhcp_for_dns_server_names: true
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: "MX-SVCTAG"
+ dns_domain_name: "localdomainname"
+ reboot_delay: 1
+ tags:
+ - all_network_config
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml
index 1f4cf709d..b978a157f 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_address_with_job_tracking.yml
@@ -1,65 +1,64 @@
---
-- hosts: ome
+- name: OME - Complete network settings with details tracking
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 50
polling_interval: 5 # in seconds
- connection: local
- name: OME - Complete network settings with details tracking
- gather_facts: False
-
- collections:
- - dellemc.openmanage
tasks:
- - name: Complete network settings
- ome_application_network_address:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- ipv4_configuration:
- enable: true
- enable_dhcp: false
- static_ip_address: 192.168.0.2
- static_subnet_mask: 255.255.254.0
- static_gateway: 192.168.0.3
- use_dhcp_for_dns_server_names: false
- static_preferred_dns_server: 192.168.0.4
- static_alternate_dns_server: 192.168.0.5
- ipv6_configuration:
- enable: true
- enable_auto_configuration: true
- static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
- static_prefix_length: 10
- static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
- use_dhcp_for_dns_server_names: true
- static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
- static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
- dns_configuration:
- register_with_dns: true
- use_dhcp_for_dns_domain_name: false
- dns_name: "MX-SVCTAG"
- dns_domain_name: "localdomainname"
- reboot_delay: 1
- register: facts_result
+ - name: Complete network settings
+ dellemc.openmanage.ome_application_network_address:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ ipv4_configuration:
+ enable: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_for_dns_server_names: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable: true
+ enable_auto_configuration: true
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: 2626:f2f2:f081:9:1c1c:f1f1:4747:2
+ use_dhcp_for_dns_server_names: true
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: "MX-SVCTAG"
+ dns_domain_name: "localdomainname"
+ reboot_delay: 1
+ register: facts_result
+ delegate_to: localhost
- # To end play when no job_info
- - name: "End the play when no job_info"
- meta: end_play
- when:
- - facts_result.changed == false
- - "'job_info' not in facts_result"
+ # To end play when no job_info
+ - name: End the play when no job_info # noqa: no-handler
+ ansible.builtin.meta: end_play
+ when:
+ - not facts_result.changed
+ - "'job_info' not in facts_result"
+ delegate_to: localhost
- - name: "Get job details using job id from network address config task."
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "{{ facts_result.job_info.Id }}"
- register: job_result
- failed_when: job_result.job_info.LastRunStatus.Name == 'Failed'
- changed_when: job_result.job_info.LastRunStatus.Name == 'Completed'
- until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}"
+ - name: Get job details using job id from network address config task.
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "{{ facts_result.job_info.Id }}"
+ register: job_result
+ failed_when: job_result.job_info.LastRunStatus.Name == 'Failed'
+ changed_when: job_result.job_info.LastRunStatus.Name == 'Completed'
+ until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml
index 0c0e8abf9..7ccb3e581 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_proxy.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible Application network proxy setting.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible Application network proxy setting.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Update proxy configuration and enable authentication.
- ome_application_network_proxy:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Update proxy configuration and enable authentication.
+ dellemc.openmanage.ome_application_network_proxy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
enable_proxy: true
ip_address: "192.168.0.2"
@@ -20,25 +16,28 @@
enable_authentication: true
proxy_username: "root"
proxy_password: "proxy_password"
- tags: setting1
-
- - name: Reset proxy authentication.
- ome_application_network_proxy:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ tags: setting1
+ delegate_to: localhost
+
+ - name: Reset proxy authentication.
+ dellemc.openmanage.ome_application_network_proxy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
enable_proxy: true
ip_address: "192.168.0.2"
proxy_port: 444
enable_authentication: false
- tags: setting2
+ tags: setting2
+ delegate_to: localhost
- - name: Reset proxy configuration.
- ome_application_network_proxy:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Reset proxy configuration.
+ dellemc.openmanage.ome_application_network_proxy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
enable_proxy: false
- tags: setting3
+ tags: setting3
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml
index 68340ba9f..1a853bcaf 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_settings.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible Application network setting.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible Application network setting.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Configure universal inactivity timeout
- ome_application_network_settings:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_application_network_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
session_inactivity_timeout:
enable_universal_timeout: true
@@ -23,51 +19,58 @@
serial_sessions: 1
tags:
- enable_universal_timeout
+ delegate_to: localhost
+
- name: Configure API and GUI timeout and sessions
- ome_application_network_settings:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- session_inactivity_timeout:
- api_timeout: 20
- api_sessions: 100
- gui_timeout: 25
- gui_sessions: 5
+ dellemc.openmanage.ome_application_network_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ api_timeout: 20
+ api_sessions: 100
+ gui_timeout: 25
+ gui_sessions: 5
tags:
- enable_api_gui_timout_sessions
+ delegate_to: localhost
+
- name: Configure timeout and sessions for all parameters
- ome_application_network_settings:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- session_inactivity_timeout:
- api_timeout: 20
- api_sessions: 100
- gui_timeout: 15
- gui_sessions: 5
- ssh_timeout: 30
- ssh_sessions: 2
- serial_timeout: 35
- serial_sessions: 1
+ dellemc.openmanage.ome_application_network_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ api_timeout: 20
+ api_sessions: 100
+ gui_timeout: 15
+ gui_sessions: 5
+ ssh_timeout: 30
+ ssh_sessions: 2
+ serial_timeout: 35
+ serial_sessions: 1
tags:
- enable_all_timeout_sessions
+ delegate_to: localhost
+
- name: Disable universal timeout and configure timeout and sessions for other parameters
- ome_application_network_settings:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- session_inactivity_timeout:
- enable_universal_timeout: false
- api_timeout: 20
- api_sessions: 100
- gui_timeout: 15
- gui_sessions: 5
- ssh_timeout: 30
- ssh_sessions: 2
- serial_timeout: 35
- serial_sessions: 1
+ dellemc.openmanage.ome_application_network_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ session_inactivity_timeout:
+ enable_universal_timeout: false
+ api_timeout: 20
+ api_sessions: 100
+ gui_timeout: 15
+ gui_sessions: 5
+ ssh_timeout: 30
+ ssh_sessions: 2
+ serial_timeout: 35
+ serial_sessions: 1
tags:
- - disa_all_timeout_sessions \ No newline at end of file
+ - disa_all_timeout_sessions
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml
index 7dd4edade..00c1a77f7 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time.yml
@@ -1,33 +1,31 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible Application network time setting.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible Application network time setting.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Configure system time.
- ome_application_network_time:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Configure system time.
+ dellemc.openmanage.ome_application_network_time:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
enable_ntp: false
system_time: "2020-03-31 21:35:18"
time_zone: "TZ_ID_11"
- tags: time_setting1
-
- - name: Configure NTP server for time synchronization.
- ome_application_network_time:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ tags: time_setting1
+ delegate_to: localhost
+
+ - name: Configure NTP server for time synchronization.
+ dellemc.openmanage.ome_application_network_time:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
time_zone: "TZ_ID_66"
enable_ntp: true
primary_ntp_address: "192.168.0.2"
secondary_ntp_address1: "192.168.0.3"
secondary_ntp_address2: "192.168.0.4"
- tags: time_setting2
+ tags: time_setting2
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml
index a57e0b905..cc34fa8a1 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_time_zone_info.yml
@@ -1,31 +1,29 @@
---
-- hosts: ome
- connection: local
+- name: "Ome application network time zone informaion - Ansible Module"
+ hosts: ome
gather_facts: false
- name: "Ome application network time zone informaion - Ansible Module"
vars:
time_zone_uri: "/api/ApplicationService/Network/TimeZones"
- collections:
- - dellemc.openmanage
-
tasks:
- name: "Get list of all available times zones along with information specific to each time zone."
- uri:
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ time_zone_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200
- validate_certs: no
- force_basic_auth: yes
+ validate_certs: false
+ force_basic_auth: true
register: time_zone_result
failed_when: "'value' not in time_zone_result.json"
-
+ delegate_to: localhost
+
- name: Get specific time zone ID using time zone name
with_items:
- "{{ time_zone_result.json.value }}"
- debug:
- msg: "{{item['Id']}}"
- when: item['Name']=='(GMT+05:30) Sri Jayawardenepura'
+ ansible.builtin.debug:
+ msg: "{{ item['Id'] }}"
+ when: item['Name'] == '(GMT+05:30) Sri Jayawardenepura'
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml
index e445ed849..00680fb7e 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver.yml
@@ -1,40 +1,39 @@
---
-- hosts: ome
- connection: local
- name: Dell OME Application network webserver settings.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OME Application network webserver settings.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Update webserver port and session time out configuration.
- ome_application_network_webserver:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- webserver_port: 443
- webserver_timeout: 10
- tags:
- - port_timeout_update
+ - name: Update webserver port and session time out configuration.
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ webserver_port: 443
+ webserver_timeout: 10
+ tags:
+ - port_timeout_update
+ delegate_to: localhost
- - name: Update session time out
- ome_application_network_webserver:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- webserver_timeout: 30
- tags:
- - timeout_update
+ - name: Update session time out
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ webserver_timeout: 30
+ tags:
+ - timeout_update
+ delegate_to: localhost
- - name: Update web server port.
- ome_application_network_webserver:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- webserver_port: 8443
- tags:
- - port_update \ No newline at end of file
+ - name: Update web server port.
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ webserver_port: 8443
+ tags:
+ - port_update
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml
index 28911b801..6fbae4b13 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_network_webserver_port_changed_tracking.yml
@@ -1,61 +1,58 @@
---
-- hosts: ome
- connection: local
- name: "Dell OME Application network webserver port change and track web
- server till the service restarts."
- gather_facts: False
+- name: Change the Dell OME Application network webserver port and track the webserver until the service restarts.
+ hosts: ome
+ gather_facts: false
vars:
# 5 minutes wait max
retries_count: 30
polling_interval: 10
webserver_uri: "/api/ApplicationService/Network/WebServerConfiguration"
- collections:
- - dellemc.openmanage
-
tasks:
- # Update web server configuration
- - name: Update webserver port and timeout of OME
- ome_application_network_webserver:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- port: "{{ ome_webserver_port }}"
- webserver_port: "{{ new_port }}"
- webserver_timeout: 21
- register: result
+ # Update web server configuration
+ - name: Update webserver port and timeout of OME
+ dellemc.openmanage.ome_application_network_webserver:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ port: "{{ ome_webserver_port }}"
+ webserver_port: "{{ new_port }}"
+ webserver_timeout: 21
+ register: result
+ delegate_to: localhost
- # To end play when no port change or failure
- - name: "End the play when no port change"
- meta: end_play
- when:
- - result.changed == false
- - "'webserver_configuration' not in result"
+ # To end play when no port change or failure
+ - name: End the play when no port change # noqa: no-handler
+ ansible.builtin.meta: end_play
+ when:
+ - not result.changed
+ - "'webserver_configuration' not in result"
+ delegate_to: localhost
- # Loop till OME webserver is active by using the new port and webserver config GET call
- - name: "Pause play until webserver URL is reachable from this host with new port"
- uri:
- url: "https://{{ hostname }}:{{ result.webserver_configuration.PortNumber
- }}{{ webserver_uri }}"
- user: "{{ username }}"
- password: "{{ password }}"
- method: "GET"
- use_proxy: yes
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
- headers:
- Content-Type: "application/json"
- Accept: "application/json"
- register: webport_result
- until: "'PortNumber' in webport_result or webport_result.status == 200"
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}"
+ # Loop till OME webserver is active by using the new port and webserver config GET call
+ - name: Pause play until webserver URL is reachable from this host with new port
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ result.webserver_configuration.PortNumber }}{{ webserver_uri }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ method: "GET"
+ use_proxy: true
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ register: webport_result
+ until: "'PortNumber' in webport_result or webport_result.status == 200"
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ delegate_to: localhost
- # Output the webserver_configuration values to be used further
- - name: "Output the webserver config"
- vars:
- webserver_configuration: "{{ webport_result.json }}"
- debug:
- var: webserver_configuration \ No newline at end of file
+ # Output the webserver_configuration values to be used further
+ - name: "Output the webserver config"
+ vars:
+ webserver_configuration: "{{ webport_result.json }}"
+ ansible.builtin.debug:
+ var: webserver_configuration
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml
index 6a259e961..8bdd9c475 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/application/ome_application_security_settings.yml
@@ -1,57 +1,57 @@
---
-- hosts: ome
- connection: local
- name: Configure login security settings
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure login security settings
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Configure restricted allowed IP range
- ome_application_security_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- restrict_allowed_ip_range:
- enable_ip_range: true
- ip_range: 192.1.2.3/24
+ - name: Configure restricted allowed IP range
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ restrict_allowed_ip_range:
+ enable_ip_range: true
+ ip_range: 192.1.2.3/24
+ delegate_to: localhost
- - name: Configure login lockout policy
- ome_application_security_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- login_lockout_policy:
- by_user_name: true
- by_ip_address: true
- lockout_fail_count: 3
- lockout_fail_window: 30
- lockout_penalty_time: 900
+ - name: Configure login lockout policy
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ login_lockout_policy:
+ by_user_name: true
+ by_ip_address: true
+ lockout_fail_count: 3
+ lockout_fail_window: 30
+ lockout_penalty_time: 900
+ delegate_to: localhost
- - name: Configure restricted allowed IP range and login lockout policy with job wait time out of 60 seconds
- ome_application_security_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- restrict_allowed_ip_range:
- enable_ip_range: true
- ip_range: 192.1.2.3/24
- login_lockout_policy:
- by_user_name: true
- by_ip_address: true
- lockout_fail_count: 3
- lockout_fail_window: 30
- lockout_penalty_time: 900
- job_wait_timeout: 60
+ - name: Configure restricted allowed IP range and login lockout policy with job wait time out of 60 seconds
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ restrict_allowed_ip_range:
+ enable_ip_range: true
+ ip_range: 192.1.2.3/24
+ login_lockout_policy:
+ by_user_name: true
+ by_ip_address: true
+ lockout_fail_count: 3
+ lockout_fail_window: 30
+ lockout_penalty_time: 900
+ job_wait_timeout: 60
+ delegate_to: localhost
- - name: Enable FIPS mode
- ome_application_security_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- fips_mode_enable: yes
+ - name: Enable FIPS mode
+ dellemc.openmanage.ome_application_security_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fips_mode_enable: true
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml
index 1d5f23753..510700f06 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible configuration compliance baseline.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible configuration compliance baseline.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Create a configuration compliance baseline using device IDs
- ome_configuration_compliance_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: create
template_name: "template 1"
@@ -23,12 +19,13 @@
- 2222
tags:
- create_compliance_baseline_device_id
+ delegate_to: localhost
- name: Create a configuration compliance baseline using device service tags
- ome_configuration_compliance_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: create
template_name: "template 1"
@@ -39,12 +36,13 @@
- "SVCTAG2"
tags:
- create_compliance_baseline_tags
+ delegate_to: localhost
- name: Create a configuration compliance baseline using group names
- ome_configuration_compliance_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: create
template_name: "template 1"
@@ -56,12 +54,13 @@
- "Group2"
tags:
- create_compliance_baseline_group_id
+ delegate_to: localhost
- name: Delete the configuration compliance baselines
- ome_configuration_compliance_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_configuration_compliance_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: delete
names:
@@ -69,9 +68,10 @@
- baseline2
tags:
- delete_compliance_baseline
+ delegate_to: localhost
- name: Modify a configuration compliance baseline using group names
- ome_configuration_compliance_baseline:
+ dellemc.openmanage.ome_configuration_compliance_baseline:
hostname: "192.168.0.1"
username: "username"
password: "password"
@@ -83,9 +83,10 @@
job_wait_timeout: 1000
device_group_names:
- Group1
+ delegate_to: localhost
- name: Remediate specific non-compliant devices to a configuration compliance baseline using device IDs
- ome_configuration_compliance_baseline:
+ dellemc.openmanage.ome_configuration_compliance_baseline:
hostname: "192.168.0.1"
username: "username"
password: "password"
@@ -94,9 +95,10 @@
names: "baseline1"
device_ids:
- 1111
+ delegate_to: localhost
- name: Remediate specific non-compliant devices to a configuration compliance baseline using device service tags
- ome_configuration_compliance_baseline:
+ dellemc.openmanage.ome_configuration_compliance_baseline:
hostname: "192.168.0.1"
username: "username"
password: "password"
@@ -107,13 +109,15 @@
device_service_tags:
- "SVCTAG1"
- "SVCTAG2"
+ delegate_to: localhost
- name: Remediate all the non-compliant devices to a configuration compliance baseline
- ome_configuration_compliance_baseline:
+ dellemc.openmanage.ome_configuration_compliance_baseline:
hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
command: "remediate"
job_wait_timeout: 2000
- names: "baseline1" \ No newline at end of file
+ names: "baseline1"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml
index 076ce84d7..3fd901e1c 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_baseline_workflow.yml
@@ -1,16 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible configuration compliance baseline workflow.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible configuration compliance baseline workflow.
+ hosts: ome
+ gather_facts: false
tasks:
-
- name: Create a configuration compliance baseline using group names
- ome_configuration_compliance_baseline:
+ dellemc.openmanage.ome_configuration_compliance_baseline:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -23,25 +18,29 @@
device_group_names:
- "Group1"
- "Group2"
+ delegate_to: localhost
- name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline.
- ome_configuration_compliance_info:
+ dellemc.openmanage.ome_configuration_compliance_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
baseline: "baseline1"
register: compliance_report
+ delegate_to: localhost
# This tasks returns list of device ids.
# In case if you want to get devices based on service tag change attribute ServiceTag
# and next task device_ids attribute replaced with device_service_tag.
- name: Filter the non compliant device based on the retrieved compliance report.
ansible.builtin.set_fact:
- non_compliance_devices: "{{ compliance_report.compliance_info | json_query(\"value[?ComplianceStatus=='NONCOMPLIANT']\") | map(attribute='Id') | list }}"
+ non_compliance_devices: "{{ compliance_report.compliance_info | json_query(jquery | default('')) | map(attribute='Id') | list }}"
+ vars:
+ jquery: 'value[?ComplianceStatus==''NONCOMPLIANT'']'
- - name: Remediate a specified non-complaint devices to a configuration compliance baseline using device IDs
- ome_configuration_compliance_baseline:
+ - name: Remediate specified non-compliant devices to a configuration compliance baseline using device IDs # noqa: args[module]
+ dellemc.openmanage.ome_configuration_compliance_baseline:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -49,4 +48,5 @@
command: "remediate"
names: "baseline1"
device_ids: "{{ non_compliance_devices }}"
- when: "non_compliance_devices|length>0"
+ when: "non_compliance_devices | length > 0"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml
index a24557030..3b52b3eff 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/compliance/ome_configuration_compliance_info.yml
@@ -1,35 +1,34 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible Module for Device compliance information
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible Module for Device compliance information
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline.
- ome_configuration_compliance_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline: baseline_name
+ - name: Retrieve the compliance report of all of the devices in the specified configuration compliance baseline.
+ dellemc.openmanage.ome_configuration_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline: baseline_name
+ delegate_to: localhost
- - name: Retrieve the compliance report for a specific device associated with the baseline using the device ID.
- ome_configuration_compliance_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline: baseline_name
- device_id: 10001
+ - name: Retrieve the compliance report for a specific device associated with the baseline using the device ID.
+ dellemc.openmanage.ome_configuration_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline: baseline_name
+ device_id: 10001
+ delegate_to: localhost
- - name: Retrieve the compliance report for a specific device associated with the baseline using the device service tag.
- ome_configuration_compliance_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline: baseline_name
- device_service_tag: 2HFGH3
+ - name: Retrieve the compliance report for a specific device associated with the baseline using the device service tag.
+ dellemc.openmanage.ome_configuration_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline: baseline_name
+ device_service_tag: 2HFGH3
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml
index 48259af68..87df15e12 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_baseline.yml
@@ -1,26 +1,23 @@
---
-- hosts: ome
- connection: local
+- name: OME - Ansible Modules
+ hosts: ome
gather_facts: false
- name: "OME - Ansible Modules"
-
- collections:
- - dellemc.openmanage
tasks:
+ - name: "Retrieve baseline information for specific baseline."
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ register: result
+ delegate_to: localhost
- - name: "Retrieve baseline information for specific baseline."
- ome_firmware_baseline_compliance_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: "baseline_name"
- register: result
-
- - name: "Filter out device compliance reports."
- loop: "{{ result.baseline_compliance_info }}"
- debug:
- msg: "{{item.ComponentComplianceReports}}"
- loop_control:
- label: "{{ item.DeviceId }}" \ No newline at end of file
+ - name: Filter out device compliance reports.
+ loop: "{{ result.baseline_compliance_info }}"
+ ansible.builtin.debug:
+ msg: "{{ item.ComponentComplianceReports }}"
+ loop_control:
+ label: "{{ item.DeviceId }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml
index 77d4eddf4..4cd2cb294 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/component_reports_filtering/component_complaince_report_with_devices.yml
@@ -1,28 +1,25 @@
---
-- hosts: ome
- connection: local
+- name: "OME - Ansible Modules"
+ hosts: ome
gather_facts: false
- name: "OME - Ansible Modules"
-
- collections:
- - dellemc.openmanage
tasks:
+ - name: Retrieve baseline information for specified devices.
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_ids:
+ - 11111
+ - 22222
+ register: result
+ delegate_to: localhost
- - name: "Retrieve baseline information for specified devices."
- ome_firmware_baseline_compliance_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_ids:
- - 11111
- - 22222
- register: result
-
- - name: "Filter out device compliance reports."
- debug:
- msg: "{{ item.DeviceComplianceReports.0.ComponentComplianceReports }}"
- loop: "{{ result.baseline_compliance_info }}"
- loop_control:
- label: "{{ item.Name }}" \ No newline at end of file
+ - name: Filter out device compliance reports.
+ ansible.builtin.debug:
+ msg: "{{ item.DeviceComplianceReports.0.ComponentComplianceReports }}"
+ loop: "{{ result.baseline_compliance_info }}"
+ loop_control:
+ label: "{{ item.Name }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml
index 35f0eb234..a9f8c70de 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline.yml
@@ -1,75 +1,118 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible OME firmware baseline operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible OME firmware baseline operations.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Create baseline for device IDs
- ome_firmware_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: "baseline_name"
- baseline_description: "baseline_description"
- catalog_name: "catalog_name"
- device_ids:
- - 1010
- - 2020
+ - name: Create baseline for device IDs
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ device_ids:
+ - 1010
+ - 2020
+ delegate_to: localhost
+
+ - name: Create baseline for device IDs with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_ids:
+ - 1010
+ - 2020
+ delegate_to: localhost
+
+    - name: Create baseline for service tags
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+ delegate_to: localhost
+
+    - name: Create baseline for service tags with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+ delegate_to: localhost
- - name: Create baseline for servicetags
- ome_firmware_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: "baseline_name"
- baseline_description: "baseline_description"
- catalog_name: "catalog_name"
- device_service_tags:
- - "SVCTAG1"
- - "SVCTAG2"
+ - name: Create baseline for device groups without job_tracking
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ device_group_names:
+ - "Group1"
+ - "Group2"
+ job_wait: false
+ delegate_to: localhost
- - name: create baseline for device groups without job_tracking
- ome_firmware_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: "baseline_name"
- baseline_description: "baseline_description"
- catalog_name: "catalog_name"
- device_group_names:
- - "Group1"
- - "Group2"
- job_wait: no
+ - name: Modify an existing baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "existing_baseline_name"
+ new_baseline_name: "new_baseline_name"
+ baseline_description: "new baseline_description"
+ catalog_name: "catalog_other"
+ device_group_names:
+ - "Group3"
+ - "Group4"
+ - "Group5"
+ downgrade_enabled: false
+ is_64_bit: true
+ filter_no_reboot_required: true
+ delegate_to: localhost
- - name: Modify an existing baseline
- ome_firmware_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: "existing_baseline_name"
- new_baseline_name: "new_baseline_name"
- baseline_description: "new baseline_description"
- catalog_name: "catalog_other"
- device_group_names:
- - "Group3"
- - "Group4"
- - "Group5"
- downgrade_enabled: no
- is_64_bit: yes
+ - name: Modify no reboot filter in existing baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "existing_baseline_name"
+ new_baseline_name: "new_baseline_name"
+ filter_no_reboot_required: true
+ delegate_to: localhost
- - name: Delete a baseline
- ome_firmware_baseline:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- state: absent
- baseline_name: "baseline_name" \ No newline at end of file
+ - name: Delete a baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ baseline_name: "baseline_name"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml
index cb42e1740..f1a7afd03 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info.yml
@@ -1,51 +1,51 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible template inventory details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible template inventory details.
+ hosts: ome
+ gather_facts: false
tasks:
     - name: Retrieves device-based compliance report for specified device IDs.
- ome_firmware_baseline_compliance_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_ids:
- - 11111
- - 22222
+ - 11111
+ - 22222
tags: device_ids
+ delegate_to: localhost
     - name: Retrieves device-based compliance report for specified service tags.
- ome_firmware_baseline_compliance_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_service_tags:
- - MXL1234
- - MXL4567
+ - MXL1234
+ - MXL4567
tags: device_service_tags
+ delegate_to: localhost
     - name: Retrieves device-based compliance report for specified group names.
- ome_firmware_baseline_compliance_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_group_names:
- - group1
- - group2
+ - group1
+ - group2
tags: device_group_names
+ delegate_to: localhost
- name: Retrieves device compliance report for a specified baseline.
- ome_firmware_baseline_compliance_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
baseline_name: "baseline_name"
- tags: baseline_device \ No newline at end of file
+ tags: baseline_device
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml
index bbbf5f0d2..1a4cb13ba 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_compliance_info_filters.yml
@@ -1,63 +1,58 @@
---
-- hosts: ome
- connection: local
+- name: "OME - Ansible Modules"
+ hosts: ome
gather_facts: false
- name: "OME - Ansible Modules"
-
- collections:
- - dellemc.openmanage
tasks:
-
- - name: "Retrieve baseline information for specific device ids."
- ome_firmware_baseline_compliance_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_ids:
- - 11111
- - 11112
- register: result
-
- tags:
- - overall-compliance-report
-
- - name: "Firmware baseline compliance info based on FirmwareStatus - Non-Compliant"
- set_fact:
- non_compliance_fact: "{{ item }}"
- when:
- - item.DeviceComplianceReports.0.FirmwareStatus=='Non-Compliant'
- with_items:
- - "{{ result.baseline_compliance_info }}"
- loop_control:
- label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.FirmwareStatus }}"
-
- tags:
- - non-compliance-report
-
- - name: "Firmware baseline compliance info based on Device ID"
- set_fact:
- device_fact: "{{ item }}"
- when:
- - item.DeviceComplianceReports.0.DeviceId==11111
- with_items:
- - "{{ result.baseline_compliance_info }}"
- loop_control:
- label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.DeviceId }}"
-
- tags:
- - device-id-report
-
- - name: "Firmware baseline compliance info based on Device Service Tag"
- set_fact:
- service_tag_fact: "{{ item }}"
- when:
- - item.DeviceComplianceReports.0.ServiceTag=='1X1X1'
- with_items:
- - "{{ result.baseline_compliance_info }}"
- loop_control:
- label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.ServiceTag }}"
-
- tags:
- - device-service-tag-report \ No newline at end of file
+    - name: Retrieve baseline information for specific device IDs.
+ dellemc.openmanage.ome_firmware_baseline_compliance_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_ids:
+ - 11111
+ - 11112
+ register: result
+ delegate_to: localhost
+ tags:
+ - overall-compliance-report
+
+ - name: Firmware baseline compliance info based on FirmwareStatus - Non-Compliant
+ ansible.builtin.set_fact:
+ non_compliance_fact: "{{ item }}"
+ when:
+ - item.DeviceComplianceReports.0.FirmwareStatus=='Non-Compliant'
+ with_items:
+ - "{{ result.baseline_compliance_info }}"
+ loop_control:
+ label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.FirmwareStatus }}"
+ delegate_to: localhost
+ tags:
+ - non-compliance-report
+
+ - name: Firmware baseline compliance info based on Device ID
+ ansible.builtin.set_fact:
+ device_fact: "{{ item }}"
+ when:
+ - item.DeviceComplianceReports.0.DeviceId==11111
+ with_items:
+ - "{{ result.baseline_compliance_info }}"
+ loop_control:
+ label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.DeviceId }}"
+ delegate_to: localhost
+ tags:
+ - device-id-report
+
+ - name: Firmware baseline compliance info based on Device Service Tag
+ ansible.builtin.set_fact:
+ service_tag_fact: "{{ item }}"
+ when:
+ - item.DeviceComplianceReports.0.ServiceTag=='1X1X1'
+ with_items:
+ - "{{ result.baseline_compliance_info }}"
+ loop_control:
+ label: "{{ item.Name }} - {{ item.DeviceComplianceReports.0.ServiceTag }}"
+ delegate_to: localhost
+ tags:
+ - device-service-tag-report
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml
index 7993db51e..110901de7 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/baseline/ome_firmware_baseline_info.yml
@@ -1,26 +1,24 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible firmware baseline details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible firmware baseline details.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Retrieve details of all the available firmware baselines.
- ome_firmware_baseline_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_firmware_baseline_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
tags: firmware_baselines
+ delegate_to: localhost
- - name: Retrieve details of a specific firmware baseline identified by its baseline name.
- ome_firmware_baseline_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Retrieve details of a specific firmware baseline identified by its baseline name.
+ dellemc.openmanage.ome_firmware_baseline_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
baseline_name: "baseline_name"
- tags: firmware_baseline \ No newline at end of file
+ tags: firmware_baseline
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml
index a065a3c0c..b16398e6b 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/catalog/ome_firmware_catalog.yml
@@ -1,121 +1,126 @@
---
-- hosts: ome
- connection: local
- name: "OME - Create Catalog using Repository."
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: "OME - Create Catalog using Repository."
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Create a catalog from HTTPS repository
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- catalog_name: catalog1
- catalog_description: catalog description
- source: downloads.company.com
- repository_type: HTTPS
- source_path: "catalog"
- file_name: "catalog.gz"
- check_certificate: True
-
- - name: Create a catalog from HTTP repository
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- catalog_name: "{{ catalog_name }}"
- catalog_description: catalog description
- source: downloads.company.com
- repository_type: HTTP
- source_path: "catalog"
- file_name: "catalog.gz"
+ - name: Create a catalog from HTTPS repository
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: catalog1
+ catalog_description: catalog description
+ source: downloads.company.com
+ repository_type: HTTPS
+ source_path: "catalog"
+ file_name: "catalog.gz"
+ check_certificate: true
+ delegate_to: localhost
+
+ - name: Create a catalog from HTTP repository
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "{{ catalog_name }}"
+ catalog_description: catalog description
+ source: downloads.company.com
+ repository_type: HTTP
+ source_path: "catalog"
+ file_name: "catalog.gz"
+ delegate_to: localhost
+
+ - name: Create a catalog using CIFS share
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "{{ catalog_name }}"
+ catalog_description: catalog description
+ source: "192.166.0.1"
+ repository_type: CIFS
+ source_path: "cifs/R940"
+ file_name: "catalog.gz"
+ repository_username: "{{ repository_username }}"
+ repository_password: "{{ repository_password }}"
+ repository_domain: "{{ repository_domain }}"
+ delegate_to: localhost
- - name: Create a catalog using CIFS share
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- catalog_name: "{{ catalog_name }}"
- catalog_description: catalog description
- source: "192.166.0.1"
- repository_type: CIFS
- source_path: "cifs/R940"
- file_name: "catalog.gz"
- repository_username: "{{ repository_username }}"
- repository_password: "{{ repository_password }}"
- repository_domain: "{{ repository_domain }}"
-
- - name: Create a catalog using NFS share
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- catalog_name: "{{ catalog_name }}"
- catalog_description: catalog description
- source: "192.166.0.2"
- repository_type: NFS
- source_path: "/nfs/R940"
- file_name: "catalog.xml"
+ - name: Create a catalog using NFS share
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "{{ catalog_name }}"
+ catalog_description: catalog description
+ source: "192.166.0.2"
+ repository_type: NFS
+ source_path: "/nfs/R940"
+ file_name: "catalog.xml"
+ delegate_to: localhost
- - name: Create a catalog using repository from Dell.com
- ome_firmware_catalog:
- hostname: "192.168.0.1"
- username: "username"
- password: "password"
- ca_path: "/path/to/ca_cert.pem"
- catalog_name: "catalog_name"
- catalog_description: "catalog_description"
- repository_type: "DELL_ONLINE"
- check_certificate: True
+ - name: Create a catalog using repository from Dell.com
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "catalog_name"
+ catalog_description: "catalog_description"
+ repository_type: "DELL_ONLINE"
+ check_certificate: true
+ delegate_to: localhost
- - name: Modify a catalog using a repository from CIFS share
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- catalog_name: "{{ catalog_name }}"
- catalog_description: new catalog description
- source: "192.166.0.2"
- repository_type: CIFS
- source_path: "cifs/R941"
- file_name: "catalog1.gz"
- repository_username: "{{ repository_username }}"
- repository_password: "{{ repository_password }}"
- repository_domain: "{{ repository_domain }}"
+ - name: Modify a catalog using a repository from CIFS share
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_name: "{{ catalog_name }}"
+ catalog_description: new catalog description
+ source: "192.166.0.2"
+ repository_type: CIFS
+ source_path: "cifs/R941"
+ file_name: "catalog1.gz"
+ repository_username: "{{ repository_username }}"
+ repository_password: "{{ repository_password }}"
+ repository_domain: "{{ repository_domain }}"
+ delegate_to: localhost
- - name: Modify a catalog using a repository from Dell.com
- ome_firmware_catalog:
- hostname: "192.168.0.1"
- username: "username"
- password: "password"
- ca_path: "/path/to/ca_cert.pem"
- catalog_id: 10
- repository_type: DELL_ONLINE
- new_catalog_name: "new_catalog_name"
- catalog_description: "new_catalog_description"
+ - name: Modify a catalog using a repository from Dell.com
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ catalog_id: 10
+ repository_type: DELL_ONLINE
+ new_catalog_name: "new_catalog_name"
+ catalog_description: "new_catalog_description"
+ delegate_to: localhost
- - name: Delete catalog using catalog name
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: absent
- catalog_name: ["catalog_name1", "catalog_name2"]
+ - name: Delete catalog using catalog name
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ catalog_name: ["catalog_name1", "catalog_name2"]
+ delegate_to: localhost
- - name: Delete catalog using catalog id
- ome_firmware_catalog:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: absent
- catalog_id: [11, 34] \ No newline at end of file
+ - name: Delete catalog using catalog id
+ dellemc.openmanage.ome_firmware_catalog:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ catalog_id: [11, 34]
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml
index 198e2cce9..05e32fdde 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware.yml
@@ -1,142 +1,161 @@
---
-- hosts: ome
- connection: local
- name: "OME - Update Firmware"
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: "OME - Update Firmware"
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Update firmware from a DUP file using a device ids
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id:
- - 11111
- - 22222
- dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE"
+    - name: Update firmware from a DUP file using device IDs
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id:
+ - 11111
+ - 22222
+ dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE"
+ delegate_to: localhost
+
+    - name: Update firmware from a DUP file using device service tags
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag:
+ - KLBR111
+ - KLBR222
+ dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE"
+ delegate_to: localhost
- - name: Update firmware from a DUP file using a device service tags
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag:
- - KLBR111
- - KLBR222
- dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE"
+    - name: Update firmware from a DUP file using device group names
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_group_names:
+ - servers
+ dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
+ delegate_to: localhost
- - name: Update firmware from a DUP file using a device group names
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_group_names:
- - servers
- dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
+ - name: Update firmware using baseline name
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ delegate_to: localhost
- - name: Update firmware using baseline name
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
+ - name: Stage firmware for the next reboot using baseline name
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ schedule: StageForNextReboot
+ delegate_to: localhost
- - name: Stage firmware for the next reboot using baseline name
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- schedule: StageForNextReboot
+ - name: Update firmware using baseline name and components
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ components:
+ - BIOS
+ delegate_to: localhost
- - name: Update firmware using baseline name and components
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- components:
- - BIOS
+    - name: Update firmware of device components from a DUP file using device IDs in a baseline
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ device_id:
+ - 11111
+ - 22222
+ components:
+ - iDRAC with Lifecycle Controller
+ delegate_to: localhost
- - name: Update firmware of device components from a DUP file using a device ids in a baseline
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- device_id:
- - 11111
- - 22222
- components:
- - iDRAC with Lifecycle Controller
+    - name: Update firmware of device components from a baseline using device service tags
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ device_service_tag:
+ - KLBR111
+ - KLBR222
+ components:
+ - IOM-SAS
+ delegate_to: localhost
- - name: Update firmware of device components from a baseline using a device service tags under a baseline
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- device_service_tag:
- - KLBR111
- - KLBR222
- components:
- - IOM-SAS
+ - name: Update firmware using baseline name with a device id and required components
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ devices:
+ - id: 12345
+ components:
+ - Lifecycle Controller
+ - id: 12346
+ components:
+ - Enterprise UEFI Diagnostics
+ - BIOS
+ delegate_to: localhost
- - name: Update firmware using baseline name with a device id and required components
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- devices:
- - id: 12345
- components:
- - Lifecycle Controller
- - id: 12346
- components:
- - Enterprise UEFI Diagnostics
- - BIOS
+ - name: Update firmware using baseline name with a device service tag and required components
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ devices:
+ - service_tag: ABCDE12
+ components:
+ - PERC H740P Adapter
+ - BIOS
+ - service_tag: GHIJK34
+ components:
+ - OS Drivers Pack
+ delegate_to: localhost
- - name: Update firmware using baseline name with a device service tag and required components
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- devices:
- - service_tag: ABCDE12
- components:
- - PERC H740P Adapter
- - BIOS
- - service_tag: GHIJK34
- components:
- - OS Drivers Pack
+ - name: Update firmware using baseline name with a device service tag or device id and required components
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ devices:
+ - service_tag: ABCDE12
+ components:
+ - BOSS-S1 Adapter
+ - PowerEdge Server BIOS
+ - id: 12345
+ components:
+ - iDRAC with Lifecycle Controller
+ delegate_to: localhost
- - name: Update firmware using baseline name with a device service tag or device id and required components
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- devices:
- - service_tag: ABCDE12
- components:
- - BOSS-S1 Adapter
- - PowerEdge Server BIOS
- - id: 12345
- components:
- - iDRAC with Lifecycle Controller
+    - name: "Update firmware using baseline name and components, and perform PowerCycle."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ components:
+ - BIOS
+ reboot_type: PowerCycle
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml
index c104f3f5b..d475277f2 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/firmware/ome_firmware_with_job_tracking.yml
@@ -1,8 +1,7 @@
---
-- hosts: ome
- connection: local
- name: "OME - Update Firmware"
- gather_facts: False
+- name: "OME - Update Firmware"
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 100
polling_interval: 10
@@ -13,99 +12,95 @@
- baseline-name
- baseline-name-dup
- collections:
- - dellemc.openmanage
-
tasks:
- - name: "Update firmware from a DUP file using a device ids."
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id:
- - 11111
- - 22222
- dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE"
- register: result
- tags:
- - device-ids
+    - name: Update firmware from a DUP file using device IDs.
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id:
+ - 11111
+ - 22222
+ dup_file: "/path/Chassis-System-Management_Firmware_6N9WN_WN64_1.00.01_A00.EXE"
+ register: result
+ tags:
+ - device-ids
+ delegate_to: localhost
- - name: "Update firmware from a DUP file using a device service tags."
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag:
- - KLBR111
- - KLBR222
- dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE"
- register: result
- tags:
- - service-tags
+    - name: Update firmware from a DUP file using device service tags.
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag:
+ - KLBR111
+ - KLBR222
+ dup_file: "/path/Network_Firmware_NTRW0_WN64_14.07.07_A00-00_01.EXE"
+ register: result
+ tags:
+ - service-tags
+ delegate_to: localhost
- - name: "Update firmware from a DUP file using a device group names."
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_group_names:
- - servers
- dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
- register: result
- tags:
- - group-name
+    - name: Update firmware from a DUP file using device group names.
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_group_names:
+ - servers
+ dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
+ register: result
+ tags:
+ - group-name
+ delegate_to: localhost
- - name: "Update firmware using baseline name."
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: baseline_devices
- register: result
- tags:
- - baseline-name
-
- - name: "Update firmware from a DUP file using a baseline names."
- ome_firmware:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- baseline_name: "baseline_devices, baseline_groups"
- dup_file: "/path/BIOS_87V69_WN64_2.4.7.EXE"
- tags:
- - baseline-name-dup
+ - name: Update firmware using baseline name.
+ dellemc.openmanage.ome_firmware:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ register: result
+ tags:
+ - baseline-name
+ delegate_to: localhost
- - name: "Track job details for the ome firmware update operation using a job id."
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "{{ result.update_status.Id }}"
- register: job_result
- until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed' or job_result.job_info.LastRunStatus.Name == 'Warning'
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}"
- tags: "{{ all_firmware_task_tags }}"
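+    # The task below polls the firmware update job until it reaches a terminal
+    # state (Completed, Failed or Warning); retries_count and polling_interval
+    # from the play vars bound how long the polling can run.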
+    - name: Track job details for the OME firmware update operation using a job ID.
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "{{ result.update_status.Id }}"
+ register: job_result
+ until: job_result.job_info.LastRunStatus.Name == 'Completed'
+ or job_result.job_info.LastRunStatus.Name == 'Failed'
+ or job_result.job_info.LastRunStatus.Name == 'Warning'
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ tags: "{{ all_firmware_task_tags }}"
+ delegate_to: localhost
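+    # The two set_fact tasks below translate the final job status into the play
+    # result: a Warning status is reported as a failure with an annotated message,
+    # Failed fails the task, and Completed marks it as changed.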
- - name: "Set job fact details if the task status is warning."
- set_fact:
- ome_firmware_job_fact: "{{ job_result | combine(job_msg, recursive=true) }}"
- failed_when: job_result.job_info.LastRunStatus.Name == 'Warning'
- vars:
- job_msg: {'msg': 'Completed with {{ job_result.job_info.LastRunStatus.Name|lower}}'}
- when: job_result.job_info.LastRunStatus.Name == 'Warning'
- tags: "{{ all_firmware_task_tags }}"
+ - name: Set job fact details if the task status is warning.
+ ansible.builtin.set_fact:
+ ome_firmware_job_fact: "{{ job_result | combine(job_msg, recursive=true) }}"
+ failed_when: job_result.job_info.LastRunStatus.Name == 'Warning'
+ vars:
+ job_msg:
+ {
+ "msg": "Completed with {{ job_result.job_info.LastRunStatus.Name | lower }}",
+ }
+ when: job_result.job_info.LastRunStatus.Name == 'Warning'
+ tags: "{{ all_firmware_task_tags }}"
- - name: "Set job fact details if the task status is completed or failed."
- set_fact:
- ome_firmware_job_fact: "{{ job_result }}"
- failed_when: job_result.job_info.LastRunStatus.Name == 'Failed'
- changed_when: job_result.job_info.LastRunStatus.Name == 'Completed'
- when: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
- tags: "{{ all_firmware_task_tags }}" \ No newline at end of file
+ - name: Set job fact details if the task status is completed or failed.
+ ansible.builtin.set_fact:
+ ome_firmware_job_fact: "{{ job_result }}"
+ failed_when: job_result.job_info.LastRunStatus.Name == 'Failed'
+ changed_when: job_result.job_info.LastRunStatus.Name == 'Completed'
+ when: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
+ tags: "{{ all_firmware_task_tags }}"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_all_groups.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_all_groups.yml
new file mode 100644
index 000000000..18a52b8be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_all_groups.yml
@@ -0,0 +1,6 @@
+---
+# To retrieve host details of all the groups.
+plugin: dellemc.openmanage.ome_inventory
+hostname: "192.168.0.1"
+username: username
+password: password
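+#
+# Usage sketch (hypothetical invocation; assumes ansible-inventory accepts this
+# file as an inventory source):
+#   ansible-inventory -i ome_inventory_all_groups.yml --graph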
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_group_with_host_vars.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_group_with_host_vars.yml
new file mode 100644
index 000000000..84d08b277
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_group_with_host_vars.yml
@@ -0,0 +1,10 @@
+---
+# To set host variables for the hosts of a specific group.
+plugin: dellemc.openmanage.ome_inventory
+hostname: "192.168.0.3"
+username: username
+password: password
+ome_group_name: group_name
+host_vars:
+ idrac_user: username
+ idrac_password: password
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_groups.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_groups.yml
new file mode 100644
index 000000000..83e713955
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_by_groups.yml
@@ -0,0 +1,7 @@
+---
+# To retrieve host details of a specific group.
+plugin: dellemc.openmanage.ome_inventory
+hostname: "192.168.0.2"
+username: username
+password: password
+ome_group_name: group_name
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_groups_with_host_and_group_vars.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_groups_with_host_and_group_vars.yml
new file mode 100644
index 000000000..2fc9e45db
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/inventory/ome_inventory_groups_with_host_and_group_vars.yml
@@ -0,0 +1,14 @@
+---
+# To set host variables and multiple group variables.
+plugin: dellemc.openmanage.ome_inventory
+hostname: "192.168.0.4"
+username: username
+password: password
+host_vars:
+ idrac_user: username
+ idrac_password: password
+group_vars:
+ group_name:
+ attribute: value
+ group_name_one:
+ new_attribute: new_value
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml
index 160118093..70bec68bb 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_active_directory.yml
@@ -1,15 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible Active Directory service configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible Active Directory service configuration.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Add Active Directory service using DNS lookup along with the test connection
- ome_active_directory:
+ dellemc.openmanage.ome_active_directory:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -18,12 +14,13 @@
domain_server:
- domainname.com
group_domain: domainname.com
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
+ delegate_to: localhost
- name: Add Active Directory service using IP address of the domain controller with certificate validation
- ome_active_directory:
+ dellemc.openmanage.ome_active_directory:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -33,11 +30,12 @@
domain_server:
- 192.68.20.181
group_domain: domainname.com
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
+ delegate_to: localhost
- name: Modify domain controller IP address, network_timeout and group_domain
- ome_active_directory:
+ dellemc.openmanage.ome_active_directory:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -48,25 +46,28 @@
- 192.68.20.189
group_domain: newdomain.in
network_timeout: 150
+ delegate_to: localhost
- name: Delete Active Directory service
- ome_active_directory:
+ dellemc.openmanage.ome_active_directory:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
name: my_ad2
state: absent
+ delegate_to: localhost
- name: Test connection to existing Active Directory service with certificate validation
- ome_active_directory:
+ dellemc.openmanage.ome_active_directory:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
name: my_ad2
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml
index 0099fc800..d696f35e3 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_chassis_slots.yml
@@ -1,15 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible slot name configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible slot name configuration.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Rename the slots in multiple chassis using slot number and chassis service tag.
- ome_chassis_slots:
+ dellemc.openmanage.ome_chassis_slots:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -27,9 +23,10 @@
slot_name: sled_name_1
- slot_number: 2
slot_name: sled_name_2
+ delegate_to: localhost
- name: Rename single slot name of the sled using sled ID
- ome_chassis_slots:
+ dellemc.openmanage.ome_chassis_slots:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -37,9 +34,10 @@
device_options:
- device_id: 10054
slot_name: slot_device_name_1
+ delegate_to: localhost
- name: Rename single slot name of the sled using sled service tag
- ome_chassis_slots:
+ dellemc.openmanage.ome_chassis_slots:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -47,9 +45,10 @@
device_options:
- device_service_tag: ABC1234
slot_name: service_tag_slot
+ delegate_to: localhost
- name: Rename multiple slot names of the devices
- ome_chassis_slots:
+ dellemc.openmanage.ome_chassis_slots:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -63,3 +62,4 @@
slot_name: sled_name_3
- device_service_tag: PQR1234
slot_name: sled_name_4
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml
index d7af342ab..341becdc8 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_group.yml
@@ -1,27 +1,24 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible device inventory details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible device inventory details.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Add devices to a static device group by using the group name and device IDs
- ome_device_group:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_device_group:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
name: "Storage Services"
device_ids:
- 11111
- 11112
tags: device-id
+ delegate_to: localhost
- name: Add devices to a static device group by using the group name and device service tags
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -31,9 +28,10 @@
- GHRT2R
- KJHDF3
tags: device-service-tags
+ delegate_to: localhost
- name: Add devices to a static device group by using the group ID and device service tags
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -43,9 +41,10 @@
- GHRT2R
- KJHDF3
tags: group_id_device-service-tags
+ delegate_to: localhost
- name: Add devices to a static device group by using the group name and IPv4 addresses
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -55,9 +54,10 @@
- 192.35.0.1
- 192.35.0.5
tags: group_name_ipv4
+ delegate_to: localhost
- name: Add devices to a static device group by using the group ID and IPv6 addresses
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -67,9 +67,10 @@
- fe80::ffff:ffff:ffff:ffff
- fe80::ffff:ffff:ffff:2222
tags: group_id_ipv6
+ delegate_to: localhost
- name: Add devices to a static device group by using the group ID and supported IPv4 and IPv6 address formats.
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -83,12 +84,13 @@
- ::ffff:192.0.2.0/125
- fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff
tags: group_id_ipv4_ipv6
-
+ delegate_to: localhost
+
- name: Remove devices from a static device group by using the group name and device IDs
- ome_device_group:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_device_group:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
name: "Storage Services"
@@ -96,9 +98,10 @@
- 11111
- 11112
tags: device-id
+ delegate_to: localhost
- name: Remove devices from a static device group by using the group name and device service tags
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -109,9 +112,10 @@
- GHRT2R
- KJHDF3
tags: device-service-tags
+ delegate_to: localhost
- name: Remove devices from a static device group by using the group ID and device service tags
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -122,9 +126,10 @@
- GHRT2R
- KJHDF3
tags: group_id_device-service-tags
+ delegate_to: localhost
- name: Remove devices from a static device group by using the group name and IPv4 addresses
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -135,9 +140,10 @@
- 192.35.0.1
- 192.35.0.5
tags: group_name_ipv4
+ delegate_to: localhost
- name: Remove devices from a static device group by using the group ID and IPv6 addresses
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -148,9 +154,10 @@
- fe80::ffff:ffff:ffff:ffff
- fe80::ffff:ffff:ffff:2222
tags: group_id_ipv6
+ delegate_to: localhost
- name: Remove devices from a static device group by using the group ID and supported IPv4 and IPv6 address formats.
- ome_device_group:
+ dellemc.openmanage.ome_device_group:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -165,3 +172,4 @@
- ::ffff:192.0.2.0/125
- fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff
tags: group_id_ipv4_ipv6
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml
index 6b3077490..729021694 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_info.yml
@@ -1,22 +1,19 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible device inventory details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible device inventory details.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Retrieve basic inventory of all devices.
- ome_device_info:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_device_info:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
- name: Retrieve basic inventory for devices identified by IDs 33333 or 11111 using filtering.
- ome_device_info:
+ dellemc.openmanage.ome_device_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -24,10 +21,11 @@
fact_subset: "basic_inventory"
system_query_options:
filter: "Id eq 33333 or Id eq 11111"
+ delegate_to: localhost
- name: Retrieve inventory details of specified devices identified by IDs 11111 and 22222.
- ome_device_info:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_device_info:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
@@ -36,9 +34,10 @@
device_id:
- 11111
- 22222
-
+ delegate_to: localhost
+
- name: Retrieve inventory details of specified devices identified by service tags MXL1234 and MXL4567.
- ome_device_info:
+ dellemc.openmanage.ome_device_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -48,9 +47,10 @@
device_service_tag:
- MXL1234
- MXL4567
+ delegate_to: localhost
- name: Retrieve details of specified inventory type of specified devices identified by ID and service tags.
- ome_device_info:
+ dellemc.openmanage.ome_device_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -63,9 +63,10 @@
- MXL1234
- MXL4567
inventory_type: "serverDeviceCards"
+ delegate_to: localhost
- name: Retrieve subsystem health of specified devices identified by service tags.
- ome_device_info:
+ dellemc.openmanage.ome_device_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -75,5 +76,4 @@
device_service_tag:
- MXL1234
- MXL4567
-
-
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml
index 6f282c8a5..7d0484531 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_local_access_configuration.yml
@@ -1,68 +1,68 @@
---
-- hosts: ome
- connection: local
- name: OpenManage Ansible Modules for local access settings.
+- name: OpenManage Ansible Modules for local access settings.
+ hosts: ome
gather_facts: false
- collections: dellemc.openmanage
tasks:
+ - name: Configure KVM, direct access and power button settings of the chassis using device ID.
+ dellemc.openmanage.ome_device_local_access_configuration:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ enable_kvm_access: true
+ enable_chassis_direct_access: false
+ chassis_power_button:
+ enable_chassis_power_button: false
+ enable_lcd_override_pin: true
+ disabled_button_lcd_override_pin: "123456"
+ tags: lac-device-id
+ delegate_to: localhost
- - name: Configure KVM, direct access and power button settings of the chassis using device ID.
- ome_device_local_access_configuration:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25011
- enable_kvm_access: true
- enable_chassis_direct_access: false
- chassis_power_button:
- enable_chassis_power_button: false
- enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
- tags: lac-device-id
+ - name: Configure Quick sync and LCD settings of the chassis using device service tag.
+ dellemc.openmanage.ome_device_local_access_configuration:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ quick_sync:
+ quick_sync_access: READ_ONLY
+ enable_read_authentication: true
+ enable_quick_sync_wifi: true
+ enable_inactivity_timeout: true
+ timeout_limit: 10
+ timeout_limit_unit: MINUTES
+ lcd:
+ lcd_access: VIEW_ONLY
+ lcd_language: en
+ user_defined: "LCD Text"
+ tags: lac-tag
+ delegate_to: localhost
- - name: Configure Quick sync and LCD settings of the chassis using device service tag.
- ome_device_local_access_configuration:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: GHRT2RL
- quick_sync:
- quick_sync_access: READ_ONLY
- enable_read_authentication: true
- enable_quick_sync_wifi: true
- enable_inactivity_timeout: true
- timeout_limit: 10
- timeout_limit_unit: MINUTES
- lcd:
- lcd_access: VIEW_ONLY
- lcd_language: en
- user_defined: "LCD Text"
- tags: lac-tag
-
- - name: Configure all local access settings of the host chassis.
- ome_device_local_access_configuration:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- enable_kvm_access: true
- enable_chassis_direct_access: false
- chassis_power_button:
- enable_chassis_power_button: false
- enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
- quick_sync:
- quick_sync_access: READ_WRITE
- enable_read_authentication: true
- enable_quick_sync_wifi: true
- enable_inactivity_timeout: true
- timeout_limit: 120
- timeout_limit_unit: SECONDS
- lcd:
- lcd_access: VIEW_MODIFY
- lcd_language: en
- user_defined: "LCD Text"
- tags: lac-host
+ - name: Configure all local access settings of the host chassis.
+ dellemc.openmanage.ome_device_local_access_configuration:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_kvm_access: true
+ enable_chassis_direct_access: false
+ chassis_power_button:
+ enable_chassis_power_button: false
+ enable_lcd_override_pin: true
+ disabled_button_lcd_override_pin: "123456"
+ quick_sync:
+ quick_sync_access: READ_WRITE
+ enable_read_authentication: true
+ enable_quick_sync_wifi: true
+ enable_inactivity_timeout: true
+ timeout_limit: 120
+ timeout_limit_unit: SECONDS
+ lcd:
+ lcd_access: VIEW_MODIFY
+ lcd_language: en
+ user_defined: "LCD Text"
+ tags: lac-host
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml
index d2d860508..d68ba0944 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_location.yml
@@ -1,52 +1,52 @@
---
-- hosts: ome
- connection: local
- name: OpenManage Ansible Modules
+- name: OpenManage Ansible Modules
+ hosts: ome
gather_facts: false
- collections: dellemc.openmanage
tasks:
+ - name: Update device location settings of a chassis using the device ID.
+ dellemc.openmanage.ome_device_location:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ data_center: data center 1
+ room: room 1
+ aisle: aisle 1
+ rack: rack 1
+ rack_slot: 2
+ location: location 1
+ tags: location-device-id
+ delegate_to: localhost
- - name: Update device location settings of a chassis using the device ID.
- ome_device_location:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25011
- data_center: data center 1
- room: room 1
- aisle: aisle 1
- rack: rack 1
- rack_slot: 2
- location: location 1
- tags: location-device-id
+ - name: Update device location settings of a chassis using the device service tag.
+ dellemc.openmanage.ome_device_location:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ data_center: data center 1
+ room: room 1
+ aisle: aisle 1
+ rack: rack 1
+ rack_slot: 2
+ location: location 1
+ tags: location-device-service-tag
+ delegate_to: localhost
- - name: Update device location settings of a chassis using the device service tag.
- ome_device_location:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: GHRT2RL
- data_center: data center 1
- room: room 1
- aisle: aisle 1
- rack: rack 1
- rack_slot: 2
- location: location 1
- tags: location-device-service-tag
-
- - name: Update device location settings of the host chassis.
- ome_device_location:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- data_center: data center 1
- room: room 1
- aisle: aisle 1
- rack: rack 1
- rack_slot: 2
- location: location 1
- tags: location-chassis
+ - name: Update device location settings of the host chassis.
+ dellemc.openmanage.ome_device_location:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ data_center: data center 1
+ room: room 1
+ aisle: aisle 1
+ rack: rack 1
+ rack_slot: 2
+ location: location 1
+ tags: location-chassis
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml
index e05a3772d..048f99ff8 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_mgmt_network.yml
@@ -1,105 +1,105 @@
---
-- hosts: ome
- connection: local
- name: Dell OME Modular device network settings.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OME Modular device network settings.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Network settings for chassis
- ome_device_mgmt_network:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: CHAS123
- delay: 10
- ipv4_configuration:
- enable_ipv4: true
- enable_dhcp: false
- static_ip_address: 192.168.0.2
- static_subnet_mask: 255.255.254.0
- static_gateway: 192.168.0.3
- use_dhcp_to_obtain_dns_server_address: false
- static_preferred_dns_server: 192.168.0.4
- static_alternate_dns_server: 192.168.0.5
- ipv6_configuration:
- enable_ipv6: true
- enable_auto_configuration: false
- static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
- static_prefix_length: 10
- static_gateway: ffff::2607:f2b1:f081:9
- use_dhcpv6_to_obtain_dns_server_address: false
- static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
- static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
- dns_configuration:
- register_with_dns: true
- use_dhcp_for_dns_domain_name: false
- dns_name: MX-SVCTAG
- dns_domain_name: dnslocaldomain
- auto_negotiation: no
- network_speed: 100_MB
+ - name: Network settings for chassis
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: CHAS123
+ delay: 10
+ ipv4_configuration:
+ enable_ipv4: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable_ipv6: true
+ enable_auto_configuration: false
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ use_dhcpv6_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ dns_configuration:
+ register_with_dns: true
+ use_dhcp_for_dns_domain_name: false
+ dns_name: MX-SVCTAG
+ dns_domain_name: dnslocaldomain
+ auto_negotiation: false
+ network_speed: 100_MB
+ delegate_to: localhost
- - name: Network settings for server
- ome_device_mgmt_network:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: SRVR123
- ipv4_configuration:
- enable_ipv4: true
- enable_dhcp: false
- static_ip_address: 192.168.0.2
- static_subnet_mask: 255.255.254.0
- static_gateway: 192.168.0.3
- use_dhcp_to_obtain_dns_server_address: false
- static_preferred_dns_server: 192.168.0.4
- static_alternate_dns_server: 192.168.0.5
- ipv6_configuration:
- enable_ipv6: true
- enable_auto_configuration: false
- static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
- static_prefix_length: 10
- static_gateway: ffff::2607:f2b1:f081:9
- use_dhcpv6_to_obtain_dns_server_address: false
- static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
- static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ - name: Network settings for server
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: SRVR123
+ ipv4_configuration:
+ enable_ipv4: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ use_dhcp_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 192.168.0.4
+ static_alternate_dns_server: 192.168.0.5
+ ipv6_configuration:
+ enable_ipv6: true
+ enable_auto_configuration: false
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ use_dhcpv6_to_obtain_dns_server_address: false
+ static_preferred_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:3
+ static_alternate_dns_server: 2626:f2f2:f081:9:1c1c:f1f1:4747:4
+ delegate_to: localhost
- - name: Network settings for I/O module
- ome_device_mgmt_network:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: IOM1234
- ipv4_configuration:
- enable_ipv4: true
- enable_dhcp: false
- static_ip_address: 192.168.0.2
- static_subnet_mask: 255.255.254.0
- static_gateway: 192.168.0.3
- ipv6_configuration:
- enable_ipv6: true
- enable_auto_configuration: false
- static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
- static_prefix_length: 10
- static_gateway: ffff::2607:f2b1:f081:9
- dns_server_settings:
- preferred_dns_server: 192.168.0.4
- alternate_dns_server1: 192.168.0.5
+ - name: Network settings for I/O module
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: IOM1234
+ ipv4_configuration:
+ enable_ipv4: true
+ enable_dhcp: false
+ static_ip_address: 192.168.0.2
+ static_subnet_mask: 255.255.254.0
+ static_gateway: 192.168.0.3
+ ipv6_configuration:
+ enable_ipv6: true
+ enable_auto_configuration: false
+ static_ip_address: 2626:f2f2:f081:9:1c1c:f1f1:4747:1
+ static_prefix_length: 10
+ static_gateway: ffff::2607:f2b1:f081:9
+ dns_server_settings:
+ preferred_dns_server: 192.168.0.4
+ alternate_dns_server1: 192.168.0.5
+ delegate_to: localhost
- - name: Management VLAN configuration of chassis using device id
- ome_device_mgmt_network:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 12345
- management_vlan:
- enable_vlan: true
- vlan_id: 2345
- dns_configuration:
- register_with_dns: false
\ No newline at end of file
+ - name: Management VLAN configuration of chassis using device id
+ dellemc.openmanage.ome_device_mgmt_network:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 12345
+ management_vlan:
+ enable_vlan: true
+ vlan_id: 2345
+ dns_configuration:
+ register_with_dns: false
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml
index 0a47d2ddc..8de818434 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_network_services.yml
@@ -1,59 +1,59 @@
---
-- hosts: ome
- connection: local
- name: OpenManage Ansible Modules for network services settings.
+- name: OpenManage Ansible Modules for network services settings.
+ hosts: ome
gather_facts: false
- collections: dellemc.openmanage
tasks:
+ - name: Update network services settings of a chassis using the device ID.
+ dellemc.openmanage.ome_device_network_services:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ snmp_settings:
+ enabled: true
+ port_number: 161
+ community_name: public
+ ssh_settings:
+ enabled: false
+ remote_racadm_settings:
+ enabled: false
+ tags: snmp-settings
+ delegate_to: localhost
- - name: Update network services settings of a chassis using the device ID.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25011
- snmp_settings:
- enabled: true
- port_number: 161
- community_name: public
- ssh_settings:
- enabled: false
- remote_racadm_settings:
- enabled: false
- tags: snmp-settings
+ - name: Update network services settings of a chassis using the device service tag.
+ dellemc.openmanage.ome_device_network_services:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ snmp_settings:
+ enabled: false
+ ssh_settings:
+ enabled: true
+ port_number: 22
+ max_sessions: 1
+ max_auth_retries: 3
+ idle_timeout: 1
+ remote_racadm_settings:
+ enabled: false
+ tags: ssh-settings
+ delegate_to: localhost
- - name: Update network services settings of a chassis using the device service tag.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: GHRT2RL
- snmp_settings:
- enabled: false
- ssh_settings:
- enabled: true
- port_number: 22
- max_sessions: 1
- max_auth_retries: 3
- idle_timeout: 1
- remote_racadm_settings:
- enabled: false
- tags: ssh-settings
-
- - name: Update network services settings of the host chassis.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25012
- snmp_settings:
- enabled: false
- ssh_settings:
- enabled: false
- remote_racadm_settings:
- enabled: true
- tags: racadm-settings
+ - name: Update network services settings of the host chassis.
+ dellemc.openmanage.ome_device_network_services:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25012
+ snmp_settings:
+ enabled: false
+ ssh_settings:
+ enabled: false
+ remote_racadm_settings:
+ enabled: true
+ tags: racadm-settings
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml
index 4b68a29ba..cb3a9ad8c 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_power_settings.yml
@@ -1,54 +1,55 @@
---
-- hosts: ome
- connection: local
- name: OpenManage Ansible Modules
+- name: OpenManage Ansible Modules
+ hosts: ome
gather_facts: false
- collections: dellemc.openmanage
tasks:
+ - name: Update power configuration settings of a chassis using the device ID.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ power_configuration:
+ enable_power_cap: true
+ power_cap: 3424
+ tags: power-config
+ delegate_to: localhost
- - name: Update power configuration settings of a chassis using the device ID.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25011
- power_configuration:
- enable_power_cap: true
- power_cap: 3424
- tags: power-config
+ - name: Update redundancy configuration settings of a chassis using the device service tag.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ redundancy_configuration:
+ redundancy_policy: GRID_REDUNDANCY
+ tags: redundancy-config
+ delegate_to: localhost
- - name: Update redundancy configuration settings of a chassis using the device service tag.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: GHRT2RL
- redundancy_configuration:
- redundancy_policy: GRID_REDUNDANCY
- tags: redundancy-config
+ - name: Update hot spare configuration settings of a chassis using device ID.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25012
+ hot_spare_configuration:
+ enable_hot_spare: true
+ primary_grid: GRID_1
+ tags: hostspare-config
+ delegate_to: localhost
- - name: Update hot spare configuration settings of a chassis using device ID.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25012
- hot_spare_configuration:
- enable_hot_spare: true
- primary_grid: GRID_1
- tags: hostspare-config
-
- - name: Update power configuration settings of a host chassis.
- ome_device_power_settings:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- power_configuration:
- enable_power_cap: true
- power_cap: 3425
- tags: power-config-chassis
+ - name: Update power configuration settings of a host chassis.
+ dellemc.openmanage.ome_device_power_settings:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ power_configuration:
+ enable_power_cap: true
+ power_cap: 3425
+ tags: power-config-chassis
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml
index 71a07e685..8c54d3db3 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_device_quick_deploy.yml
@@ -1,66 +1,65 @@
---
-- hosts: ome
- connection: local
- name: OpenManage Ansible Modules for Quick Deploy settings.
+- name: OpenManage Ansible Modules for Quick Deploy settings.
+ hosts: ome
gather_facts: false
- collections: dellemc.openmanage
tasks:
+ - name: Configure server Quick Deploy settings of the chassis using device ID.
+ dellemc.openmanage.ome_device_quick_deploy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25011
+ setting_type: ServerQuickDeploy
+ quick_deploy_options:
+ password: "password"
+ ipv4_enabled: true
+ ipv4_network_type: Static
+ ipv4_subnet_mask: 255.255.255.0
+ ipv4_gateway: 192.168.0.1
+ ipv6_enabled: true
+ ipv6_network_type: Static
+ ipv6_prefix_length: 1
+ ipv6_gateway: "::"
+ slots:
+ - slot_id: 1
+ slot_ipv4_address: 192.168.0.2
+ slot_ipv6_address: "::"
+ vlan_id: 1
+ - slot_id: 2
+ slot_ipv4_address: 192.168.0.3
+ slot_ipv6_address: "::"
+ vlan_id: 2
+ tags: server-quick-deploy
+ delegate_to: localhost
- - name: Configure server Quick Deploy settings of the chassis using device ID.
- ome_device_quick_deploy:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 25011
- setting_type: ServerQuickDeploy
- quick_deploy_options:
- password: "password"
- ipv4_enabled: True
- ipv4_network_type: Static
- ipv4_subnet_mask: 255.255.255.0
- ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
- ipv6_network_type: Static
- ipv6_prefix_length: 1
- ipv6_gateway: "::"
- slots:
- - slot_id: 1
- slot_ipv4_address: 192.168.0.2
- slot_ipv6_address: "::"
- vlan_id: 1
- - slot_id: 2
- slot_ipv4_address: 192.168.0.3
- slot_ipv6_address: "::"
- vlan_id: 2
- tags: server-quick-deploy
-
- - name: Configure server Quick Deploy settings of the chassis using device service tag.
- ome_device_quick_deploy:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: GHRT2RL
- setting_type: IOMQuickDeploy
- quick_deploy_options:
- password: "password"
- ipv4_enabled: True
- ipv4_network_type: Static
- ipv4_subnet_mask: 255.255.255.0
- ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
- ipv6_network_type: Static
- ipv6_prefix_length: 1
- ipv6_gateway: "::"
- slots:
- - slot_id: 1
- slot_ipv4_address: 192.168.0.2
- slot_ipv6_address: "::"
- vlan_id: 1
- - slot_id: 2
- slot_ipv4_address: 192.168.0.3
- slot_ipv6_address: "::"
- vlan_id: 2
- tags: iom-quick-deploy
+ - name: Configure server Quick Deploy settings of the chassis using device service tag.
+ dellemc.openmanage.ome_device_quick_deploy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: GHRT2RL
+ setting_type: IOMQuickDeploy
+ quick_deploy_options:
+ password: "password"
+ ipv4_enabled: true
+ ipv4_network_type: Static
+ ipv4_subnet_mask: 255.255.255.0
+ ipv4_gateway: 192.168.0.1
+ ipv6_enabled: true
+ ipv6_network_type: Static
+ ipv6_prefix_length: 1
+ ipv6_gateway: "::"
+ slots:
+ - slot_id: 1
+ slot_ipv4_address: 192.168.0.2
+ slot_ipv6_address: "::"
+ vlan_id: 1
+ - slot_id: 2
+ slot_ipv4_address: 192.168.0.3
+ slot_ipv6_address: "::"
+ vlan_id: 2
+ tags: iom-quick-deploy
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml
index ba93eb006..d5ba7cb6c 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_devices.yml
@@ -1,60 +1,61 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible device operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible device operations.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Refresh Inventory
- ome_devices:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_devices:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_action: refresh_inventory
device_service_tags:
- 2HB7NX2
+ delegate_to: localhost
- name: Clear iDRAC job queue
- ome_devices:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_devices:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_action: clear_idrac_job_queue
device_service_tags:
- 2HB7NX2
+ delegate_to: localhost
- name: Reset iDRAC using the service tag
- ome_devices:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_devices:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_action: reset_idrac
device_service_tags:
- 2H7HNX2
+ delegate_to: localhost
- name: Remove devices using servicetags
- ome_devices:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_devices:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: absent
device_service_tags:
- SVCTAG1
- SVCTAF2
+ delegate_to: localhost
- name: Remove devices using IDs
- ome_devices:
- hostname: "{{ hostname }}"
+ dellemc.openmanage.ome_devices:
+ hostname: "{{ hostname }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: absent
device_ids:
- 10235
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml
index b5f0fc970..dae4e5dbf 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_diagnostics.yml
@@ -1,15 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible diagnostics operation.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible diagnostics operation.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Application log extraction using CIFS share location
- ome_diagnostics:
+ dellemc.openmanage.ome_diagnostics:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -23,9 +19,10 @@
mask_sensitive_info: false
test_connection: true
tags: app-cifs-log
+ delegate_to: localhost
- name: Application log extraction using NFS share location
- ome_diagnostics:
+ dellemc.openmanage.ome_diagnostics:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -37,9 +34,10 @@
mask_sensitive_info: true
test_connection: true
tags: app-nfs-log
+ delegate_to: localhost
- name: Support assist log extraction using CIFS share location
- ome_diagnostics:
+ dellemc.openmanage.ome_diagnostics:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -56,9 +54,10 @@
log_selectors: [OS_LOGS]
test_connection: true
tags: tsr-cifs-log
+ delegate_to: localhost
- name: Support assist log extraction using NFS share location
- ome_diagnostics:
+ dellemc.openmanage.ome_diagnostics:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -70,3 +69,4 @@
device_group_name: group_name
test_connection: true
tags: tsr-nfs-log
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml
index 1a16e3287..b524ab3f3 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_discovery.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible discovery operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible discovery operations.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Discover servers in a range
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
discovery_job_name: "Discovery_server_1"
discovery_config_targets:
@@ -25,12 +21,13 @@
password: password
tags:
- server_discovery
+ delegate_to: localhost
- name: Discover chassis in a range
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
discovery_job_name: "Discovery_chassis_1"
discovery_config_targets:
@@ -43,12 +40,13 @@
password: password
tags:
- chassis_discovery
+ delegate_to: localhost
- name: Discover switches in a range
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
discovery_job_name: "Discover_switch_1"
discovery_config_targets:
@@ -60,12 +58,13 @@
community: snmp_creds
tags:
- switch_discovery
+ delegate_to: localhost
- name: Discover storage in a range
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
discovery_job_name: "Discover_storage_1"
discovery_config_targets:
@@ -80,23 +79,25 @@
community: community_str
tags:
- storage_discovery
+ delegate_to: localhost
- name: Delete a discovery job
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
discovery_job_name: "Discovery-123"
tags:
- delete_discovery
+ delegate_to: localhost
- name: Schedule the discovery of multiple devices ignoring partial failure and enable trap to receive alerts
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
discovery_job_name: "Discovery-123"
@@ -136,18 +137,19 @@
password: ipmi_pwd
schedule: RunLater
cron: "0 0 9 ? * MON,WED,FRI *"
- ignore_partial_failure: True
- trap_destination: True
- community_string: True
+ ignore_partial_failure: true
+ trap_destination: true
+ community_string: true
email_recipient: test_email@company.com
tags:
- schedule_discovery
+ delegate_to: localhost
- name: Discover servers with ca check enabled
- ome_discovery:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_discovery:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
discovery_job_name: "Discovery_server_ca1"
discovery_config_targets:
@@ -158,13 +160,14 @@
wsman:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}"
tags:
- server_ca_check
+ delegate_to: localhost
- name: Discover chassis with ca check enabled data
- ome_discovery:
+ dellemc.openmanage.ome_discovery:
hostname: "192.168.0.1"
username: "username"
password: "password"
@@ -178,12 +181,13 @@
redfish:
username: user
password: password
- ca_check: True
- certificate_data: "-----BEGIN CERTIFICATE-----\r\n
- ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
- ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
- ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
- aqwertyuiopasdfghjklzxcvbnmasdasagasvv=\r\n
- -----END CERTIFICATE-----"
+ ca_check: true
+ certificate_data: " -----BEGIN CERTIFICATE-----
+ ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv
+ ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv
+ ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv
+ aqwertyuiopasdfghjklzxcvbnmasdasagasvv=
+ -----END CERTIFICATE----- "
tags:
- - chassis_ca_check_data
\ No newline at end of file
+ - chassis_ca_check_data
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml
index 7229f6385..e39dedff8 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_domain_user_groups.yml
@@ -1,16 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible AD directory user group operation.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible AD directory user group operation.
+ hosts: ome
+ gather_facts: false
tasks:
-
- name: Create Active Directory user groups.
- ome_domain_user_groups:
+ dellemc.openmanage.ome_domain_user_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -22,9 +17,10 @@
domain_username: username@domain
domain_password: domain_password
tags: user-group-add
+ delegate_to: localhost
- name: Create Active Directory user groups with different domain format.
- ome_domain_user_groups:
+ dellemc.openmanage.ome_domain_user_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -36,9 +32,10 @@
domain_username: domain\\username
domain_password: domain_password
tags: user-group-add-domain
+ delegate_to: localhost
- name: Update Active Directory user groups.
- ome_domain_user_groups:
+ dellemc.openmanage.ome_domain_user_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -47,9 +44,10 @@
group_name: account operators
role: chassis administrator
tags: user-group-update
+ delegate_to: localhost
- name: Remove Active Directory user groups.
- ome_domain_user_groups:
+ dellemc.openmanage.ome_domain_user_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -57,3 +55,44 @@
state: "absent"
group_name: "Administrators"
tags: user-group-remove
+ delegate_to: localhost
+
+ - name: Create LDAP user groups.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ group_name: group_name1
+ directory_id: 10779
+ directory_type: LDAP
+ role: VIEWER
+ domain_username: domain_username
+ domain_password: domain_password
+ delegate_to: localhost
+
+ - name: Create LDAP user groups with different domain format.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: present
+ group_name: group_name1
+ directory_name: directory_name
+ directory_type: LDAP
+ role: administrator
+ domain_username: domain\\username
+ domain_password: domain_password
+ delegate_to: localhost
+
+ - name: Remove LDAP user groups.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "absent"
+ group_name: group_name1
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml
index 08b03786e..95334b010 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_group_device_action.yml
@@ -1,12 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible group device operations.
- gather_facts: False
+- name: Dell OpenManage Ansible group device operations.
+ hosts: ome
+ gather_facts: false
vars:
group_name: Dell iDRAC Servers
- device_action: refresh_inventory #other options are clear_idrac_job_queue, reset_idrac
- validate_certs: True
+ device_action: refresh_inventory # other options are clear_idrac_job_queue, reset_idrac
+ validate_certs: true
ca_path: "/path/to/ca_cert.pem"
tasks:
@@ -16,20 +15,22 @@
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200
- return_content: yes
+ return_content: true
validate_certs: "{{ validate_certs }}"
ca_path: "{{ ca_path }}"
- force_basic_auth: yes
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
register: group_id
+ delegate_to: localhost
- name: Assign group ID to a variable.
- set_fact:
+ ansible.builtin.set_fact:
group_id_value: "{{ group_id.json.value[0].Id }}"
+ delegate_to: localhost
- name: Retrieve all devices under the group ID.
ansible.builtin.uri:
@@ -37,28 +38,31 @@
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200
- return_content: yes
+ return_content: true
validate_certs: "{{ validate_certs }}"
ca_path: "{{ ca_path }}"
- force_basic_auth: yes
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
register: all_devices
+ delegate_to: localhost
- name: Empty list to store device IDs.
- set_fact:
+ ansible.builtin.set_fact:
devices_list: []
+ delegate_to: localhost
- name: Add devices retrieved from a group to the list.
- set_fact:
+ ansible.builtin.set_fact:
devices_list: "{{ devices_list + [item.Id] }}"
with_items:
- "{{ all_devices.json.value }}"
+ delegate_to: localhost
- - name: Perform device action tasks on devices.
+ - name: Perform device action tasks on devices. # noqa: args[module]
dellemc.openmanage.ome_devices:
hostname: "{{ hostname }}"
username: "{{ username }}"
@@ -67,3 +71,4 @@
ca_path: "{{ ca_path }}"
device_action: "{{ device_action }}"
device_ids: "{{ devices_list }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml
index 027a53d09..ca6c61079 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_groups.yml
@@ -1,15 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible Group configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible Group configuration.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Create a new device group
- ome_groups:
+ dellemc.openmanage.ome_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -19,9 +15,10 @@
parent_group_name: "group parent 1"
tags:
- create_group
+ delegate_to: localhost
- name: Modify a device group using the group ID
- ome_groups:
+ dellemc.openmanage.ome_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -31,9 +28,10 @@
parent_group_name: "group parent 2"
tags:
- modify_group
+ delegate_to: localhost
- name: Delete a device group using the device group name
- ome_groups:
+ dellemc.openmanage.ome_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -42,9 +40,10 @@
name: "group 1"
tags:
- delete_name
+ delegate_to: localhost
- name: Delete multiple device groups using the group IDs
- ome_groups:
+ dellemc.openmanage.ome_groups:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -55,3 +54,4 @@
- 5678
tags:
- delete_ids
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml
index b5d960ca1..b55cb751d 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_identity_pool.yml
@@ -1,96 +1,96 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible identity pool operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible identity pool operations.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: "Create an identity pool using ethernet, FCoE, iSCSI and FC settings."
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Create an identity pool using ethernet, FCoE, iSCSI and FC settings.
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: present
pool_name: "pool1"
pool_description: "Identity pool with Ethernet, FCoE, ISCSI and FC settings"
ethernet_settings:
- starting_mac_address: "50:50:50:50:50:00"
- identity_count: 60
+ starting_mac_address: "50:50:50:50:50:00"
+ identity_count: 60
fcoe_settings:
- starting_mac_address: "aabb.ccdd.7070"
- identity_count: 75
+ starting_mac_address: "aabb.ccdd.7070"
+ identity_count: 75
iscsi_settings:
- starting_mac_address: "60:60:60:60:60:00"
- identity_count: 30
- initiator_config:
- iqn_prefix: "iqn.myprefix."
- initiator_ip_pool_settings:
- ip_range: "10.33.0.1-10.33.0.255"
- subnet_mask: "255.255.255.0"
- gateway: "192.168.4.1"
- primary_dns_server: "10.8.8.8"
- secondary_dns_server: "8.8.8.8"
+ starting_mac_address: "60:60:60:60:60:00"
+ identity_count: 30
+ initiator_config:
+ iqn_prefix: "iqn.myprefix."
+ initiator_ip_pool_settings:
+ ip_range: "10.33.0.1-10.33.0.255"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.4.1"
+ primary_dns_server: "10.8.8.8"
+ secondary_dns_server: "8.8.8.8"
fc_settings:
- starting_address: "10-10-10-10-10-10"
- identity_count: 45
+ starting_address: "10-10-10-10-10-10"
+ identity_count: 45
tags: create1
-
- - name: "Create an identity pool using only ethernet settings."
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ delegate_to: localhost
+
+ - name: Create an identity pool using only ethernet settings.
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
pool_name: "pool2"
pool_description: "Identity pool with ethernet"
ethernet_settings:
- starting_mac_address: "aa-bb-cc-dd-ee-aa"
- identity_count: 80
+ starting_mac_address: "aa-bb-cc-dd-ee-aa"
+ identity_count: 80
tags: create2
-
- - name: "Create an identity pool using only iSCSI settings"
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ delegate_to: localhost
+
+ - name: Create an identity pool using only iSCSI settings
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
pool_name: "pool3"
pool_description: "Identity pool with iscsi"
iscsi_settings:
- starting_mac_address: "10:10:10:10:10:00"
- identity_count: 30
- initiator_config:
- iqn_prefix: "iqn.myprefix."
- initiator_ip_pool_settings:
- ip_range: "20.33.0.1-20.33.0.255"
- subnet_mask: "255.255.255.0"
- gateway: "192.168.4.1"
- primary_dns_server: "10.8.8.8"
- secondary_dns_server: "8.8.8.8"
+ starting_mac_address: "10:10:10:10:10:00"
+ identity_count: 30
+ initiator_config:
+ iqn_prefix: "iqn.myprefix."
+ initiator_ip_pool_settings:
+ ip_range: "20.33.0.1-20.33.0.255"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.4.1"
+ primary_dns_server: "10.8.8.8"
+ secondary_dns_server: "8.8.8.8"
tags: create3
-
- - name: "Modify an identity pool using FC settings."
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ delegate_to: localhost
+
+ - name: Modify an identity pool using FC settings.
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
pool_name: "pool2"
pool_description: "Identity pool with fc_settings"
fc_settings:
- starting_address: "40:40:40:40:40:22"
- identity_count: 48
+ starting_address: "40:40:40:40:40:22"
+ identity_count: 48
tags: modify1
+ delegate_to: localhost
- - name: "Modify an identity pool."
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify an identity pool.
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
pool_name: "pool1"
new_pool_name: "pool_new"
@@ -102,12 +102,13 @@
starting_mac_address: "aabb.ccdd.5050"
identity_count: 77
tags: modify2
+ delegate_to: localhost
- - name: "Modify an identity pool using iSCSI and FC settings."
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify an identity pool using iSCSI and FC settings.
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
pool_name: "pool_new"
new_pool_name: "pool_new2"
@@ -122,13 +123,15 @@
starting_address: "10:10:10:10:10:10"
identity_count: 98
tags: modify3
+ delegate_to: localhost
- - name: "Delete an identity pool"
- ome_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Delete an identity pool
+ dellemc.openmanage.ome_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
pool_name: "pool1"
tags: delete
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml
index f90892adb..90cd83a44 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_job_info.yml
@@ -1,35 +1,44 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible job details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible job details.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Get all jobs details.
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
+ - name: Get all jobs details.
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
+
+ - name: Get job details for id.
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: 12345
+ delegate_to: localhost
- - name: Get job details for id.
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: 12345
+ - name: Get filtered job details.
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ top: 2
+ skip: 1
+ filter: "JobType/Id eq 8"
+ delegate_to: localhost
- - name: Get filtered job details.
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- system_query_options:
- top: 2
- skip: 1
- filter: "JobType/Id eq 8" \ No newline at end of file
+ - name: Get detail job execution history with last execution detail for a job.
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: 12345
+ fetch_execution_history: true
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml
index c9a8db75e..b6f8960b4 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout.yml
@@ -1,32 +1,29 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage ansible port breakout configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage ansible port breakout configuration.
+ hosts: ome
+ gather_facts: false
tasks:
-
- name: Port breakout configuration.
- ome_network_port_breakout:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_network_port_breakout:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
target_port: "2HB7NX2:phy-port1/1/11"
breakout_type: "1X40GE"
tags:
- port-config
+ delegate_to: localhost
- name: Revoke the default breakout configuration.
- ome_network_port_breakout:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_network_port_breakout:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
target_port: "2HB7NX2:phy-port1/1/11"
breakout_type: "HardwareDefault"
tags:
- port-default
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml
index b94b6b48a..9162ecc7d 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_port_breakout_job_traking.yml
@@ -1,29 +1,25 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage ansible port breakout configuration.
- gather_facts: False
+- name: Dell OpenManage ansible port breakout configuration.
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 50
polling_interval: 5 # in seconds
- collections:
- - dellemc.openmanage
-
tasks:
-
- name: Port breakout configuration.
- ome_network_port_breakout:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_network_port_breakout:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
target_port: "2HB7NX2:phy-port1/1/11"
breakout_type: "1X40GE"
register: result
+ delegate_to: localhost
- - name: "Get job details using job id from port breakout configuration task."
- ome_job_info:
+ - name: Get job details using job id from port breakout configuration task.
+ dellemc.openmanage.ome_job_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -35,3 +31,4 @@
until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml
index d92ef99ff..84234478a 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible VLAN operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible VLAN operations.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: "Create a VLAN range"
- ome_network_vlan:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Create a VLAN range
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: present
name: "vlan1"
@@ -21,12 +17,13 @@
vlan_minimum: 35
vlan_maximum: 40
tags: create_vlan_range
+ delegate_to: localhost
- - name: "Create a VLAN with a single value"
- ome_network_vlan:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Create a VLAN with a single value
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: present
name: "vlan2"
@@ -35,12 +32,13 @@
vlan_minimum: 127
vlan_maximum: 127
tags: create_vlan_single
+ delegate_to: localhost
- - name: "Modify a VLAN"
- ome_network_vlan:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify a VLAN
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: present
name: "vlan1"
@@ -50,13 +48,15 @@
vlan_minimum: 45
vlan_maximum: 50
tags: modify_vlan
+ delegate_to: localhost
- - name: "Delete a VLAN"
- ome_network_vlan:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Delete a VLAN
+ dellemc.openmanage.ome_network_vlan:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
name: "vlan1"
tags: delete_vlan
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml
index 3cf9c3c23..3b241b26a 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_network_vlan_info.yml
@@ -1,32 +1,31 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible OpenManage Enterprise network vlan details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible OpenManage Enterprise network vlan details.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Retrieve information about all network VLANs(s) available in the device.
- ome_network_vlan_info:
+ dellemc.openmanage.ome_network_vlan_info:
hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
- name: Retrieve information about a network VLAN using the VLAN ID.
- ome_network_vlan_info:
+ dellemc.openmanage.ome_network_vlan_info:
hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
id: 12345
+ delegate_to: localhost
- name: Retrieve information about a network VLAN using the VLAN name.
- ome_network_vlan_info:
+ dellemc.openmanage.ome_network_vlan_info:
hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
name: "Network VLAN - 1"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml
index 87c124b84..c6badbe2b 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_info.yml
@@ -1,15 +1,11 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible server interface profile information.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible server interface profile information.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Retrieves the server interface profiles of all the device using device ID.
- ome_server_interface_profile_info:
+ dellemc.openmanage.ome_server_interface_profile_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -19,9 +15,10 @@
- 10002
tags:
- sip-device-id
+ delegate_to: localhost
- name: Retrieves the server interface profiles of all the device using device service tag.
- ome_server_interface_profile_info:
+ dellemc.openmanage.ome_server_interface_profile_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -31,3 +28,4 @@
- 6KHH6H3
tags:
- sip-service-tag
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml
index 485a1a24b..1265aa152 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profile_workflow.yml
@@ -1,20 +1,15 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible server interface profile workflow.
- gather_facts: False
+- name: Dell OpenManage Ansible server interface profile workflow.
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 100
- polling_interval: 10 #in seconds
+ polling_interval: 10 # in seconds
src_service_tag: 7GHH6H1
- collections:
- - dellemc.openmanage
-
tasks:
-
- name: Create a smart fabric.
- ome_smart_fabric:
+ dellemc.openmanage.ome_smart_fabric:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -27,14 +22,16 @@
secondary_switch_service_tag: "59HW8X2"
override_LLDP_configuration: "Enabled"
register: fabric_result
+ delegate_to: localhost
- - name: "sleep for 300 seconds and continue with play"
- wait_for:
+ - name: Sleep for 300 seconds and continue with play # noqa: no-handler
+ ansible.builtin.wait_for:
timeout: 300
- when: fabric_result.changed == True
+      when: fabric_result.changed
+ delegate_to: localhost
- name: Create a template from a reference device service tag.
- ome_template:
+ dellemc.openmanage.ome_template:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -45,18 +42,20 @@
Description: "New Template description"
register: result
failed_when: "'return_id' not in result"
+ delegate_to: localhost
- - name: "Get the job id using return id from template."
- ome_template_info:
+ - name: Get the job id using return id from template.
+ dellemc.openmanage.ome_template_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
template_id: "{{ result.return_id }}"
register: facts_result
+ delegate_to: localhost
- - name: "Get job details using job id from template task."
- ome_job_info:
+ - name: Get job details using job id from template task.
+ dellemc.openmanage.ome_job_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -68,6 +67,7 @@
until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
- name: Deploy template on multiple devices
dellemc.openmanage.ome_template:
@@ -81,12 +81,15 @@
- 6GHH6H1
- 6GHH6H2
register: deploy_result
+ delegate_to: localhost
- - name: "sleep for 10 seconds and continue with play"
- wait_for: timeout=10
+ - name: Sleep for 10 seconds and continue with play
+ ansible.builtin.wait_for:
+ timeout: 10
+ delegate_to: localhost
- - name: "Track the deploy job till completion"
- ome_job_info:
+ - name: Track the deploy job till completion
+ dellemc.openmanage.ome_job_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -97,9 +100,10 @@
until: deploy_job_result.job_info.LastRunStatus.Name == 'Completed' or deploy_job_result.job_info.LastRunStatus.Name == 'Failed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
- name: Modify Server Interface Profile for the server using the service tag.
- ome_server_interface_profiles:
+ dellemc.openmanage.ome_server_interface_profiles:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -109,17 +113,19 @@
nic_teaming: NoTeaming
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan
+ delegate_to: localhost
- name: Retrieves the server interface profiles of all the device using device service tag.
- ome_server_interface_profile_info:
+ dellemc.openmanage.ome_server_interface_profile_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_service_tag:
- 6GHH6H2
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml
index c003b7141..9a7642ebc 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_server_interface_profiles.yml
@@ -1,18 +1,14 @@
---
-- hosts: omem
- connection: local
- name: Dell OpenManage Ansible server interface profiles configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible server interface profiles configuration.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Modify Server Interface Profile for the server using the service tag
- ome_server_interface_profiles:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_server_interface_profiles:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_service_tag:
- SVCTAG1
@@ -20,23 +16,24 @@
nic_teaming: LACP
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan1
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
- range120-125
+ delegate_to: localhost
- name: Modify Server Interface Profile for the server using the id
- ome_server_interface_profiles:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_server_interface_profiles:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_id:
- 34523
@@ -44,14 +41,15 @@
nic_teaming: NoTeaming
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan2
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
- range120-125
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml
index 3813458a4..51014afb4 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible smart fabric operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible smart fabric operations.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: "Create a smart fabric"
- ome_smart_fabric:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Create a smart fabric
+ dellemc.openmanage.ome_smart_fabric:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: present
name: "fabric1"
@@ -22,26 +18,28 @@
secondary_switch_service_tag: "PXYT456"
override_LLDP_configuration: "Enabled"
tags: create_smart_fabric
+ delegate_to: localhost
- - name: "Modify a smart fabric"
- ome_smart_fabric:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify a smart fabric
+ dellemc.openmanage.ome_smart_fabric:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: present
name: "fabric1"
new_name: "fabric_gold1"
description: "new description"
tags: modify_smart_fabric
+ delegate_to: localhost
-
- - name: "Delete a smart fabric"
- ome_smart_fabric:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Delete a smart fabric
+ dellemc.openmanage.ome_smart_fabric:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
name: "fabric1"
tags: delete_smart_fabric
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_info.yml
new file mode 100644
index 000000000..9565de143
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_info.yml
@@ -0,0 +1,31 @@
+---
+- name: Dell OpenManage Ansible smart fabric details.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Get all smart fabric info.
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
+
+ - name: Get specific smart fabric info using fabric ID.
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ delegate_to: localhost
+
+ - name: Get specific smart fabric info using fabric name.
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml
index 88b5cc62e..615e1662e 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible smart fabric uplink configuration.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible smart fabric uplink configuration.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: "Create a Uplink"
- ome_smart_fabric_uplink:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Create an Uplink
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
fabric_name: "fabric1"
@@ -33,12 +29,13 @@
- vlan3
untagged_network: vlan2
tags: create_uplink
+ delegate_to: localhost
- - name: "modify a existing uplink1"
- ome_smart_fabric_uplink:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Modify an existing uplink1
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
fabric_name: "fabric1"
@@ -60,35 +57,38 @@
- vlan33
untagged_network: vlan22
tags: modify_uplink
+ delegate_to: localhost
- - name: "Delete a Uplink"
- ome_smart_fabric_uplink:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Delete an Uplink
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
fabric_name: "fabric1"
name: "uplink1"
tags: delete_uplink
+ delegate_to: localhost
- - name: "Modify the Uplink name"
- ome_smart_fabric_uplink:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify the Uplink name
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
fabric_name: "fabric1"
name: "uplink1"
new_name: "uplink2"
tags: modify_uplink_name
+ delegate_to: localhost
- - name: "Modify a Uplink ports"
- ome_smart_fabric_uplink:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Modify Uplink ports
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
fabric_name: "fabric1"
@@ -103,12 +103,13 @@
- ethernet1/1/9
- ethernet1/1/10
tags: modify_ports
+ delegate_to: localhost
- - name: "Modify Uplink networks"
- ome_smart_fabric_uplink:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify Uplink networks
+ dellemc.openmanage.ome_smart_fabric_uplink:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
fabric_name: "fabric1"
@@ -117,3 +118,4 @@
tagged_networks:
- vlan4
tags: modify_networks
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink_info.yml
new file mode 100644
index 000000000..2eedc0c34
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_smart_fabric_uplink_info.yml
@@ -0,0 +1,43 @@
+---
+- name: Fetch fabric uplink information.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve all fabric uplink information using fabric_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ delegate_to: localhost
+
+ - name: Retrieve all fabric uplink information using fabric_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+ delegate_to: localhost
+
+ - name: Retrieve specific fabric information using uplink_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_id: "1ad54420-b145-49a1-9779-21a579ef6f2d"
+ delegate_to: localhost
+
+ - name: Retrieve specific fabric information using uplink_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_name: "u1"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml
index 433954aa0..dc9bfa741 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/ome_template_identity_pool.yml
@@ -1,31 +1,28 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible template identity pool attach and detach operation.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible template identity pool attach and detach operation.
+ hosts: ome
+ gather_facts: false
tasks:
-
- name: Attach an identity pool to a template.
- ome_template_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
template_name: template_name
identity_pool_name: identity_pool_name
tags:
- attach
+ delegate_to: localhost
- name: Detach an identity pool from a template.
- ome_template_identity_pool:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template_identity_pool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
template_name: template_name
tags:
- - detach \ No newline at end of file
+ - detach
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml
index 517ff118d..cf5d35ee0 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate.yml
@@ -1,51 +1,51 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible - OME Power state operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible - OME Power state operations.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Power state operation based on device id.
- ome_powerstate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 11111
- power_state: "off"
+ - name: Power state operation based on device id.
+ dellemc.openmanage.ome_powerstate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 11111
+ power_state: "off"
+ delegate_to: localhost
- - name: Power state operation based on device service tag.
- ome_powerstate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: "KLBR111"
- power_state: "on"
+ - name: Power state operation based on device service tag.
+ dellemc.openmanage.ome_powerstate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: "KLBR111"
+ power_state: "on"
+ delegate_to: localhost
- - name: Power state operation based on list of device ids.
- ome_powerstate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: "{{ item.device_id }}"
- power_state: "{{ item.state }}"
- with_items:
- - { "device_id": 11111, "state": "on" }
- - { "device_id": 22222, "state": "off" }
+ - name: Power state operation based on list of device ids.
+ dellemc.openmanage.ome_powerstate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: "{{ item.device_id }}"
+ power_state: "{{ item.state }}"
+ with_items:
+ - { "device_id": 11111, "state": "on" }
+ - { "device_id": 22222, "state": "off" }
+ delegate_to: localhost
- - name: Power state operation based on list of device service tags.
- ome_powerstate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: "{{ item.service_tag }}"
- power_state: "{{ item.state }}"
- with_items:
- - { "service_tag": "KLBR111", "state": "on" }
- - { "service_tag": "KLBR222", "state": "off" } \ No newline at end of file
+ - name: Power state operation based on list of device service tags.
+ dellemc.openmanage.ome_powerstate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: "{{ item.service_tag }}"
+ power_state: "{{ item.state }}"
+ with_items:
+ - { "service_tag": "KLBR111", "state": "on" }
+ - { "service_tag": "KLBR222", "state": "off" }
+ delegate_to: localhost
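The power state tasks above iterate with with_items, which remains valid; a minimal sketch, assuming only core Ansible loop support and reusing the values from the playbook above (shown without diff markers), of the same iteration written with the newer loop keyword:

- name: Power state operation based on list of device service tags
  dellemc.openmanage.ome_powerstate:
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    ca_path: "/path/to/ca_cert.pem"
    device_service_tag: "{{ item.service_tag }}"
    power_state: "{{ item.state }}"
  loop:
    - { service_tag: "KLBR111", state: "on" }
    - { service_tag: "KLBR222", state: "off" }
  delegate_to: localhost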
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml
index 8393992ab..70f4205d4 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/powerstate/ome_powerstate_with_job_tracking.yml
@@ -1,36 +1,34 @@
---
-- hosts: ome
+- name: "OME - Power state management job tracking."
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 5
- polling_interval: 5 #in seconds
- connection: local
- name: "OME - Power state management job tracking."
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+ polling_interval: 5 # in seconds
tasks:
- - name: "Power state operation based on device id"
- ome_powerstate:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- power_state: "off"
- device_id: 11111
- register: result
- failed_when: "'job_status' not in result"
+ - name: Power state operation based on device id
+ dellemc.openmanage.ome_powerstate:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ power_state: "off"
+ device_id: 11111
+ register: result
+ failed_when: "'job_status' not in result"
+ delegate_to: localhost
- - name: "Get job details using job id from power state operation."
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "{{result.job_status.Id}}"
- register: job_result
- failed_when: "'job_info' not in job_result"
- until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}" \ No newline at end of file
+ - name: Get job details using job id from power state operation.
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "{{ result.job_status.Id }}"
+ register: job_result
+ failed_when: "'job_info' not in job_result"
+ until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml
index 14d43e6ac..89bad44b7 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile.yml
@@ -1,18 +1,14 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible profile operations.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible profile operations.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Create two profiles from a template
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: create
template_name: "template 1"
@@ -20,38 +16,40 @@
number_of_profiles: 2
tags:
- create_profile
+ delegate_to: localhost
- name: Create profile with NFS share
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: create
template_name: "template 1"
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: "NFS"
share_ip: "192.168.0.1"
iso_path: "/path/to/my_iso.iso"
iso_timeout: 8
tags:
- create_profile_nfs
+ delegate_to: localhost
- name: Create profile with CIFS share
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: create
template_name: "template 1"
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: CIFS
share_ip: "192.168.0.2"
share_user: "username"
@@ -61,19 +59,20 @@
iso_timeout: 8
tags:
- create_profile_cifs
+ delegate_to: localhost
- name: Modify profile name with NFS share and attributes
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: modify
name: "Profile 00001"
new_name: "modified profile"
description: "new description"
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "/path/to/my_iso.iso"
@@ -86,41 +85,44 @@
- Id: 4507
Value: "server attr 2"
IsIgnored: true
- - DisplayName: 'System, Server Topology, ServerTopology 1 Aisle Name'
+ - DisplayName: "System, Server Topology, ServerTopology 1 Aisle Name"
Value: Aisle 5
IsIgnored: false
tags:
- modify_profile
+ delegate_to: localhost
- name: Delete using profile name
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "delete"
name: "Profile 00003"
tags:
- delete_profile_name
+ delegate_to: localhost
- name: Delete using filter
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "delete"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00002')
tags:
- delete_filter
+ delegate_to: localhost
- name: Delete using profile list filter
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "delete"
filters:
@@ -129,18 +131,19 @@
- 12124
tags:
- delete_profile_ids
+ delegate_to: localhost
- name: Assign profile name with network share
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: assign
name: "Profile 00001"
device_id: 12456
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "/path/to/my_iso.iso"
@@ -154,42 +157,45 @@
ShutdownType: 0
TimeToWaitBeforeShutdown: 300
EndHostPowerState: 1
- StrictCheckingVlan: True
+ StrictCheckingVlan: true
Schedule:
- RunNow: True
- RunLater: False
+ RunNow: true
+ RunLater: false
tags:
- assign_profile
+ delegate_to: localhost
- name: Unassign using profile name
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
name: "Profile 00003"
tags:
- unassign_profile_name
+ delegate_to: localhost
- name: "Unassign using filters"
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00003')
tags:
- unassign_filter
+ delegate_to: localhost
- name: Unassign using filter
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
filters:
@@ -198,15 +204,17 @@
- 16123
tags:
- unassign_profile_list
+ delegate_to: localhost
- name: Migrate profile
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "migrate"
name: "Profile 0001"
device_id: 12456
tags:
- - migrate_profile \ No newline at end of file
+ - migrate_profile
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml
index d4c9c7723..6e66d82cc 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_assign_job_tracking.yml
@@ -1,39 +1,45 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible profile operations.
- gather_facts: False
+- name: Dell OpenManage Ansible profile operations.
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 120
polling_interval: 30 # 30 seconds x 120 times = 1 hour
- failed_states: ['Failed', 'Warning', 'Aborted', 'Paused', 'Stopped',
- 'Canceled']
- completed_states: ['Completed', 'Failed', 'Warning', 'Aborted', 'Paused',
- 'Stopped', 'Canceled']
-
- collections:
- - dellemc.openmanage
+ failed_states:
+ ["Failed", "Warning", "Aborted", "Paused", "Stopped", "Canceled"]
+ completed_states:
+ [
+ "Completed",
+ "Failed",
+ "Warning",
+ "Aborted",
+ "Paused",
+ "Stopped",
+ "Canceled",
+ ]
tasks:
- name: Assign a profile to target
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "assign"
name: "Profile 00001"
device_id: 12456
register: result
+ delegate_to: localhost
- - name: End play when no job_id in result
- meta: end_play
+ - name: End play when no job_id in result # noqa: no-handler
+ ansible.builtin.meta: end_play
when:
- - result.changed == false
+ - not result.changed
- "'job_id' not in result"
+ delegate_to: localhost
- name: Get job details using job id
- ome_job_info:
+ dellemc.openmanage.ome_job_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -45,3 +51,4 @@
until: job_result.job_info.LastRunStatus.Name in "{{ completed_states }}"
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
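In the job-tracking play above, completed_states is already a list, so the until condition can test membership against it directly rather than templating it into a quoted string; the failed_states list is defined but not referenced by the tasks shown. A minimal sketch, assuming standard Jinja2 membership tests, of the trailing loop controls of the ome_job_info task written against the list (fragment only, shown without diff markers):

      register: job_result
      failed_when: "'job_info' not in job_result"
      until: job_result.job_info.LastRunStatus.Name in completed_states
      retries: "{{ retries_count }}"
      delay: "{{ polling_interval }}"
      delegate_to: localhost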
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_info.yml
new file mode 100644
index 000000000..6994d1542
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_info.yml
@@ -0,0 +1,60 @@
+---
+- name: Dell OpenManage Ansible profile info.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve all profiles
+ dellemc.openmanage.ome_profile_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
+
+ - name: Retrieve profile using the name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_name: eprof 00001
+ delegate_to: localhost
+
+ - name: Retrieve profile using the id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_id: 10129
+ delegate_to: localhost
+
+ - name: Retrieve the profiles using the template name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: t2
+ delegate_to: localhost
+
+ - name: Retrieve the profiles using the template id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 11
+ delegate_to: localhost
+
+ - name: Retrieve the profiles based on odata filters
+ dellemc.openmanage.ome_profile_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ filter: TemplateName eq 'mytemplate'
+ orderby: ProfileState
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml
index ae7f732b6..c51ff03f0 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_migrate_job_tracking.yml
@@ -1,40 +1,45 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible profile operations.
- gather_facts: False
+- name: Dell OpenManage Ansible profile operations.
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 120
polling_interval: 30 # 30 seconds x 120 times = 1 hour
- failed_states: ['Failed', 'Warning', 'Aborted', 'Paused', 'Stopped',
- 'Canceled']
- completed_states: ['Completed', 'Failed', 'Warning', 'Aborted', 'Paused',
- 'Stopped', 'Canceled']
-
- collections:
- - dellemc.openmanage
+ failed_states:
+ ["Failed", "Warning", "Aborted", "Paused", "Stopped", "Canceled"]
+ completed_states:
+ [
+ "Completed",
+ "Failed",
+ "Warning",
+ "Aborted",
+ "Paused",
+ "Stopped",
+ "Canceled",
+ ]
tasks:
-
- name: Migrate a profile
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "migrate"
name: "Profile 00001"
device_id: 12456
register: result
+ delegate_to: localhost
- - name: End play when no job_id in result
- meta: end_play
+ - name: End play when no job_id in result # noqa: no-handler
+ ansible.builtin.meta: end_play
when:
- - result.changed == false
+ - not result.changed
- "'job_id' not in result"
+ delegate_to: localhost
- name: Get job details using job id
- ome_job_info:
+ dellemc.openmanage.ome_job_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -46,3 +51,4 @@
until: job_result.job_info.LastRunStatus.Name in "{{ completed_states }}"
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml
index b1a21312f..f472aafbc 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/profile/ome_profile_unassign_job_tracking.yml
@@ -1,39 +1,44 @@
---
-- hosts: ome
- connection: local
- name: Dell EMC OpenManage Ansible profile operations.
- gather_facts: False
+- name: Dell OpenManage Ansible profile operations.
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 120
polling_interval: 30 # 30 seconds x 120 times = 1 hour
- failed_states: ['Failed', 'Warning', 'Aborted', 'Paused', 'Stopped',
- 'Canceled']
- completed_states: ['Completed', 'Failed', 'Warning', 'Aborted', 'Paused',
- 'Stopped', 'Canceled']
-
- collections:
- - dellemc.openmanage
+ failed_states:
+ ["Failed", "Warning", "Aborted", "Paused", "Stopped", "Canceled"]
+ completed_states:
+ [
+ "Completed",
+ "Failed",
+ "Warning",
+ "Aborted",
+ "Paused",
+ "Stopped",
+ "Canceled",
+ ]
tasks:
-
- name: Unassign using profile name
- ome_profile:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_profile:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
name: "Profile 00003"
register: result
+ delegate_to: localhost
- - name: End play when no job_id in result
- meta: end_play
+ - name: End play when no job_id in result # noqa: no-handler
+ ansible.builtin.meta: end_play
when:
- - result.changed == false
+ - not result.changed
- "'job_id' not in result"
+ delegate_to: localhost
- name: Get job details using job id
- ome_job_info:
+ dellemc.openmanage.ome_job_info:
hostname: "{{ hostname }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -45,3 +50,4 @@
until: job_result.job_info.LastRunStatus.Name in "{{ completed_states }}"
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml
index 58ac15ffb..856e6e544 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template.yml
@@ -1,29 +1,26 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible device Template service.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible device Template service.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: "Create a template from a reference device."
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Create a template from a reference device.
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
device_id: 25123
attributes:
Name: "New Template"
Description: "New Template description"
+ delegate_to: localhost
- - name: "Modify template name, description, and attribute value."
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify template name, description, and attribute value.
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "modify"
template_id: 12
@@ -37,12 +34,13 @@
- Id: 1234
Value: "Test Attribute"
IsIgnored: false
+ delegate_to: localhost
- name: Modify template name, description, and attribute using detailed view
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "modify"
template_id: 12
@@ -52,17 +50,18 @@
Attributes:
# Enter the comma separated string as appearing in the Detailed view on GUI
# NIC -> NIC.Integrated.1-1-1 -> NIC Configuration -> Wake On LAN1
- - DisplayName: 'NIC, NIC.Integrated.1-1-1, NIC Configuration, Wake On LAN'
+ - DisplayName: "NIC, NIC.Integrated.1-1-1, NIC Configuration, Wake On LAN"
Value: Enabled
IsIgnored: false
# System -> LCD Configuration -> LCD 1 User Defined String for LCD
- - DisplayName: 'System, LCD Configuration, LCD 1 User Defined String for LCD'
+ - DisplayName: "System, LCD Configuration, LCD 1 User Defined String for LCD"
Value: LCD str by OMAM
IsIgnored: false
+ delegate_to: localhost
- - name: "Deploy template on multiple devices "
- ome_template:
- hostname: "192.168.0.1"
+ - name: Deploy template on multiple devices
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -72,26 +71,28 @@
- 12765
- 10173
device_service_tag:
- - 'SVTG123'
- - 'SVTG456'
+ - "SVTG123"
+ - "SVTG456"
+ delegate_to: localhost
- name: Deploy template on groups
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "deploy"
template_id: 12
device_group_names:
- server_group_1
- server_group_2
+ delegate_to: localhost
- - name: "Deploy template on multiple devices along attributes modification for target device"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Deploy template on multiple devices along with attribute modification for the target device
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "deploy"
template_id: 12
@@ -99,7 +100,7 @@
- 12765
- 10173
device_service_tag:
- - 'SVTG123'
+ - "SVTG123"
attributes:
# Device specific attributes to be modified during deployment.
# For information on any attribute id, use API /TemplateService/Templates(Id)/Views(Id)/AttributeViewDetails
@@ -118,19 +119,20 @@
- Id: 18968,
Value: "hostname-1"
IsIgnored: false
+ delegate_to: localhost
- - name: "Deploy template and Operating System (OS) on multiple devices"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Deploy template and Operating System (OS) on multiple devices
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "deploy"
template_id: 12
device_id:
- 12765
device_service_tag:
- - 'SVTG123'
+ - "SVTG123"
attributes:
# Include this to install OS on the devices.
# This section is optional
@@ -151,13 +153,15 @@
Schedule:
RunLater: true
RunNow: false
+ delegate_to: localhost
- - name: "Deploy template on multiple devices and changes the device-level attributes. After the template is deployed,
- install OS using its image."
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name:
+        Deploy template on multiple devices and change the device-level attributes. After the template is deployed,
+ install OS using its image.
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "deploy"
template_id: 12
@@ -165,8 +169,8 @@
- 12765
- 10173
device_service_tag:
- - 'SVTG123'
- - 'SVTG456'
+ - "SVTG123"
+ - "SVTG456"
attributes:
Attributes:
- DeviceId: 12765
@@ -196,60 +200,67 @@
Schedule:
RunLater: true
RunNow: false
+ delegate_to: localhost
- - name: "delete template"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Delete template
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "delete"
template_id: 12
+ delegate_to: localhost
- - name: "export a template"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Export a template
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "export"
template_id: 12
+ delegate_to: localhost
# Start of example to export template to a local xml file
- - name: "export template to a local xml file"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Export template to a local xml file
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "export"
template_name: "my_template"
register: result
tags:
- export_xml_to_file
- - ansible.builtin.copy:
- content: "{{ result.Content}}"
+    - name: Copy the exported template content to a local XML file
+ ansible.builtin.copy:
+ content: "{{ result.Content }}"
dest: "/path/to/exported_template.xml"
+ mode: "0600"
tags:
- export_xml_to_file
# End of example to export template to a local xml file
+ delegate_to: localhost
- - name: "clone a template"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Clone a template
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "clone"
template_id: 12
attributes:
Name: "New Cloned Template Name"
+ delegate_to: localhost
- - name: "import template from XML content"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Import template from XML content
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "import"
attributes:
@@ -257,38 +268,41 @@
# Template Type from TemplateService/TemplateTypes
Type: 2
# xml string content
- Content: "<SystemConfiguration Model=\"PowerEdge R940\" ServiceTag=\"SVCTAG1\"
- TimeStamp=\"Tue Sep 24 09:20:57.872551 2019\">\n<Component FQDD=\"AHCI.Slot.6-1\">\n<Attribute
- Name=\"RAIDresetConfig\">True</Attribute>\n<Attribute Name=\"RAIDforeignConfig\">Clear</Attribute>\n
- </Component>\n<Component FQDD=\"Disk.Direct.0-0:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready
- </Attribute>\n<Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n
- <Component FQDD=\"Disk.Direct.1-1:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready</Attribute>\n
- <Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n</SystemConfiguration>\n"
+ Content:
+ "<SystemConfiguration Model=\"PowerEdge R940\" ServiceTag=\"SVCTAG1\"
+ TimeStamp=\"Tue Sep 24 09:20:57.872551 2019\">\n<Component FQDD=\"AHCI.Slot.6-1\">\n<Attribute
+ Name=\"RAIDresetConfig\">True</Attribute>\n<Attribute Name=\"RAIDforeignConfig\">Clear</Attribute>\n
+ </Component>\n<Component FQDD=\"Disk.Direct.0-0:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready
+ </Attribute>\n<Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n
+ <Component FQDD=\"Disk.Direct.1-1:AHCI.Slot.6-1\">\n<Attribute Name=\"RAIDPDState\">Ready</Attribute>\n
+ <Attribute Name=\"RAIDHotSpareStatus\">No</Attribute>\n</Component>\n</SystemConfiguration>\n"
+ delegate_to: localhost
- - name: "import template from local XML file"
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Import template from local XML file
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "import"
attributes:
Name: "Imported Template Name"
Type: 2
Content: "{{ lookup('ansible.builtin.file', '/path/to/xmlfile') }}"
+ delegate_to: localhost
- - name: "Deploy template and Operating System (OS) on multiple devices."
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+    - name: Deploy template and Operating System (OS) on multiple devices.
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "deploy"
template_id: 12
device_id:
- 12765
device_service_tag:
- - 'SVTG123'
+ - "SVTG123"
attributes:
# Include this to install OS on the devices.
# This section is optional
@@ -309,12 +323,13 @@
Schedule:
RunLater: true
RunNow: false
+ delegate_to: localhost
- name: Create a compliance template from reference device
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "create"
device_service_tag: "SVTG123"
@@ -323,12 +338,13 @@
Name: "Configuration Compliance"
Description: "Configuration Compliance Template"
Fqdds: "BIOS"
+ delegate_to: localhost
- name: Import a compliance template from XML file
- ome_template:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
command: "import"
template_view_type: "Compliance"
@@ -336,3 +352,4 @@
Name: "Configuration Compliance"
Content: "{{ lookup('ansible.builtin.file', './test.xml') }}"
Type: 2
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml
index 40f4c0027..6bb0acdee 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_create_modify_lcd_display.yml
@@ -1,11 +1,10 @@
---
-- hosts: ome
- connection: local
- name: "Creates a new template from the provided reference server device.
- Track the template creation job till completion.
- Fetch the Attribute specific to LCD Configuration settings from the attribute view of the created template.
- Modify the created template with the user defined LCD string."
- gather_facts: False
+- name: "Creates a new template from the provided reference server device.
+ Track the template creation job till completion.
+ Fetch the Attribute specific to LCD Configuration settings from the attribute view of the created template.
+ Modify the created template with the user defined LCD string."
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 50
polling_interval: 5
@@ -13,117 +12,121 @@
template_name: "LCD String Deploy Template"
lcd_display_string: "LCD Custom Display Message"
- collections:
- - dellemc.openmanage
-
tasks:
- - name: "create template from the reference server"
- ome_template:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_service_tag: "{{ reference_device }}"
- attributes:
- Name: "{{ template_name }}"
- Description: "LCD Template description"
- register: result
+ - name: Create template from the reference server
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_service_tag: "{{ reference_device }}"
+ attributes:
+ Name: "{{ template_name }}"
+ Description: "LCD Template description"
+ register: result
+ delegate_to: localhost
+
+ - name: Sleep for 30 seconds and continue with play
+ ansible.builtin.wait_for:
+ timeout: 30
- - name: "sleep for 30 seconds and continue with play"
- wait_for: timeout=30
+ - name: Fetch the Task ID from the Template Details using the Template ID
+ dellemc.openmanage.ome_template_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: "{{ result.return_id }}"
+ register: template_result
+ delegate_to: localhost
- - name: "Fetch the Task ID from the Template Details using the Template ID"
- ome_template_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- template_id: "{{ result.return_id }}"
- register: template_result
+ - name: "Track the Template Creation Job till Completion"
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "{{ template_result.template_info[hostname].TaskId }}"
+ register: job_result
+ failed_when: "'job_info' not in job_result"
+ until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ delegate_to: localhost
- - name: "Track the Template Creation Job till Completion"
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "{{ template_result.template_info[hostname].TaskId }}"
- register: job_result
- failed_when: "'job_info' not in job_result"
- until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}"
+ - name: "Retrieve the Attribute ID specific to LCD Configuration"
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/api/TemplateService/Templates({{ result.return_id }})/Views(1)/AttributeViewDetails"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ method: "GET"
+ use_proxy: true
+ status_code: 200
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ register: config_result
+ delegate_to: localhost
- - name: "Retrieve the Attribute ID specific to LCD Configuration"
- uri:
- url: "https://{{ hostname }}/api/TemplateService/Templates({{ result.return_id }})/Views(1)/AttributeViewDetails"
- user: "{{ username }}"
- password: "{{ password }}"
- method: "GET"
- use_proxy: yes
- status_code: 200
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
- headers:
- Content-Type: "application/json"
- Accept: "application/json"
- register: config_result
+ - name: "System Attribute Groups"
+ ansible.builtin.set_fact:
+ lcd_fact: "{{ item }}"
+ when:
+ - item.DisplayName=='System'
+ with_items:
+ - "{{ config_result.json.AttributeGroups }}"
+ loop_control:
+ label: "{{ config_result.json.Name }}"
+ delegate_to: localhost
- - name: "System Attribute Groups"
- set_fact:
- lcd_fact: "{{ item }}"
- when:
- - item.DisplayName=='System'
- with_items:
- - "{{ config_result.json.AttributeGroups }}"
- loop_control:
- label: "{{ config_result.json.Name }}"
+ - name: "LCD System Attributes Groups"
+ ansible.builtin.set_fact:
+ lcdconfig: "{{ item }}"
+ when:
+ - item.DisplayName=='LCD Configuration'
+ with_items:
+ - "{{ lcd_fact.SubAttributeGroups }}"
+ loop_control:
+ label: "{{ item.DisplayName }}"
- - name: "LCD System Attributes Groups"
- set_fact:
- lcdconfig: "{{ item }}"
- when:
- - item.DisplayName=='LCD Configuration'
- with_items:
- - "{{ lcd_fact.SubAttributeGroups }}"
- loop_control:
- label: "{{ item.DisplayName }}"
+ - name: "Retrieve LCD Display Attribute ID"
+ ansible.builtin.set_fact:
+ lcdattrid: "{{ item.AttributeId }}"
+ when:
+ - item.DisplayName=='LCD 1 User Defined String for LCD'
+ with_items:
+ - "{{ lcdconfig.Attributes }}"
+ loop_control:
+ label: "{{ item.DisplayName }}"
- - name: "Retrieve LCD Display Attribute ID"
- set_fact:
- lcdattrid: "{{ item.AttributeId }}"
- when:
- - item.DisplayName=='LCD 1 User Defined String for LCD'
- with_items:
- - "{{ lcdconfig.Attributes }}"
- loop_control:
- label: "{{ item.DisplayName }}"
-
- - name: "Retrieve LCD Config Attribute ID"
- set_fact:
- lcdconfigattrid: "{{ item.AttributeId }}"
- when:
- - item.DisplayName=='LCD 1 LCD Configuration'
- with_items:
- - "{{ lcdconfig.Attributes }}"
- loop_control:
- label: "{{ item.DisplayName }}"
+ - name: "Retrieve LCD Config Attribute ID"
+ ansible.builtin.set_fact:
+ lcdconfigattrid: "{{ item.AttributeId }}"
+ when:
+ - item.DisplayName=='LCD 1 LCD Configuration'
+ with_items:
+ - "{{ lcdconfig.Attributes }}"
+ loop_control:
+ label: "{{ item.DisplayName }}"
- - name: "Modify the created with Custom LCD String to be displayed"
- ome_template:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "modify"
- template_id: "{{ result.return_id }}"
- attributes:
- Name: "{{ template_name }}"
- Attributes:
- - Id: "{{ lcdattrid }}"
- Value: "{{ lcd_display_string }}"
- IsIgnored: false
- - Id: "{{ lcdconfigattrid }}"
- Value: "User Defined"
- IsIgnored: false \ No newline at end of file
+    - name: "Modify the created template with the Custom LCD String to be displayed"
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "modify"
+ template_id: "{{ result.return_id }}"
+ attributes:
+ Name: "{{ template_name }}"
+ Attributes:
+ - Id: "{{ lcdattrid }}"
+ Value: "{{ lcd_display_string }}"
+ IsIgnored: false
+ - Id: "{{ lcdconfigattrid }}"
+ Value: "User Defined"
+ IsIgnored: false
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml
index 3fd200c00..b206ce9ed 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info.yml
@@ -1,33 +1,32 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible template inventory details.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible template inventory details.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Retrieve basic details of all templates.
- ome_template_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
- name: Retrieve details of a specific template identified by its template ID.
- ome_template_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
- template_id: "{{template_id}}"
+ template_id: "{{ template_id }}"
+ delegate_to: localhost
- name: Get filtered template info based on name.
- ome_template_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_template_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
system_query_options:
- filter: "Name eq 'new template'" \ No newline at end of file
+ filter: "Name eq 'new template'"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml
index eb040c9c0..6f5500b20 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_info_with_filter.yml
@@ -1,27 +1,25 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible device Template service.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible device Template service.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: get template with filter option.
- register: result
- failed_when: "'template_info' not in result or result.template_info['{{hostname}}']['@odata.count'] == 0"
- ome_template_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Get template with filter option.
+ register: result
+ failed_when: "'template_info' not in result or result.template_info['{{ hostname }}']['@odata.count'] == 0"
+ dellemc.openmanage.ome_template_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
system_query_options:
- filter: "Name eq 'template_name'"
- - name: get specific template from result
- with_subelements:
+ filter: "Name eq 'template_name'"
+ delegate_to: localhost
+
+ - name: Get specific template from result
+ with_subelements:
- "{{ result.template_info }}"
- value
- debug:
- msg: "{{item.1}}"
- when: item.1['Name']=='template_name'
+ ansible.builtin.debug:
+ msg: "{{item.1}}"
+ when: item.1['Name']=='template_name'
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml
index afb472fa4..40cd30991 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_lcd_display_string_deploy.yml
@@ -1,46 +1,43 @@
---
-- hosts: ome
- connection: local
- name:
- - Deploy this template with desired LCD string on the target servers.
- - Track the template deploy operation job till completion.
- gather_facts: False
+- name: "Deploy this template with desired LCD string on the target servers.
+ Track the template deploy operation job till completion."
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 50
polling_interval: 5
template_name: "LCD Srting Deploy Template"
deployable_servicetag:
- - 'MXL1234'
- - 'MXL4567'
-
- collections:
- - dellemc.openmanage
+ - "MXL1234"
+ - "MXL4567"
tasks:
- - name: "Deploy Previously created LCD Template "
- ome_template:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- state: "deploy"
- template_name: "{{ template_name }}"
- device_service_tag: "{{ deployable_servicetag }}"
- register: result
- tags:
- - deploy
+ - name: Deploy Previously created LCD Template
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "deploy"
+ template_name: "{{ template_name }}"
+ device_service_tag: "{{ deployable_servicetag }}"
+ register: result
+ tags:
+ - deploy
+ delegate_to: localhost
- - name: "Track the deploy job till completion"
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "{{ result.return_id }}"
- register: job_result
- failed_when: "'job_info' not in job_result"
- until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}"
- tags:
- - track_deploy \ No newline at end of file
+ - name: Track the deploy job till completion
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "{{ result.return_id }}"
+ register: job_result
+ failed_when: "'job_info' not in job_result"
+ until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ tags:
+ - track_deploy
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml
index fee07b4e2..2b1569469 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan.yml
@@ -1,66 +1,64 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible template tag and untag.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible template tag and untag.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: Tag or untag vlans in template
- ome_template_network_vlan:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- template_id: 78
- nic_identifier: NIC Slot 4
- untagged_networks:
- - port: 1
- untagged_network_id: 12765
- - port: 2
- untagged_network_name: vlan2
- tagged_networks:
- - port: 1
- tagged_network_ids:
- - 12767
- - 12768
- - port: 4
- tagged_network_ids:
- - 12767
- - 12768
- tagged_network_names:
- - vlan3
- - port: 2
- tagged_network_names:
- - vlan4
- - vlan1
- tags:
- - tag_untag_vlan
+ - name: Tag or untag vlans in template
+ dellemc.openmanage.ome_template_network_vlan:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 78
+ nic_identifier: NIC Slot 4
+ untagged_networks:
+ - port: 1
+ untagged_network_id: 12765
+ - port: 2
+ untagged_network_name: vlan2
+ tagged_networks:
+ - port: 1
+ tagged_network_ids:
+ - 12767
+ - 12768
+ - port: 4
+ tagged_network_ids:
+ - 12767
+ - 12768
+ tagged_network_names:
+ - vlan3
+ - port: 2
+ tagged_network_names:
+ - vlan4
+ - vlan1
+ tags:
+ - tag_untag_vlan
+ delegate_to: localhost
- - name: Clear the tagged and untagged vLANs
- ome_template_network_vlan:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- template_id: 78
- nic_identifier: NIC Slot 4
- untagged_networks:
- # For removing the untagged vLANs for the port 1 and 2
- - port: 1
- untagged_network_id: 0
- - port: 2
- untagged_network_name: 0
- tagged_networks:
- # For removing the tagged vLANs for port 1 and 4
- - port: 1
- tagged_network_ids: []
- - port: 4
- tagged_network_ids: []
- tagged_network_names: []
- - port: 2
- tagged_network_names: []
- tags:
- - clear_tagged_untagged \ No newline at end of file
+ - name: Clear the tagged and untagged vLANs
+ dellemc.openmanage.ome_template_network_vlan:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 78
+ nic_identifier: NIC Slot 4
+ untagged_networks:
+ # For removing the untagged vLANs for the port 1 and 2
+ - port: 1
+ untagged_network_id: 0
+ - port: 2
+ untagged_network_name: 0
+ tagged_networks:
+ # For removing the tagged vLANs for port 1 and 4
+ - port: 1
+ tagged_network_ids: []
+ - port: 4
+ tagged_network_ids: []
+ tagged_network_names: []
+ - port: 2
+ tagged_network_names: []
+ tags:
+ - clear_tagged_untagged
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan_info.yml
new file mode 100644
index 000000000..a7dcd5902
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_network_vlan_info.yml
@@ -0,0 +1,31 @@
+---
+- name: Dell OpenManage Ansible template VLAN info.
+ hosts: ome
+ gather_facts: false
+
+ tasks:
+ - name: Retrieve network details of all templates.
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
+
+ - name: Retrieve network details using template ID
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 1234
+ delegate_to: localhost
+
+ - name: Retrieve network details using template name
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template1
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml
index 9f93bbdfb..0b87188eb 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/template/ome_template_with_job_tracking.yml
@@ -1,48 +1,46 @@
---
-- hosts: ome
+- name: "OME - Create Template details tracking"
+ hosts: ome
+ gather_facts: false
vars:
retries_count: 50
- polling_interval: 5 #in seconds
- connection: local
- name: "OME - Create Template details tracking"
- gather_facts: False
-
- collections:
- - dellemc.openmanage
-
+ polling_interval: 5 # in seconds
tasks:
- - name: "Create template based on device id."
- ome_template:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- device_id: 12475
- attributes:
- Name: "New Template"
- Description: "New Template description"
- register: result
- failed_when: "'return_id' not in result"
+ - name: "Create template based on device id."
+ dellemc.openmanage.ome_template:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 12475
+ attributes:
+ Name: "New Template"
+ Description: "New Template description"
+ register: result
+ failed_when: "'return_id' not in result"
+ delegate_to: localhost
- - name: "Get the job id using return id from template."
- ome_template_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- template_id: "{{ result.return_id }}"
- register: facts_result
+ - name: "Get the job id using return id from template."
+ dellemc.openmanage.ome_template_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: "{{ result.return_id }}"
+ register: facts_result
+ delegate_to: localhost
- - name: "Get job details using job id from template task."
- ome_job_info:
- hostname: "{{ hostname }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "{{ facts_result.template_info[hostname].TaskId }}"
- register: job_result
- failed_when: job_result.job_info.LastRunStatus.Name == 'Failed'
- changed_when: job_result.job_info.LastRunStatus.Name == 'Completed'
- until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
- retries: "{{ retries_count }}"
- delay: "{{ polling_interval }}"
+ - name: "Get job details using job id from template task."
+ dellemc.openmanage.ome_job_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "{{ facts_result.template_info[hostname].TaskId }}"
+ register: job_result
+ failed_when: job_result.job_info.LastRunStatus.Name == 'Failed'
+ changed_when: job_result.job_info.LastRunStatus.Name == 'Completed'
+ until: job_result.job_info.LastRunStatus.Name == 'Completed' or job_result.job_info.LastRunStatus.Name == 'Failed'
+ retries: "{{ retries_count }}"
+ delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml
index b1589caea..da5b03d59 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user.yml
@@ -1,70 +1,71 @@
---
-- hosts: ome
- connection: local
- name: Dell OpenManage Ansible User service.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Dell OpenManage Ansible User service.
+ hosts: ome
+ gather_facts: false
tasks:
- - name: create new user.
- ome_user:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Create new user.
+ dellemc.openmanage.ome_user:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
attributes:
- UserName: "user1"
- Password: "UserPassword"
- RoleId: "10"
- Enabled: True
-
- - name: create user with all parameters
- ome_user:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ UserName: "user1"
+ Password: "UserPassword"
+ RoleId: "10"
+ Enabled: true
+ delegate_to: localhost
+
+ - name: Create user with all parameters
+ dellemc.openmanage.ome_user:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
attributes:
- UserName: "user2"
- Description: "user2 description"
- Password: "UserPassword"
- RoleId: "10"
- Enabled: True
- DirectoryServiceId: 0
- UserTypeId: 1
- Locked: False
- Name: "user2"
+ UserName: "user2"
+ Description: "user2 description"
+ Password: "UserPassword"
+ RoleId: "10"
+ Enabled: true
+ DirectoryServiceId: 0
+ UserTypeId: 1
+ Locked: false
+ Name: "user2"
+ delegate_to: localhost
- - name: modify existing user
- ome_user:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Modify existing user
+ dellemc.openmanage.ome_user:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "present"
attributes:
- UserName: "user3"
- RoleId: "10"
- Enabled: True
- Description: "Modify user Description"
+ UserName: "user3"
+ RoleId: "10"
+ Enabled: true
+ Description: "Modify user Description"
+ delegate_to: localhost
- - name: delete existing user using id.
- ome_user:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ - name: Delete existing user using id.
+ dellemc.openmanage.ome_user:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
user_id: 61874
-
- - name: delete existing user using name.
- ome_user:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ delegate_to: localhost
+
+ - name: Delete existing user using name.
+ dellemc.openmanage.ome_user:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
state: "absent"
- name: "name" \ No newline at end of file
+ name: "name"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml
index 6016d5025..7adf8a7a7 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/ome/user/ome_user_info.yml
@@ -1,33 +1,32 @@
---
-- hosts: ome
- connection: local
- name: Fetching ome user facts.
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Fetching ome user facts.
+ hosts: ome
+ gather_facts: false
tasks:
- name: Retrieve basic details of all accounts.
- ome_user_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_user_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
+ delegate_to: localhost
- name: Retrieve details of a specific account identified by its account ID.
- ome_user_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_user_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
- account_id: "{{account_id}}"
-
+ account_id: "{{ account_id }}"
+ delegate_to: localhost
+
- name: Retrieve details of a specific user using filter with UserName.
- ome_user_info:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
+ dellemc.openmanage.ome_user_info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
system_query_options:
- filter: "UserName eq 'test'" \ No newline at end of file
+ filter: "UserName eq 'test'"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml
index 15fa188dd..b4313efcc 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware.yml
@@ -1,32 +1,27 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Ansible Module for Simple Firmware Update"
+ hosts: redfish_hosts
gather_facts: false
- name: "Ansible Module for Simple Firmware Update"
-
- collections:
- - dellemc.openmanage
tasks:
-
- - name: "Update the firmware from a single executable file available in a local path"
- redfish_firmware:
+ - name: Update the firmware from a single executable file available in a local path
+ dellemc.openmanage.redfish_firmware:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
image_uri: "/home/firmware_repo/component.exe"
-
tags:
- local-update
+ delegate_to: localhost
- - name: "Update the firmware from a single executable file available in a HTTP protocol"
- redfish_firmware:
+ - name: Update the firmware from a single executable file available in a HTTP protocol
+ dellemc.openmanage.redfish_firmware:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
image_uri: "http://192.168.0.1/firmware_repo/component.exe"
-
tags:
- http-update
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml
index 105f4189a..eb779e2a1 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_http_jobtracking.yml
@@ -1,38 +1,34 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: Ansible Module for Simple Firmware Update
+ hosts: redfish_hosts
gather_facts: false
- name: "Ansible Module for Simple Firmware Update"
vars:
retries_count: 100
polling_interval: 5
reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
- collections:
- - dellemc.openmanage
-
tasks:
-
- - name: "Update the firmware from a single executable file available in a HTTP protocol"
- redfish_firmware:
+ - name: Update the firmware from a single executable file available in a HTTP protocol
+ dellemc.openmanage.redfish_firmware:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
image_uri: "http://192.168.0.1/firmware_repo/component.exe"
register: result
+ delegate_to: localhost
- - name: "Update the firmware from a single executable with job tracking till completion"
- uri:
+ - name: Update the firmware from a single executable with job tracking till completion
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -40,43 +36,46 @@
until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
- - name: "Update the firmware from a single executable reboot."
- uri:
+ - name: Update the firmware from a single executable reboot.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ reboot_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "POST"
body_format: raw
body: '{"ResetType": "ForceRestart"}'
- use_proxy: yes
+ use_proxy: true
status_code: 204
- return_content: no
- validate_certs: no
- force_basic_auth: yes
+ return_content: false
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
register: reboot_result
changed_when: reboot_result.status == 204
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "Update the firmware from a single executable Waits for 4 minutes."
- wait_for:
+ - name: Update the firmware from a single executable Waits for 4 minutes.
+ ansible.builtin.wait_for:
timeout: 240
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "Update the firmware from a single executable with job tracking till completion."
- uri:
+ - name: Update the firmware from a single executable with job tracking till completion.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -84,9 +83,10 @@
until: final_result.json.TaskState == 'Completed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
- - name: "Update the firmware from a single executable fact."
- set_fact:
+ - name: Update the firmware from a single executable fact.
+ ansible.builtin.set_fact:
job_details: "{{ final_result.json }}"
failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK"
- changed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus == "OK" \ No newline at end of file
+ changed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus == "OK"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml
index 8ea91cc3a..e62d5c81f 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_from_local_jobtracking.yml
@@ -1,38 +1,34 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Ansible Module for Simple Firmware Update"
+ hosts: redfish_hosts
gather_facts: false
- name: "Ansible Module for Simple Firmware Update"
vars:
retries_count: 100
polling_interval: 5
reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
- collections:
- - dellemc.openmanage
-
tasks:
-
- - name: "Update the firmware from a single executable file available in a local path"
- redfish_firmware:
+ - name: Update the firmware from a single executable file available in a local path
+ dellemc.openmanage.redfish_firmware:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
image_uri: "/home/firmware_repo/component.exe"
register: result
+ delegate_to: localhost
- - name: "Update the firmware from a single executable with job tracking till completion."
- uri:
+ - name: Update the firmware from a single executable with job tracking till completion.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -40,43 +36,46 @@
until: job_result.json.TaskState == 'Completed' or job_result.json.TaskState == 'Pending'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
- - name: "Update the firmware from a single executable reboot."
- uri:
+ - name: Update the firmware from a single executable reboot.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ reboot_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "POST"
body_format: raw
body: '{"ResetType": "ForceRestart"}'
- use_proxy: yes
+ use_proxy: true
status_code: 204
- return_content: no
- validate_certs: no
- force_basic_auth: yes
+ return_content: false
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
register: reboot_result
changed_when: reboot_result.status == 204
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "Update the firmware from a single executable Waits for 4 minutes."
- wait_for:
+ - name: Update the firmware from a single executable Waits for 4 minutes.
+ ansible.builtin.wait_for:
timeout: 240
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "Update the firmware from a single executable with job tracking till completion."
- uri:
+ - name: Update the firmware from a single executable with job tracking till completion.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -84,9 +83,10 @@
until: final_result.json.TaskState == 'Completed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
- - name: "Update the firmware from a single executable fact."
- set_fact:
+ - name: Update the firmware from a single executable fact.
+ ansible.builtin.set_fact:
job_details: "{{ final_result.json }}"
failed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus != "OK"
- changed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus == "OK" \ No newline at end of file
+ changed_when: final_result.json.TaskState == "Completed" and final_result.json.TaskStatus == "OK"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_rollback.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_rollback.yml
new file mode 100644
index 000000000..520288419
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/firmware/redfish_firmware_rollback.yml
@@ -0,0 +1,45 @@
+---
+- name: "Ansible Module for Simple Firmware Rollback"
+ hosts: redfish_hosts
+ gather_facts: false
+ tasks:
+
+ - name: Rollback a BIOS component firmware
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "BIOS"
+ delegate_to: localhost
+ tags: rollback-bios
+
+ - name: Rollback all NIC cards with a name starting from 'Broadcom Gigabit'.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Broadcom Gigabit Ethernet.*"
+ delegate_to: localhost
+ tags: rollback-match
+
+ - name: Rollback all the component firmware except BIOS component.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "(?!BIOS).*"
+ delegate_to: localhost
+ tags: rollback-except
+
+ - name: Rollback all the available firmware component.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ name: ".*"
+ delegate_to: localhost
+ tags: rollback-all
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml
index 7fa5e40cc..0c7a7b755 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_event_subscription.yml
@@ -1,46 +1,42 @@
---
-- hosts: redfish
- connection: local
- name: Configure Redfish subscriptions
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure Redfish subscriptions
+ hosts: redfish
+ gather_facts: false
tasks:
- name: Add Redfish metric subscription
- redfish_event_subscription:
- baseuri: "{{ baseuri }}"
+ dellemc.openmanage.redfish_event_subscription:
+ baseuri: "{{ baseuri }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
destination: "https://192.168.1.100:8188"
event_type: MetricReport
event_format_type: MetricReport
state: present
-
tags: add_metric_subscription
+ delegate_to: localhost
- name: Add Redfish alert subscription
- redfish_event_subscription:
- baseuri: "{{ baseuri }}"
+ dellemc.openmanage.redfish_event_subscription:
+ baseuri: "{{ baseuri }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
destination: "https://server01.example.com:8188"
event_type: Alert
event_format_type: Event
state: present
-
tags: add_alert_subscription
-
+ delegate_to: localhost
+
- name: Delete Redfish subscription with a specified destination
- redfish_event_subscription:
- baseuri: "{{ baseuri }}"
+ dellemc.openmanage.redfish_event_subscription:
+ baseuri: "{{ baseuri }}"
username: "{{ username }}"
- password: "{{ password }}"
+ password: "{{ password }}"
ca_path: "/path/to/ca_cert.pem"
destination: "https://server01.example.com:8188"
state: absent
-
- tags: delete_subscription \ No newline at end of file
+ tags: delete_subscription
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml
index bacce0ccb..e2b15b085 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/redfish_powerstate.yml
@@ -1,26 +1,24 @@
---
-- hosts: redfish
- connection: local
- name: Configure Server Power Setting
- gather_facts: False
-
- collections:
- - dellemc.openmanage
+- name: Configure Server Power Setting
+ hosts: redfish
+ gather_facts: false
tasks:
- - name: Manage power state of the first device.
- redfish_powerstate:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- reset_type: "On"
+ - name: Manage power state of the first device.
+ dellemc.openmanage.redfish_powerstate:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "On"
+ delegate_to: localhost
- - name: Manage power state of a specified device.
- redfish_powerstate:
- baseuri: "{{ baseuri }}"
- username: "{{ username }}"
- password: "{{ password }}"
- ca_path: "/path/to/ca_cert.pem"
- reset_type: "ForceOff"
- resource_id: "System.Embedded.1"
+ - name: Manage power state of a specified device.
+ dellemc.openmanage.redfish_powerstate:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "ForceOff"
+ resource_id: "System.Embedded.1"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml
index 0c1380a05..def3dc33e 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume.yml
@@ -1,19 +1,14 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Redfish Storage Volume - Ansible Module"
+ hosts: redfish_hosts
gather_facts: false
- name: "Redfish Storage Volume - Ansible Module"
vars:
retries_count: 15
polling_interval: 5
- collections:
- - dellemc.openmanage
-
tasks:
-
- name: Create a volume with supported options.
- redfish_storage_volume:
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -33,9 +28,10 @@
register: result
tags:
- create_volume1
-
+ delegate_to: localhost
+
- name: Create a volume with minimum options.
- redfish_storage_volume:
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -44,12 +40,103 @@
controller_id: "RAID.Slot.1-1"
volume_type: "NonRedundant"
drives:
- - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
- tags:
- - create_volume2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ tags:
+ - create_volume2
+ delegate_to: localhost
+
+ - name: Create a RAID6 volume.
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID6"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+ tags:
+ - create_raid6
+ delegate_to: localhost
+
+ - name: Create a RAID60 volume.
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID60"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-5
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-6
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-7
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-8
+ tags:
+ - create_raid60
+ delegate_to: localhost
+
+ - name: Create a RAID0 on PERC controller on reset
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+ tags:
+ - create_raid0_with_onreset
+ delegate_to: localhost
+
+ - name: Create a RAID0 on BOSS controller with restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+ reboot_server: true
+ tags:
+ - create_raid0_boss_with_onreset
+ delegate_to: localhost
+
+ - name: Create a RAID0 on BOSS controller with force restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ reboot_server: true
+ force_reboot: true
+ tags:
+ - create_raid0_boss_with_onreset_force
+ delegate_to: localhost
- name: Modify a volume's encryption type settings.
- redfish_storage_volume:
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -60,9 +147,10 @@
encrypted: true
tags:
- modify_volume
-
+ delegate_to: localhost
+
- name: Initialize an existing volume.
- redfish_storage_volume:
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -72,9 +160,10 @@
initialize_type: "Slow"
tags:
- initialize_volume
-
+ delegate_to: localhost
+
- name: Delete an existing volume.
- redfish_storage_volume:
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -83,3 +172,4 @@
volume_id: "Disk.Virtual.5:RAID.Slot.1-1"
tags:
- delete_volume
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml
index fcf596cd0..def58f5f6 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_create_job_tracking.yml
@@ -1,19 +1,15 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Redfish Storage Volume - Ansible Module"
+ hosts: redfish_hosts
gather_facts: false
- name: "Redfish Storage Volume - Ansible Module"
vars:
retries_count: 100
polling_interval: 10
reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
- collections:
- - dellemc.openmanage
-
tasks:
- - name: "Create a storage volume"
- redfish_storage_volume:
+    - name: Create a storage volume
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -29,18 +25,19 @@
register: result
tags:
- create_volume
+ delegate_to: localhost
- - name: "View the job details to track the status of the create storage volume task"
- uri:
+    - name: View the job details to track the status of the create storage volume task
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -51,38 +48,40 @@
delay: "{{ polling_interval }}"
tags:
- job-tracking
-
- - name: "Reboot the system if the job status is pending."
- uri:
+ delegate_to: localhost
+
+    - name: Reboot the system if the job status is pending.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ reboot_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "POST"
body_format: raw
body: '{"ResetType": "ForceRestart"}'
- use_proxy: yes
+ use_proxy: true
status_code: 204
- return_content: no
- validate_certs: no
- force_basic_auth: yes
+ return_content: false
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
- register: reboot_result
- changed_when: reboot_result.status == 204
+ register: reboot_result
+ changed_when: reboot_result.status == 204
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "View the job details to verify if the task status is completed."
- uri:
+    - name: View the job details to verify if the task status is completed.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -91,3 +90,4 @@
until: final_result.json.TaskState == 'Completed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml
index 34a821d75..5c2858396 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_delete_job_tracking.yml
@@ -1,19 +1,15 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Redfish Storage Volume - Ansible Module"
+ hosts: redfish_hosts
gather_facts: false
- name: "Redfish Storage Volume - Ansible Module"
vars:
retries_count: 100
polling_interval: 10
reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
- collections:
- - dellemc.openmanage
-
tasks:
- - name: "Delete an existing volume."
- redfish_storage_volume:
+ - name: Delete an existing volume.
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -23,18 +19,19 @@
register: result
tags:
- delete_volume
+ delegate_to: localhost
- - name: "View the job details to track the status of the delete storage volume task"
- uri:
+ - name: View the job details to track the status of the delete storage volume task
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -45,38 +42,40 @@
delay: "{{ polling_interval }}"
tags:
- job-tracking
-
- - name: "Reboot the system if the job status is pending."
- uri:
+ delegate_to: localhost
+
+ - name: Reboot the system if the job status is pending.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ reboot_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "POST"
body_format: raw
body: '{"ResetType": "ForceRestart"}'
- use_proxy: yes
+ use_proxy: true
status_code: 204
- return_content: no
- validate_certs: no
- force_basic_auth: yes
+ return_content: false
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
- register: reboot_result
- changed_when: reboot_result.status == 204
+ register: reboot_result
+ changed_when: reboot_result.status == 204
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "View the job details to verify if the task status is completed."
- uri:
+ - name: View the job details to verify if the task status is completed.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -85,3 +84,4 @@
until: final_result.json.TaskState == 'Completed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml
index fb79a2885..4609977b5 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_initialize_job_tracking.yml
@@ -1,19 +1,15 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Redfish Storage Volume - Ansible Module"
+ hosts: redfish_hosts
gather_facts: false
- name: "Redfish Storage Volume - Ansible Module"
vars:
retries_count: 100
polling_interval: 10
reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
- collections:
- - dellemc.openmanage
-
tasks:
- - name: "Initialize an existing volume."
- redfish_storage_volume:
+ - name: Initialize an existing volume.
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -24,18 +20,19 @@
register: result
tags:
- initialize_volume
+ delegate_to: localhost
- - name: "View the job details to track the status of the initialization task"
- uri:
+ - name: View the job details to track the status of the initialization task
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -46,38 +43,40 @@
delay: "{{ polling_interval }}"
tags:
- job-tracking
-
- - name: "Reboot the system if the job status is pending."
- uri:
+ delegate_to: localhost
+
+ - name: Reboot the system if the job status is pending.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ reboot_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "POST"
body_format: raw
body: '{"ResetType": "ForceRestart"}'
- use_proxy: yes
+ use_proxy: true
status_code: 204
- return_content: no
- validate_certs: no
- force_basic_auth: yes
+ return_content: false
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
- register: reboot_result
- changed_when: reboot_result.status == 204
+ register: reboot_result
+ changed_when: reboot_result.status == 204
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "View the job details to verify if the task status is completed."
- uri:
+ - name: View the job details to verify if the task status is completed.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -86,3 +85,4 @@
until: final_result.json.TaskState == 'Completed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml
index 02bbc19d1..5a9d088ce 100644
--- a/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml
+++ b/ansible_collections/dellemc/openmanage/playbooks/redfish/storage/redfish_storage_volume_modify_job_tracking.yml
@@ -1,19 +1,15 @@
---
-- hosts: redfish_hosts
- connection: local
+- name: "Redfish Storage Volume - Ansible Module"
+ hosts: redfish_hosts
gather_facts: false
- name: "Redfish Storage Volume - Ansible Module"
vars:
retries_count: 100
polling_interval: 10
reboot_uri: "/redfish/v1/Systems/System.Embedded.1/Actions/ComputerSystem.Reset"
- collections:
- - dellemc.openmanage
-
tasks:
- - name: "Modify storage volume encryption settings."
- redfish_storage_volume:
+ - name: Modify storage volume encryption settings.
+ dellemc.openmanage.redfish_storage_volume:
baseuri: "{{ baseuri }}"
username: "{{ username }}"
password: "{{ password }}"
@@ -25,18 +21,19 @@
register: result
tags:
- modify_volume
+ delegate_to: localhost
- - name: "View the job details to track the status of the modify storage volume encryption task"
- uri:
+ - name: View the job details to track the status of the modify storage volume encryption task
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -47,38 +44,40 @@
delay: "{{ polling_interval }}"
tags:
- job-tracking
-
- - name: "Reboot the system if the job status is pending."
- uri:
+ delegate_to: localhost
+
+ - name: Reboot the system if the job status is pending.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ reboot_uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "POST"
body_format: raw
body: '{"ResetType": "ForceRestart"}'
- use_proxy: yes
+ use_proxy: true
status_code: 204
- return_content: no
- validate_certs: no
- force_basic_auth: yes
+ return_content: false
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
- register: reboot_result
- changed_when: reboot_result.status == 204
+ register: reboot_result
+ changed_when: reboot_result.status == 204
when: job_result.json.TaskState == 'Pending' and job_result.json.Messages.0.Message == 'Task successfully scheduled.'
+ delegate_to: localhost
- - name: "View the job details to verify if the task status is completed."
- uri:
+ - name: View the job details to verify if the task status is completed.
+ ansible.builtin.uri:
url: "https://{{ baseuri }}{{ result.task.uri }}"
user: "{{ username }}"
password: "{{ password }}"
method: "GET"
- use_proxy: yes
+ use_proxy: true
status_code: 200, 202
- return_content: yes
- validate_certs: no
- force_basic_auth: yes
+ return_content: true
+ validate_certs: false
+ force_basic_auth: true
headers:
Content-Type: "application/json"
Accept: "application/json"
@@ -87,3 +86,4 @@
until: final_result.json.TaskState == 'Completed'
retries: "{{ retries_count }}"
delay: "{{ polling_interval }}"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/README.md b/ansible_collections/dellemc/openmanage/playbooks/roles/README.md
new file mode 100644
index 000000000..0535438b3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/README.md
@@ -0,0 +1,56 @@
+# Roles Playbooks
+
+### Using the Example playbooks
+
+1. Update the inventory
+Update the iDRAC IPs and hostnames in the `inventory` file.
+
+1. Update the variables required for a role under the `vars_files` folder.
+
+1. Run the role with the following command (a worked example follows the folder tree below):
+`ansible-playbook <role_folder>/<role_file>.yml -i inventory`
+
+### Folder Tree
+
+```
+.
+├── idrac_attributes
+│   └── idrac_attributes.yml
+├── idrac_bios
+│   └── idrac_bios.yml
+├── idrac_certificate
+│   └── idrac_certificate.yml
+├── idrac_export_server_config_profile
+│   └── idrac_export_server_config_profile.yml
+├── idrac_firmware
+│   └── idrac_firmware.yml
+├── idrac_gather_facts
+│   └── idrac_gather_facts.yml
+├── idrac_import_server_config_profile
+│   └── idrac_import_server_config_profile.yml
+├── idrac_os_deployment
+│   └── idrac_os_deployment.yml
+├── idrac_reset
+│   └── idrac_reset.yml
+├── idrac_server_powerstate
+│   └── idrac_server_powerstate.yml
+├── idrac_storage_controller
+│   └── idrac_storage_controller.yml
+├── inventory
+├── README.md
+├── redfish_firmware
+│   └── redfish_firmware.yml
+├── redfish_storage_volume
+│   └── redfish_storage_volume.yml
+└── vars_files
+    ├── bios.yml
+    ├── certificates.yml
+    ├── credentials.yml
+    ├── export.yml
+    ├── firmware.yml
+    ├── import.yml
+    ├── osd.yml
+    ├── reset.yml
+    ├── storage_controller.yml
+    └── storage.yml
+```
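
A usage sketch for the README above, assuming the sample `inventory` file added later in this patch and credentials filled in under `vars_files/credentials.yml`; run from the `playbooks/roles` folder, the `idrac_bios` role playbook, for example, would be invoked as:

```
ansible-playbook idrac_bios/idrac_bios.yml -i inventory
```
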
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_attributes/idrac_attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_attributes/idrac_attributes.yml
new file mode 100644
index 000000000..3bd391f9b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_attributes/idrac_attributes.yml
@@ -0,0 +1,9 @@
+---
+- name: Manage attributes configurations in iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/attributes.yml
+ roles:
+ - dellemc.openmanage.idrac_attributes
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_bios/idrac_bios.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_bios/idrac_bios.yml
new file mode 100644
index 000000000..6ef77791a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_bios/idrac_bios.yml
@@ -0,0 +1,9 @@
+---
+- name: Manage bios configurations in iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/bios.yml
+ roles:
+ - dellemc.openmanage.idrac_bios
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_certificate/idrac_certificate.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_certificate/idrac_certificate.yml
new file mode 100644
index 000000000..0f0abd34e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_certificate/idrac_certificate.yml
@@ -0,0 +1,9 @@
+---
+- name: Manage certificates in iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/certificates.yml
+ roles:
+ - dellemc.openmanage.idrac_certificate
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_export_server_config_profile/idrac_export_server_config_profile.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_export_server_config_profile/idrac_export_server_config_profile.yml
new file mode 100644
index 000000000..c11d7a8fc
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_export_server_config_profile/idrac_export_server_config_profile.yml
@@ -0,0 +1,9 @@
+---
+- name: Export Server configuration profile
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/export.yml
+ roles:
+ - dellemc.openmanage.idrac_export_server_config_profile
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_firmware/idrac_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_firmware/idrac_firmware.yml
new file mode 100644
index 000000000..4cdbba537
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_firmware/idrac_firmware.yml
@@ -0,0 +1,11 @@
+---
+- name: Firmware update using catalog for iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/firmware.yml
+ vars:
+ hostname: "{{ inventory_hostname }}"
+ roles:
+ - dellemc.openmanage.idrac_firmware
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_gather_facts/idrac_gather_facts.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_gather_facts/idrac_gather_facts.yml
new file mode 100644
index 000000000..d49b08bc0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_gather_facts/idrac_gather_facts.yml
@@ -0,0 +1,10 @@
+---
+- name: Get iDRAC information
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ vars:
+ target: "{{ gather_facts_target }}"
+ roles:
+ - dellemc.openmanage.idrac_gather_facts
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_import_server_config_profile/idrac_import_server_config_profile.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_import_server_config_profile/idrac_import_server_config_profile.yml
new file mode 100644
index 000000000..7b2025325
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_import_server_config_profile/idrac_import_server_config_profile.yml
@@ -0,0 +1,9 @@
+---
+- name: Import Server configuration profile
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/import.yml
+ roles:
+ - dellemc.openmanage.idrac_import_server_config_profile
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_job_queue/idrac_job_queue.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_job_queue/idrac_job_queue.yml
new file mode 100644
index 000000000..b234ca8af
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_job_queue/idrac_job_queue.yml
@@ -0,0 +1,10 @@
+---
+- name: Manage the iDRAC lifecycle controller job queue.
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ vars:
+ clear_job_queue: true
+ roles:
+ - dellemc.openmanage.idrac_job_queue
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_os_deployment/idrac_os_deployment.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_os_deployment/idrac_os_deployment.yml
new file mode 100644
index 000000000..79f08501d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_os_deployment/idrac_os_deployment.yml
@@ -0,0 +1,9 @@
+---
+- name: Operating System Deployment in iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/osd.yml
+ roles:
+ - dellemc.openmanage.idrac_os_deployment
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_reset/idrac_reset.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_reset/idrac_reset.yml
new file mode 100644
index 000000000..b46e3dfb0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_reset/idrac_reset.yml
@@ -0,0 +1,9 @@
+---
+- name: Reset iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/reset.yml
+ roles:
+ - dellemc.openmanage.idrac_reset
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_server_powerstate/idrac_server_powerstate.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_server_powerstate/idrac_server_powerstate.yml
new file mode 100644
index 000000000..b2e7c1f46
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_server_powerstate/idrac_server_powerstate.yml
@@ -0,0 +1,10 @@
+---
+- name: Manage iDRAC server power cycle
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ vars:
+ reset_type: "On"
+ roles:
+ - dellemc.openmanage.idrac_server_powerstate
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_storage_controller/idrac_storage_controller.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_storage_controller/idrac_storage_controller.yml
new file mode 100644
index 000000000..b8763f4ac
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/idrac_storage_controller/idrac_storage_controller.yml
@@ -0,0 +1,9 @@
+---
+- name: Manage storage controller in iDRAC
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/storage_controller.yml
+ roles:
+ - dellemc.openmanage.idrac_storage_controller
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/inventory b/ansible_collections/dellemc/openmanage/playbooks/roles/inventory
new file mode 100644
index 000000000..5da778f1c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/inventory
@@ -0,0 +1,4 @@
+[idrac]
+192.168.0.1
+192.168.0.2
+192.168.0.3
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/redfish_firmware/redfish_firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/redfish_firmware/redfish_firmware.yml
new file mode 100644
index 000000000..915d56627
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/redfish_firmware/redfish_firmware.yml
@@ -0,0 +1,9 @@
+---
+- name: Firmware update using DUP file
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/firmware.yml
+ roles:
+ - dellemc.openmanage.redfish_firmware
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/redfish_storage_volume/redfish_storage_volume.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/redfish_storage_volume/redfish_storage_volume.yml
new file mode 100644
index 000000000..06684710d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/redfish_storage_volume/redfish_storage_volume.yml
@@ -0,0 +1,9 @@
+---
+- name: Redfish Storage volume Configurations
+ hosts: idrac
+ gather_facts: false
+ vars_files:
+ - ../vars_files/credentials.yml
+ - ../vars_files/storage.yml
+ roles:
+ - dellemc.openmanage.redfish_storage_volume
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/attributes.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/attributes.yml
new file mode 100644
index 000000000..86278b227
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/attributes.yml
@@ -0,0 +1,8 @@
+idrac_attributes:
+ SNMP.1.AgentCommunity: test
+ SNMP.1.AgentEnable: Enabled
+ SNMP.1.DiscoveryPort: 161
+system_attributes:
+ ServerOS.1.HostName: demohostname
+lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: Disabled
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/bios.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/bios.yml
new file mode 100644
index 000000000..cf60e098e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/bios.yml
@@ -0,0 +1,13 @@
+attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+apply_time: "Immediate"
+maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+clear_pending: false
+reset_bios: false
+reset_type: "graceful_restart"
+job_wait: true
+job_wait_timeout: 1200
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/certificates.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/certificates.yml
new file mode 100644
index 000000000..32ebd4aca
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/certificates.yml
@@ -0,0 +1,13 @@
+command: "generate_csr"
+certificate_type: "HTTPS"
+certificate_path: "/home/omam/mycerts"
+cert_params:
+ common_name: "sample.domain.com"
+ organization_unit: "OrgUnit"
+ locality_name: "Bangalore"
+ state_name: "Karnataka"
+ country_code: "IN"
+ email_address: "admin@domain.com"
+ organization_name: "OrgName"
+ subject_alt_name:
+ - 192.168.0.3
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/credentials.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/credentials.yml
new file mode 100644
index 000000000..78990bb87
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/credentials.yml
@@ -0,0 +1,27 @@
+hostname: "{{ inventory_hostname }}"
+username: "username"
+password: "password"
+
+# Required for Export Role
+idrac_ip: "{{ inventory_hostname }}"
+idrac_user: "{{ username }}"
+idrac_password: "{{ password }}"
+
+validate_certs: false
+proxy_params:
+ server: 192.168.0.6
+ port: 8080
+ username: "proxy_user"
+ password: "proxy_password"
+
+repo_details:
+ proto: "https"
+ ip: "192.168.0.5"
+ username: "username"
+ password: "password"
+
+http_url: "{{ repo_details.proto }}://{{ repo_details.ip }}/{{ http_path }}"
+cifs_url: "//{{ repo_details.ip }}"
+nfs_url: "{{ repo_details.ip }}:/{{ nfs_path }}"
+
+gather_facts_target: "System"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/export.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/export.yml
new file mode 100644
index 000000000..6749763ee
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/export.yml
@@ -0,0 +1,14 @@
+share_parameters:
+ share_name: "{{ repo_url }}/myshare"
+ share_user: "{{ repo_details.username }}"
+ share_password: "{{ repo_details.password }}"
+ scp_file: "filename.xml"
+ proxy_support: false
+ proxy_server: "{{ proxy_params.server }}"
+ proxy_port: "{{ proxy_params.port }}"
+ proxy_type: socks4
+ ignore_certificate_warning: ignore
+target: ["ALL"]
+include_in_export: default
+export_format: "XML"
+export_use: "Default"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/firmware.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/firmware.yml
new file mode 100644
index 000000000..588d8376b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/firmware.yml
@@ -0,0 +1,16 @@
+# Role idrac_firmware Vars
+share_name: "{{ repo_url }}/firmware_repo/"
+catalog_file_name: "MyCatalog.xml"
+reboot: true
+job_wait: true
+apply_update: true
+proxy_support: ParametersProxy
+proxy_server: "{{ proxy_params.server }}"
+proxy_type: HTTP
+proxy_port: "{{ proxy_params.port }}"
+proxy_uname: "{{ proxy_params.username }}"
+proxy_passwd: "{{ proxy_params.password }}"
+
+# Role redfish_firmware Vars
+image_uri: "{{ repo_url }}/firmware_repo/component.exe"
+transfer_protocol: "HTTP"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/import.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/import.yml
new file mode 100644
index 000000000..3696a7d63
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/import.yml
@@ -0,0 +1,11 @@
+share_parameters:
+ share_name: "{{ repo_url }}/scp_share"
+ share_user: "{{ repo_details.username }}"
+ share_password: "{{ repo_details.password }}"
+ scp_file: "filename.xml"
+ proxy_support: false
+ proxy_server: "{{ proxy_params.server }}"
+ proxy_port: "{{ proxy_params.port }}"
+ proxy_type: socks4
+ ignore_certificate_warning: ignore
+target: ["ALL"]
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/osd.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/osd.yml
new file mode 100644
index 000000000..c27c7c2d3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/osd.yml
@@ -0,0 +1,27 @@
+os_name: rhel
+os_version: 9
+source:
+ protocol: "{{ repo_details.proto }}"
+ hostname: "{{ repo_details.ip }}"
+ iso_path: "/users/rhel"
+ iso_name: "esxi.iso"
+ # ks_path: "/users/rhel/myks.cfg"
+ # is_custom_iso: true
+ # username: "username"
+ # password: "password"
+destination:
+ protocol: "{{ repo_details.proto }}"
+ hostname: "{{ repo_details.ip }}"
+ iso_path: "/{{ http_path }}"
+ # iso_name: "my_custom_rhel_name.iso"
+ mountpoint: "/var/www/repo"
+ os_type: "linux"
+ username: "username"
+ password: "password"
+
+wait_for_os_deployment: true
+os_deployment_timeout: 30
+eject_iso: true
+delete_custom_iso: true
+rhel_rootpw: ""
+esxi_rootpw: ""
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/reset.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/reset.yml
new file mode 100644
index 000000000..808aa0683
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/reset.yml
@@ -0,0 +1,3 @@
+wait_for_idrac: true
+force_restart: false
+reset_to_default: "Default"
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage.yml
new file mode 100644
index 000000000..ba22d319f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage.yml
@@ -0,0 +1,12 @@
+state: "present"
+raid_type: "RAID1"
+name: "VD0"
+controller_id: "RAID.Slot.1-1"
+drives:
+ - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1
+block_size_bytes: 512
+capacity_bytes: 299439751168
+optimum_io_size_bytes: 65536
+encryption_types: NativeDriveEncryption
+encrypted: true
diff --git a/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage_controller.yml b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage_controller.yml
new file mode 100644
index 000000000..7d754c646
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/playbooks/roles/vars_files/storage_controller.yml
@@ -0,0 +1,21 @@
+controller_id: "RAID.Integrated.1-1"
+attributes:
+ ControllerMode: "RAID"
+apply_time: "Immediate"
+maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 1200
+disks:
+ id: "Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"
+ status: "online"
+volumes:
+ id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ blink: true
+reset_config: false
+set_controller_key: false
+re_key: false
+key: "PassPhrase@123"
+key_id: "mykeyid123"
+old_key: "OldPhassParse@123"
+mode: "LKM"
+remove_key: false
diff --git a/ansible_collections/dellemc/openmanage/plugins/README.md b/ansible_collections/dellemc/openmanage/plugins/README.md
index e5200a2da..7711a1d84 100644
--- a/ansible_collections/dellemc/openmanage/plugins/README.md
+++ b/ansible_collections/dellemc/openmanage/plugins/README.md
@@ -10,6 +10,8 @@ Here are the list of modules and module_utils supported by Dell.
├── omem_auth_options.py
├── oment_auth_options.py
└── redfish_auth_options.py
+├── inventory
+ └── ome_inventory.py
├── module_utils
├── dellemc_idrac.py
├── idrac_redfish.py
@@ -19,8 +21,6 @@ Here are the list of modules and module_utils supported by Dell.
└── modules
├── dellemc_configure_idrac_eventing.py
├── dellemc_configure_idrac_services.py
- ├── dellemc_get_firmware_inventory.py
- ├── dellemc_get_system_inventory.py
├── dellemc_idrac_lc_attributes.py
├── dellemc_idrac_storage_volume.py
├── dellemc_system_lockdown_mode.py
@@ -30,11 +30,13 @@ Here are the list of modules and module_utils supported by Dell.
├── idrac_certificates.py
├── idrac_firmware.py
├── idrac_firmware_info.py
+ ├── idrac_license.py
├── idrac_lifecycle_controller_job_status_info.py
├── idrac_lifecycle_controller_jobs.py
├── idrac_lifecycle_controller_logs.py
├── idrac_lifecycle_controller_status_info.py
├── idrac_network.py
+ ├── idrac_network_attributes.py
├── idrac_os_deployment.py
├── idrac_redfish_storage_controller.py
├── idrac_reset.py
@@ -43,8 +45,13 @@ Here are the list of modules and module_utils supported by Dell.
├── idrac_system_info.py
├── idrac_timezone_ntp.py
├── idrac_user.py
+ ├── idrac_user_info.py
├── idrac_virtual_media.py
├── ome_active_directory.py
+ ├── ome_alert_policies_message_id_info.py
+ ├── ome_alert_policies_info.py
+ ├── ome_alert_policies_actions_info.py
+ ├── ome_alert_policies_category_info.py
├── ome_application_alerts_smtp.py
├── ome_application_alerts_syslog.py
├── ome_application_certificate.py
@@ -83,18 +90,23 @@ Here are the list of modules and module_utils supported by Dell.
├── ome_network_vlan_info.py
├── ome_powerstate.py
├── ome_profile.py
+ ├── ome_profile_info.py
├── ome_server_interface_profile_info.py
├── ome_server_interface_profiles.py
+ ├── ome_smart_fabric_info.py
├── ome_smart_fabric.py
+ ├── ome_smart_fabric_uplink_info.py
├── ome_smart_fabric_uplink.py
├── ome_template.py
├── ome_template_identity_pool.py
├── ome_template_info.py
├── ome_template_network_vlan.py
+ ├── ome_template_network_vlan_info.py
├── ome_user.py
├── ome_user_info.py
├── redfish_event_subscription.py
├── redfish_firmware.py
+ ├── redfish_firmware_rollback.py
├── redfish_powerstate.py
└── redfish_storage_volume.py
-``` \ No newline at end of file
+```
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
index 5ca16d6d7..cdad6d1b8 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/idrac_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -18,17 +18,23 @@ class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
idrac_ip:
- required: True
+ required: true
type: str
description: iDRAC IP Address.
idrac_user:
- required: True
+ required: true
type: str
- description: iDRAC username.
+ description:
+ - iDRAC username.
+ - If the username is not provided, then the environment variable C(IDRAC_USERNAME) is used.
+ - "Example: export IDRAC_USERNAME=username"
idrac_password:
- required: True
+ required: true
type: str
- description: iDRAC user password.
+ description:
+ - iDRAC user password.
+ - If the password is not provided, then the environment variable C(IDRAC_PASSWORD) is used.
+ - "Example: export IDRAC_PASSWORD=password"
aliases: ['idrac_pwd']
idrac_port:
type: int
@@ -36,11 +42,11 @@ options:
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
index f0ebb7e3a..cc4a6289d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/network_share_options.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.0.0
-# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -18,7 +18,7 @@ class ModuleDocFragment(object):
DOCUMENTATION = r'''
options:
share_name:
- required: True
+ required: true
type: str
description: Network share or a local path.
share_user:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
index b84c50d55..7627a6621 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/ome_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,26 +20,32 @@ options:
hostname:
description: OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
type: str
- required: True
+ required: true
username:
- description: OpenManage Enterprise or OpenManage Enterprise Modular username.
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular username.
+ - If the username is not provided, then the environment variable C(OME_USERNAME) is used.
+ - "Example: export OME_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: OpenManage Enterprise or OpenManage Enterprise Modular password.
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular password.
+ - If the password is not provided, then the environment variable C(OME_PASSWORD) is used.
+ - "Example: export OME_PASSWORD=password"
type: str
- required: True
+ required: true
port:
description: OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
type: int
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
index d8c616b2a..e611e02df 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/omem_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,26 +20,32 @@ options:
hostname:
description: OpenManage Enterprise Modular IP address or hostname.
type: str
- required: True
+ required: true
username:
- description: OpenManage Enterprise Modular username.
+ description:
+ - OpenManage Enterprise Modular username.
+ - If the username is not provided, then the environment variable C(OME_USERNAME) is used.
+ - "Example: export OME_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: OpenManage Enterprise Modular password.
+ description:
+ - OpenManage Enterprise Modular password.
+ - If the password is not provided, then the environment variable C(OME_PASSWORD) is used.
+ - "Example: export OME_PASSWORD=password"
type: str
- required: True
+ required: true
port:
description: OpenManage Enterprise Modular HTTPS port.
type: int
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
index 85b1553f7..676ded435 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/oment_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,26 +20,32 @@ options:
hostname:
description: OpenManage Enterprise IP address or hostname.
type: str
- required: True
+ required: true
username:
- description: OpenManage Enterprise username.
+ description:
+ - OpenManage Enterprise username.
+ - If the username is not provided, then the environment variable C(OME_USERNAME) is used.
+ - "Example: export OME_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: OpenManage Enterprise password.
+ description:
+ - OpenManage Enterprise password.
+ - If the password is not provided, then the environment variable C(OME_PASSWORD) is used.
+ - "Example: export OME_PASSWORD=password"
type: str
- required: True
+ required: true
port:
description: OpenManage Enterprise HTTPS port.
type: int
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
index 8eb1eda15..452bc8a2d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
+++ b/ansible_collections/dellemc/openmanage/plugins/doc_fragments/redfish_auth_options.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,22 +20,28 @@ options:
baseuri:
description: "IP address of the target out-of-band controller. For example- <ipaddress>:<port>."
type: str
- required: True
+ required: true
username:
- description: Username of the target out-of-band controller.
+ description:
+ - Username of the target out-of-band controller.
+ - If the username is not provided, then the environment variable C(IDRAC_USERNAME) is used.
+ - "Example: export IDRAC_USERNAME=username"
type: str
- required: True
+ required: true
password:
- description: Password of the target out-of-band controller.
+ description:
+ - Password of the target out-of-band controller.
+ - If the password is not provided, then the environment variable C(IDRAC_PASSWORD) is used.
+ - "Example: export IDRAC_PASSWORD=password"
type: str
- required: True
+ required: true
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
diff --git a/ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py b/ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py
new file mode 100644
index 000000000..93171a429
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/inventory/ome_inventory.py
@@ -0,0 +1,208 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.4.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = """
+---
+name: ome_inventory
+short_description: Group inventory plugin on OpenManage Enterprise.
+description: This plugin allows the retrieval of inventory hosts from groups on OpenManage Enterprise.
+version_added: "7.1.0"
+options:
+ hostname:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular IP address or hostname.
+ - If the value is not specified in the task, the value of environment variable C(OME_HOSTNAME) will be used instead.
+ env:
+ - name: OME_HOSTNAME
+ type: str
+ required: true
+ username:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular username.
+ - If the value is not specified in the task, the value of environment variable C(OME_USERNAME) will be used instead.
+ env:
+ - name: OME_USERNAME
+ type: str
+ required: true
+ password:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular password.
+ - If the value is not specified in the task, the value of environment variable C(OME_PASSWORD) will be used instead.
+ env:
+ - name: OME_PASSWORD
+ type: str
+ required: true
+ port:
+ description:
+ - OpenManage Enterprise or OpenManage Enterprise Modular HTTPS port.
+ - If the value is not specified in the task, the value of environment variable C(OME_PORT) will be used instead.
+ type: int
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ ome_group_name:
+ description: Group name.
+ type: str
+ required: false
+ host_vars:
+ description: To include host related variables in the inventory source.
+ type: dict
+ required: false
+ group_vars:
+ description: To include group variables in the inventory source.
+ type: dict
+ required: false
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this plugin on a system that has direct access to Dell OpenManage Enterprise.
+"""
+
+from ansible.plugins.inventory import BaseInventoryPlugin
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
+
+GROUP_API = "GroupService/Groups"
+
+
+class InventoryModule(BaseInventoryPlugin):
+
+ NAME = "dellemc.openmanage.ome_inventory"
+
+ def __init__(self):
+ super(InventoryModule, self).__init__()
+ self.config = None
+
+ def _get_connection_resp(self):
+ port = self.get_option("port") if "port" in self.config else 443
+ validate_certs = self.get_option("validate_certs") if "validate_certs" in self.config else False
+ module_params = {"hostname": self.get_option("hostname"), "username": self.get_option("username"),
+ "password": self.get_option("password"), "port": port, "validate_certs": validate_certs}
+ if "ca_path" in self.config:
+ module_params.update({"ca_path": self.get_option("ca_path")})
+ with RestOME(module_params, req_session=False) as ome:
+ all_group_data = get_all_data_with_pagination(ome, GROUP_API)
+ return all_group_data
+
+ def _set_host_vars(self, host):
+ self.inventory.set_variable(host, "idrac_ip", host)
+ self.inventory.set_variable(host, "baseuri", host)
+ self.inventory.set_variable(host, "hostname", host)
+ if "host_vars" in self.config:
+ host_vars = self.get_option("host_vars")
+ for key, val in dict(host_vars).items():
+ self.inventory.set_variable(host, key, val)
+
+ def _set_group_vars(self, group):
+ self.inventory.add_group(group)
+ if "group_vars" in self.config:
+ group_vars = self.get_option("group_vars")
+ if group in dict(group_vars):
+ for key, val in dict(dict(group_vars)[group]).items():
+ self.inventory.set_variable(group, key, val)
+
+ def _get_device_host(self, mgmt):
+ if len(mgmt["DeviceManagement"]) == 1 and mgmt["DeviceManagement"][0]["NetworkAddress"].startswith("["):
+ dev_host = mgmt["DeviceManagement"][0]["NetworkAddress"][1:-1]
+ elif len(mgmt["DeviceManagement"]) == 2 and mgmt["DeviceManagement"][0]["NetworkAddress"].startswith("["):
+ dev_host = mgmt["DeviceManagement"][1]["NetworkAddress"]
+ else:
+ dev_host = mgmt["DeviceManagement"][0]["NetworkAddress"]
+ return dev_host
+
+ def _get_all_devices(self, device_uri):
+ device_host = []
+ device_host_uri = device_uri.strip("/api/")
+ port = self.get_option("port") if "port" in self.config else 443
+ validate_certs = self.get_option("validate_certs") if "validate_certs" in self.config else False
+ module_params = {
+ "hostname": self.get_option("hostname"),
+ "username": self.get_option("username"),
+ "password": self.get_option("password"),
+ "port": port,
+ "validate_certs": validate_certs}
+ if "ca_path" in self.config:
+ module_params.update({"ca_path": self.get_option("ca_path")})
+ with RestOME(module_params, req_session=False) as ome:
+ device_resp = get_all_data_with_pagination(ome, device_host_uri)
+ device_data = device_resp.get("report_list", [])
+ if device_data is not None:
+ for mgmt in device_data:
+ if (len(mgmt["DeviceManagement"]) != 0):
+ device_host.append(self._get_device_host(mgmt))
+ return device_host
+
+ def _set_child_group(self, group_data):
+ port = self.get_option("port") if "port" in self.config else 443
+ validate_certs = self.get_option("validate_certs") if "validate_certs" in self.config else False
+ module_params = {"hostname": self.get_option("hostname"), "username": self.get_option("username"),
+ "password": self.get_option("password"), "port": port, "validate_certs": validate_certs}
+ if "ca_path" in self.config:
+ module_params.update({"ca_path": self.get_option("ca_path")})
+ with RestOME(module_params, req_session=False) as ome:
+ for gdata in group_data:
+ group_name = gdata["Name"]
+ subgroup_uri = gdata["SubGroups@odata.navigationLink"].strip("/api/")
+ sub_group = get_all_data_with_pagination(ome, subgroup_uri)
+ gdata = sub_group.get("report_list", [])
+ if gdata:
+ self._add_group_data(gdata)
+ self._add_child_group_data(group_name, gdata)
+
+ def _add_child_group_data(self, group_name, gdata):
+ for child_name in gdata:
+ self.inventory.add_child(group_name, child_name["Name"])
+
+ def _add_group_data(self, group_data):
+ visible_gdata = list(filter(lambda d: d.get("Visible") in [False], group_data))
+ if visible_gdata:
+ for gp in visible_gdata:
+ group_data.remove(gp)
+ for gdata in group_data:
+ self._set_group_vars(gdata["Name"])
+ device_ip = self._get_all_devices(gdata["AllLeafDevices@odata.navigationLink"])
+ for hst in device_ip:
+ self.inventory.add_host(host=hst, group=gdata["Name"])
+ self._set_host_vars(hst)
+ self._set_child_group(group_data)
+
+ def _populate(self, all_group_data):
+ group_data = all_group_data.get("report_list", [])
+ group_name = str(self.get_option("ome_group_name")) if "ome_group_name" in self.config else None
+ if group_name is not None:
+ group_data = list(filter(lambda d: d.get("Name").lower() in [group_name.lower()], group_data))
+ elif group_name is None:
+ group_data = list(filter(lambda d: d.get("Name") in ["All Devices"], group_data))
+ self._add_group_data(group_data)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self.config = self._read_config_data(path)
+ all_group_data = self._get_connection_resp()
+ self._populate(all_group_data)
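
A minimal sketch of an inventory source that drives this plugin, using only options declared in its DOCUMENTATION block; the file name `ome_inventory.yml` and all values are placeholders:

```yaml
# ome_inventory.yml - inventory source consumed by dellemc.openmanage.ome_inventory
plugin: dellemc.openmanage.ome_inventory
hostname: "192.168.0.1"        # OME/OME-M address; falls back to OME_HOSTNAME
username: "admin"              # falls back to OME_USERNAME
password: "password"           # falls back to OME_PASSWORD
validate_certs: false
ome_group_name: "All Devices"  # omit to default to the All Devices group (see _populate above)
host_vars:                     # set on every discovered host
  idrac_user: "admin"
  idrac_password: "password"
group_vars:                    # keyed by group name, applied by _set_group_vars
  "All Devices":
    idrac_port: 443
```

Provided the controller's configuration allows this inventory plugin, the source could then be inspected with, for example, `ansible-inventory -i ome_inventory.yml --graph`.
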
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
index fee5339c5..b2b2240d0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/dellemc_idrac.py
@@ -1,7 +1,7 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
@@ -29,10 +29,10 @@
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
+from ansible.module_utils.common.parameters import env_fallback
try:
from omsdk.sdkinfra import sdkinfra
from omsdk.sdkcreds import UserCredentials
- from omsdk.sdkfile import FileOnShare, file_share_manager
from omsdk.sdkprotopref import ProtoPreference, ProtocolEnum
from omsdk.http.sdkwsmanbase import WsManOptions
HAS_OMSDK = True
@@ -42,8 +42,8 @@ except ImportError:
idrac_auth_params = {
"idrac_ip": {"required": True, "type": 'str'},
- "idrac_user": {"required": True, "type": 'str'},
- "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_user": {"required": True, "type": 'str', "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
"idrac_port": {"required": False, "default": 443, "type": 'int'},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
@@ -55,7 +55,7 @@ class iDRACConnection:
def __init__(self, module_params):
if not HAS_OMSDK:
- raise ImportError("Dell EMC OMSDK library is required for this module")
+ raise ImportError("Dell OMSDK library is required for this module")
self.idrac_ip = module_params['idrac_ip']
self.idrac_user = module_params['idrac_user']
self.idrac_pwd = module_params['idrac_password']
@@ -72,7 +72,7 @@ class iDRACConnection:
self.ca_path = self._get_omam_ca_env()
verify_ssl = self.ca_path
timeout = module_params.get("timeout", 30)
- if not timeout or type(timeout) != int:
+ if not timeout or not isinstance(timeout, int):
timeout = 30
self.pOp = WsManOptions(port=self.idrac_port, read_timeout=timeout, verify_ssl=verify_ssl)
self.sdk = sdkinfra()
@@ -81,6 +81,7 @@ class iDRACConnection:
raise RuntimeError(msg)
def __enter__(self):
+ self.idrac_ip = self.idrac_ip.strip('[]')
self.sdk.importPath()
protopref = ProtoPreference(ProtocolEnum.WSMAN)
protopref.include_only(ProtocolEnum.WSMAN)
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
index 168c8277d..cf4581e89 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/idrac_redfish.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.0.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -36,11 +36,13 @@ import os
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.parameters import env_fallback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
idrac_auth_params = {
"idrac_ip": {"required": True, "type": 'str'},
- "idrac_user": {"required": True, "type": 'str'},
- "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True},
+ "idrac_user": {"required": True, "type": 'str', "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "idrac_password": {"required": True, "type": 'str', "aliases": ['idrac_pwd'], "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
"idrac_port": {"required": False, "default": 443, "type": 'int'},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
@@ -108,6 +110,7 @@ class iDRACRedfishAPI(object):
self.session_id = None
self.protocol = 'https'
self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.ipaddress = config_ipv6(self.ipaddress)
def _get_url(self, uri):
return "{0}://{1}:{2}{3}".format(self.protocol, self.ipaddress, self.port, uri)
@@ -203,7 +206,7 @@ class iDRACRedfishAPI(object):
This method fetches the connected server generation.
:return: 14, 4.11.11.11
"""
- model, firmware_version = None, None
+ firmware_version = None
response = self.invoke_request(MANAGER_URI, 'GET')
if response.status_code == 200:
generation = int(re.search(r"\d+(?=G)", response.json_data["Model"]).group())
@@ -250,7 +253,7 @@ class iDRACRedfishAPI(object):
return response
def export_scp(self, export_format=None, export_use=None, target=None,
- job_wait=False, share=None):
+ job_wait=False, share=None, include_in_export="Default"):
"""
This method exports system configuration details from the system.
:param export_format: XML or JSON.
@@ -275,6 +278,21 @@ class iDRACRedfishAPI(object):
payload["ShareParameters"]["Username"] = share["username"]
if share.get("password") is not None:
payload["ShareParameters"]["Password"] = share["password"]
+ if share.get("ignore_certificate_warning") is not None:
+ payload["ShareParameters"]["IgnoreCertificateWarning"] = share["ignore_certificate_warning"]
+ if share.get("proxy_support") is not None:
+ payload["ShareParameters"]["ProxySupport"] = share["proxy_support"]
+ if share.get("proxy_type") is not None:
+ payload["ShareParameters"]["ProxyType"] = share["proxy_type"]
+ if share.get("proxy_port") is not None:
+ payload["ShareParameters"]["ProxyPort"] = share["proxy_port"]
+ if share.get("proxy_server") is not None:
+ payload["ShareParameters"]["ProxyServer"] = share["proxy_server"]
+ if share.get("proxy_username") is not None:
+ payload["ShareParameters"]["ProxyUserName"] = share["proxy_username"]
+ if share.get("proxy_password") is not None:
+ payload["ShareParameters"]["ProxyPassword"] = share["proxy_password"]
+ payload["IncludeInExport"] = include_in_export
response = self.invoke_request(EXPORT_URI, "POST", data=payload)
if response.status_code == 202 and job_wait:
task_uri = response.headers["Location"]
@@ -311,10 +329,21 @@ class iDRACRedfishAPI(object):
payload["ShareParameters"]["Username"] = share["username"]
if share.get("password") is not None:
payload["ShareParameters"]["Password"] = share["password"]
+ if share.get("ignore_certificate_warning") is not None:
+ payload["ShareParameters"]["IgnoreCertificateWarning"] = share["ignore_certificate_warning"]
+ if share.get("proxy_support") is not None:
+ payload["ShareParameters"]["ProxySupport"] = share["proxy_support"]
+ if share.get("proxy_type") is not None:
+ payload["ShareParameters"]["ProxyType"] = share["proxy_type"]
+ if share.get("proxy_port") is not None:
+ payload["ShareParameters"]["ProxyPort"] = share["proxy_port"]
+ if share.get("proxy_server") is not None:
+ payload["ShareParameters"]["ProxyServer"] = share["proxy_server"]
+ if share.get("proxy_username") is not None:
+ payload["ShareParameters"]["ProxyUserName"] = share["proxy_username"]
+ if share.get("proxy_password") is not None:
+ payload["ShareParameters"]["ProxyPassword"] = share["proxy_password"]
response = self.invoke_request(IMPORT_URI, "POST", data=payload)
- if response.status_code == 202 and job_wait:
- task_uri = response.headers["Location"]
- response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
return response
def import_preview(self, import_buffer=None, target=None, share=None, job_wait=False):
@@ -335,6 +364,20 @@ class iDRACRedfishAPI(object):
payload["ShareParameters"]["Username"] = share["username"]
if share.get("password") is not None:
payload["ShareParameters"]["Password"] = share["password"]
+ if share.get("ignore_certificate_warning") is not None:
+ payload["ShareParameters"]["IgnoreCertificateWarning"] = share["ignore_certificate_warning"]
+ if share.get("proxy_support") is not None:
+ payload["ShareParameters"]["ProxySupport"] = share["proxy_support"]
+ if share.get("proxy_type") is not None:
+ payload["ShareParameters"]["ProxyType"] = share["proxy_type"]
+ if share.get("proxy_port") is not None:
+ payload["ShareParameters"]["ProxyPort"] = share["proxy_port"]
+ if share.get("proxy_server") is not None:
+ payload["ShareParameters"]["ProxyServer"] = share["proxy_server"]
+ if share.get("proxy_username") is not None:
+ payload["ShareParameters"]["ProxyUserName"] = share["proxy_username"]
+ if share.get("proxy_password") is not None:
+ payload["ShareParameters"]["ProxyPassword"] = share["proxy_password"]
response = self.invoke_request(IMPORT_PREVIEW, "POST", data=payload)
if response.status_code == 202 and job_wait:
task_uri = response.headers["Location"]
@@ -356,6 +399,21 @@ class iDRACRedfishAPI(object):
response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
return response
+ def import_preview_scp(self, import_buffer=None, target=None, job_wait=False):
+ """
+ This method imports preview system configuration details to the system.
+ :param import_buffer: import buffer payload content xml or json format
+ :param target: IDRAC or NIC or ALL or BIOS or RAID.
+ :param job_wait: True or False decide whether to wait till the job completion.
+ :return: json response
+ """
+ payload = {"ImportBuffer": import_buffer, "ShareParameters": {"Target": target}}
+ response = self.invoke_request(IMPORT_PREVIEW, "POST", data=payload)
+ if response.status_code == 202 and job_wait:
+ task_uri = response.headers["Location"]
+ response = self.wait_for_job_complete(task_uri, job_wait=job_wait)
+ return response
+
def get_idrac_local_account_attr(self, idrac_attribues, fqdd=None):
"""
This method filtered from all the user attributes from the given idrac attributes.
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
index cdb5ddf2c..cd0bb6be0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/ome.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -34,13 +34,15 @@ import json
import os
import time
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.common.parameters import env_fallback
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
ome_auth_params = {
"hostname": {"required": True, "type": "str"},
- "username": {"required": True, "type": "str"},
- "password": {"required": True, "type": "str", "no_log": True},
+ "username": {"required": True, "type": "str", "fallback": (env_fallback, ['OME_USERNAME'])},
+ "password": {"required": True, "type": "str", "no_log": True, "fallback": (env_fallback, ['OME_PASSWORD'])},
"port": {"type": "int", "default": 443},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
@@ -54,6 +56,7 @@ SESSION_RESOURCE_COLLECTION = {
JOB_URI = "JobService/Jobs({job_id})"
JOB_SERVICE_URI = "JobService/Jobs"
+HOST_UNRESOLVED_MSG = "Unable to resolve hostname or IP {0}."
class OpenURLResponse(object):
@@ -90,7 +93,7 @@ class RestOME(object):
def __init__(self, module_params=None, req_session=False):
self.module_params = module_params
- self.hostname = self.module_params["hostname"]
+ self.hostname = str(self.module_params["hostname"]).strip('][')
self.username = self.module_params["username"]
self.password = self.module_params["password"]
self.port = self.module_params["port"]
@@ -101,6 +104,7 @@ class RestOME(object):
self.session_id = None
self.protocol = 'https'
self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.hostname = config_ipv6(self.hostname)
def _get_base_url(self):
"""builds base url"""
@@ -261,7 +265,7 @@ class RestOME(object):
device_id = device_info["Id"]
return {"Id": device_id, "value": device_info}
- def get_all_items_with_pagination(self, uri):
+ def get_all_items_with_pagination(self, uri, query_param=None):
"""
This implementation mainly to get all available items from ome for pagination
supported GET uri
@@ -269,7 +273,7 @@ class RestOME(object):
:return: dict.
"""
try:
- resp = self.invoke_request('GET', uri)
+ resp = self.invoke_request('GET', uri, query_param=query_param)
data = resp.json_data
total_items = data.get("value", [])
total_count = data.get('@odata.count', 0)
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
index 59c467057..8a26eaf60 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/redfish.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -34,11 +34,13 @@ import os
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.six.moves.urllib.parse import urlencode
+from ansible.module_utils.common.parameters import env_fallback
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import config_ipv6
redfish_auth_params = {
"baseuri": {"required": True, "type": "str"},
- "username": {"required": True, "type": "str"},
- "password": {"required": True, "type": "str", "no_log": True},
+ "username": {"required": True, "type": "str", "fallback": (env_fallback, ['IDRAC_USERNAME'])},
+ "password": {"required": True, "type": "str", "no_log": True, "fallback": (env_fallback, ['IDRAC_PASSWORD'])},
"validate_certs": {"type": "bool", "default": True},
"ca_path": {"type": "path"},
"timeout": {"type": "int", "default": 30},
@@ -49,6 +51,8 @@ SESSION_RESOURCE_COLLECTION = {
"SESSION_ID": "/redfish/v1/Sessions/{Id}",
}
+HOST_UNRESOLVED_MSG = "Unable to resolve hostname or IP {0}."
+
class OpenURLResponse(object):
"""Handles HTTPResponse"""
@@ -101,6 +105,7 @@ class Redfish(object):
self.protocol = 'https'
self.root_uri = '/redfish/v1/'
self._headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}
+ self.hostname = config_ipv6(self.hostname)
def _get_base_url(self):
"""builds base url"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
index d0da26e57..3d8abfbe5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
+++ b/ansible_collections/dellemc/openmanage/plugins/module_utils/utils.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
# Dell OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
@@ -34,17 +34,25 @@ NO_CHANGES_MSG = "No changes found to be applied."
RESET_UNTRACK = "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
RESET_SUCCESS = "iDRAC has been reset successfully."
RESET_FAIL = "Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC."
+INVALID_ID_MSG = "Unable to complete the operation because " + \
+ "the value `{0}` for the input `{1}` parameter is invalid."
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
+CHASSIS_URI = "/redfish/v1/Chassis"
IDRAC_RESET_URI = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
SYSTEM_RESET_URI = "/redfish/v1/Systems/{res_id}/Actions/ComputerSystem.Reset"
MANAGER_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
MANAGER_JOB_ID_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{0}"
-
+GET_IDRAC_FIRMWARE_VER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1?$select=FirmwareVersion"
+HOSTNAME_REGEX = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"
import time
+from datetime import datetime
+import re
+from ansible.module_utils.six.moves.urllib.error import HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -69,6 +77,21 @@ def strip_substr_dict(odata_dict, chkstr='@odata.', case_sensitive=False):
return odata_dict
+def config_ipv6(hostname):
+ ip_addr, port = hostname, None
+ if hostname.count(':') == 1:
+ ip_addr, port = hostname.split(':')
+ if not re.match(HOSTNAME_REGEX, ip_addr):
+ if ']:' in ip_addr:
+ ip_addr, port = ip_addr.split(']:')
+ ip_addr = ip_addr.strip('[]')
+ if port is None or port == "":
+ hostname = "[{0}]".format(ip_addr)
+ else:
+ hostname = "[{0}]:{1}".format(ip_addr, port)
+ return hostname
+
+
def job_tracking(rest_obj, job_uri, max_job_wait_sec=600, job_state_var=('LastRunStatus', 'Id'),
job_complete_states=(2060, 2020, 2090), job_fail_states=(2070, 2101, 2102, 2103),
job_running_states=(2050, 2040, 2030, 2100),
@@ -265,8 +288,8 @@ def reset_idrac(idrac_restobj, wait_time_sec=300, res_id=MANAGER_ID, interval=30
track_failed = True
reset_msg = "iDRAC reset triggered successfully."
try:
- resp = idrac_restobj.invoke_request(IDRAC_RESET_URI.format(res_id=res_id), 'POST',
- data={"ResetType": "GracefulRestart"})
+ idrac_restobj.invoke_request(IDRAC_RESET_URI.format(res_id=res_id), 'POST',
+ data={"ResetType": "GracefulRestart"})
if wait_time_sec:
track_failed, reset_msg = wait_after_idrac_reset(idrac_restobj, wait_time_sec, interval)
reset = True
@@ -348,3 +371,157 @@ def get_system_res_id(idrac):
res_uri = member[0].get('@odata.id')
res_id = res_uri.split("/")[-1]
return res_id, error_msg
+
+
+def get_all_data_with_pagination(ome_obj, uri, query_param=None):
+ """To get all the devices with pagination based on the filter provided."""
+ query, resp, report_list = "", None, []
+ try:
+ resp = ome_obj.invoke_request('GET', uri, query_param=query_param)
+ next_uri = resp.json_data.get("@odata.nextLink", None)
+ report_list = resp.json_data.get("value")
+ if query_param is not None:
+ for k, v in query_param.items():
+ query += "{0}={1}".format(k, v.replace(" ", "%20"))
+ while next_uri is not None:
+ next_uri_query = "{0}&{1}".format(next_uri.strip("/api"), query) if query else next_uri.strip("/api")
+ resp = ome_obj.invoke_request('GET', next_uri_query)
+ report_list.extend(resp.json_data.get("value"))
+ next_uri = resp.json_data.get("@odata.nextLink", None)
+ except (URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+ return {"resp_obj": resp, "report_list": report_list}
+
+
+def remove_key(data, regex_pattern='@odata.'):
+ '''
+ :param data: the dict/list to be stripped of unwanted keys
+    :param regex_pattern: the regex pattern matched against the keys to be removed
+ :return: dict/list
+ '''
+ try:
+ if isinstance(data, dict):
+ for key in list(data.keys()):
+ if re.match(regex_pattern, key):
+ data.pop(key, None)
+ else:
+ remove_key(data[key], regex_pattern)
+ elif isinstance(data, list):
+ for item in data:
+ remove_key(item, regex_pattern)
+ except Exception:
+ pass
+ return data
+
+
+def wait_for_redfish_reboot_job(redfish_obj, res_id, payload=None, wait_time_sec=300):
+ reset, job_resp, msg = False, {}, ""
+ try:
+ resp = redfish_obj.invoke_request('POST', SYSTEM_RESET_URI.format(res_id=res_id), data=payload, api_timeout=120)
+ time.sleep(10)
+ if wait_time_sec and resp.status_code == 204:
+ resp = redfish_obj.invoke_request("GET", MANAGER_JOB_URI)
+ reboot_job_lst = list(filter(lambda d: (d["JobType"] in ["RebootNoForce"]), resp.json_data["Members"]))
+ job_resp = max(reboot_job_lst, key=lambda d: datetime.strptime(d["StartTime"], "%Y-%m-%dT%H:%M:%S"))
+ if job_resp:
+ reset = True
+ else:
+ msg = RESET_FAIL
+ except Exception:
+ reset = False
+ return job_resp, reset, msg
+
+
+def wait_for_redfish_job_complete(redfish_obj, job_uri, job_wait=True, wait_timeout=120, sleep_time=10):
+ max_sleep_time = wait_timeout
+ sleep_interval = sleep_time
+ job_msg = "The job is not complete after {0} seconds.".format(wait_timeout)
+ job_resp = {}
+ if job_wait:
+ while max_sleep_time:
+ if max_sleep_time > sleep_interval:
+ max_sleep_time = max_sleep_time - sleep_interval
+ else:
+ sleep_interval = max_sleep_time
+ max_sleep_time = 0
+ time.sleep(sleep_interval)
+ job_resp = redfish_obj.invoke_request("GET", job_uri, api_timeout=120)
+ if job_resp.json_data.get("PercentComplete") == 100:
+ time.sleep(10)
+ return job_resp, ""
+ if job_resp.json_data.get("JobState") == "RebootFailed":
+ time.sleep(10)
+ return job_resp, job_msg
+ else:
+ time.sleep(10)
+ job_resp = redfish_obj.invoke_request("GET", job_uri, api_timeout=120)
+ return job_resp, ""
+ return job_resp, job_msg
+
+
+def get_dynamic_uri(idrac_obj, base_uri, search_label=''):
+ resp = idrac_obj.invoke_request(method='GET', uri=base_uri).json_data
+ if search_label:
+ if search_label in resp:
+ return resp[search_label]
+ return None
+ return resp
+
+
+def get_scheduled_job_resp(idrac_obj, job_type):
+ # job_type can be like 'NICConfiguration' or 'BIOSConfiguration'
+ job_resp = {}
+ job_list = idrac_obj.invoke_request(
+ MANAGER_JOB_URI, "GET").json_data.get('Members', [])
+ for each_job in job_list:
+ if each_job.get("JobType") == job_type and each_job.get("JobState") in ["Scheduled", "Running", "Starting"]:
+ job_resp = each_job
+ break
+ return job_resp
+
+
+def delete_job(idrac_obj, job_id):
+ resp = idrac_obj.invoke_request(uri=MANAGER_JOB_ID_URI.format(job_id), method="DELETE")
+ return resp.json_data
+
+
+def get_current_time(redfish_obj):
+ res_id = get_manager_res_id(redfish_obj)
+ resp = redfish_obj.invoke_request(MANAGERS_URI + '/' + res_id, "GET")
+ curr_time = resp.json_data.get("DateTime")
+ date_offset = resp.json_data.get("DateTimeLocalOffset")
+ return curr_time, date_offset
+
+
+def xml_data_conversion(attr_dict, fqdd=None):
+ component = """<Component FQDD="{0}">{1}</Component>"""
+ attr = ""
+ for k, v in attr_dict.items():
+ key = re.sub(r"\.(?!\d)", "#", k)
+ attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
+ root = component.format(fqdd, attr)
+ return root
+
+
+def validate_and_get_first_resource_id_uri(module, idrac, base_uri):
+ odata = '@odata.id'
+ found = False
+ res_id_uri = None
+ res_id_input = module.params.get('resource_id')
+ res_id_members = get_dynamic_uri(idrac, base_uri, 'Members')
+ for each in res_id_members:
+ if res_id_input and res_id_input == each[odata].split('/')[-1]:
+ res_id_uri = each[odata]
+ found = True
+ break
+ if not found and res_id_input:
+ return res_id_uri, INVALID_ID_MSG.format(
+ res_id_input, 'resource_id')
+ elif res_id_input is None:
+ res_id_uri = res_id_members[0][odata]
+ return res_id_uri, ''
+
+
+def get_idrac_firmware_version(idrac):
+ firm_version = idrac.invoke_request(method='GET', uri=GET_IDRAC_FIRMWARE_VER_URI)
+ return firm_version.json_data.get('FirmwareVersion', '')
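
As a quick reference for the config_ipv6 helper introduced above: it only rewrites hosts that fail HOSTNAME_REGEX, wrapping bare IPv6 addresses in square brackets (and preserving an optional port) so they can be embedded in a URL, while IPv4 addresses and FQDNs pass through unchanged. A small standalone sketch with illustrative inputs:

import re

# Same pattern and logic as the helper added to utils.py above.
HOSTNAME_REGEX = r"^(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])$"

def config_ipv6(hostname):
    ip_addr, port = hostname, None
    if hostname.count(':') == 1:               # "host:port" or "ipv4:port"
        ip_addr, port = hostname.split(':')
    if not re.match(HOSTNAME_REGEX, ip_addr):  # inputs with ':' (IPv6 forms) fail the hostname grammar
        if ']:' in ip_addr:
            ip_addr, port = ip_addr.split(']:')
        ip_addr = ip_addr.strip('[]')
        if port is None or port == "":
            hostname = "[{0}]".format(ip_addr)
        else:
            hostname = "[{0}]:{1}".format(ip_addr, port)
    return hostname

print(config_ipv6("192.168.0.1"))        # -> 192.168.0.1 (unchanged)
print(config_ipv6("2001:db8::1"))        # -> [2001:db8::1]
print(config_ipv6("[2001:db8::1]:443"))  # -> [2001:db8::1]:443
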
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
index 945fd90e9..4a88a38e5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_eventing.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -88,7 +88,7 @@ options:
choices: [Enabled, Disabled]
smtp_ip_address:
type: str
- description: SMTP IP address for communication.
+      description: Enter the IPv4 or IPv6 address, FQDN, or DNS name of the SMTP server.
smtp_port:
type: str
description: SMTP Port number for access.
@@ -99,12 +99,13 @@ options:
type: str
description: Password for SMTP authentication.
requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "omsdk >= 1.2.503"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -112,9 +113,9 @@ EXAMPLES = """
---
- name: Configure the iDRAC eventing attributes
dellemc.openmanage.dellemc_configure_idrac_eventing:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
destination_number: "2"
destination: "1.1.1.1"
@@ -194,7 +195,6 @@ try:
AlertEnable_IPMILanTypes,
SMTPAuthentication_RemoteHostsTypes)
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
index 5a0eacf1b..e69563eee 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_configure_idrac_services.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -28,15 +28,15 @@ description:
- This module allows to configure the iDRAC services related attributes.
options:
idrac_ip:
- required: True
+ required: true
type: str
description: iDRAC IP Address.
idrac_user:
- required: True
+ required: true
type: str
description: iDRAC username.
idrac_password:
- required: True
+ required: true
type: str
description: iDRAC user password.
aliases: ['idrac_pwd']
@@ -46,11 +46,11 @@ options:
default: 443
validate_certs:
description:
- - If C(False), the SSL certificates will not be validated.
- - Configure C(False) only on personally controlled sites where self-signed certificates are used.
- - Prior to collection version C(5.0.0), the I(validate_certs) is C(False) by default.
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is C(false) by default.
type: bool
- default: True
+ default: true
version_added: 5.0.0
ca_path:
description:
@@ -137,11 +137,12 @@ options:
The community name is checked by the remote system to which the traps are sent.
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -149,24 +150,24 @@ EXAMPLES = """
---
- name: Configure the iDRAC services attributes
dellemc.openmanage.dellemc_configure_idrac_services:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- enable_web_server: "Enabled"
- http_port: 80
- https_port: 443
- ssl_encryption: "Auto_Negotiate"
- tls_protocol: "TLS_1_2_Only"
- timeout: "1800"
- snmp_enable: "Enabled"
- snmp_protocol: "SNMPv3"
- community_name: "public"
- alert_port: 162
- discovery_port: 161
- trap_format: "SNMPv3"
- ipmi_lan:
- community_name: "public"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ enable_web_server: "Enabled"
+ http_port: 80
+ https_port: 443
+ ssl_encryption: "Auto_Negotiate"
+ tls_protocol: "TLS_1_2_Only"
+ timeout: "1800"
+ snmp_enable: "Enabled"
+ snmp_protocol: "SNMPv3"
+ community_name: "public"
+ alert_port: 162
+ discovery_port: 161
+ trap_format: "SNMPv3"
+ ipmi_lan:
+ community_name: "public"
"""
RETURN = r'''
@@ -234,7 +235,6 @@ try:
AgentEnable_SNMPTypes,
SNMPProtocol_SNMPTypes)
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py
deleted file mode 100644
index d667c916e..000000000
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_firmware_inventory.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-module: dellemc_get_firmware_inventory
-short_description: Get Firmware Inventory
-version_added: "1.0.0"
-deprecated:
- removed_at_date: "2023-01-15"
- why: Replaced with M(dellemc.openmanage.idrac_firmware_info).
- alternative: Use M(dellemc.openmanage.idrac_firmware_info) instead.
- removed_from_collection: dellemc.openmanage
-description: Get Firmware Inventory.
-extends_documentation_fragment:
- - dellemc.openmanage.idrac_auth_options
-
-requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
-author: "Rajeev Arakkal (@rajeevarakkal)"
-notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
- - This module supports C(check_mode).
-"""
-
-EXAMPLES = """
----
-- name: Get Installed Firmware Inventory
- dellemc.openmanage.dellemc_get_firmware_inventory:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
-"""
-
-RETURNS = """
-ansible_facts:
- description: Displays components and their firmware versions. Also, list of the firmware
- dictionaries (one dictionary per firmware).
- returned: success
- type: complex
- sample: {
- [
- {
- "BuildNumber": "0",
- "Classifications": "10",
- "ComponentID": "101100",
- "ComponentType": "FRMW",
- "DeviceID": null,
- "ElementName": "Power Supply.Slot.1",
- "FQDD": "PSU.Slot.1",
- "IdentityInfoType": "OrgID:ComponentType:ComponentID",
- "IdentityInfoValue": "DCIM:firmware:101100",
- "InstallationDate": "2018-01-18T07:25:08Z",
- "InstanceID": "DCIM:INSTALLED#0x15__PSU.Slot.1",
- "IsEntity": "true",
- "Key": "DCIM:INSTALLED#0x15__PSU.Slot.1",
- "MajorVersion": "0",
- "MinorVersion": "1",
- "RevisionNumber": "7",
- "RevisionString": null,
- "Status": "Installed",
- "SubDeviceID": null,
- "SubVendorID": null,
- "Updateable": "true",
- "VendorID": null,
- "VersionString": "00.1D.7D",
- "impactsTPMmeasurements": "false"
- }
- ]
- }
-"""
-
-
-import traceback
-from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
-from ansible.module_utils.basic import AnsibleModule
-try:
- from omsdk.sdkfile import LocalFile
- from omsdk.catalog.sdkupdatemgr import UpdateManager
- from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
- HAS_OMSDK = True
-except ImportError:
- HAS_OMSDK = False
-
-
-def run_get_firmware_inventory(idrac, module):
- """
- Get Firmware Inventory
- Keyword arguments:
- idrac -- iDRAC handle
- module -- Ansible module
- """
-
- msg = {}
- # msg['changed'] = False
- msg['failed'] = False
- msg['msg'] = {}
- error = False
-
- try:
- # idrac.use_redfish = True
- msg['msg'] = idrac.update_mgr.InstalledFirmware
- if "Status" in msg['msg']:
- if msg['msg']['Status'] != "Success":
- msg['failed'] = True
-
- except Exception as err:
- error = True
- msg['msg'] = "Error: %s" % str(err)
- msg['exception'] = traceback.format_exc()
- msg['failed'] = True
-
- return msg, error
-
-
-# Main
-def main():
- module = AnsibleModule(
- argument_spec=idrac_auth_params,
- supports_check_mode=True)
-
- try:
- with iDRACConnection(module.params) as idrac:
- msg, err = run_get_firmware_inventory(idrac, module)
- except (ImportError, ValueError, RuntimeError) as e:
- module.fail_json(msg=str(e))
-
- if err:
- module.fail_json(**msg)
- module.exit_json(ansible_facts={idrac.ipaddr: {'Firmware Inventory': msg['msg']}})
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py
deleted file mode 100644
index e6a2d9eaf..000000000
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_get_system_inventory.py
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-module: dellemc_get_system_inventory
-short_description: Get the PowerEdge Server System Inventory
-version_added: "1.0.0"
-deprecated:
- removed_at_date: "2023-01-15"
- why: Replaced with M(dellemc.openmanage.idrac_system_info).
- alternative: Use M(dellemc.openmanage.idrac_system_info) instead.
- removed_from_collection: dellemc.openmanage
-description:
- - Get the PowerEdge Server System Inventory.
-extends_documentation_fragment:
- - dellemc.openmanage.idrac_auth_options
-
-requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
-author: "Rajeev Arakkal (@rajeevarakkal)"
-notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
- - This module supports C(check_mode).
-"""
-
-EXAMPLES = """
----
-- name: Get System Inventory
- dellemc.openmanage.dellemc_get_system_inventory:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
-"""
-
-RETURNS = """
-ansible_facts:
- description: Displays the Dell EMC PowerEdge Server System Inventory.
- returned: success
- type: complex
- sample: {
- "SystemInventory": {
- "BIOS": [
- {
- "BIOSReleaseDate": "10/19/2017",
- "FQDD": "BIOS.Setup.1-1",
- "InstanceID": "DCIM:INSTALLED#741__BIOS.Setup.00",
- "Key": "DCIM:INSTALLED#741__BIOS.Setup.00",
- "SMBIOSPresent": "True",
- "VersionString": "1.2.11"
- }
- ],
- "CPU": [
- {
- "CPUFamily": "Intel(R) Xeon(TM)",
- "Characteristics": "64-bit capable",
- "CurrentClockSpeed": "2.3 GHz",
- "DeviceDescription": "CPU 1",
- "ExecuteDisabledCapable": "Yes",
- }
- ]
- }
-}
-msg:
- description: Details of the Error occurred.
- returned: on error
- type: dict
- sample: {
- "error": {
- "code": "Base.1.0.GeneralError",
- "message": "A general error has occurred. See ExtendedInfo for more information.",
- "@Message.ExtendedInfo": [
- {
- "MessageId": "GEN1234",
- "RelatedProperties": [],
- "Message": "Unable to process the request because an error occurred.",
- "MessageArgs": [],
- "Severity": "Critical",
- "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
- }
- ]
- }
- }
-"""
-
-
-from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
-from ansible.module_utils.basic import AnsibleModule
-
-
-# Get System Inventory
-def run_get_system_inventory(idrac, module):
- msg = {}
- msg['changed'] = False
- msg['failed'] = False
- err = False
-
- try:
- # idrac.use_redfish = True
- idrac.get_entityjson()
- msg['msg'] = idrac.get_json_device()
- except Exception as e:
- err = True
- msg['msg'] = "Error: %s" % str(e)
- msg['failed'] = True
- return msg, err
-
-
-# Main
-def main():
- module = AnsibleModule(
- argument_spec=idrac_auth_params,
- supports_check_mode=True)
-
- try:
- with iDRACConnection(module.params) as idrac:
- msg, err = run_get_system_inventory(idrac, module)
- except (ImportError, ValueError, RuntimeError) as e:
- module.fail_json(msg=str(e))
-
- if err:
- module.fail_json(**msg)
- module.exit_json(ansible_facts={idrac.ipaddr: {'SystemInventory': msg['msg']}})
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
index eec09c1c8..7762bc0b9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_lc_attributes.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -61,11 +61,12 @@ options:
default: Enabled
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -73,11 +74,11 @@ EXAMPLES = """
---
- name: Set up iDRAC LC Attributes
dellemc.openmanage.dellemc_idrac_lc_attributes:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- csior: "Enabled"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ csior: "Enabled"
"""
RETURN = r'''
@@ -141,7 +142,6 @@ from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
try:
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
index 01c915eae..e8021db18 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_idrac_storage_volume.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -123,10 +123,11 @@ options:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
'''
@@ -172,7 +173,7 @@ EXAMPLES = r'''
span_length: 3
span_depth: 1
drives:
- location: [7,3,5]
+ location: [7, 3, 5]
disk_cache_policy: "Disabled"
write_cache_policy: "WriteBack"
read_cache_policy: "NoReadAhead"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
index 3be038e44..412e5a6f9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/dellemc_system_lockdown_mode.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,11 +59,12 @@ options:
choices: [Enabled, Disabled]
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Felix Stephen (@felixs88)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module does not support C(check_mode).
"""
@@ -71,11 +72,11 @@ EXAMPLES = """
---
- name: Check System Lockdown Mode
dellemc.openmanage.dellemc_system_lockdown_mode:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- lockdown_mode: "Disabled"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ lockdown_mode: "Disabled"
"""
RETURN = r'''
@@ -144,7 +145,6 @@ from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
try:
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
index c9c80854a..1b61b3ebe 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_attributes.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -32,7 +32,7 @@ options:
To view the list of attributes in Attribute Registry for iDRAC9 and above,
see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1)
and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
- - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - "For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
(for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is
<GroupName>.<Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity')."
@@ -43,7 +43,7 @@ options:
part of the Integrated Dell Remote Access Controller Attribute Registry. To view the list of attributes in Attribute Registry for iDRAC9 and above,
see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1)
and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
- - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - "For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
(for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is
<GroupName>.<Instance>.<AttributeName> (for Example, 'ThermalSettings.1.ThermalProfile')."
@@ -54,7 +54,7 @@ options:
part of the Integrated Dell Remote Access Controller Attribute Registry.To view the list of attributes in Attribute Registry for iDRAC9 and above,
see, U(https://I(idrac_ip)/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1)
and U(https://I(idrac_ip)/redfish/v1/Registries/ManagerAttributeRegistry)."
- - "For iDRAC7 and iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - "For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
(for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is
<GroupName>.<Instance>.<AttributeName> (for Example, 'LCAttributes.1.AutoUpdate')."
@@ -69,7 +69,7 @@ author:
notes:
- Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
- - For iDRAC7 and iDRAC8 based servers, the value provided for the attributes are not be validated.
+ - For iDRAC8 based servers, the values provided for the attributes are not validated.
Ensure appropriate values are passed.
'''
@@ -188,7 +188,7 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
idrac_attributes:
- Time.1.TimeZone: CST6CDT
+ Time.1.Timezone: CST6CDT
NTPConfigGroup.1.NTPEnable: Enabled
NTPConfigGroup.1.NTP1: 192.168.0.5
NTPConfigGroup.1.NTP2: 192.168.0.6
@@ -260,9 +260,8 @@ error_info:
import json
import re
-from ssl import SSLError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_manager_res_id
from ansible.module_utils.basic import AnsibleModule
@@ -285,7 +284,7 @@ def xml_data_conversion(attrbite, fqdd=None):
attr = ""
json_data = {}
for k, v in attrbite.items():
- key = re.sub(r"(?<=\d)\.", "#", k)
+ key = re.sub(r"\.(?!\d)", "#", k)
attr += '<Attribute Name="{0}">{1}</Attribute>'.format(key, v)
json_data[key] = str(v)
root = component.format(fqdd, attr)
@@ -510,13 +509,13 @@ def main():
res_id = get_manager_res_id(idrac)
diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = fetch_idrac_uri_attr(idrac, module, res_id)
process_check_mode(module, diff)
- resp = update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr)
+ update_idrac_attributes(idrac, module, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr)
module.exit_json(msg=SUCCESS_MSG, changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, AttributeError, IndexError, KeyError) as err:
module.fail_json(msg=str(err), error_info=json.load(err))
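
A note on the regex change in xml_data_conversion above: switching from a lookbehind on a digit to a negative lookahead keeps the instance separator intact when a group name itself ends in a digit. A hedged illustration, where the attribute name is purely hypothetical:

import re

name = "IPv4.1.Address"   # hypothetical <GroupName>.<Instance>.<AttributeName> in Redfish form

old = re.sub(r"(?<=\d)\.", "#", name)  # replaces every '.' that follows a digit
new = re.sub(r"\.(?!\d)", "#", name)   # replaces only a '.' not followed by a digit

print(old)  # IPv4#1#Address (group/instance split broken)
print(new)  # IPv4.1#Address (matches the SCP form <GroupName>.<Instance>#<AttributeName>)
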
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
index 8cd9c5e7b..aba65f3f6 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_bios.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.2.0
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 7.6.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -67,12 +67,12 @@ options:
- "The format is YYYY-MM-DDThh:mm:ss<offset>"
- "<offset> is the time offset from UTC that the current timezone set in
iDRAC in the format: +05:30 for IST."
- required: True
+ required: true
duration:
type: int
description:
- The duration in seconds for the maintenance window.
- required: True
+ required: true
attributes:
type: dict
description:
@@ -124,19 +124,21 @@ options:
type: int
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
default: 1200
requirements:
- "omsdk >= 1.2.490"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
- "Jagadeesh N V (@jagadeeshnv)"
+ - "Shivam Sharma (@shivam-sharma)"
notes:
- omsdk is required to be installed only for I(boot_sources) operation.
- This module requires 'Administrator' privilege for I(idrac_user).
- Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -144,20 +146,20 @@ EXAMPLES = """
---
- name: Configure generic attributes of the BIOS
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
attributes:
- BootMode : "Bios"
+ BootMode: "Bios"
OneTimeBootMode: "Enabled"
BootSeqRetry: "Enabled"
- name: Configure PXE generic attributes
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
attributes:
PxeDev1EnDis: "Enabled"
@@ -169,82 +171,82 @@ EXAMPLES = """
- name: Configure BIOS attributes at Maintenance window
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
apply_time: AtMaintenanceWindowStart
maintenance_window:
start_time: "2022-09-30T05:15:40-05:00"
duration: 600
attributes:
- BootMode : "Bios"
+ BootMode: "Bios"
OneTimeBootMode: "Enabled"
BootSeqRetry: "Enabled"
- name: Clear pending BIOS attributes
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- clear_pending: yes
+ clear_pending: true
- name: Reset BIOS attributes to default settings.
dellemc.openmanage.idrac_bios:
- idrac_ip: "{{ idrac_ip }}"
- idrac_user: "{{ idrac_user }}"
- idrac_password: "{{ idrac_pwd }}"
- validate_certs: False
- reset_bios: yes
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ validate_certs: false
+ reset_bios: true
- name: Configure boot sources
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-2-3"
- Enabled : true
- Index : 0
+ - Name: "NIC.Integrated.1-2-3"
+ Enabled: true
+ Index: 0
- name: Configure multiple boot sources
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Enabled : true
- Index : 0
- - Name : "NIC.Integrated.2-2-2"
- Enabled : true
- Index : 1
- - Name : "NIC.Integrated.3-3-3"
- Enabled : true
- Index : 2
+ - Name: "NIC.Integrated.1-1-1"
+ Enabled: true
+ Index: 0
+ - Name: "NIC.Integrated.2-2-2"
+ Enabled: true
+ Index: 1
+ - Name: "NIC.Integrated.3-3-3"
+ Enabled: true
+ Index: 2
- name: Configure boot sources - Enabling
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Enabled : true
+ - Name: "NIC.Integrated.1-1-1"
+ Enabled: true
- name: Configure boot sources - Index
dellemc.openmanage.idrac_bios:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
boot_sources:
- - Name : "NIC.Integrated.1-1-1"
- Index : 0
+ - Name: "NIC.Integrated.1-1-1"
+ Index: 0
"""
RETURN = """
@@ -343,6 +345,7 @@ UNSUPPORTED_APPLY_TIME = "Apply time {0} is not supported."
MAINTENANCE_OFFSET = "The maintenance time must be post-fixed with local offset to {0}."
MAINTENANCE_TIME = "The specified maintenance time window occurs in the past, " \
"provide a future time to schedule the maintenance window."
+NEGATIVE_TIMEOUT_MESSAGE = "The parameter job_wait_timeout value cannot be negative or zero."
POWER_CHECK_RETRIES = 30
POWER_CHECK_INTERVAL = 10
@@ -561,7 +564,7 @@ def track_log_entry(redfish_obj):
else:
# msg = "{0}{1}".format(BIOS_RESET_TRIGGERED, "LOOPOVER")
msg = BIOS_RESET_TRIGGERED
- except Exception as ex:
+ except Exception:
# msg = "{0}{1}".format(BIOS_RESET_TRIGGERED, str(ex))
msg = BIOS_RESET_TRIGGERED
return msg
@@ -573,7 +576,7 @@ def reset_bios(module, redfish_obj):
module.exit_json(status_msg=BIOS_RESET_PENDING, failed=True)
if module.check_mode:
module.exit_json(status_msg=CHANGES_MSG, changed=True)
- resp = redfish_obj.invoke_request(RESET_BIOS_DEFAULT, "POST", data="{}", dump=True)
+ redfish_obj.invoke_request(RESET_BIOS_DEFAULT, "POST", data="{}", dump=True)
reset_success = reset_host(module, redfish_obj)
if not reset_success:
module.exit_json(failed=True, status_msg="{0} {1}".format(RESET_TRIGGERRED, HOST_RESTART_FAILED))
@@ -598,7 +601,7 @@ def clear_pending_bios(module, redfish_obj):
module.exit_json(status_msg=SUCCESS_CLEAR, changed=True)
if module.check_mode:
module.exit_json(status_msg=CHANGES_MSG, changed=True)
- resp = redfish_obj.invoke_request(CLEAR_PENDING_URI, "POST", data="{}", dump=False)
+ redfish_obj.invoke_request(CLEAR_PENDING_URI, "POST", data="{}", dump=False)
module.exit_json(status_msg=SUCCESS_CLEAR, changed=True)
@@ -698,7 +701,7 @@ def apply_attributes(module, redfish_obj, pending, rf_settings):
payload["@Redfish.SettingsApplyTime"] = rf_set
resp = redfish_obj.invoke_request(BIOS_SETTINGS, "PATCH", data=payload)
if rf_set:
- tmp_resp = redfish_obj.invoke_request(resp.headers["Location"], "GET")
+ redfish_obj.invoke_request(resp.headers["Location"], "GET")
job_id = resp.headers["Location"].split("/")[-1]
else:
if aplytm == "Immediate":
@@ -758,6 +761,11 @@ def attributes_config(module, redfish_obj):
job_id=job_id, changed=True)
+def validate_negative_job_time_out(module):
+ if module.params.get("job_wait_timeout") <= 0:
+ module.fail_json(msg=NEGATIVE_TIMEOUT_MESSAGE)
+
+
def main():
specs = {
"share_name": {"type": 'str'},
@@ -785,6 +793,7 @@ def main():
required_if=[["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]],
supports_check_mode=True)
+ validate_negative_job_time_out(module)
try:
msg = {}
if module.params.get("boot_sources") is not None:
@@ -810,7 +819,8 @@ def main():
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
- module.exit_json(msg=str(err), unreachable=True)
+ message = err.reason if err.reason else str(err)
+ module.exit_json(msg=message, unreachable=True)
except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError) as e:
module.fail_json(msg=str(e))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
index ad563c5ce..1e28ef30d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_boot.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.0.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -69,7 +69,7 @@ options:
- C(continuous) The system boots to the target specified in the I(boot_source_override_target)
until this property is set to Disabled.
- The state is set to C(once) for the one-time boot override and C(continuous) for the
- remain-active-until—canceled override. If the state is set C(once), the value is reset
+      remain-active-until-canceled override. If the state is set to C(once) or C(continuous), the value is reset
to C(disabled) after the I(boot_source_override_target) actions have completed successfully.
- Changes to this options do not alter the BIOS persistent boot order configuration.
- This is mutually exclusive with I(boot_options).
@@ -101,8 +101,8 @@ options:
type: str
description:
- C(none) Host system is not rebooted and I(job_wait) is not applicable.
- - C(force_reset) Forcefully reboot the Host system.
- - C(graceful_reset) Gracefully reboot the Host system.
+ - C(force_restart) Forcefully reboot the Host system.
+ - C(graceful_restart) Gracefully reboot the Host system.
choices: [graceful_restart, force_restart, none]
default: graceful_restart
job_wait:
@@ -115,7 +115,7 @@ options:
type: int
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
default: 900
resource_id:
type: str
@@ -413,7 +413,7 @@ def apply_boot_settings(module, idrac, payload, res_id):
def configure_boot_settings(module, idrac, res_id):
- job_resp, diff_change, payload = {}, [], {"Boot": {}}
+ job_resp, payload = {}, {"Boot": {}}
boot_order = module.params.get("boot_order")
override_mode = module.params.get("boot_source_override_mode")
override_enabled = module.params.get("boot_source_override_enabled")
@@ -457,7 +457,7 @@ def configure_boot_settings(module, idrac, res_id):
def configure_idrac_boot(module, idrac, res_id):
boot_options = module.params.get("boot_options")
- inv_boot_options, diff_change, payload, job_resp, boot_attr = [], [], {}, {}, {}
+ inv_boot_options, diff_change, payload, job_resp = [], [], {}, {}
if boot_options is not None:
boot_option_data = get_existing_boot_options(idrac, res_id)
for each in boot_options:
@@ -551,8 +551,8 @@ def main():
except HTTPError as err:
if err.code == 401:
module.fail_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]))
- module.fail_json(msg=str(err), error_info=json.load(err))
- except URLError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError:
module.exit_json(msg=AUTH_ERROR_MSG.format(module.params["idrac_ip"]), unreachable=True)
except (ImportError, ValueError, RuntimeError, SSLValidationError,
ConnectionError, KeyError, TypeError, IndexError) as e:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
index f5471a3ad..a429c639b 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_certificates.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.6.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,7 +14,7 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-DOCUMENTATION = r'''
+DOCUMENTATION = r"""
---
module: idrac_certificates
short_description: Configure certificates for iDRAC
@@ -32,17 +32,19 @@ options:
- C(export), export the certificate. This requires I(certificate_path).
- C(reset), reset the certificate to default settings. This is applicable only for C(HTTPS).
type: str
- choices: ['import', 'export', 'generate_csr', 'reset']
+ choices: [import, export, generate_csr, reset]
default: 'generate_csr'
certificate_type:
description:
- Type of the iDRAC certificate.
- C(HTTPS) The Dell self-signed SSL certificate.
- C(CA) Certificate Authority(CA) signed SSL certificate.
- - C(CSC) The custom signed SSL certificate.
+ - C(CUSTOMCERTIFICATE) The custom PKCS12 certificate and private key. Export of custom certificate is supported only on iDRAC firmware version 7.00.00.00
+ and above.
+ - C(CSC) The custom signing SSL certificate.
- C(CLIENT_TRUST_CERTIFICATE) Client trust certificate.
type: str
- choices: ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']
+ choices: [HTTPS, CA, CUSTOMCERTIFICATE, CSC, CLIENT_TRUST_CERTIFICATE]
default: 'HTTPS'
certificate_path:
description:
@@ -52,6 +54,13 @@ options:
passphrase:
description: The passphrase string if the certificate to be imported is passphrase protected.
type: str
+ ssl_key:
+ description:
+ - Absolute path of the private or SSL key file.
+ - This is applicable only when I(command) is C(import) and I(certificate_type) is C(HTTPS).
+ - Uploading the SSL key to iDRAC is supported on firmware version 6.00.02.00 and above.
+ type: path
+ version_added: 8.6.0
cert_params:
description: Certificate parameters to generate signing request.
type: dict
@@ -59,31 +68,30 @@ options:
common_name:
description: The common name of the certificate.
type: str
- required: True
+ required: true
organization_unit:
description: The name associated with an organizational unit. For example department name.
type: str
- required: True
+ required: true
locality_name:
description: The city or other location where the entity applying for certification is located.
type: str
- required: True
+ required: true
state_name:
description: The state where the entity applying for certification is located.
type: str
- required: True
+ required: true
country_code:
description: The country code of the country where the entity applying for certification is located.
type: str
- required: True
+ required: true
email_address:
description: The email associated with the CSR.
type: str
- required: True
organization_name:
description: The name associated with an organization.
type: str
- required: True
+ required: true
subject_alt_name:
description: The alternative domain names associated with the request.
type: list
@@ -97,24 +105,27 @@ options:
- To reset the iDRAC after the certificate operation.
- This is applicable when I(command) is C(import) or C(reset).
type: bool
- default: True
+ default: true
wait:
description:
- Maximum wait time for iDRAC to start after the reset, in seconds.
- - This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(True).
+ - This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(true).
type: int
default: 300
requirements:
- "python >= 3.8.6"
author:
- "Jagadeesh N V(@jagadeeshnv)"
+ - "Rajshekar P(@rajshekarp87)"
+ - "Kristian Lamb V(@kristian_lamb)"
notes:
- - The certificate operations are supported on iDRAC firmware 5.10.10.00 and above.
+ - The certificate operations are supported on iDRAC firmware version 6.10.80.00 and above.
- Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
-'''
+ - This module supports IPv4 and IPv6 addresses.
+"""
-EXAMPLES = r'''
+EXAMPLES = r"""
---
- name: Generate HTTPS certificate signing request
dellemc.openmanage.idrac_certificates:
@@ -146,6 +157,17 @@ EXAMPLES = r'''
certificate_type: "HTTPS"
certificate_path: "/path/to/cert.pem"
+- name: Import an HTTPS certificate along with its private key.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+ ssl_key: "/path/to/private_key.pem"
+
- name: Export a HTTPS certificate.
dellemc.openmanage.idrac_certificates:
idrac_ip: "192.168.0.1"
@@ -166,6 +188,17 @@ EXAMPLES = r'''
certificate_type: "CSC"
certificate_path: "/path/to/cert.pem"
+- name: Import a custom certificate with a passphrase.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "/path/to/idrac_cert.p12"
+ passphrase: "cert_passphrase"
+ reset: false
+
- name: Export a Client trust certificate.
dellemc.openmanage.idrac_certificates:
idrac_ip: "192.168.0.1"
@@ -175,7 +208,7 @@ EXAMPLES = r'''
command: "export"
certificate_type: "CLIENT_TRUST_CERTIFICATE"
certificate_path: "/home/omam/mycert_dir"
-'''
+"""
RETURN = r'''
---
@@ -183,7 +216,7 @@ msg:
type: str
description: Status of the certificate configuration operation.
returned: always
- sample: "Successfully performed the operation generate_csr."
+ sample: "Successfully performed the 'generate_csr' certificate operation."
certificate_path:
type: str
description: The csr or exported certificate file path
@@ -221,43 +254,50 @@ from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import reset_idrac
-NOT_SUPPORTED_ACTION = "Certificate {op} not supported for the specified certificate type {certype}."
-SUCCESS_MSG = "Successfully performed the '{command}' operation."
+IMPORT_SSL_CERTIFICATE = "#DelliDRACCardService.ImportSSLCertificate"
+EXPORT_SSL_CERTIFICATE = "#DelliDRACCardService.ExportSSLCertificate"
+IDRAC_CARD_SERVICE_ACTION_URI = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions"
+
+NOT_SUPPORTED_ACTION = "Certificate '{operation}' not supported for the specified certificate type '{cert_type}'."
+SUCCESS_MSG = "Successfully performed the '{command}' certificate operation."
+SUCCESS_MSG_SSL = "Successfully performed the SSL key upload and '{command}' certificate operation."
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_MSG = "Changes found to be applied."
+WAIT_NEGATIVE_OR_ZERO_MSG = "The value for the `wait` parameter cannot be negative or zero."
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
-ACTIONS_PFIX = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService."
+ACTIONS_PFIX = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService."
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
-IDRAC_SERVICE = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService"
+IDRAC_SERVICE = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService"
CSR_SSL = "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR"
-IMPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"
-EXPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate"
-RESET_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg"
+IMPORT_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ImportSSLCertificate"
+UPLOAD_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.UploadSSLKey"
+EXPORT_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ExportSSLCertificate"
+RESET_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.SSLResetCfg"
IDRAC_RESET = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
idrac_service_actions = {
- "#DelliDRACCardService.DeleteCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.DeleteCertificate",
- "#DelliDRACCardService.ExportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportCertificate",
- "#DelliDRACCardService.ExportSSLCertificate": EXPORT_SSL,
+ "#DelliDRACCardService.DeleteCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.DeleteCertificate",
+ "#DelliDRACCardService.ExportCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ExportCertificate",
+ EXPORT_SSL_CERTIFICATE: EXPORT_SSL,
"#DelliDRACCardService.FactoryIdentityCertificateGenerateCSR":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
"#DelliDRACCardService.FactoryIdentityExportCertificate":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityExportCertificate",
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityExportCertificate",
"#DelliDRACCardService.FactoryIdentityImportCertificate":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityImportCertificate",
- "#DelliDRACCardService.GenerateSEKMCSR": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.GenerateSEKMCSR",
- "#DelliDRACCardService.ImportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportCertificate",
- "#DelliDRACCardService.ImportSSLCertificate": IMPORT_SSL,
- "#DelliDRACCardService.SSLResetCfg": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg",
- "#DelliDRACCardService.iDRACReset": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.iDRACReset"
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityImportCertificate",
+ "#DelliDRACCardService.GenerateSEKMCSR": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.GenerateSEKMCSR",
+ "#DelliDRACCardService.ImportCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ImportCertificate",
+ IMPORT_SSL_CERTIFICATE: IMPORT_SSL,
+ "#DelliDRACCardService.UploadSSLKey": UPLOAD_SSL,
+ "#DelliDRACCardService.SSLResetCfg": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.SSLResetCfg",
+ "#DelliDRACCardService.iDRACReset": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.iDRACReset"
}
rfish_cert_coll = {'Server': {
"@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/NetworkProtocol/HTTPS/Certificates"
}}
-out_mapper = {}
out_file_path = {"CSRString": 'certificate_path',
"CertificateFile": 'certificate_path'}
changed_map = {"generate_csr": False, "import": True, "export": False, "reset": True}
@@ -271,91 +311,111 @@ csr_transform = {"common_name": "CommonName",
"organization_name": "Organization",
"subject_alt_name": 'AlternativeNames'}
action_url_map = {"generate_csr": {},
- "import": {'Server': "#DelliDRACCardService.ImportSSLCertificate",
- 'CA': "#DelliDRACCardService.ImportSSLCertificate",
- 'CSC': "#DelliDRACCardService.ImportSSLCertificate",
- 'ClientTrustCertificate': "#DelliDRACCardService.ImportSSLCertificate"},
- "export": {'Server': "#DelliDRACCardService.ExportSSLCertificate",
- 'CA': "#DelliDRACCardService.ExportSSLCertificate",
- 'CSC': "#DelliDRACCardService.ExportSSLCertificate",
- 'ClientTrustCertificate': "#DelliDRACCardService.ExportSSLCertificate"},
+ "import": {'Server': IMPORT_SSL_CERTIFICATE,
+ 'CA': IMPORT_SSL_CERTIFICATE,
+ 'CustomCertificate': IMPORT_SSL_CERTIFICATE,
+ 'CSC': IMPORT_SSL_CERTIFICATE,
+ 'ClientTrustCertificate': IMPORT_SSL_CERTIFICATE},
+ "export": {'Server': EXPORT_SSL_CERTIFICATE,
+ 'CA': EXPORT_SSL_CERTIFICATE,
+ 'CustomCertificate': EXPORT_SSL_CERTIFICATE,
+ 'CSC': EXPORT_SSL_CERTIFICATE,
+ 'ClientTrustCertificate': EXPORT_SSL_CERTIFICATE},
"reset": {'Server': "#DelliDRACCardService.SSLResetCfg"}}
dflt_url_map = {"generate_csr": {'Server': CSR_SSL},
"import": {'Server': IMPORT_SSL,
'CA': IMPORT_SSL,
+                           'CustomCertificate': IMPORT_SSL,
'CSC': IMPORT_SSL,
'ClientTrustCertificate': IMPORT_SSL},
"export": {'Server': EXPORT_SSL,
'CA': EXPORT_SSL,
+                           'CustomCertificate': EXPORT_SSL,
'CSC': EXPORT_SSL,
'ClientTrustCertificate': EXPORT_SSL},
"reset": {'Server': RESET_SSL}}
-certype_map = {'HTTPS': "Server", 'CA': "CA", 'CSC': "CSC",
+certype_map = {'HTTPS': "Server", 'CA': "CA", 'CUSTOMCERTIFICATE': "CustomCertificate", 'CSC': "CSC",
'CLIENT_TRUST_CERTIFICATE': "ClientTrustCertificate"}
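# Editorial sketch, not part of this patch: how a user-facing certificate_type value
# is resolved to a default Redfish action URI through the maps above. The lookup and
# the printed URI rely only on constants defined earlier in this module.
example_cert_type = certype_map['HTTPS']                    # -> "Server"
example_action = dflt_url_map['import'][example_cert_type]  # -> IMPORT_SSL template
print(example_action.format(res_id=MANAGER_ID))
# /redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate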
-def get_ssl_payload(module, op, certype):
+def get_ssl_payload(module, operation, cert_type):
payload = {}
method = 'POST'
- if op == 'import':
- payload["CertificateType"] = certype
- if module.params.get('passphrase'):
- payload['Passphrase'] = module.params.get('passphrase')
- fpath = module.params.get('certificate_path')
- try:
- if str(fpath).lower().endswith('.p12') or str(fpath).lower().endswith(
- '.pfx'): # Linux generates .p12 Windows .pfx
- with open(fpath, 'rb') as cert:
- cert_content = cert.read()
- cert_file = base64.encodebytes(cert_content).decode('ascii')
- else:
- with open(fpath, "r") as cert:
- cert_file = cert.read()
- except OSError as file_err:
- module.exit_json(msg=str(file_err), failed=True)
- payload['SSLCertificateFile'] = cert_file
- elif op == 'export':
- payload['SSLCertType'] = certype
- elif op == 'generate_csr':
- payload = {}
- cert_params = module.params.get("cert_params")
- for k, v in csr_transform.items():
- payload[v] = cert_params.get(k)
- if rfish_cert_coll.get(certype):
- payload["CertificateCollection"] = rfish_cert_coll.get(certype)
- elif op == 'reset':
- payload = "{}"
+
+ if operation == 'import':
+ payload = _build_import_payload(module, cert_type)
+ elif operation == 'export':
+ payload = {"SSLCertType": cert_type}
+ elif operation == 'generate_csr':
+ payload = _build_generate_csr_payload(module, cert_type)
+ elif operation == 'reset':
+ payload = '{}'
+
return payload, method
+def _build_import_payload(module, cert_type):
+ payload = {"CertificateType": cert_type}
+
+ if module.params.get('passphrase'):
+ payload['Passphrase'] = module.params.get('passphrase')
+
+ cert_path = module.params.get('certificate_path')
+ try:
+ if str(cert_path).lower().endswith('.p12') or str(cert_path).lower().endswith('.pfx'):
+ with open(cert_path, 'rb') as cert_file:
+ cert_content = cert_file.read()
+ cert_file_content = base64.encodebytes(cert_content).decode('ascii')
+ else:
+ with open(cert_path, "r") as cert_file:
+ cert_file_content = cert_file.read()
+ except OSError as file_error:
+ module.exit_json(msg=str(file_error), failed=True)
+
+ payload['SSLCertificateFile'] = cert_file_content
+ return payload
+
+
+def _build_generate_csr_payload(module, cert_type):
+ payload = {}
+ cert_params = module.params.get("cert_params")
+
+ for key, value in csr_transform.items():
+ if cert_params.get(key) is not None:
+ payload[value] = cert_params.get(key)
+
+ if rfish_cert_coll.get(cert_type):
+ payload["CertificateCollection"] = rfish_cert_coll.get(cert_type)
+
+ return payload
+
+
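# Editorial sketch, not part of this patch: the payload shape produced by
# _build_generate_csr_payload() for a minimal cert_params dict. Only the
# csr_transform entries visible above are shown; the sample values are hypothetical.
sample_cert_params = {"common_name": "idrac.example.com",
                      "organization_name": "Example Corp",
                      "subject_alt_name": ["idrac1.example.com"]}
sample_csr_payload = {"CommonName": sample_cert_params["common_name"],
                      "Organization": sample_cert_params["organization_name"],
                      "AlternativeNames": sample_cert_params["subject_alt_name"],
                      "CertificateCollection": rfish_cert_coll["Server"]}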
payload_map = {"Server": get_ssl_payload,
"CA": get_ssl_payload,
+ "CustomCertificate": get_ssl_payload,
"CSC": get_ssl_payload,
"ClientTrustCertificate": get_ssl_payload}
-def get_res_id(idrac, certype):
+def get_res_id(idrac, cert_type):
cert_map = {"Server": MANAGER_ID}
try:
- resp = idrac.invoke_request("GET", cert_map.get(certype, MANAGERS_URI))
+ resp = idrac.invoke_request(cert_map.get(cert_type, MANAGERS_URI), "GET")
membs = resp.json_data.get("Members")
res_uri = membs[0].get('@odata.id') # Getting the first item
res_id = res_uri.split("/")[-1]
except Exception:
- res_id = cert_map.get(certype, MANAGER_ID)
+ res_id = cert_map.get(cert_type, MANAGER_ID)
return res_id
def get_idrac_service(idrac, res_id):
srvc = IDRAC_SERVICE.format(res_id=res_id)
- try:
- resp = idrac.invoke_request('GET', "{0}/{1}".format(MANAGERS_URI, res_id))
- srvc_data = resp.json_data
- dell_srvc = srvc_data['Links']['Oem']['Dell']['DelliDRACCardService']
- srvc = dell_srvc.get("@odata.id", IDRAC_SERVICE.format(res_id=res_id))
- except Exception:
- srvc = IDRAC_SERVICE.format(res_id=res_id)
+ resp = idrac.invoke_request(f"{MANAGERS_URI}/{res_id}", 'GET')
+ srvc_data = resp.json_data
+ dell_srvc = srvc_data['Links']['Oem']['Dell']['DelliDRACCardService']
+ srvc = dell_srvc.get("@odata.id", IDRAC_SERVICE.format(res_id=res_id))
return srvc
@@ -365,45 +425,63 @@ def get_actions_map(idrac, idrac_service_uri):
resp = idrac.invoke_request(idrac_service_uri, 'GET')
srvc_data = resp.json_data
actions = dict((k, v.get('target')) for k, v in srvc_data.get('Actions').items())
- except Exception as exc:
+ except Exception:
actions = idrac_service_actions
return actions
-def get_cert_url(actions, op, certype, res_id):
- idrac_key = action_url_map.get(op).get(certype)
+def get_cert_url(actions, operation, cert_type, res_id):
+ idrac_key = action_url_map.get(operation).get(cert_type)
dynurl = actions.get(idrac_key)
if not dynurl:
- dynurl = dflt_url_map.get(op).get(certype)
+ dynurl = dflt_url_map.get(operation).get(cert_type)
if dynurl:
dynurl = dynurl.format(res_id=res_id)
return dynurl
-def certificate_action(module, idrac, actions, op, certype, res_id):
- cert_url = get_cert_url(actions, op, certype, res_id)
+def upload_ssl_key(module, idrac, actions, ssl_key, res_id):
+ if not os.path.exists(ssl_key) or os.path.isdir(ssl_key):
+ module.exit_json(msg=f"Unable to locate the SSL key file at {ssl_key}.", failed=True)
+
+ try:
+ with open(ssl_key, "r") as file:
+ scert_file = file.read()
+ except OSError as err:
+ module.exit_json(msg=str(err), failed=True)
+
+ if not module.check_mode:
+ upload_url = actions.get("#DelliDRACCardService.UploadSSLKey")
+ if not upload_url:
+            module.exit_json(msg="Upload of SSL key not supported", failed=True)
+
+        payload = {'SSLKeyString': scert_file}
+ idrac.invoke_request(upload_url.format(res_id=res_id), "POST", data=payload)
+
+
+def certificate_action(module, idrac, actions, operation, cert_type, res_id):
+ cert_url = get_cert_url(actions, operation, cert_type, res_id)
if not cert_url:
- module.exit_json(msg=NOT_SUPPORTED_ACTION.format(op=op, certype=module.params.get('certificate_type')))
- cert_payload, method = payload_map.get(certype)(module, op, certype)
- exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id)
+ module.exit_json(msg=NOT_SUPPORTED_ACTION.format(operation=operation, cert_type=module.params.get('certificate_type')))
+ cert_payload, method = payload_map.get(cert_type)(module, operation, cert_type)
+ exit_certificates(module, idrac, cert_url, cert_payload, method, cert_type, res_id)
def write_to_file(module, cert_data, dkey):
- f_ext = {'HTTPS': ".pem", 'CA': ".pem", 'CSC': ".crt", 'CLIENT_TRUST_CERTIFICATE': ".crt"}
+ f_ext = {'HTTPS': ".pem", 'CA': ".pem", "CUSTOMCERTIFICATE": ".crt", 'CSC': ".crt", 'CLIENT_TRUST_CERTIFICATE': ".crt"}
path = module.params.get('certificate_path')
if not (os.path.exists(path) or os.path.isdir(path)):
- module.exit_json(msg="Provided directory path '{0}' is not valid.".format(path), failed=True)
+ module.exit_json(msg=f"Provided directory path '{path}' is not valid.", failed=True)
if not os.access(path, os.W_OK):
- module.exit_json(msg="Provided directory path '{0}' is not writable. Please check if you "
- "have appropriate permissions.".format(path), failed=True)
+ module.exit_json(msg=f"Provided directory path '{path}' is not writable. Please check if you "
+ "have appropriate permissions.", failed=True)
d = datetime.now()
if module.params.get('command') == 'generate_csr':
ext = '.txt'
else:
ext = f_ext.get(module.params.get('certificate_type'))
- cert_file_name = "{0}_{1}{2}{3}_{4}{5}{6}_{7}{8}".format(
- module.params["idrac_ip"], d.date().year, d.date().month, d.date().day,
- d.time().hour, d.time().minute, d.time().second, module.params.get('certificate_type'), ext)
+ cert_file_name = f"{module.params['idrac_ip']}_{d.strftime('%Y%m%d_%H%M%S')}_{module.params.get('certificate_type')}{ext}"
file_name = os.path.join(path, cert_file_name)
write_data = cert_data.pop(dkey, None)
with open(file_name, "w") as fp:
@@ -412,48 +490,42 @@ def write_to_file(module, cert_data, dkey):
def format_output(module, cert_data):
- # cert_data = strip_substr_dict(cert_data, chkstr='@odata')
- result = {}
cp = cert_data.copy()
klist = cp.keys()
for k in klist:
if "message" in k.lower():
cert_data.pop(k, None)
- if k in out_mapper:
- cert_data[out_mapper.get(k)] = cert_data.pop(k, None)
if k in out_file_path:
write_to_file(module, cert_data, k)
- if result:
- cert_data.update({'result': result})
cert_data.pop("CertificateCollection", None)
return cert_data
-def get_export_data(idrac, certype, res_id):
+def get_export_data(idrac, cert_type, res_id):
try:
- resp = idrac.invoke_request(EXPORT_SSL.format(res_id=res_id), "POST", data={"SSLCertType": certype})
+ resp = idrac.invoke_request(EXPORT_SSL.format(res_id=res_id), "POST", data={"SSLCertType": cert_type})
cert_data = resp.json_data
except Exception:
cert_data = {"CertificateFile": ""}
return cert_data.get("CertificateFile")
-def exit_certificates(module, idrac, cert_url, cert_payload, method, certype, res_id):
+def exit_certificates(module, idrac, cert_url, cert_payload, method, cert_type, res_id):
cmd = module.params.get('command')
changed = changed_map.get(cmd)
reset = changed_map.get(cmd) and module.params.get('reset')
result = {"changed": changed}
reset_msg = ""
if changed:
- reset_msg = " Reset iDRAC to apply new certificate." \
- " Until iDRAC is reset, the old certificate will be active."
+ reset_msg = "Reset iDRAC to apply the new certificate." \
+ " Until the iDRAC is reset, the old certificate will remain active."
if module.params.get('command') == 'import':
- export_cert = get_export_data(idrac, certype, res_id)
+ export_cert = get_export_data(idrac, cert_type, res_id)
if cert_payload.get('SSLCertificateFile') in export_cert:
module.exit_json(msg=NO_CHANGES_MSG)
if module.check_mode and changed:
module.exit_json(msg=CHANGES_MSG, changed=changed)
- if module.params.get('command') == 'reset' and certype == "Server":
+ if module.params.get('command') == 'reset' and cert_type == "Server":
resp = idrac.invoke_request(cert_url, method, data=cert_payload, dump=False)
else:
resp = idrac.invoke_request(cert_url, method, data=cert_payload)
@@ -462,7 +534,10 @@ def exit_certificates(module, idrac, cert_url, cert_payload, method, certype, re
result.update(cert_output)
if reset:
reset, track_failed, reset_msg = reset_idrac(idrac, module.params.get('wait'), res_id)
- result['msg'] = "{0}{1}".format(SUCCESS_MSG.format(command=cmd), reset_msg)
+ if cmd == "import" and cert_type == "Server" and module.params.get('ssl_key'):
+ result['msg'] = "{0} {1}".format(SUCCESS_MSG_SSL.format(command=cmd), reset_msg)
+ else:
+ result['msg'] = "{0}{1}".format(SUCCESS_MSG.format(command=cmd), reset_msg)
module.exit_json(**result)
@@ -471,8 +546,9 @@ def main():
"command": {"type": 'str', "default": 'generate_csr',
"choices": ['generate_csr', 'export', 'import', 'reset']},
"certificate_type": {"type": 'str', "default": 'HTTPS',
- "choices": ['HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE']},
+ "choices": ['HTTPS', 'CA', 'CUSTOMCERTIFICATE', 'CSC', 'CLIENT_TRUST_CERTIFICATE']},
"certificate_path": {"type": 'path'},
+ "ssl_key": {"type": 'path'},
"passphrase": {"type": 'str', "no_log": True},
"cert_params": {"type": 'dict', "options": {
"common_name": {"type": 'str', "required": True},
@@ -480,7 +556,7 @@ def main():
"locality_name": {"type": 'str', "required": True},
"state_name": {"type": 'str', "required": True},
"country_code": {"type": 'str', "required": True},
- "email_address": {"type": 'str', "required": True},
+ "email_address": {"type": 'str'},
"organization_name": {"type": 'str', "required": True},
"subject_alt_name": {"type": 'list', "elements": 'str', "default": []}
}},
@@ -500,21 +576,26 @@ def main():
try:
with iDRACRedfishAPI(module.params) as idrac:
- certype = certype_map.get(module.params.get('certificate_type'))
- op = module.params.get('command')
+ cert_type = certype_map.get(module.params.get('certificate_type'))
+ operation = module.params.get('command')
res_id = module.params.get('resource_id')
if not res_id:
- res_id = get_res_id(idrac, certype)
+ res_id = get_res_id(idrac, cert_type)
idrac_service_uri = get_idrac_service(idrac, res_id)
actions_map = get_actions_map(idrac, idrac_service_uri)
- certificate_action(module, idrac, actions_map, op, certype, res_id)
+ if operation in ["import", "reset"] and module.params.get('reset') and module.params.get('wait') <= 0:
+ module.exit_json(msg=WAIT_NEGATIVE_OR_ZERO_MSG, failed=True)
+ ssl_key = module.params.get('ssl_key')
+ if operation == "import" and ssl_key is not None and cert_type == "Server":
+ upload_ssl_key(module, idrac, actions_map, ssl_key, res_id)
+ certificate_action(module, idrac, actions_map, operation, cert_type, res_id)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (ImportError, ValueError, RuntimeError, SSLValidationError,
ConnectionError, KeyError, TypeError, IndexError) as e:
- module.fail_json(msg=str(e))
+ module.exit_json(msg=str(e), failed=True)
if __name__ == '__main__':
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
index e4d966345..8172e6838 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -30,7 +30,7 @@ options:
share_name:
description: Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported.
type: str
- required: True
+ required: true
share_user:
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
part of a domain else 'user'. This option is mandatory for CIFS Network Share.
@@ -47,44 +47,85 @@ options:
job_wait:
description: Whether to wait for job completion or not.
type: bool
- default: True
+ default: true
catalog_file_name:
description: Catalog file name relative to the I(share_name).
type: str
default: 'Catalog.xml'
ignore_cert_warning:
description: Specifies if certificate warnings are ignored when HTTPS share is used.
- If C(True) option is set, then the certificate warnings are ignored.
+ If C(true) option is set, then the certificate warnings are ignored.
type: bool
- default: True
+ default: true
apply_update:
description:
- - If I(apply_update) is set to C(True), then the packages are applied.
- - If I(apply_update) is set to C(False), no updates are applied, and a catalog report
+ - If I(apply_update) is set to C(true), then the packages are applied.
+ - If I(apply_update) is set to C(false), no updates are applied, and a catalog report
of packages is generated and returned.
type: bool
- default: True
+ default: true
reboot:
description:
- Provides the option to apply the update packages immediately or in the next reboot.
- - If I(reboot) is set to C(True), then the packages are applied immediately.
- - If I(reboot) is set to C(False), then the packages are staged and applied in the next reboot.
+ - If I(reboot) is set to C(true), then the packages are applied immediately.
+ - If I(reboot) is set to C(false), then the packages are staged and applied in the next reboot.
      - Packages that do not require a reboot are applied immediately irrespective of I(reboot).
type: bool
- default: False
+ default: false
+ proxy_support:
+ description:
+ - Specifies if a proxy should be used.
+      - Proxy parameters are applicable for C(HTTP), C(HTTPS), and C(FTP) share types of repositories.
+ - C(ParametersProxy), sets the proxy parameters for the current firmware operation.
+ - C(DefaultProxy), iDRAC uses the proxy values set by default.
+ - Default Proxy can be set in the Lifecycle Controller attributes using M(dellemc.openmanage.idrac_attributes).
+ - C(Off), will not use the proxy.
+      - For iDRAC8-based servers, use a proxy server with basic authentication.
+      - "For iDRAC9-based servers, ensure that you use digest authentication for the proxy server;
+        basic authentication is not supported."
+ choices: ["ParametersProxy", "DefaultProxy", "Off"]
+ type: str
+ default: "Off"
+ proxy_server:
+ description:
+ - The IP address of the proxy server.
+ - "This IP will not be validated. The download job will be created even for invalid I(proxy_server).
+ Please check the results of the job for error details."
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ type: str
+ proxy_port:
+ description:
+      - The port of the proxy server.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ type: int
+ proxy_type:
+ description:
+ - The proxy type of the proxy server.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+      - "Note: SOCKS4 proxy does not support IPv6 addresses."
+ choices: [HTTP, SOCKS]
+ type: str
+ proxy_uname:
+ description: The user name for the proxy server.
+ type: str
+ proxy_passwd:
+ description: The password for the proxy server.
+ type: str
requirements:
- - "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "omsdk >= 1.2.503"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Felix Stephen (@felixs88)"
+ - "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- Module will report success based on the iDRAC firmware update parent job status if there are no individual
component jobs present.
- For server with iDRAC firmware 5.00.00.00 and later, if the repository contains unsupported packages, then the
module will return success with a proper message.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip) and I(share_name).
- This module supports C(check_mode).
'''
@@ -97,9 +138,9 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.0:/share"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
catalog_file_name: "Catalog.xml"
- name: Update firmware from repository on a CIFS Share
@@ -111,9 +152,9 @@ EXAMPLES = """
share_name: "full_cifs_path"
share_user: "share_user"
share_password: "share_password"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
catalog_file_name: "Catalog.xml"
- name: Update firmware from repository on a HTTP
@@ -123,9 +164,9 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "http://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
- name: Update firmware from repository on a HTTPS
dellemc.openmanage.idrac_firmware:
@@ -134,9 +175,26 @@ EXAMPLES = """
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "https://downloads.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ reboot: true
+ job_wait: true
+ apply_update: true
+
+- name: Update firmware from repository on a HTTPS via proxy
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: ParametersProxy
+ proxy_server: 192.168.1.10
+ proxy_type: HTTP
+ proxy_port: 80
+ proxy_uname: "proxy_user"
+ proxy_passwd: "proxy_pwd"
- name: Update firmware from repository on a FTP
dellemc.openmanage.idrac_firmware:
@@ -144,10 +202,10 @@ EXAMPLES = """
idrac_user: "user_name"
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "ftp://ftp.dell.com"
- reboot: True
- job_wait: True
- apply_update: True
+ share_name: "ftp://ftp.mydomain.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
"""
RETURN = """
@@ -182,12 +240,11 @@ from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac i
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.parse import urlparse
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
try:
from omsdk.sdkcreds import UserCredentials
from omsdk.sdkfile import FileOnShare
- from omsdk.http.sdkwsmanbase import WsManProtocolBase
HAS_OMSDK = True
except ImportError:
HAS_OMSDK = False
@@ -195,6 +252,7 @@ except ImportError:
SHARE_TYPE = {'nfs': 'NFS', 'cifs': 'CIFS', 'ftp': 'FTP',
'http': 'HTTP', 'https': 'HTTPS', 'tftp': 'TFTP'}
CERT_WARN = {True: 'On', False: 'Off'}
+PROXY_SUPPORT = {"DefaultProxy": "Use_Default_Settings", "Off": "Off", "ParametersProxy": "Use_Custom_Settings"}
IDRAC_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService"
PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/DellSoftwareInstallationService/Actions/" \
"DellSoftwareInstallationService.InstallFromRepository"
@@ -202,6 +260,9 @@ GET_REPO_BASED_UPDATE_LIST_PATH = "/redfish/v1/Dell/Systems/System.Embedded.1/De
"Actions/DellSoftwareInstallationService.GetRepoBasedUpdateList"
JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+LOG_SERVICE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog"
+iDRAC9_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/LogServices/Lclog/Entries"
+iDRAC8_LC_LOG = "/redfish/v1/Managers/iDRAC.Embedded.1/Logs/Lclog"
MESSAGE = "Firmware versions on server match catalog, applicable updates are not present in the repository."
EXIT_MESSAGE = "The catalog in the repository specified in the operation has the same firmware versions " \
"as currently present on the server."
@@ -345,16 +406,59 @@ def handle_HTTP_error(module, httperr):
module.fail_json(msg=err_message)
-def update_firmware_url_redfish(module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls):
+def get_error_syslog(idrac, curr_time, uri):
+ error_log_found = False
+ msg = None
+ # 'SYS226' Unable to transfer a file, Catalog/Catalog.xml, because of the
+ # reason described by the code 404 sent by the HTTP remote host server.
+ # 'SYS252' Unable to transfer a file, Catalog/Catalog.xml, because the file is
+ # not available at the remote host location.
+ # 'SYS261' Unable to transfer the file, Catalog/catalog.xml, because initial network
+ # connection to the remote host server is not successfully started.
+ error_log_ids = ['SYS229', 'SYS227', 'RED132', 'JCP042', 'RED068', 'RED137']
+ intrvl = 5
+ retries = 60 // intrvl
+ try:
+ if not curr_time:
+ resp = idrac.invoke_request(LOG_SERVICE_URI, "GET")
+ uri = resp.json_data.get('Entries').get('@odata.id')
+ curr_time = resp.json_data.get('DateTime')
+ fltr = "?$filter=Created%20ge%20'{0}'".format(curr_time)
+ fltr_uri = "{0}{1}".format(uri, fltr)
+ while retries:
+ resp = idrac.invoke_request(fltr_uri, "GET")
+ logs_list = resp.json_data.get("Members")
+ for log in logs_list:
+ for err_id in error_log_ids:
+ if err_id in log.get('MessageId'):
+ error_log_found = True
+ msg = log.get('Message')
+ break
+ if msg or error_log_found:
+ break
+ if msg or error_log_found:
+ break
+ retries = retries - 1
+ time.sleep(intrvl)
+ else:
+ msg = "No Error log found."
+ error_log_found = False
+ except Exception:
+ msg = "No Error log found."
+ error_log_found = False
+ return error_log_found, msg
+
+
+def update_firmware_url_redfish(module, idrac, share_path, apply_update, reboot, job_wait, payload, repo_urls):
"""Update firmware through HTTP/HTTPS/FTP and return the job details."""
- repo_url = urlparse(share_name)
+ repo_url = urlparse(share_path)
job_details, status = {}, {}
ipaddr = repo_url.netloc
share_type = repo_url.scheme
sharename = repo_url.path.strip('/')
- payload['IPAddress'] = ipaddr
if repo_url.path:
payload['ShareName'] = sharename
+ payload['IPAddress'] = ipaddr
payload['ShareType'] = SHARE_TYPE[share_type]
install_url = PATH
get_repo_url = GET_REPO_BASED_UPDATE_LIST_PATH
@@ -363,8 +467,18 @@ def update_firmware_url_redfish(module, idrac, share_name, apply_update, reboot,
install_url = actions.get("#DellSoftwareInstallationService.InstallFromRepository", {}).get("target", PATH)
get_repo_url = actions.get("#DellSoftwareInstallationService.GetRepoBasedUpdateList", {}).\
get("target", GET_REPO_BASED_UPDATE_LIST_PATH)
+ try:
+ log_resp = idrac.invoke_request(LOG_SERVICE_URI, "GET")
+ log_uri = log_resp.json_data.get('Entries').get('@odata.id')
+ curr_time = log_resp.json_data.get('DateTime')
+ except Exception:
+ log_uri = iDRAC9_LC_LOG
+ curr_time = None
resp = idrac.invoke_request(install_url, method="POST", data=payload)
+ error_log_found, msg = get_error_syslog(idrac, curr_time, log_uri)
job_id = get_jobid(module, resp)
+ if error_log_found:
+ module.exit_json(msg=msg, failed=True, job_id=job_id)
resp, msg = wait_for_job_completion(module, JOB_URI.format(job_id=job_id), job_wait, reboot, apply_update)
if not msg:
status = resp.json_data
@@ -388,17 +502,25 @@ def update_firmware_url_omsdk(module, idrac, share_name, catalog_file_name, appl
ipaddr = repo_url.netloc
share_type = repo_url.scheme
sharename = repo_url.path.strip('/')
+ proxy_support = PROXY_SUPPORT[module.params["proxy_support"]]
+ proxy_type = module.params.get("proxy_type") if module.params.get("proxy_type") is not None else "HTTP"
+ proxy_server = module.params.get("proxy_server") if module.params.get("proxy_server") is not None else ""
+ proxy_port = module.params.get("proxy_port") if module.params.get("proxy_port") is not None else 80
+ proxy_uname = module.params.get("proxy_uname")
+ proxy_passwd = module.params.get("proxy_passwd")
if ipaddr == "downloads.dell.com":
- status = idrac.update_mgr.update_from_dell_repo_url(ipaddress=ipaddr, share_type=share_type,
- share_name=sharename, catalog_file=catalog_file_name,
- apply_update=apply_update, reboot_needed=reboot,
- ignore_cert_warning=ignore_cert_warning, job_wait=job_wait)
+ status = idrac.update_mgr.update_from_dell_repo_url(
+ ipaddress=ipaddr, share_type=share_type, share_name=sharename, catalog_file=catalog_file_name,
+ apply_update=apply_update, reboot_needed=reboot, ignore_cert_warning=ignore_cert_warning, job_wait=job_wait,
+ proxy_support=proxy_support, proxy_type=proxy_type, proxy_server=proxy_server, proxy_port=proxy_port,
+ proxy_uname=proxy_uname, proxy_passwd=proxy_passwd)
get_check_mode_status(status, module)
else:
- status = idrac.update_mgr.update_from_repo_url(ipaddress=ipaddr, share_type=share_type,
- share_name=sharename, catalog_file=catalog_file_name,
- apply_update=apply_update, reboot_needed=reboot,
- ignore_cert_warning=ignore_cert_warning, job_wait=job_wait)
+ status = idrac.update_mgr.update_from_repo_url(
+ ipaddress=ipaddr, share_type=share_type, share_name=sharename, catalog_file=catalog_file_name,
+ apply_update=apply_update, reboot_needed=reboot, ignore_cert_warning=ignore_cert_warning, job_wait=job_wait,
+ proxy_support=proxy_support, proxy_type=proxy_type, proxy_server=proxy_server,
+ proxy_port=proxy_port, proxy_uname=proxy_uname, proxy_passwd=proxy_passwd)
get_check_mode_status(status, module)
return status, job_details
@@ -434,8 +556,8 @@ def update_firmware_omsdk(idrac, module):
upd_share = FileOnShare(remote="{0}{1}{2}".format(share_name, os.sep, catalog_file_name),
mount_point=module.params['share_mnt'], isFolder=False,
creds=UserCredentials(share_user, share_pwd))
- msg['update_status'] = idrac.update_mgr.update_from_repo(upd_share, apply_update=apply_update,
- reboot_needed=reboot, job_wait=job_wait)
+ msg['update_status'] = idrac.update_mgr.update_from_repo(
+ upd_share, apply_update=apply_update, reboot_needed=reboot, job_wait=job_wait,)
get_check_mode_status(msg['update_status'], module)
json_data, repo_status, failed = msg['update_status']['job_details'], False, False
@@ -512,6 +634,20 @@ def update_firmware_redfish(idrac, module, repo_urls):
payload['Password'] = share_pwd
if share_name.lower().startswith(('http://', 'https://', 'ftp://')):
+ proxy = module.params.get("proxy_support")
+ if proxy == "ParametersProxy":
+ proxy_dict = {"proxy_server": "ProxyServer",
+ "proxy_port": "ProxyPort",
+ "proxy_support": "ProxySupport",
+ "proxy_type": "ProxyType",
+ "proxy_uname": "ProxyUname",
+ "proxy_passwd": "ProxyPasswd"}
+ for pk, pv in proxy_dict.items():
+ prm = module.params.get(pk)
+ if prm is not None:
+ payload[pv] = prm
+ elif proxy == "DefaultProxy":
+ payload["ProxySupport"] = module.params.get("proxy_support")
msg['update_status'], job_details = update_firmware_url_redfish(
module, idrac, share_name, apply_update, reboot, job_wait, payload, repo_urls)
if job_details:
@@ -523,8 +659,8 @@ def update_firmware_redfish(idrac, module, repo_urls):
payload['ShareName'] = '\\'.join(cifs[3:])
payload['ShareType'] = 'CIFS'
else:
- nfs = urlparse(share_name)
- payload['IPAddress'] = nfs.scheme
+ nfs = urlparse("nfs://" + share_name)
+ payload['IPAddress'] = nfs.netloc.strip(':')
payload['ShareName'] = nfs.path.strip('/')
payload['ShareType'] = 'NFS'
resp = idrac.invoke_request(PATH, method="POST", data=payload)
@@ -596,19 +732,30 @@ def update_firmware_redfish(idrac, module, repo_urls):
def main():
specs = {
"share_name": {"required": True, "type": 'str'},
- "share_user": {"required": False, "type": 'str'},
- "share_password": {"required": False, "type": 'str', "aliases": ['share_pwd'], "no_log": True},
- "share_mnt": {"required": False, "type": 'str'},
-
- "catalog_file_name": {"required": False, "type": 'str', "default": "Catalog.xml"},
- "reboot": {"required": False, "type": 'bool', "default": False},
- "job_wait": {"required": False, "type": 'bool', "default": True},
- "ignore_cert_warning": {"required": False, "type": 'bool', "default": True},
- "apply_update": {"required": False, "type": 'bool', "default": True},
+ "share_user": {"type": 'str'},
+ "share_password": {"type": 'str', "aliases": ['share_pwd'], "no_log": True},
+ "share_mnt": {"type": 'str'},
+
+ "catalog_file_name": {"type": 'str', "default": "Catalog.xml"},
+ "reboot": {"type": 'bool', "default": False},
+ "job_wait": {"type": 'bool', "default": True},
+ "ignore_cert_warning": {"type": 'bool', "default": True},
+ "apply_update": {"type": 'bool', "default": True},
+ # proxy params
+ "proxy_support": {"default": 'Off', "type": 'str', "choices": ["Off", "ParametersProxy", "DefaultProxy"]},
+ "proxy_type": {"type": 'str', "choices": ["HTTP", "SOCKS"]},
+ "proxy_server": {"type": 'str'},
+ "proxy_port": {"type": 'int'},
+ "proxy_uname": {"type": 'str'},
+ "proxy_passwd": {"type": 'str', "no_log": True},
}
specs.update(idrac_auth_params)
module = AnsibleModule(
argument_spec=specs,
+ required_if=[
+ # ['proxy_type', 'SOCKS', ('proxy_port',)],
+ ['proxy_support', 'ParametersProxy', ('proxy_server', 'proxy_type', 'proxy_port',)],
+ ],
supports_check_mode=True)
redfish_check = False
@@ -637,7 +784,10 @@ def main():
status = update_firmware_omsdk(idrac, module)
except HTTPError as err:
module.fail_json(msg=str(err), update_status=json.load(err))
- except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ except URLError as err:
+        message = err.reason if err.reason else str(err)
+ module.exit_json(msg=message, unreachable=True)
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, SSLError) as e:
module.fail_json(msg=str(e))
except Exception as exc:
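# Editorial sketch, not part of this patch: how the ParametersProxy module options are
# folded into the InstallFromRepository payload by the proxy_dict mapping added above.
# The sample option values are hypothetical.
sample_params = {"proxy_support": "ParametersProxy", "proxy_type": "HTTP",
                 "proxy_server": "192.168.1.10", "proxy_port": 80,
                 "proxy_uname": None, "proxy_passwd": None}
proxy_key_map = {"proxy_server": "ProxyServer", "proxy_port": "ProxyPort",
                 "proxy_support": "ProxySupport", "proxy_type": "ProxyType",
                 "proxy_uname": "ProxyUname", "proxy_passwd": "ProxyPasswd"}
payload_update = {redfish_key: sample_params[option]
                  for option, redfish_key in proxy_key_map.items()
                  if sample_params.get(option) is not None}
# payload_update -> {'ProxyServer': '192.168.1.10', 'ProxyPort': 80,
#                    'ProxySupport': 'ParametersProxy', 'ProxyType': 'HTTP'}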
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
index 3f644f85e..b4e4a37e4 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_firmware_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -24,10 +24,11 @@ extends_documentation_fragment:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Rajeev Arakkal (@rajeevarakkal)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -35,10 +36,10 @@ EXAMPLES = """
---
- name: Get Installed Firmware Inventory
dellemc.openmanage.idrac_firmware_info:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
"""
RETURN = r'''
@@ -109,13 +110,6 @@ from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac i
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-try:
- from omsdk.sdkfile import LocalFile
- from omsdk.catalog.sdkupdatemgr import UpdateManager
- from omdrivers.helpers.iDRAC.UpdateHelper import UpdateHelper
- HAS_OMSDK = True
-except ImportError:
- HAS_OMSDK = False
# Main
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py
new file mode 100644
index 000000000..565c61cd4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_license.py
@@ -0,0 +1,1118 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.7.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+---
+module: idrac_license
+short_description: Configure iDRAC licenses
+version_added: "8.7.0"
+description:
+  - This module allows you to import, export, and delete licenses on iDRAC.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ license_id:
+ description:
+ - Entitlement ID of the license that is to be imported, exported or deleted.
+ - I(license_id) is required when I(delete) is C(true) or I(export) is C(true).
+ type: str
+ aliases: ['entitlement_id']
+ delete:
+ description:
+ - Delete the license from the iDRAC.
+ - When I(delete) is C(true), then I(license_id) is required.
+ - I(delete) is mutually exclusive with I(export) and I(import).
+ type: bool
+ default: false
+ export:
+ description:
+ - Export the license from the iDRAC.
+      - When I(export) is C(true), I(license_id) and I(share_parameters) are required.
+ - I(export) is mutually exclusive with I(delete) and I(import).
+ type: bool
+ default: false
+ import:
+ description:
+      - Import the license to the iDRAC.
+ - When I(import) is C(true), I(share_parameters) is required.
+ - I(import) is mutually exclusive with I(delete) and I(export).
+ type: bool
+ default: false
+ share_parameters:
+ description:
+ - Parameters that are required for the import and export operation of a license.
+ - I(share_parameters) is required when I(export) or I(import) is C(true).
+ type: dict
+ suboptions:
+ share_type:
+ description:
+ - Share type of the network share.
+ - C(local) uses local path for I(import) and I(export) operation.
+ - C(nfs) uses NFS share for I(import) and I(export) operation.
+ - C(cifs) uses CIFS share for I(import) and I(export) operation.
+ - C(http) uses HTTP share for I(import) and I(export) operation.
+ - C(https) uses HTTPS share for I(import) and I(export) operation.
+ type: str
+ choices: [local, nfs, cifs, http, https]
+ default: local
+ file_name:
+ description:
+ - License file name for I(import) and I(export) operation.
+ - I(file_name) is required when I(import) is C(true).
+ - For the I(import) operation, when I(share_type) is C(local), the supported extensions for I(file_name) are '.txt' and '.xml'.
+        For other share types, the supported extension is '.xml'.
+ type: str
+ ip_address:
+ description:
+ - IP address of the network share.
+ - I(ip_address) is required when I(share_type) is C(nfs), C(cifs), C(http) or C(https).
+ type: str
+ share_name:
+ description:
+ - Network share or local path of the license file.
+ type: str
+ workgroup:
+ description:
+ - Workgroup of the network share.
+ - I(workgroup) is applicable only when I(share_type) is C(cifs).
+ type: str
+ username:
+ description:
+ - Username of the network share.
+ - I(username) is required when I(share_type) is C(cifs).
+ type: str
+ password:
+ description:
+ - Password of the network share.
+ - I(password) is required when I(share_type) is C(cifs).
+ type: str
+ ignore_certificate_warning:
+ description:
+ - Ignores the certificate warning while connecting to Share and is only applicable when I(share_type) is C(https).
+ - C(off) ignores the certificate warning.
+ - C(on) does not ignore the certificate warning.
+ type: str
+ choices: ["off", "on"]
+ default: "off"
+ proxy_support:
+ description:
+          - Specifies whether a proxy is to be used.
+ - C(off) does not use proxy settings.
+ - C(default_proxy) uses the default proxy settings.
+ - C(parameters_proxy) uses the specified proxy settings. I(proxy_server) is required when I(proxy_support) is C(parameters_proxy).
+          - I(proxy_support) is only applicable when I(share_type) is C(http) or C(https).
+ type: str
+ choices: ["off", "default_proxy", "parameters_proxy"]
+ default: "off"
+ proxy_type:
+ description:
+ - The proxy type of the proxy server.
+ - C(http) to select HTTP proxy.
+ - C(socks) to select SOCKS proxy.
+          - I(proxy_type) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ choices: [http, socks]
+ default: http
+ proxy_server:
+ description:
+ - The IP address of the proxy server.
+ - I(proxy_server) is required when I(proxy_support) is C(parameters_proxy).
+          - I(proxy_server) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ proxy_port:
+ description:
+ - The port of the proxy server.
+          - I(proxy_port) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: int
+ default: 80
+ proxy_username:
+ description:
+ - The username of the proxy server.
+          - I(proxy_username) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ proxy_password:
+ description:
+ - The password of the proxy server.
+          - I(proxy_password) is only applicable when I(share_type) is C(http) or C(https) and when I(proxy_support) is C(parameters_proxy).
+ type: str
+ resource_id:
+ type: str
+ description:
+ - Id of the resource.
+ - If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Rajshekar P(@rajshekarp87)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports only iDRAC9 and above.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module does not support C(check_mode).
+ - When I(share_type) is C(local) for I(import) and I(export) operations, job_details are not displayed.
+"""
+
+EXAMPLES = r"""
+---
+- name: Export a license from iDRAC to local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "local"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+
+- name: Export a license from iDRAC to NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "nfs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+
+- name: Export a license from iDRAC to CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "cifs"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ workgroup: "workgroup"
+
+- name: Export a license from iDRAC to HTTP share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "http"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_type: socks
+ proxy_server: "192.168.0.2"
+ proxy_port: 1080
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+- name: Export a license from iDRAC to HTTPS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ license_id: "LICENSE_123"
+ export: true
+ share_parameters:
+ share_type: "https"
+ share_name: "/path/to/share"
+ file_name: "license_file"
+ ip_address: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ignore_certificate_warning: "on"
+
+- name: Import a license to iDRAC from local
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: local
+ share_name: "/path/to/share"
+
+- name: Import a license to iDRAC from NFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: nfs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+
+- name: Import a license to iDRAC from CIFS share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: cifs
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+
+- name: Import a license to iDRAC from HTTP share
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: http
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+
+- name: Import a license to iDRAC from HTTPS share via proxy
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ import: true
+ share_parameters:
+ file_name: "license_file_name.xml"
+ share_type: https
+ ip_address: "192.168.0.1"
+ share_name: "/path/to/share"
+ username: "username"
+ password: "password"
+ proxy_support: "parameters_proxy"
+ proxy_server: "192.168.0.2"
+ proxy_port: 808
+ proxy_username: "proxy_username"
+ proxy_password: "proxy_password"
+
+- name: Delete a License from iDRAC
+ dellemc.openmanage.idrac_license:
+ idrac_ip: 198.162.0.1
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+    license_id: "LICENSE_123"
+ delete: true
+"""
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the license operation.
+ returned: always
+ sample: "Successfully exported the license."
+job_details:
+ description: Returns the output for status of the job.
+ returned: For import and export operations
+ type: dict
+ sample: {
+ "ActualRunningStartTime": "2024-01-09T05:16:19",
+ "ActualRunningStopTime": "2024-01-09T05:16:19",
+ "CompletionTime": "2024-01-09T05:16:19",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Completed",
+ "JobType": "LicenseExport",
+ "Message": "The command was successful.",
+ "MessageArgs": [],
+ "MessageId": "LIC900",
+ "Name": "Export: License",
+ "PercentComplete": 100,
+ "StartTime": "2024-01-09T05:16:19",
+ "TargetSettingsURI": null
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.8.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "Base.1.8.AccessDenied",
+ "Message": "The authentication credentials included with this request are missing or invalid.",
+ "MessageArgs": [],
+ "RelatedProperties": [],
+ "Severity": "Critical",
+ "Resolution": "Attempt to ensure that the URI is correct and that the service has the appropriate credentials."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+import os
+import base64
+from urllib.error import HTTPError, URLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.compat.version import LooseVersion
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ get_idrac_firmware_version, get_dynamic_uri, get_manager_res_id,
+ validate_and_get_first_resource_id_uri, remove_key, idrac_redfish_job_tracking)
+
+REDFISH = "/redfish/v1"
+MANAGERS_URI = "/redfish/v1/Managers"
+IDRAC_JOB_URI = "{res_uri}/Jobs/{job_id}"
+
+OEM = "Oem"
+MANUFACTURER = "Dell"
+LICENSE_MANAGEMENT_SERVICE = "DellLicenseManagementService"
+ACTIONS = "Actions"
+EXPORT_LOCAL = "#DellLicenseManagementService.ExportLicense"
+EXPORT_NETWORK_SHARE = "#DellLicenseManagementService.ExportLicenseToNetworkShare"
+IMPORT_LOCAL = "#DellLicenseManagementService.ImportLicense"
+IMPORT_NETWORK_SHARE = "#DellLicenseManagementService.ImportLicenseFromNetworkShare"
+ODATA = "@odata.id"
+ODATA_REGEX = "(.*?)@odata"
+
+INVALID_LICENSE_MSG = "License with ID '{license_id}' does not exist on the iDRAC."
+SUCCESS_EXPORT_MSG = "Successfully exported the license."
+SUCCESS_DELETE_MSG = "Successfully deleted the license."
+SUCCESS_IMPORT_MSG = "Successfully imported the license."
+FAILURE_MSG = "Unable to '{operation}' the license with id '{license_id}' as it does not exist."
+FAILURE_IMPORT_MSG = "Unable to import the license."
+NO_FILE_MSG = "License file not found."
+UNSUPPORTED_FIRMWARE_MSG = "iDRAC firmware version is not supported."
+NO_OPERATION_SKIP_MSG = "Task is skipped as none of import, export or delete is specified."
+INVALID_FILE_MSG = "File extension is invalid. Supported extensions for local 'share_type' " \
+ "are: .txt and .xml, and for network 'share_type' is: .xml."
+INVALID_DIRECTORY_MSG = "Provided directory path '{path}' is not valid."
+INSUFFICIENT_DIRECTORY_PERMISSION_MSG = "Provided directory path '{path}' is not writable. " \
+                                        "Please check if the directory has appropriate permissions."
+MISSING_FILE_NAME_PARAMETER_MSG = "Missing required parameter 'file_name'."
+
+PROXY_SUPPORT = {"off": "Off", "default_proxy": "DefaultProxy", "parameters_proxy": "ParametersProxy"}
+
+
+class License():
+ def __init__(self, idrac, module):
+ """
+ Initializes the class instance with the provided idrac and module parameters.
+
+ :param idrac: The idrac parameter.
+ :type idrac: Any
+ :param module: The module parameter.
+ :type module: Any
+ """
+ self.idrac = idrac
+ self.module = module
+
+ def execute(self):
+ """
+        Executes the requested license operation. Overridden by the subclasses.
+
+        :return: None
+ """
+
+ def check_license_id(self, license_id):
+ """
+        Check whether a license with the given ID exists on the iDRAC.
+
+        :param license_id: The ID of the license to check.
+        :return: The response from the license URL.
+ """
+ license_uri = self.get_license_url()
+ license_url = license_uri + f"/{license_id}"
+ try:
+ response = self.idrac.invoke_request(license_url, 'GET')
+ return response
+ except Exception:
+ self.module.exit_json(msg=INVALID_LICENSE_MSG.format(license_id=license_id), skipped=True)
+
+ def get_license_url(self):
+ """
+ Retrieves the license URL for the current user.
+
+ :return: The license URL as a string.
+ """
+ v1_resp = get_dynamic_uri(self.idrac, REDFISH)
+ license_service_url = v1_resp.get('LicenseService', {}).get(ODATA, {})
+ license_service_resp = get_dynamic_uri(self.idrac, license_service_url)
+ license_url = license_service_resp.get('Licenses', {}).get(ODATA, {})
+ return license_url
+
+ def get_job_status(self, license_job_response):
+ """
+ Get the status of a job.
+
+ Args:
+            license_job_response (object): The response object for the license job.
+
+ Returns:
+ dict: The job details.
+ """
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_tracking_uri = license_job_response.headers.get("Location")
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = IDRAC_JOB_URI.format(job_id=job_id, res_uri=res_uri[0])
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if job_failed:
+ self.module.exit_json(
+ msg=job_dict.get('Message'),
+ failed=True,
+ job_details=job_dict)
+ return job_dict
+
+ def get_share_details(self):
+ """
+ Retrieves the share details from the given module.
+
+ Args:
+ module (object): The module object containing the share parameters.
+
+ Returns:
+ dict: A dictionary containing the share details with the following keys:
+ - IPAddress (str): The IP address of the share.
+ - ShareName (str): The name of the share.
+ - UserName (str): The username for accessing the share.
+ - Password (str): The password for accessing the share.
+ """
+ share_details = {}
+ share_details["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ share_details["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ share_details["UserName"] = self.module.params.get('share_parameters').get('username')
+ share_details["Password"] = self.module.params.get('share_parameters').get('password')
+ return share_details
+
+ def get_proxy_details(self):
+ """
+ Retrieves the proxy details based on the provided module parameters.
+
+ Args:
+ self: The instance of the class.
+ module: The module object containing the parameters.
+
+ Returns:
+ dict: A dictionary containing the proxy details.
+ """
+ proxy_details = {}
+ proxy_details["ShareType"] = self.module.params.get('share_parameters').get('share_type').upper()
+ share_details = self.get_share_details()
+ proxy_details.update(share_details)
+ proxy_details["IgnoreCertWarning"] = self.module.params.get('share_parameters').get('ignore_certificate_warning').capitalize()
+ if self.module.params.get('share_parameters').get('proxy_support') == "parameters_proxy":
+ proxy_details["ProxySupport"] = PROXY_SUPPORT[self.module.params.get('share_parameters').get('proxy_support')]
+ proxy_details["ProxyType"] = self.module.params.get('share_parameters').get('proxy_type').upper()
+ proxy_details["ProxyServer"] = self.module.params.get('share_parameters').get('proxy_server')
+ proxy_details["ProxyPort"] = str(self.module.params.get('share_parameters').get('proxy_port'))
+ if self.module.params.get('share_parameters').get('proxy_username') and self.module.params.get('share_parameters').get('proxy_password'):
+ proxy_details["ProxyUname"] = self.module.params.get('share_parameters').get('proxy_username')
+ proxy_details["ProxyPasswd"] = self.module.params.get('share_parameters').get('proxy_password')
+ return proxy_details
+
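# --- Illustrative sketch (editorial, not part of the patch) ---
# Assuming share_type "https", proxy_support "parameters_proxy" and proxy_type
# "http", get_proxy_details() would build a payload roughly like the one below.
# All values are made up, and the "ParametersProxy" string is an assumption about
# the PROXY_SUPPORT mapping defined earlier in this module.
proxy_details_example = {
    "ShareType": "HTTPS",
    "IPAddress": "192.168.0.5",
    "ShareName": "license_share",
    "UserName": "share_user",
    "Password": "share_password",
    "IgnoreCertWarning": "Off",
    "ProxySupport": "ParametersProxy",   # assumed value of PROXY_SUPPORT["parameters_proxy"]
    "ProxyType": "HTTP",
    "ProxyServer": "proxy.example.com",
    "ProxyPort": "3128",
    "ProxyUname": "proxy_user",
    "ProxyPasswd": "proxy_password",
}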
+
+class DeleteLicense(License):
+ def execute(self):
+ """
+ Executes the delete operation for a given license ID.
+
+ Args:
+ module (object): The Ansible module object.
+
+ Returns:
+ object: The response object from the delete operation.
+ """
+ license_id = self.module.params.get('license_id')
+ self.check_license_id(license_id)
+ license_url = self.get_license_url()
+ delete_license_url = license_url + f"/{license_id}"
+ delete_license_response = self.idrac.invoke_request(delete_license_url, 'DELETE')
+ status = delete_license_response.status_code
+ if status == 204:
+ self.module.exit_json(msg=SUCCESS_DELETE_MSG, changed=True)
+ else:
+ self.module.exit_json(msg=FAILURE_MSG.format(operation="delete", license_id=license_id), failed=True)
+
+
+class ExportLicense(License):
+ STATUS_SUCCESS = [200, 202]
+
+ def execute(self):
+ """
+ Executes the export operation for a given license ID.
+
+ :param module: The Ansible module object.
+ :type module: AnsibleModule
+
+ :return: The response from the export operation.
+ :rtype: Response
+ """
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ license_id = self.module.params.get('license_id')
+ self.check_license_id(license_id)
+ export_license_url = self.__get_export_license_url()
+ job_status = {}
+ if share_type == "local":
+ export_license_response = self.__export_license_local(export_license_url)
+ elif share_type in ["http", "https"]:
+ export_license_response = self.__export_license_http(export_license_url)
+ job_status = self.get_job_status(export_license_response)
+ elif share_type == "cifs":
+ export_license_response = self.__export_license_cifs(export_license_url)
+ job_status = self.get_job_status(export_license_response)
+ elif share_type == "nfs":
+ export_license_response = self.__export_license_nfs(export_license_url)
+ job_status = self.get_job_status(export_license_response)
+ status = export_license_response.status_code
+ if status in self.STATUS_SUCCESS:
+ self.module.exit_json(msg=SUCCESS_EXPORT_MSG, changed=True, job_details=job_status)
+ else:
+ self.module.exit_json(msg=FAILURE_MSG.format(operation="export", license_id=license_id), failed=True, job_details=job_status)
+
+ def __export_license_local(self, export_license_url):
+ """
+ Export the license to a local directory.
+
+ Args:
+ module (object): The Ansible module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ object: The license status after exporting.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ path = self.module.params.get('share_parameters').get('share_name')
+ if not (os.path.exists(path) or os.path.isdir(path)):
+ self.module.exit_json(msg=INVALID_DIRECTORY_MSG.format(path=path), failed=True)
+ if not os.access(path, os.W_OK):
+ self.module.exit_json(msg=INSUFFICIENT_DIRECTORY_PERMISSION_MSG.format(path=path), failed=True)
+ license_name = self.module.params.get('share_parameters').get('file_name')
+ if license_name:
+ license_file_name = f"{license_name}_iDRAC_license.txt"
+ else:
+ license_file_name = f"{self.module.params['license_id']}_iDRAC_license.txt"
+ license_status = self.idrac.invoke_request(export_license_url, "POST", data=payload)
+ license_data = license_status.json_data
+ license_file = license_data.get("LicenseFile")
+ file_name = os.path.join(path, license_file_name)
+ with open(file_name, "w") as fp:
+ fp.writelines(license_file)
+ return license_status
+
+ def __export_license_http(self, export_license_url):
+ """
+ Export the license using the HTTP protocol.
+
+ Args:
+ module (object): The module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ str: The export status.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ proxy_details = self.get_proxy_details()
+ payload.update(proxy_details)
+ export_status = self.__export_license(payload, export_license_url)
+ return export_status
+
+ def __export_license_cifs(self, export_license_url):
+ """
+ Export the license using CIFS share type.
+
+ Args:
+ module (object): The Ansible module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ str: The export status.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ payload["ShareType"] = "CIFS"
+ if self.module.params.get('share_parameters').get('workgroup'):
+ payload["Workgroup"] = self.module.params.get('share_parameters').get('workgroup')
+ share_details = self.get_share_details()
+ payload.update(share_details)
+ export_status = self.__export_license(payload, export_license_url)
+ return export_status
+
+ def __export_license_nfs(self, export_license_url):
+ """
+ Export the license using NFS share type.
+
+ Args:
+ module (object): The Ansible module object.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ dict: The export status of the license.
+ """
+ payload = {}
+ payload["EntitlementID"] = self.module.params.get('license_id')
+ payload["ShareType"] = "NFS"
+ payload["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ payload["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ export_status = self.__export_license(payload, export_license_url)
+ return export_status
+
+ def __get_export_license_url(self):
+ """
+ Get the export license URL.
+
+ :param module: The module object.
+ :type module: object
+ :return: The export license URL.
+ :rtype: str
+ """
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LICENSE_MANAGEMENT_SERVICE, {}).get(ODATA, {})
+ action_resp = get_dynamic_uri(self.idrac, url)
+ license_service = EXPORT_LOCAL if self.module.params.get('share_parameters').get('share_type') == "local" else EXPORT_NETWORK_SHARE
+ export_url = action_resp.get(ACTIONS, {}).get(license_service, {}).get('target', {})
+ return export_url
+
+ def __export_license(self, payload, export_license_url):
+ """
+ Export the license to a file.
+
+ Args:
+ module (object): The Ansible module object.
+ payload (dict): The payload containing the license information.
+ export_license_url (str): The URL for exporting the license.
+
+ Returns:
+ dict: The license status after exporting.
+ """
+ license_name = self.module.params.get('share_parameters').get('file_name')
+ if license_name:
+ license_file_name = f"{license_name}_iDRAC_license.xml"
+ else:
+ license_file_name = f"{self.module.params['license_id']}_iDRAC_license.xml"
+ payload["FileName"] = license_file_name
+ license_status = self.idrac.invoke_request(export_license_url, "POST", data=payload)
+ return license_status
+
+
+class ImportLicense(License):
+ STATUS_SUCCESS = [200, 202]
+
+ def execute(self):
+ """
+ Executes the import license process based on the given module parameters.
+
+ Args:
+ module (object): The Ansible module object.
+
+ Returns:
+ object: The response object from the import license API call.
+ """
+ if not self.module.params.get('share_parameters').get('file_name'):
+ self.module.exit_json(msg=MISSING_FILE_NAME_PARAMETER_MSG, failed=True)
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ self.__check_file_extension()
+ import_license_url = self.__get_import_license_url()
+ resource_id = get_manager_res_id(self.idrac)
+ job_status = {}
+ if share_type == "local":
+ import_license_response = self.__import_license_local(import_license_url, resource_id)
+ elif share_type in ["http", "https"]:
+ import_license_response = self.__import_license_http(import_license_url, resource_id)
+ job_status = self.get_job_status(import_license_response)
+ elif share_type == "cifs":
+ import_license_response = self.__import_license_cifs(import_license_url, resource_id)
+ job_status = self.get_job_status(import_license_response)
+ elif share_type == "nfs":
+ import_license_response = self.__import_license_nfs(import_license_url, resource_id)
+ job_status = self.get_job_status(import_license_response)
+ status = import_license_response.status_code
+ if status in self.STATUS_SUCCESS:
+ self.module.exit_json(msg=SUCCESS_IMPORT_MSG, changed=True, job_details=job_status)
+ else:
+ self.module.exit_json(msg=FAILURE_IMPORT_MSG, failed=True, job_details=job_status)
+
+ def __import_license_local(self, import_license_url, resource_id):
+ """
+ Import a license locally.
+
+ Args:
+ module (object): The Ansible module object.
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ dict: The import status of the license.
+ """
+ payload = {}
+ path = self.module.params.get('share_parameters').get('share_name')
+ if not (os.path.exists(path) or os.path.isdir(path)):
+ self.module.exit_json(msg=INVALID_DIRECTORY_MSG.format(path=path), failed=True)
+ file_path = self.module.params.get('share_parameters').get('share_name') + "/" + self.module.params.get('share_parameters').get('file_name')
+        file_exists = os.path.exists(file_path)
+        if file_exists:
+ with open(file_path, "rb") as cert:
+ cert_content = cert.read()
+ read_file = base64.encodebytes(cert_content).decode('ascii')
+ else:
+ self.module.exit_json(msg=NO_FILE_MSG, failed=True)
+ payload["LicenseFile"] = read_file
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ try:
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ message_details = filter_err.get('error').get('@Message.ExtendedInfo')[0]
+ message_id = message_details.get('MessageId')
+ if 'LIC018' in message_id:
+ self.module.exit_json(msg=message_details.get('Message'), skipped=True)
+ else:
+ self.module.exit_json(msg=message_details.get('Message'), error_info=filter_err, failed=True)
+ return import_status
+
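# --- Illustrative sketch (editorial, not part of the patch) ---
# For a local import, the license file content is base64-encoded before it is
# placed in payload["LicenseFile"], as done above. A standalone round trip:
import base64

raw = b"<License>...</License>"                    # stands in for the file contents
encoded = base64.encodebytes(raw).decode("ascii")  # what the module sends
assert base64.decodebytes(encoded.encode("ascii")) == raw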
+ def __import_license_http(self, import_license_url, resource_id):
+ """
+ Imports a license using HTTP.
+
+ Args:
+ module (object): The Ansible module object.
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ object: The import status.
+ """
+ payload = {}
+ payload["LicenseName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ proxy_details = self.get_proxy_details()
+ payload.update(proxy_details)
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ return import_status
+
+ def __import_license_cifs(self, import_license_url, resource_id):
+ """
+ Imports a license using CIFS share type.
+
+ Args:
+ self (object): The instance of the class.
+ module (object): The Ansible module object.
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ object: The import status of the license.
+ """
+ payload = {}
+ payload["ShareType"] = "CIFS"
+ payload["LicenseName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ if self.module.params.get('share_parameters').get('workgroup'):
+ payload["Workgroup"] = self.module.params.get('share_parameters').get('workgroup')
+ share_details = self.get_share_details()
+ payload.update(share_details)
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ return import_status
+
+ def __import_license_nfs(self, import_license_url, resource_id):
+ """
+ Import a license from an NFS share.
+
+ Args:
+ module (object): The Ansible module object.
+ import_license_url (str): The URL for importing the license.
+ resource_id (str): The ID of the resource.
+
+ Returns:
+ dict: The import status of the license.
+ """
+ payload = {}
+ payload["ShareType"] = "NFS"
+ payload["IPAddress"] = self.module.params.get('share_parameters').get('ip_address')
+ payload["ShareName"] = self.module.params.get('share_parameters').get('share_name')
+ payload["LicenseName"] = self.module.params.get('share_parameters').get('file_name')
+ payload["FQDD"] = resource_id
+ payload["ImportOptions"] = "Force"
+ import_status = self.idrac.invoke_request(import_license_url, "POST", data=payload)
+ return import_status
+
+ def __check_file_extension(self):
+ """
+ Check if the file extension of the given file name is valid.
+
+ :param module: The Ansible module object.
+ :type module: AnsibleModule
+
+ :return: None
+ """
+ share_type = self.module.params.get('share_parameters').get('share_type')
+ file_name = self.module.params.get('share_parameters').get('file_name')
+ valid_extensions = {".txt", ".xml"} if share_type == "local" else {".xml"}
+ file_extension = any(file_name.lower().endswith(ext) for ext in valid_extensions)
+ if not file_extension:
+ self.module.exit_json(msg=INVALID_FILE_MSG, failed=True)
+
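# --- Illustrative sketch (editorial, not part of the patch) ---
# A standalone restatement of the extension rule enforced above: local shares
# accept .txt and .xml license files, network shares accept only .xml.
def is_valid_license_file(file_name, share_type):
    valid_extensions = {".txt", ".xml"} if share_type == "local" else {".xml"}
    return any(file_name.lower().endswith(ext) for ext in valid_extensions)

assert is_valid_license_file("my_license.TXT", "local")
assert not is_valid_license_file("my_license.txt", "nfs")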
+ def __get_import_license_url(self):
+ """
+ Get the import license URL.
+
+ :param module: The module object.
+ :type module: object
+ :return: The import license URL.
+ :rtype: str
+ """
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, MANAGERS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ resp = get_dynamic_uri(self.idrac, uri)
+ url = resp.get('Links', {}).get(OEM, {}).get(MANUFACTURER, {}).get(LICENSE_MANAGEMENT_SERVICE, {}).get(ODATA, {})
+ action_resp = get_dynamic_uri(self.idrac, url)
+ license_service = IMPORT_LOCAL if self.module.params.get('share_parameters').get('share_type') == "local" else IMPORT_NETWORK_SHARE
+ import_url = action_resp.get(ACTIONS, {}).get(license_service, {}).get('target', {})
+ return import_url
+
+ def get_job_status(self, license_job_response):
+ res_uri = validate_and_get_first_resource_id_uri(self.module, self.idrac, MANAGERS_URI)
+ job_tracking_uri = license_job_response.headers.get("Location")
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = IDRAC_JOB_URI.format(job_id=job_id, res_uri=res_uri[0])
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(self.idrac, job_uri)
+ job_dict = remove_key(job_dict, regex_pattern=ODATA_REGEX)
+ if job_failed:
+ if job_dict.get('MessageId') == 'LIC018':
+ self.module.exit_json(msg=job_dict.get('Message'), skipped=True, job_details=job_dict)
+ else:
+ self.module.exit_json(
+ msg=job_dict.get('Message'),
+ failed=True,
+ job_details=job_dict)
+ return job_dict
+
+
+class LicenseType:
+ _license_classes = {
+ "import": ImportLicense,
+ "export": ExportLicense,
+ "delete": DeleteLicense,
+ }
+
+ @staticmethod
+ def license_operation(idrac, module):
+ """
+ Perform a license operation based on the given parameters.
+
+ :param idrac: The IDRAC object.
+ :type idrac: IDRAC
+ :param module: The Ansible module object.
+ :type module: AnsibleModule
+ :return: The license class object based on the license type.
+ :rtype: LicenseType
+ """
+ license_type = next((param for param in ["import", "export", "delete"] if module.params[param]), None)
+ if not license_type:
+ module.exit_json(msg=NO_OPERATION_SKIP_MSG, skipped=True)
+ license_class = LicenseType._license_classes.get(license_type)
+ return license_class(idrac, module)
+
+
+def main():
+ """
+ Main function that serves as the entry point for the program.
+
+ This function retrieves the argument specification using the `get_argument_spec` function and updates it with the `idrac_auth_params`.
+ It then creates an `AnsibleModule` object with the updated argument specification, specifying the mutually exclusive arguments,
+ required arguments if conditions are met, and setting `supports_check_mode` to `False`.
+
+ The function then attempts to establish a connection with the iDRAC Redfish API using the `iDRACRedfishAPI` class.
+ It retrieves the iDRAC firmware version using the `get_idrac_firmware_version` function and checks if it is less than or equal to '3.0'.
+ If it is, the function exits with a message indicating that the iDRAC firmware version is not supported and sets `failed` to `True`.
+
+ If the iDRAC firmware version is supported, the function creates a `LicenseType` object using the `license_operation` method of the
+ `LicenseType` class and calls the `execute` method on the `license_obj` object, passing in the `module` object.
+
+ If an `HTTPError` occurs, the function loads the error response as JSON, removes a specific key using a regular expression pattern,
+ and exits with the error message, the filtered error information, and sets `failed` to `True`.
+
+ If a `URLError` occurs, the function exits with the error message and sets `unreachable` to `True`.
+
+ If any of the following errors occur: `SSLValidationError`, `ConnectionError`, `TypeError`, `ValueError`, or `OSError`, the function
+ exits with the error message and sets `failed` to `True`.
+
+ Parameters:
+ None
+
+ Returns:
+ None
+ """
+ specs = get_argument_spec()
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[("import", "export", "delete")],
+ required_if=[
+ ["import", True, ("share_parameters",)],
+ ["export", True, ("license_id", "share_parameters",)],
+ ["delete", True, ("license_id",)]
+ ],
+ supports_check_mode=False
+ )
+
+ try:
+ with iDRACRedfishAPI(module.params) as idrac:
+ idrac_firmware_version = get_idrac_firmware_version(idrac)
+ if LooseVersion(idrac_firmware_version) <= '3.0':
+ module.exit_json(msg=UNSUPPORTED_FIRMWARE_MSG, failed=True)
+ license_obj = LicenseType.license_operation(idrac, module)
+ if license_obj:
+ license_obj.execute()
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern=ODATA_REGEX)
+ module.exit_json(msg=str(err), error_info=filter_err, failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+def get_argument_spec():
+ """
+    Returns a dictionary containing the argument spec for the module.
+    The argument spec defines the parameters, their types, and their options.
+ The dictionary has the following keys:
+ - "license_id": A string representing the license ID.
+ - "delete": A boolean representing whether to delete the license.
+ - "export": A boolean representing whether to export the license.
+ - "import": A boolean representing whether to import the license.
+ - "share_parameters": A dictionary representing the share parameters.
+ - "type": A string representing the share type.
+ - "options": A dictionary representing the options for the share parameters.
+ - "share_type": A string representing the share type.
+ - "file_name": A string representing the file name.
+ - "ip_address": A string representing the IP address.
+ - "share_name": A string representing the share name.
+ - "workgroup": A string representing the workgroup.
+ - "username": A string representing the username.
+ - "password": A string representing the password.
+ - "ignore_certificate_warning": A string representing whether to ignore certificate warnings.
+ - "proxy_support": A string representing the proxy support.
+ - "proxy_type": A string representing the proxy type.
+ - "proxy_server": A string representing the proxy server.
+ - "proxy_port": A integer representing the proxy port.
+ - "proxy_username": A string representing the proxy username.
+ - "proxy_password": A string representing the proxy password.
+ - "required_if": A list of lists representing the required conditions for the share parameters.
+ - "required_together": A list of lists representing the required conditions for the share parameters.
+ - "resource_id": A string representing the resource ID.
+ """
+ return {
+ "license_id": {"type": 'str', "aliases": ['entitlement_id']},
+ "delete": {"type": 'bool', "default": False},
+ "export": {"type": 'bool', "default": False},
+ "import": {"type": 'bool', "default": False},
+ "share_parameters": {
+ "type": 'dict',
+ "options": {
+ "share_type": {
+ "type": 'str',
+ "default": 'local',
+ "choices": ['local', 'nfs', 'cifs', 'http', 'https']
+ },
+ "file_name": {"type": 'str'},
+ "ip_address": {"type": 'str'},
+ "share_name": {"type": 'str'},
+ "workgroup": {"type": 'str'},
+ "username": {"type": 'str'},
+ "password": {"type": 'str', "no_log": True},
+ "ignore_certificate_warning": {
+ "type": 'str',
+ "default": "off",
+ "choices": ["off", "on"]
+ },
+ "proxy_support": {
+ "type": 'str',
+ "default": "off",
+ "choices": ["off", "default_proxy", "parameters_proxy"]
+ },
+ "proxy_type": {
+ "type": 'str',
+ "default": 'http',
+ "choices": ['http', 'socks']
+ },
+ "proxy_server": {"type": 'str'},
+ "proxy_port": {"type": 'int', "default": 80},
+ "proxy_username": {"type": 'str'},
+ "proxy_password": {"type": 'str', "no_log": True}
+ },
+ "required_if": [
+ ["share_type", "local", ["share_name"]],
+ ["share_type", "nfs", ["ip_address", "share_name"]],
+ ["share_type", "cifs", ["ip_address", "share_name", "username", "password"]],
+ ["share_type", "http", ["ip_address", "share_name"]],
+ ["share_type", "https", ["ip_address", "share_name"]],
+ ["proxy_support", "parameters_proxy", ["proxy_server"]]
+ ],
+ "required_together": [
+ ("username", "password"),
+ ("proxy_username", "proxy_password")
+ ]
+ },
+ "resource_id": {"type": 'str'}
+ }
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
index 2d555f9a2..c9376f4e2 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_job_status_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -23,17 +23,18 @@ extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
options:
job_id:
- required: True
+ required: true
type: str
description: JOB ID in the format "JID_123456789012".
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -41,11 +42,11 @@ EXAMPLES = """
---
- name: Show status of a Lifecycle Control job
dellemc.openmanage.idrac_lifecycle_controller_job_status_info:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
- ca_path: "/path/to/ca_cert.pem"
- job_id: "JID_1234567890"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: "JID_1234567890"
"""
RETURN = r'''
@@ -113,7 +114,7 @@ def main():
try:
with iDRACConnection(module.params) as idrac:
- job_id, msg, failed = module.params.get('job_id'), {}, False
+ job_id, msg = module.params.get('job_id'), {}
msg = idrac.job_mgr.get_job_status(job_id)
if msg.get('Status') == "Found Fault":
module.fail_json(msg="Job ID is invalid.")
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
index 984f8e3f4..60d1aaacb 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_jobs.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,12 +31,13 @@ options:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module does not support C(check_mode).
"""
EXAMPLES = """
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
index 74606260c..4a9f30f68 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_logs.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -28,7 +28,7 @@ options:
- Network share or local path.
- CIFS, NFS network share types are supported.
type: str
- required: True
+ required: true
share_user:
type: str
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
@@ -40,18 +40,19 @@ options:
job_wait:
description: Whether to wait for the running job completion or not.
type: bool
- default: True
+ default: true
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- Exporting data to a local share is supported only on iDRAC9-based PowerEdge Servers and later.
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module does not support C(check_mode).
"""
@@ -133,11 +134,13 @@ error_info:
"""
+import socket
+import json
+import copy
from ansible_collections.dellemc.openmanage.plugins.module_utils.dellemc_idrac import iDRACConnection, idrac_auth_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-import json
try:
from omsdk.sdkfile import file_share_manager
from omsdk.sdkcreds import UserCredentials
@@ -181,6 +184,10 @@ def run_export_lc_logs(idrac, module):
creds=UserCredentials(module.params['share_user'],
module.params['share_password']),
isFolder=True)
+ data = socket.getaddrinfo(module.params["idrac_ip"], module.params["idrac_port"])
+ if "AF_INET6" == data[0][0]._name_:
+ ip = copy.deepcopy(module.params["idrac_ip"])
+ lclog_file_name_format = "{ip}_%Y%m%d_%H%M%S_LC_Log.log".format(ip=ip.replace(":", ".").replace("..", "."))
lc_log_file = myshare.new_file(lclog_file_name_format)
job_wait = module.params['job_wait']
msg = idrac.log_mgr.lclog_export(lc_log_file, job_wait)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
index 3d3bddc03..94f605b46 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_lifecycle_controller_status_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -19,18 +19,19 @@ module: idrac_lifecycle_controller_status_info
short_description: Get the status of the Lifecycle Controller
version_added: "2.1.0"
description:
- - This module shows the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+ - This module shows the status of the Lifecycle Controller on a Dell PowerEdge server.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Rajeev Arakkal (@rajeevarakkal)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -52,7 +53,7 @@ msg:
type: str
sample: "Successfully fetched the lifecycle controller status."
lc_status_info:
- description: Displays the status of the Lifecycle Controller on a Dell EMC PowerEdge server.
+ description: Displays the status of the Lifecycle Controller on a Dell PowerEdge server.
returned: success
type: dict
sample: {
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
index 8f2930165..b03d0dc2c 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -136,13 +136,14 @@ options:
description: Enter the static IP subnet mask to iDRAC.
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -150,9 +151,9 @@ EXAMPLES = """
---
- name: Configure iDRAC network settings
dellemc.openmanage.idrac_network:
- idrac_ip: "192.168.0.1"
+ idrac_ip: "192.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
register_idrac_on_dns: Enabled
dns_idrac_name: None
@@ -246,10 +247,8 @@ try:
Selection_NICTypes, Failover_NICTypes,
AutoDetect_NICTypes, Autoneg_NICTypes,
Speed_NICTypes, Duplex_NICTypes, DHCPEnable_IPv4Types,
- DNSFromDHCP_IPv4Types, Enable_IPv4Types,
- DNSFromDHCP_IPv4StaticTypes)
+ Enable_IPv4Types, DNSFromDHCP_IPv4StaticTypes)
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py
new file mode 100644
index 000000000..0103570be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_network_attributes.py
@@ -0,0 +1,748 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_network_attributes
+short_description: Configures the iDRAC network attributes
+version_added: "8.4.0"
+description:
+ - This module allows you to configure the port and partition network attributes on the network interface cards.
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ network_adapter_id:
+ type: str
+ required: true
+ description:
+ - FQDD of the network adapter device that represents the physical network adapter capable of connecting to a computer network.
+    - An example of FQDD of the network adapter is 'NIC.Mezzanine.1A'.
+ network_device_function_id:
+ type: str
+ required: true
+ description:
+ - FQDD of the network adapter device function that represents a logical interface exposed by the network adapter.
+    - An example of FQDD of the network adapter device function is 'NIC.Mezzanine.1A-1-1'.
+ network_attributes:
+ type: dict
+ description:
+ - "Dictionary of network attributes and value. To view the list of attributes and its structure, see the below API
+ U(https://I(idrac_ip)/redfish/v1/Systems/System.Embedded.1/NetworkAdapters/<network_id>/NetworkDeviceFunctions/
+ <network_port_id>/Settings) and U(https://<idrac_ip>/redfish/v1/Schemas/NetworkDeviceFunction.v1_8_0.json)."
+ - I(network_attributes) is mutually exclusive with I(oem_network_attributes).
+ oem_network_attributes:
+ type: dict
+ description:
+ - "The attributes must be part of the Integrated Dell Remote Access Controller Attribute Registry.
+        To view the list of attributes in the Attribute Registry for iDRAC9 and newer versions,
+        see U(https://I(idrac_ip)/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/<network_id>/NetworkDeviceFunctions/
+ <network_port_id>/Oem/Dell/DellNetworkAttributes/<network_port_id>)
+ and U(https://I(idrac_ip)/redfish/v1/Registries/NetworkAttributesRegistry_<network_port_id>/
+ NetworkAttributesRegistry_network_port_id.json)."
+ - For iDRAC8 based servers, derive the network attribute name from Server Configuration Profile.
+ - I(oem_network_attributes) is mutually exclusive with I(network_attributes).
+ resource_id:
+ type: str
+ description:
+ - Id of the resource.
+ - If the value for resource ID is not provided, the module picks the first resource ID available from the list of system resources returned by the iDRAC.
+ clear_pending:
+ type: bool
+ default: false
+ description:
+ - This parameter allows you to clear all the pending OEM network attributes changes.
+ - C(false) does not perform any operation.
+ - C(true) discards any pending changes to network attributes, or if a job is in scheduled state, removes the job.
+    - The I(apply_time) value is ignored and has no impact on the I(clear_pending) operation.
+ - This operation is not supported for iDRAC8.
+ apply_time:
+ type: str
+ required: true
+ description:
+ - Apply time of the I(network_attributes) and I(oem_network_attributes).
+ - This is applicable only to I(network_attributes) and I(oem_network_attributes).
+    - C(Immediate) allows the user to immediately reboot the host and apply the changes. This is
+      applicable only for I(oem_network_attributes), and I(job_wait) is applicable in this case.
+ - C(OnReset) allows the user to apply the changes on the next reboot of the host server.
+ - C(AtMaintenanceWindowStart) allows the user to apply at the start of a maintenance window as specified
+ in I(maintenance_window). A reboot job is scheduled.
+ - C(InMaintenanceWindowOnReset) allows to apply after a manual reset but within the maintenance window as
+ specified in I(maintenance_window).
+    - This is not applicable for iDRAC8; the value is ignored and has no impact when configuring I(oem_network_attributes).
+ choices: [Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset]
+ maintenance_window:
+ type: dict
+ description:
+ - This option allows you to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ suboptions:
+ start_time:
+ type: str
+ required: true
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - "The format is YYYY-MM-DDThh:mm:ss<offset>"
+ - "<offset> is the time offset from UTC that the current timezone set in
+ iDRAC in the format: +05:30 for IST."
+ duration:
+ type: int
+ required: true
+ description:
+ - The duration in seconds for the maintenance window.
+ job_wait:
+ type: bool
+ default: true
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable when I(apply_time) is C(Immediate) for I(oem_network_attributes).
+ job_wait_timeout:
+ type: int
+ default: 1200
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Abhishek Sinha(@ABHISHEK-SINHA10)"
+notes:
+ - Run this module from a system that has direct access to Dell iDRAC.
+ - This module supports both IPv4 and IPv6 address.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Configure OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: "NIC.Integrated.1"
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+
+- name: Configure OEM network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: OnReset
+
+- name: Configure OEM network attributes to apply at maintenance window
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+- name: Clearing the pending attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+
+- name: Clearing the OEM pending attributes and apply the OEM network attributes
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ clear_pending: true
+ oem_network_attributes:
+ BannerMessageTimeout: "4"
+
+- name: Configure OEM network attributes and wait for the job
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: "Immediate"
+ oem_network_attributes:
+ LnkSpeed: "10MbpsHalf"
+ WakeOnLan: "Enabled"
+ VLanMode: "Enabled"
+ job_wait: true
+ job_wait_timeout: 2000
+
+- name: Configure redfish network attributes to update fiber channel on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ apply_time: OnReset
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+
+- name: Configure redfish network attributes to apply on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: true
+ apply_time: OnReset
+
+- name: Configure redfish network attributes of iSCSI to apply at maintenance window start
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ iSCSIBoot:
+ InitiatorIPAddress: 1.0.0.1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+
+- name: Configure redfish network attributes to apply at maintenance window on reset
+ dellemc.openmanage.idrac_network_attributes:
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
+    network_adapter_id: NIC.Integrated.1
+    network_device_function_id: "NIC.Integrated.1-1-1"
+ network_attributes:
+ Ethernet:
+ VLAN:
+ VLANEnable: false
+ VLANId: 1
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of the attribute update operation.
+ returned: when network attributes is applied
+ type: str
+ sample: "Successfully updated the network attributes."
+invalid_attributes:
+ description: Dictionary of invalid attributes provided that cannot be applied.
+ returned: On invalid attributes or values
+ type: dict
+ sample: {
+ "IscsiInitiatorIpAddr": "Attribute is not valid.",
+ "IscsiInitiatorSubnet": "Attribute is not valid."
+ }
+job_status:
+ description: Returns the output for status of the job.
+ returned: always
+ type: dict
+ sample: {
+ "ActualRunningStartTime": null,
+ "ActualRunningStopTime": null,
+ "CompletionTime": null,
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_XXXXXXXXX",
+ "JobState": "Scheduled",
+ "JobType": "NICConfiguration",
+ "Message": "Task successfully scheduled.",
+ "MessageArgs": [],
+ "MessageId": "JCP001",
+ "Name": "Configure: NIC.Integrated.1-1-1",
+ "PercentComplete": 0,
+ "StartTime": "2023-08-07T06:21:24",
+ "TargetSettingsURI": null
+ }
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+import time
+from urllib.error import HTTPError, URLError
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.compat.version import LooseVersion
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import (
+ idrac_auth_params, iDRACRedfishAPI)
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import (
+ delete_job, get_current_time, get_dynamic_uri, get_idrac_firmware_version,
+ get_scheduled_job_resp, remove_key, validate_and_get_first_resource_id_uri,
+ idrac_redfish_job_tracking, xml_data_conversion)
+
+REGISTRY_URI = '/redfish/v1/Registries'
+SYSTEMS_URI = "/redfish/v1/Systems"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+
+SUCCESS_MSG = "Successfully updated the network attributes."
+SUCCESS_CLEAR_PENDING_ATTR_MSG = "Successfully cleared the pending network attributes."
+SCHEDULE_MSG = "Successfully scheduled the job for network attributes update."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The value for the `job_wait_timeout` parameter cannot be negative or zero."
+MAINTENACE_OFFSET_DIFF_MSG = "The maintenance time must be post-fixed with local offset to {0}."
+MAINTENACE_OFFSET_BEHIND_MSG = "The specified maintenance time window occurs in the past, provide a future time to schedule the maintenance window."
+APPLY_TIME_NOT_SUPPORTED_MSG = "Apply time {0} is not supported."
+INVALID_ATTR_MSG = "Unable to update the network attributes because invalid values are entered. " + \
+ "Enter the valid values for the network attributes and retry the operation."
+VALID_AND_INVALID_ATTR_MSG = "Successfully updated the network attributes for valid values. " + \
+ "Unable to update other attributes because invalid values are entered. Enter the valid values and retry the operation."
+NO_CHANGES_FOUND_MSG = "No changes found to be applied."
+CHANGES_FOUND_MSG = "Changes found to be applied."
+INVALID_ID_MSG = "Unable to complete the operation because " + \
+ "the value `{0}` for the input `{1}` parameter is invalid."
+JOB_RUNNING_CLEAR_PENDING_ATTR = "{0} Config job is running. Wait for the job to complete. Currently can not clear pending attributes."
+ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE = 'Attribute is not valid.'
+CLEAR_PENDING_NOT_SUPPORTED_WITHOUT_ATTR_IDRAC8 = "Clear pending is not supported."
+WAIT_TIMEOUT_MSG = "The job is not complete after {0} seconds."
+
+
+class IDRACNetworkAttributes:
+
+ def __init__(self, idrac, module):
+ self.module = module
+ self.idrac = idrac
+ self.redfish_uri = None
+ self.oem_uri = None
+
+ def __perform_validation_for_network_adapter_id(self):
+ odata = '@odata.id'
+ network_adapter_id = self.module.params.get('network_adapter_id')
+ network_adapter_id_uri, found_adapter = '', False
+ uri, error_msg = validate_and_get_first_resource_id_uri(
+ self.module, self.idrac, SYSTEMS_URI)
+ if error_msg:
+ self.module.exit_json(msg=error_msg, failed=True)
+ network_adapters = get_dynamic_uri(
+ self.idrac, uri, 'NetworkInterfaces')[odata]
+ network_adapter_list = get_dynamic_uri(
+ self.idrac, network_adapters, 'Members')
+ for each_adapter in network_adapter_list:
+ if network_adapter_id in each_adapter.get(odata):
+ found_adapter = True
+ network_adapter_id_uri = each_adapter.get(odata)
+ break
+ if not found_adapter:
+ self.module.exit_json(failed=True, msg=INVALID_ID_MSG.format(network_adapter_id,
+ 'network_adapter_id'))
+ return network_adapter_id_uri
+
+ def __perform_validation_for_network_device_function_id(self):
+ odata = '@odata.id'
+ network_device_function_id_uri, found_device = '', False
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ network_adapter_id_uri = self.__perform_validation_for_network_adapter_id()
+ network_devices = get_dynamic_uri(
+ self.idrac, network_adapter_id_uri, 'NetworkDeviceFunctions')[odata]
+ network_device_list = get_dynamic_uri(
+ self.idrac, network_devices, 'Members')
+ for each_device in network_device_list:
+ if network_device_function_id in each_device.get(odata):
+ found_device = True
+ network_device_function_id_uri = each_device.get(odata)
+ break
+ if not found_device:
+ self.module.exit_json(failed=True, msg=INVALID_ID_MSG.format(network_device_function_id,
+ 'network_device_function_id'))
+ return network_device_function_id_uri
+
+ def __get_registry_fw_less_than_6_more_than_3(self):
+ reg = {}
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ registry = get_dynamic_uri(self.idrac, REGISTRY_URI, 'Members')
+ for each_member in registry:
+ if network_device_function_id in each_member.get('@odata.id'):
+ location = get_dynamic_uri(
+ self.idrac, each_member.get('@odata.id'), 'Location')
+ if location:
+ uri = location[0].get('Uri')
+ attr = get_dynamic_uri(
+ self.idrac, uri, 'RegistryEntries').get('Attributes', {})
+ for each_attr in attr:
+ reg.update(
+ {each_attr['AttributeName']: each_attr['CurrentValue']})
+ break
+ return reg
+
+ def __validate_time(self, mtime):
+ curr_time, date_offset = get_current_time(self.idrac)
+ if not mtime.endswith(date_offset):
+ self.module.exit_json(
+ failed=True, msg=MAINTENACE_OFFSET_DIFF_MSG.format(date_offset))
+ if mtime < curr_time:
+ self.module.exit_json(
+ failed=True, msg=MAINTENACE_OFFSET_BEHIND_MSG)
+
+ def __get_redfish_apply_time(self, aplytm, rf_settings):
+ rf_set = {}
+ if rf_settings:
+ if aplytm not in rf_settings:
+ self.module.exit_json(
+ failed=True, msg=APPLY_TIME_NOT_SUPPORTED_MSG.format(aplytm))
+ elif 'Maintenance' in aplytm:
+ rf_set['ApplyTime'] = aplytm
+ m_win = self.module.params.get('maintenance_window')
+ self.__validate_time(m_win.get('start_time'))
+ rf_set['MaintenanceWindowStartTime'] = m_win.get('start_time')
+ rf_set['MaintenanceWindowDurationInSeconds'] = m_win.get(
+ 'duration')
+ else:
+ rf_set['ApplyTime'] = aplytm
+ return rf_set
+
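# --- Illustrative sketch (editorial, not part of the patch) ---
# Example of the dictionary __get_redfish_apply_time() returns for a
# maintenance-window apply time; the timestamp and duration are hypothetical.
apply_time_setting_example = {
    "ApplyTime": "AtMaintenanceWindowStart",
    "MaintenanceWindowStartTime": "2022-09-30T05:15:40-05:00",
    "MaintenanceWindowDurationInSeconds": 600,
}
# perform_operation() later attaches this under "@Redfish.SettingsApplyTime".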
+ def __get_registry_fw_less_than_3(self):
+ reg = {}
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ scp_response = self.idrac.export_scp(export_format="JSON", export_use="Default",
+ target="NIC", job_wait=True)
+ comp = scp_response.json_data.get("SystemConfiguration", {}).get("Components", {})
+ for each in comp:
+ if each.get('FQDD') == network_device_function_id:
+ for each_attr in each.get('Attributes'):
+ reg.update({each_attr['Name']: each_attr['Value']})
+ return reg
+
+ def get_current_server_registry(self):
+ reg = {}
+ oem_network_attributes = self.module.params.get(
+ 'oem_network_attributes')
+ network_attributes = self.module.params.get('network_attributes')
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ if oem_network_attributes:
+ if LooseVersion(firm_ver) >= '6.0':
+ reg = get_dynamic_uri(self.idrac, self.oem_uri, 'Attributes')
+ elif '3.0' < LooseVersion(firm_ver) < '6.0':
+ reg = self.__get_registry_fw_less_than_6_more_than_3()
+ else:
+ reg = self.__get_registry_fw_less_than_3()
+ if network_attributes: # For Redfish
+ resp = get_dynamic_uri(self.idrac, self.redfish_uri)
+ reg.update({'Ethernet': resp.get('Ethernet', {})})
+ reg.update({'FibreChannel': resp.get('FibreChannel', {})})
+ reg.update({'iSCSIBoot': resp.get('iSCSIBoot', {})})
+ return reg
+
+ def extract_error_msg(self, resp):
+ error_info = {}
+ if resp.body:
+ error = resp.json_data.get('error')
+ for each_dict_err in error.get("@Message.ExtendedInfo"):
+ key = each_dict_err.get('MessageArgs')[0]
+ msg = each_dict_err.get('Message')
+ if key not in error_info:
+ error_info.update({key: msg})
+ return error_info
+
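# --- Illustrative sketch (editorial, not part of the patch) ---
# Given a hypothetical @Message.ExtendedInfo error body like the one below,
# extract_error_msg() maps each attribute named in MessageArgs to its message.
error_body_example = {
    "error": {
        "@Message.ExtendedInfo": [
            {"MessageArgs": ["IscsiInitiatorIpAddr"], "Message": "Attribute is not valid."}
        ]
    }
}
extracted = {
    info["MessageArgs"][0]: info["Message"]
    for info in error_body_example["error"]["@Message.ExtendedInfo"]
}
# extracted -> {"IscsiInitiatorIpAddr": "Attribute is not valid."}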
+ def get_diff_between_current_and_module_input(self, module_attr, server_attr):
+ diff, invalid = 0, {}
+ if module_attr is None:
+ module_attr = {}
+ for each_attr in module_attr:
+ if each_attr in server_attr:
+ data_type = type(server_attr[each_attr])
+ if not isinstance(module_attr[each_attr], data_type):
+ diff += 1
+ elif isinstance(module_attr[each_attr], dict) and isinstance(server_attr[each_attr], dict):
+ tmp_diff, tmp_invalid = self.get_diff_between_current_and_module_input(
+ module_attr[each_attr], server_attr[each_attr])
+ diff += tmp_diff
+ invalid.update(tmp_invalid)
+ elif module_attr[each_attr] != server_attr[each_attr]:
+ diff += 1
+ elif each_attr not in server_attr:
+ invalid.update(
+ {each_attr: ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE})
+ return diff, invalid
+
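# --- Illustrative sketch (editorial, not part of the patch) ---
# How the recursive comparison above behaves for a hypothetical input: a value
# that differs from the server registry increments diff, while an attribute the
# server does not expose is reported back as invalid.
module_attr_example = {"Ethernet": {"VLAN": {"VLANEnable": True}}, "BogusAttr": 1}
server_attr_example = {"Ethernet": {"VLAN": {"VLANEnable": False, "VLANId": 1}}}
# Walking these by hand: VLANEnable differs          -> diff == 1
#                        BogusAttr missing on server -> invalid == {"BogusAttr": "Attribute is not valid."}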
+ def validate_job_timeout(self):
+ if self.module.params.get("job_wait") and self.module.params.get("job_wait_timeout") <= 0:
+ self.module.exit_json(
+ msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+
+ def apply_time(self, setting_uri):
+ resp = get_dynamic_uri(self.idrac, setting_uri, "@Redfish.Settings")
+ rf_settings = resp.get("SupportedApplyTimes", [])
+ apply_time = self.module.params.get('apply_time', {})
+ rf_set = self.__get_redfish_apply_time(apply_time, rf_settings)
+ return rf_set
+
+ def set_dynamic_base_uri_and_validate_ids(self):
+ network_device_function_id_uri = self.__perform_validation_for_network_device_function_id()
+ resp = get_dynamic_uri(self.idrac, network_device_function_id_uri)
+ self.oem_uri = resp.get('Links', {}).get('Oem', {}).get(
+ 'Dell', {}).get('DellNetworkAttributes', {}).get('@odata.id', {})
+ self.redfish_uri = network_device_function_id_uri
+
+
+class OEMNetworkAttributes(IDRACNetworkAttributes):
+ def __init__(self, idrac, module):
+ super().__init__(idrac, module)
+
+ def clear_pending(self):
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ oem_network_attributes = self.module.params.get(
+ 'oem_network_attributes')
+ if LooseVersion(firm_ver) < '3.0':
+ if oem_network_attributes:
+ return None
+ self.module.exit_json(
+ msg=CLEAR_PENDING_NOT_SUPPORTED_WITHOUT_ATTR_IDRAC8)
+ resp = get_dynamic_uri(self.idrac, self.oem_uri, '@Redfish.Settings')
+ settings_uri = resp.get('SettingsObject').get('@odata.id')
+ settings_uri_resp = get_dynamic_uri(self.idrac, settings_uri)
+ pending_attributes = settings_uri_resp.get('Attributes')
+ clear_pending_uri = settings_uri_resp.get('Actions').get(
+ '#DellManager.ClearPending').get('target')
+ if not pending_attributes and not oem_network_attributes:
+ self.module.exit_json(msg=NO_CHANGES_FOUND_MSG)
+ job_resp = get_scheduled_job_resp(self.idrac, 'NICConfiguration')
+ job_id, job_state = job_resp.get('Id'), job_resp.get('JobState')
+ if job_id:
+ if job_state in ["Running"]:
+ job_resp = remove_key(job_resp, regex_pattern='(.*?)@odata')
+ self.module.exit_json(failed=True, msg=JOB_RUNNING_CLEAR_PENDING_ATTR.format('NICConfiguration'),
+ job_status=job_resp)
+ elif job_state in ["Starting", "Scheduled", "Scheduling"]:
+ if self.module.check_mode and not oem_network_attributes:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ if not self.module.check_mode:
+ delete_job(self.idrac, job_id)
+ if self.module.check_mode and not oem_network_attributes:
+ self.module.exit_json(msg=CHANGES_FOUND_MSG, changed=True)
+ time.sleep(5)
+ settings_uri_resp = get_dynamic_uri(self.idrac, settings_uri)
+ pending_attributes = settings_uri_resp.get('Attributes')
+ if pending_attributes and not self.module.check_mode:
+ self.idrac.invoke_request(
+ clear_pending_uri, "POST", data="{}", dump=False)
+ if not oem_network_attributes:
+ self.module.exit_json(
+ msg=SUCCESS_CLEAR_PENDING_ATTR_MSG, changed=True)
+
+ def perform_operation(self):
+ oem_network_attributes = self.module.params.get(
+ 'oem_network_attributes')
+ network_device_function_id = self.module.params.get(
+ 'network_device_function_id')
+ apply_time = self.module.params.get('apply_time')
+ job_wait = self.module.params.get('job_wait')
+ invalid_attr = {}
+ firm_ver = get_idrac_firmware_version(self.idrac)
+ if LooseVersion(firm_ver) < '3.0':
+ root = """<SystemConfiguration>{0}</SystemConfiguration>"""
+ scp_payload = root.format(xml_data_conversion(
+ oem_network_attributes, network_device_function_id))
+ resp = self.idrac.import_scp(
+ import_buffer=scp_payload, target="NIC", job_wait=False)
+ else:
+ payload = {'Attributes': oem_network_attributes}
+ apply_time_setting = self.apply_time(self.oem_uri)
+ if apply_time_setting:
+ payload.update(
+ {"@Redfish.SettingsApplyTime": apply_time_setting})
+ patch_uri = get_dynamic_uri(self.idrac, self.oem_uri).get(
+ '@Redfish.Settings').get('SettingsObject').get('@odata.id')
+ resp = self.idrac.invoke_request(
+ method='PATCH', uri=patch_uri, data=payload)
+ job_wait = job_wait if apply_time == "Immediate" else False
+ invalid_attr = self.extract_error_msg(resp)
+ return resp, invalid_attr, job_wait
+
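# --- Illustrative sketch (editorial, not part of the patch) ---
# On iDRAC9 (firmware >= 3.0), perform_operation() PATCHes the pending-settings
# URI with a body shaped like the example below; the attribute and apply time
# are hypothetical. On iDRAC8 the same attributes are instead wrapped in a
# Server Configuration Profile XML buffer and pushed through import_scp().
oem_patch_payload_example = {
    "Attributes": {"BannerMessageTimeout": "4"},
    "@Redfish.SettingsApplyTime": {"ApplyTime": "Immediate"},
}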
+
+class NetworkAttributes(IDRACNetworkAttributes):
+ def __init__(self, idrac, module):
+ super().__init__(idrac, module)
+
+ def perform_operation(self):
+ updatable_fields = ['Ethernet', 'iSCSIBoot', 'FibreChannel']
+ network_attributes = self.module.params.get('network_attributes')
+ apply_time = self.module.params.get('apply_time')
+ job_wait = self.module.params.get('job_wait')
+ payload, invalid_attr = {}, {}
+ for each_attr in network_attributes:
+ if each_attr in updatable_fields:
+ payload.update({each_attr: network_attributes[each_attr]})
+ apply_time_setting = self.apply_time(self.redfish_uri)
+ if apply_time_setting:
+ payload.update({"@Redfish.SettingsApplyTime": apply_time_setting})
+ resp = get_dynamic_uri(self.idrac, self.redfish_uri)
+ patch_uri = resp.get(
+ "@Redfish.Settings", {}).get("SettingsObject", {}).get("@odata.id", {})
+ resp = self.idrac.invoke_request(
+ method='PATCH', uri=patch_uri, data=payload)
+ invalid_attr = self.extract_error_msg(resp)
+ job_wait = job_wait if apply_time == "Immediate" else False
+ return resp, invalid_attr, job_wait
+
+
+def perform_operation_for_main(idrac, module, obj, diff, _invalid_attr):
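+    # Apply the computed attribute changes, optionally track the resulting job, and exit with an appropriate status message.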
+ job_wait_timeout = module.params.get('job_wait_timeout')
+ if diff:
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND_MSG, changed=True,
+ invalid_attributes=_invalid_attr)
+ else:
+ job_resp, invalid_attr, job_wait = obj.perform_operation()
+ job_dict = {}
+ if (job_tracking_uri := job_resp.headers.get("Location")):
+ job_id = job_tracking_uri.split("/")[-1]
+ job_uri = iDRAC_JOB_URI.format(job_id=job_id)
+ if job_wait:
+ job_failed, msg, job_dict, wait_time = idrac_redfish_job_tracking(idrac, job_uri,
+ max_job_wait_sec=job_wait_timeout,
+ sleep_interval_secs=1)
+ job_dict = remove_key(job_dict,
+ regex_pattern='(.*?)@odata')
+ if int(wait_time) >= int(job_wait_timeout):
+ module.exit_json(msg=WAIT_TIMEOUT_MSG.format(
+ job_wait_timeout), changed=True, job_status=job_dict)
+ if job_failed:
+ module.fail_json(
+ msg=job_dict.get("Message"), invalid_attributes=invalid_attr, job_status=job_dict)
+ else:
+ job_resp = idrac.invoke_request(job_uri, 'GET')
+ job_dict = job_resp.json_data
+ job_dict = remove_key(job_dict,
+ regex_pattern='(.*?)@odata')
+
+ if job_dict.get('JobState') == "Completed":
+ firm_ver = get_idrac_firmware_version(idrac)
+ msg = SUCCESS_MSG if not invalid_attr else VALID_AND_INVALID_ATTR_MSG
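+                        # For SCP-based imports on older firmware, the result is reported through a message ID:
+                        # the module treats SYS053 as success, SYS055 as success with invalid attributes, and SYS067 as a failure.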
+ if LooseVersion(firm_ver) < '3.0' and isinstance(obj, OEMNetworkAttributes):
+ message_id = job_dict.get("MessageId")
+ if message_id == "SYS053":
+ module.exit_json(msg=msg, changed=True, job_status=job_dict)
+ elif message_id == "SYS055":
+ module.exit_json(
+ msg=VALID_AND_INVALID_ATTR_MSG, changed=True, job_status=job_dict)
+ elif message_id == "SYS067":
+ module.fail_json(msg=INVALID_ATTR_MSG,
+ job_status=job_dict)
+ else:
+ module.fail_json(msg=job_dict.get("Message"))
+ else:
+ msg = SCHEDULE_MSG
+ module.exit_json(msg=msg, invalid_attributes=invalid_attr,
+ job_status=job_dict, changed=True)
+ else:
+ if module.check_mode:
+ module.exit_json(msg=NO_CHANGES_FOUND_MSG,
+ invalid_attributes=_invalid_attr)
+        # When the user has given only invalid attributes, diff will be 0 and _invalid_attr will contain a dictionary,
+ elif _invalid_attr: # Expecting HTTP Error from server.
+ job_resp, invalid_attr, job_wait = obj.perform_operation()
+ module.exit_json(msg=NO_CHANGES_FOUND_MSG,
+ invalid_attributes=_invalid_attr)
+
+
+def main():
+ try:
+ specs = {
+ "network_adapter_id": {"type": 'str', "required": True},
+ "network_device_function_id": {"type": 'str', "required": True},
+ "network_attributes": {"type": 'dict'},
+ "oem_network_attributes": {"type": 'dict'},
+ "resource_id": {"type": 'str'},
+ "clear_pending": {"type": 'bool', "default": False},
+ "apply_time": {"type": 'str', "required": True,
+ "choices": ['Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset']},
+ "maintenance_window": {"type": 'dict',
+ "options": {"start_time": {"type": 'str', "required": True},
+ "duration": {"type": 'int', "required": True}}},
+ "job_wait": {"type": "bool", "default": True},
+ "job_wait_timeout": {"type": "int", "default": 1200}
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(argument_spec=specs,
+ mutually_exclusive=[
+ ('network_attributes', 'oem_network_attributes')],
+ required_if=[["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
+ ["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]],
+ supports_check_mode=True)
+ with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+ if module_attribute := module.params.get('network_attributes'):
+ network_attr_obj = NetworkAttributes(idrac, module)
+ else:
+ module_attribute = module.params.get('oem_network_attributes')
+ network_attr_obj = OEMNetworkAttributes(idrac, module)
+ network_attr_obj.set_dynamic_base_uri_and_validate_ids()
+ network_attr_obj.validate_job_timeout()
+ if module.params.get('clear_pending') and 'clear_pending' in dir(network_attr_obj):
+ network_attr_obj.clear_pending()
+ server_reg = network_attr_obj.get_current_server_registry()
+ diff, invalid_attr = network_attr_obj.get_diff_between_current_and_module_input(
+ module_attribute, server_reg)
+ perform_operation_for_main(idrac,
+ module, network_attr_obj, diff, invalid_attr)
+ except HTTPError as err:
+ filter_err = remove_key(json.load(err), regex_pattern='(.*?)@odata')
+ module.exit_json(msg=str(err), error_info=filter_err, failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
index 797534e39..f07d16868 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_os_deployment.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -22,7 +22,7 @@ extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
options:
share_name:
- required: True
+ required: true
description: CIFS or NFS Network share.
type: str
share_user:
@@ -34,7 +34,7 @@ options:
type: str
aliases: ['share_pwd']
iso_image:
- required: True
+ required: true
description: Network ISO name.
type: str
expose_duration:
@@ -44,12 +44,13 @@ options:
default: 1080
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module does not support C(check_mode).
'''
@@ -62,7 +63,7 @@ EXAMPLES = r'''
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.0:/nfsfileshare"
- iso_image: "unattended_os_image.iso"
+ iso_image: "unattended_os_image.iso"
expose_duration: 180
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
index a506e5ce2..40cc5768d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_redfish_storage_controller.py
@@ -3,8 +3,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -50,10 +50,10 @@ options:
- C(ChangePDStateToOnline) - To set the disk status to online. I(target) is required for this operation.
- C(ChangePDStateToOffline) - To set the disk status to offline. I(target) is required for this operation.
- C(LockVirtualDisk) - To encrypt the virtual disk. I(volume_id) is required for this operation.
+      - C(OnlineCapacityExpansion) - To expand the size of the virtual disk. I(volume_id), together with either I(target) or I(size), is required for this operation.
choices: [ResetConfig, AssignSpare, SetControllerKey, RemoveControllerKey, ReKey, UnassignSpare,
EnableControllerEncryption, BlinkTarget, UnBlinkTarget, ConvertToRAID, ConvertToNonRAID,
- ChangePDStateToOnline, ChangePDStateToOffline, LockVirtualDisk]
- default: AssignSpare
+ ChangePDStateToOnline, ChangePDStateToOffline, LockVirtualDisk, OnlineCapacityExpansion]
type: str
target:
description:
@@ -62,6 +62,7 @@ options:
C(ChangePDStateToOnline), C(ChangePDStateToOffline), C(ConvertToRAID), or C(ConvertToNonRAID).
- If I(volume_id) is not specified or empty, this physical drive will be
assigned as a global hot spare when I(command) is C(AssignSpare).
+ - When I(command) is C(OnlineCapacityExpansion), then I(target) is mutually exclusive with I(size).
- "Notes: Global or Dedicated hot spare can be assigned only once for a physical disk,
Re-assign cannot be done when I(command) is C(AssignSpare)."
type: list
@@ -81,6 +82,7 @@ options:
- Fully Qualified Device Descriptor (FQDD) of the storage controller. For example-'RAID.Slot.1-1'.
- This option is mandatory when I(command) is C(ResetConfig), C(SetControllerKey),
C(RemoveControllerKey), C(ReKey), or C(EnableControllerEncryption).
+ - This option is mandatory for I(attributes).
type: str
key:
description:
@@ -115,26 +117,78 @@ options:
choices: [LKM, SEKM]
default: LKM
type: str
+ size:
+ description:
+ - Capacity of the virtual disk to be expanded in MB.
+      - Check mode and idempotency are not supported for I(size).
+      - The expanded size must be at least 100 MB greater than the current size of the virtual disk.
+ - When I(command) is C(OnlineCapacityExpansion), then I(size) is mutually exclusive with I(target).
+ type: int
+ attributes:
+ type: dict
+ description:
+      - Dictionary of controller attribute and value pairs.
+      - This feature is only supported for iDRAC9 with firmware version 6.00.00.00 and above.
+      - I(controller_id) is required for this operation.
+      - I(apply_time) and I(maintenance_window) are applicable for I(attributes).
+ - I(attributes) is mutually exclusive with I(command).
+ - Use U(https://I(idrac_ip)/redfish/v1/Schemas/DellOemStorageController.json) to view the attributes.
+ apply_time:
+ type: str
+ description:
+ - Apply time of the I(attributes).
+ - This is applicable only to I(attributes).
+ - "C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait)
+ is applicable."
+ - C(OnReset) Allows the user to apply the changes on the next reboot of the host server.
+ - "C(AtMaintenanceWindowStart) Allows the user to apply at the start of a maintenance window as specified
+ in I(maintenance_window)."
+ - "C(InMaintenanceWindowOnReset) Allows to apply after a manual reset but within the maintenance window as
+ specified in I(maintenance_window)."
+ choices: [Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset]
+ default: Immediate
+ maintenance_window:
+ type: dict
+ description:
+ - Option to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ suboptions:
+ start_time:
+ type: str
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - "The format is YYYY-MM-DDThh:mm:ss<offset>"
+          - "<offset> is the time offset from UTC of the time zone currently set in
+            iDRAC, in the format +05:30 for IST."
+ required: true
+ duration:
+ type: int
+ description:
+ - The duration in seconds for the maintenance window.
+ default: 900
job_wait:
description:
- Provides the option if the module has to wait for the job to be completed.
+ - This is applicable for I(attributes) when I(apply_time) is C(Immediate).
type: bool
- default: False
+ default: false
job_wait_timeout:
description:
- The maximum wait time of job completion in seconds before the job tracking is stopped.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 120
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Jagadeesh N V (@jagadeeshnv)"
- "Felix Stephen (@felixs88)"
- "Husniya Hameed (@husniya_hameed)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- Run this module from a system that has direct access to Dell iDRAC.
- - This module always reports as changes found when C(ReKey), C(BlinkTarget), and C(UnBlinkTarget).
+ - This module is supported on iDRAC9.
+ - This module always reports as changes found when I(command) is C(ReKey), C(BlinkTarget), and C(UnBlinkTarget).
- This module supports C(check_mode).
'''
@@ -346,6 +400,60 @@ EXAMPLES = r'''
volume_id: "Disk.Virtual.0:RAID.SL.3-1"
tags:
- lock
+
+- name: Online Capacity Expansion of a volume using target
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ target:
+ - "Disk.Bay.2:Enclosure.Internal.0-0:RAID.Integrated.1-1"
+ tags:
+ - oce_target
+
+- name: Online Capacity Expansion of a volume using size
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "OnlineCapacityExpansion"
+ volume_id: "Disk.Virtual.0:RAID.Integrated.1-1"
+ size: 362785
+ tags:
+ - oce_size
+
+- name: Set controller attributes.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ ControllerMode: "HBA"
+ apply_time: "OnReset"
+ tags:
+ - controller-attribute
+
+- name: Configure controller attributes at Maintenance window
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ controller_id: "RAID.Slot.1-1"
+ attributes:
+ CheckConsistencyMode: Normal
+ CopybackMode: "Off"
+ LoadBalanceMode: Disabled
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 1200
'''
RETURN = r'''
@@ -425,6 +533,10 @@ CONTROLLER_URI = "/redfish/v1/Dell/Systems/{system_id}/Storage/DellController/{c
VOLUME_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Volumes"
PD_URI = "/redfish/v1/Systems/System.Embedded.1/Storage/{controller_id}/Drives/{drive_id}"
JOB_URI_OEM = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"
+CONTROLLERS_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Controllers/{controller_id}"
+MANAGER_URI = "/redfish/v1/Managers/iDRAC.Embedded.1"
+SETTINGS_URI = "/redfish/v1/Systems/{system_id}/Storage/{controller_id}/Controllers/{controller_id}/Settings"
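+# Minimum number of physical disks that must be added at a time for Online Capacity Expansion, per RAID level.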
+OCE_MIN_PD_RAID_MAPPING = {'RAID0': 1, 'RAID5': 1, 'RAID6': 1, 'RAID10': 2}
JOB_SUBMISSION = "Successfully submitted the job that performs the '{0}' operation."
JOB_COMPLETION = "Successfully performed the '{0}' operation."
@@ -432,8 +544,23 @@ CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
TARGET_ERR_MSG = "The Fully Qualified Device Descriptor (FQDD) of the target {0} must be only one."
PD_ERROR_MSG = "Unable to locate the physical disk with the ID: {0}"
+VD_ERROR_MSG = "Unable to locate the virtual disk with the ID: {0}"
ENCRYPT_ERR_MSG = "The storage controller '{0}' does not support encryption."
PHYSICAL_DISK_ERR = "Volume is not encryption capable."
+OCE_RAID_TYPE_ERR = "Online Capacity Expansion is not supported for {0} virtual disks."
+OCE_SIZE_100MB = "Minimum Online Capacity Expansion size must be greater than 100 MB of the current size {0}."
+OCE_TARGET_EMPTY = "Provided list of targets is empty."
+OCE_TARGET_RAID1_ERR = "Cannot add more than two disks to RAID1 virtual disk."
+UNSUPPORTED_APPLY_TIME = "Apply time {0} is not supported."
+MAINTENANCE_OFFSET = "The maintenance time must be post-fixed with local offset to {0}."
+MAINTENANCE_TIME = "The specified maintenance time window occurs in the past, " \
+ "provide a future time to schedule the maintenance window."
+HBA_MODE = "Other attributes cannot be updated when ControllerMode is provided as input."
+INVALID_ATTRIBUTES = "The following attributes are invalid: {0}"
+CONTROLLER_ID_REQUIRED = "controller_id is required to perform this operation."
+JOB_COMPLETION_ATTRIBUTES = "Successfully applied the controller attributes."
+JOB_SUBMISSION_ATTRIBUTES = "Successfully submitted the job that configures the controller attributes."
+ERR_MSG = "Unable to configure the controller attribute(s) settings."
def check_id_exists(module, redfish_obj, key, item_id, uri):
@@ -441,9 +568,9 @@ def check_id_exists(module, redfish_obj, key, item_id, uri):
try:
resp = redfish_obj.invoke_request("GET", uri.format(system_id=SYSTEM_ID, controller_id=item_id))
if not resp.success:
- module.fail_json(msg=msg)
+ module.exit_json(msg=msg, failed=True)
except HTTPError as err:
- module.fail_json(msg=msg, error_info=json.load(err))
+ module.exit_json(msg=msg, error_info=json.load(err), failed=True)
def ctrl_key(module, redfish_obj):
@@ -626,7 +753,7 @@ def target_identify_pattern(module, redfish_obj):
def lock_virtual_disk(module, redfish_obj):
- volume, command = module.params.get("volume_id"), module.params["command"]
+ volume = module.params.get("volume_id")
resp, job_uri, job_id = None, None, None
controller_id = volume[0].split(":")[-1]
check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
@@ -660,6 +787,68 @@ def lock_virtual_disk(module, redfish_obj):
return resp, job_uri, job_id
+def online_capacity_expansion(module, redfish_obj):
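+    # Validate the virtual disk, work out the disks or size to expand by, and submit the OnlineCapacityExpansion controller action.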
+ payload = None
+ volume_id = module.params.get("volume_id")
+ target = module.params.get("target")
+ size = module.params.get("size")
+ if not isinstance(volume_id, list):
+ volume_id = [volume_id]
+ if len(volume_id) != 1:
+ module.exit_json(msg=TARGET_ERR_MSG.format("virtual drive"), failed=True)
+
+ controller_id = volume_id[0].split(":")[-1]
+ volume_uri = VOLUME_URI + "/{volume_id}"
+ try:
+ volume_resp = redfish_obj.invoke_request("GET", volume_uri.format(system_id=SYSTEM_ID,
+ controller_id=controller_id,
+ volume_id=volume_id[0]))
+ except HTTPError:
+ module.exit_json(msg=VD_ERROR_MSG.format(volume_id[0]), failed=True)
+
+ try:
+ raid_type = volume_resp.json_data.get("RAIDType")
+ if raid_type in ['RAID50', 'RAID60']:
+ module.exit_json(msg=OCE_RAID_TYPE_ERR.format(raid_type), failed=True)
+
+ if target is not None:
+ if not target:
+ module.exit_json(msg=OCE_TARGET_EMPTY, failed=True)
+
+ if raid_type == 'RAID1':
+ module.fail_json(msg=OCE_TARGET_RAID1_ERR)
+
+ current_pd = []
+ links = volume_resp.json_data.get("Links")
+ if links:
+ for disk in volume_resp.json_data.get("Links").get("Drives"):
+ drive = disk["@odata.id"].split('/')[-1]
+ current_pd.append(drive)
+ drives_to_add = [each_drive for each_drive in target if each_drive not in current_pd]
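+            # Disks can be added only in multiples allowed by the RAID level (for example, in pairs for RAID10).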
+ if module.check_mode and drives_to_add and len(drives_to_add) % OCE_MIN_PD_RAID_MAPPING[raid_type] == 0:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif len(drives_to_add) == 0 or len(drives_to_add) % OCE_MIN_PD_RAID_MAPPING[raid_type] != 0:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ payload = {"TargetFQDD": volume_id[0], "PDArray": drives_to_add}
+
+ elif size:
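+            # The requested size must exceed the current virtual disk size by at least 100 MB.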
+ vd_size = volume_resp.json_data.get("CapacityBytes")
+ vd_size_MB = vd_size // (1024 * 1024)
+ if (size - vd_size_MB) < 100:
+ module.exit_json(msg=OCE_SIZE_100MB.format(vd_size_MB), failed=True)
+ payload = {"TargetFQDD": volume_id[0], "Size": size}
+
+ resp = redfish_obj.invoke_request("POST", RAID_ACTION_URI.format(system_id=SYSTEM_ID,
+ action="OnlineCapacityExpansion"),
+ data=payload)
+ job_uri = resp.headers.get("Location")
+ job_id = job_uri.split("/")[-1]
+ return resp, job_uri, job_id
+ except HTTPError as err:
+ err = json.load(err).get("error").get("@Message.ExtendedInfo", [{}])[0].get("Message")
+ module.exit_json(msg=err, failed=True)
+
+
def validate_inputs(module):
module_params = module.params
command = module_params.get("command")
@@ -689,13 +878,113 @@ def validate_inputs(module):
module.fail_json(msg=TARGET_ERR_MSG.format("physical disk"))
+def get_current_time(redfish_obj):
+ try:
+ resp = redfish_obj.invoke_request("GET", MANAGER_URI)
+ curr_time = resp.json_data.get("DateTime")
+ date_offset = resp.json_data.get("DateTimeLocalOffset")
+ except Exception:
+ return None, None
+ return curr_time, date_offset
+
+
+def validate_time(module, redfish_obj, mtime):
+ curr_time, date_offset = get_current_time(redfish_obj)
+ if not mtime.endswith(date_offset):
+ module.exit_json(failed=True, status_msg=MAINTENANCE_OFFSET.format(date_offset))
+ if mtime < curr_time:
+ module.exit_json(failed=True, status_msg=MAINTENANCE_TIME)
+
+
+def get_attributes(module, redfish_obj):
+ resp_data = {}
+ controller_id = module.params["controller_id"]
+ try:
+ resp = redfish_obj.invoke_request("GET", CONTROLLERS_URI.format(system_id=SYSTEM_ID,
+ controller_id=controller_id))
+ resp_data = resp.json_data
+ except HTTPError:
+ resp_data = {}
+ return resp_data
+
+
+def check_attr_exists(module, curr_attr, inp_attr):
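+    # Compare the requested attributes with the current controller attributes; fail on unknown attribute names
+    # and return only the attributes whose values differ.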
+ invalid_attr = []
+ pending_attr = {}
+ diff = 0
+ for each in inp_attr:
+ if each not in curr_attr.keys():
+ invalid_attr.append(each)
+ elif curr_attr[each] != inp_attr[each]:
+ diff = 1
+ pending_attr[each] = inp_attr[each]
+ if invalid_attr:
+ module.exit_json(msg=INVALID_ATTRIBUTES.format(invalid_attr), failed=True)
+ if diff and module.check_mode:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif not diff:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ return pending_attr
+
+
+def get_redfish_apply_time(module, redfish_obj, apply_time, time_settings):
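+    # Build the @Redfish.SettingsApplyTime payload; maintenance-window apply times are allowed only when the
+    # controller reports them in SupportedApplyTimes.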
+ time_set = {}
+ if time_settings:
+ if 'Maintenance' in apply_time:
+ if apply_time not in time_settings:
+ module.exit_json(failed=True, status_msg=UNSUPPORTED_APPLY_TIME.format(apply_time))
+ else:
+ time_set['ApplyTime'] = apply_time
+ m_win = module.params.get('maintenance_window')
+ validate_time(module, redfish_obj, m_win.get('start_time'))
+ time_set['MaintenanceWindowStartTime'] = m_win.get('start_time')
+ time_set['MaintenanceWindowDurationInSeconds'] = m_win.get('duration')
+ else:
+ time_set['ApplyTime'] = apply_time
+ return time_set
+
+
+def apply_attributes(module, redfish_obj, pending, time_settings):
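+    # PATCH the pending attributes, wrapped under Oem/Dell/DellStorageController, to the controller Settings URI
+    # and return the resulting job ID.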
+ payload = {"Oem": {"Dell": {"DellStorageController": pending}}}
+ apply_time = module.params.get('apply_time')
+ time_set = get_redfish_apply_time(module, redfish_obj, apply_time, time_settings)
+ if time_set:
+ payload["@Redfish.SettingsApplyTime"] = time_set
+ try:
+ resp = redfish_obj.invoke_request("PATCH", SETTINGS_URI.format(system_id=SYSTEM_ID,
+ controller_id=module.params["controller_id"]),
+ data=payload)
+ if resp.status_code == 202 and "error" in resp.json_data:
+ msg_err_id = resp.json_data.get("error").get("@Message.ExtendedInfo", [{}])[0].get("MessageId")
+ if "Created" not in msg_err_id:
+ module.exit_json(msg=ERR_MSG, error_info=resp.json_data, failed=True)
+ except HTTPError as err:
+ err = json.load(err).get("error")
+ module.exit_json(msg=ERR_MSG, error_info=err, failed=True)
+ job_id = resp.headers["Location"].split("/")[-1]
+ return job_id, time_set
+
+
+def set_attributes(module, redfish_obj):
+ resp_data = get_attributes(module, redfish_obj)
+ curr_attr = resp_data.get("Oem").get("Dell").get("DellStorageController")
+ inp_attr = module.params.get("attributes")
+ if inp_attr.get("ControllerMode") and len(inp_attr.keys()) > 1:
+ module.exit_json(msg=HBA_MODE, failed=True)
+ pending = check_attr_exists(module, curr_attr, inp_attr)
+ time_settings = resp_data.get("@Redfish.Settings", {}).get("SupportedApplyTimes", [])
+ job_id, time_set = apply_attributes(module, redfish_obj, pending, time_settings)
+ return job_id, time_set
+
+
def main():
specs = {
- "command": {"required": False, "default": "AssignSpare",
+ "attributes": {"type": 'dict'},
+ "command": {"required": False,
"choices": ["ResetConfig", "AssignSpare", "SetControllerKey", "RemoveControllerKey",
"ReKey", "UnassignSpare", "EnableControllerEncryption", "BlinkTarget",
"UnBlinkTarget", "ConvertToRAID", "ConvertToNonRAID", "ChangePDStateToOnline",
- "ChangePDStateToOffline", "LockVirtualDisk"]},
+ "ChangePDStateToOffline", "LockVirtualDisk", "OnlineCapacityExpansion"]},
"controller_id": {"required": False, "type": "str"},
"volume_id": {"required": False, "type": "list", "elements": "str"},
"target": {"required": False, "type": "list", "elements": "str", "aliases": ["drive_id"]},
@@ -703,12 +992,20 @@ def main():
"key_id": {"required": False, "type": "str"},
"old_key": {"required": False, "type": "str", "no_log": True},
"mode": {"required": False, "choices": ["LKM", "SEKM"], "default": "LKM"},
+ "apply_time": {"type": 'str', "default": 'Immediate',
+ "choices": ['Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset']},
+ "maintenance_window": {"type": 'dict',
+ "options": {"start_time": {"type": 'str', "required": True},
+ "duration": {"type": 'int', "required": False, "default": 900}}},
"job_wait": {"required": False, "type": "bool", "default": False},
- "job_wait_timeout": {"required": False, "type": "int", "default": 120}
+ "job_wait_timeout": {"required": False, "type": "int", "default": 120},
+ "size": {"required": False, "type": "int"}
}
specs.update(redfish_auth_params)
module = AnsibleModule(
argument_spec=specs,
+ mutually_exclusive=[('attributes', 'command'), ("target", "size")],
+ required_one_of=[('attributes', 'command')],
required_if=[
["command", "SetControllerKey", ["controller_id", "key", "key_id"]],
["command", "ReKey", ["controller_id", "mode"]], ["command", "ResetConfig", ["controller_id"]],
@@ -718,10 +1015,15 @@ def main():
["command", "UnBlinkTarget", ["target", "volume_id"], True], ["command", "ConvertToRAID", ["target"]],
["command", "ConvertToNonRAID", ["target"]], ["command", "ChangePDStateToOnline", ["target"]],
["command", "ChangePDStateToOffline", ["target"]],
- ["command", "LockVirtualDisk", ["volume_id"]]
+ ["command", "LockVirtualDisk", ["volume_id"]], ["command", "OnlineCapacityExpansion", ["volume_id"]],
+ ["command", "OnlineCapacityExpansion", ["target", "size"], True],
+ ["apply_time", "AtMaintenanceWindowStart", ("maintenance_window",)],
+ ["apply_time", "InMaintenanceWindowOnReset", ("maintenance_window",)]
],
supports_check_mode=True)
- validate_inputs(module)
+ if not bool(module.params["attributes"]):
+ validate_inputs(module)
try:
command = module.params["command"]
with Redfish(module.params, req_session=True) as redfish_obj:
@@ -742,6 +1044,33 @@ def main():
resp, job_uri, job_id = change_pd_status(module, redfish_obj)
elif command == "LockVirtualDisk":
resp, job_uri, job_id = lock_virtual_disk(module, redfish_obj)
+ elif command == "OnlineCapacityExpansion":
+ resp, job_uri, job_id = online_capacity_expansion(module, redfish_obj)
+
+ if module.params["attributes"]:
+ controller_id = module.params["controller_id"]
+ if controller_id is None:
+ module.exit_json(msg=CONTROLLER_ID_REQUIRED, failed=True)
+ check_id_exists(module, redfish_obj, "controller_id", controller_id, CONTROLLER_URI)
+ job_id, time_set = set_attributes(module, redfish_obj)
+ job_uri = JOB_URI_OEM.format(job_id=job_id)
+ if time_set["ApplyTime"] == "Immediate" and module.params["job_wait"]:
+ resp, msg = wait_for_job_completion(redfish_obj, job_uri, job_wait=module.params["job_wait"],
+ wait_timeout=module.params["job_wait_timeout"])
+ job_data = strip_substr_dict(resp.json_data)
+ if job_data["JobState"] == "Failed":
+ changed, failed = False, True
+ else:
+ changed, failed = True, False
+ module.exit_json(msg=JOB_COMPLETION_ATTRIBUTES, task={"id": job_id, "uri": job_uri},
+ status=job_data, changed=changed, failed=failed)
+ else:
+ resp, msg = wait_for_job_completion(redfish_obj, job_uri, job_wait=False,
+ wait_timeout=module.params["job_wait_timeout"])
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=JOB_SUBMISSION_ATTRIBUTES, task={"id": job_id, "uri": job_uri},
+ status=job_data)
+
oem_job_url = JOB_URI_OEM.format(job_id=job_id)
job_wait = module.params["job_wait"]
if job_wait:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
index 8de5ffc9f..2c28c9a5f 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_reset.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -27,12 +27,13 @@ extends_documentation_fragment:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
index 67a02c12e..bd7fe2c67 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_server_config_profile.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ short_description: Export or Import iDRAC Server Configuration Profile (SCP)
version_added: "2.1.0"
description:
- Export the Server Configuration Profile (SCP) from the iDRAC or import from a
- network share (CIFS, NFS, HTTP, HTTPS) or a local file.
+ network share (CIFS, NFS, HTTP, HTTPS) or a local path.
extends_documentation_fragment:
- dellemc.openmanage.idrac_auth_options
options:
@@ -35,13 +35,13 @@ options:
job_wait:
description: Whether to wait for job completion or not.
type: bool
- required: True
+ required: true
share_name:
description:
- Network share or local path.
- CIFS, NFS, HTTP, and HTTPS network share types are supported.
+ - I(share_name) is mutually exclusive with I(import_buffer).
type: str
- required: True
share_user:
description: Network share user in the format 'user@domain' or 'domain\\user' if user is
part of a domain else 'user'. This option is mandatory for CIFS Network Share.
@@ -59,14 +59,27 @@ options:
type: str
scp_components:
description:
- - If C(ALL), this module exports or imports all components configurations from SCP file.
- - If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.
- - If C(BIOS), this module exports or imports BIOS configuration from SCP file.
- - If C(NIC), this module exports or imports NIC configuration from SCP file.
- - If C(RAID), this module exports or imports RAID configuration from SCP file.
- type: str
- choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ - If C(ALL), this option exports or imports all components configurations from the SCP file.
+ - If C(IDRAC), this option exports or imports iDRAC configuration from the SCP file.
+ - If C(BIOS), this option exports or imports BIOS configuration from the SCP file.
+ - If C(NIC), this option exports or imports NIC configuration from the SCP file.
+ - If C(RAID), this option exports or imports RAID configuration from the SCP file.
+ - If C(FC), this option exports or imports FiberChannel configurations from the SCP file.
+ - If C(InfiniBand), this option exports or imports InfiniBand configuration from the SCP file.
+ - If C(SupportAssist), this option exports or imports SupportAssist configuration from the SCP file.
+ - If C(EventFilters), this option exports or imports EventFilters configuration from the SCP file.
+ - If C(System), this option exports or imports System configuration from the SCP file.
+      - If C(LifecycleController), this option exports or imports LifecycleController configuration from the SCP file.
+      - If C(AHCI), this option exports or imports AHCI configuration from the SCP file.
+      - If C(PCIeSSD), this option exports or imports PCIeSSD configuration from the SCP file.
+      - When I(command) is C(export) or C(import), I(target) with multiple components is supported only
+        on iDRAC9 with firmware 6.10.00.00 and above.
+ type: list
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID', 'FC', 'InfiniBand', 'SupportAssist',
+ 'EventFilters', 'System', 'LifecycleController', 'AHCI', 'PCIeSSD']
default: 'ALL'
+ elements: str
+ aliases: ['target']
shutdown_type:
description:
- This option is applicable for C(import) command.
@@ -90,22 +103,105 @@ options:
choices: ['JSON', 'XML']
default: 'XML'
export_use:
- description: Specify the type of server configuration profile (SCP) to be exported.
- This option is applicable for C(export) command.
+ description:
+ - Specify the type of Server Configuration Profile (SCP) to be exported.
+ - This option is applicable when I(command) is C(export).
+ - C(Default) Creates a non-destructive snapshot of the configuration.
+      - C(Replace) Replaces a server with another or restores the server's settings to a known baseline.
+      - C(Clone) Clones settings from one server to another server with an identical hardware setup.
+        All settings except the I/O identity are updated (for example, RAID settings will be reset). The settings
+        in this export will be destructive when uploaded to another system.
type: str
choices: ['Default', 'Clone', 'Replace']
default: 'Default'
+ version_added: 7.3.0
+ ignore_certificate_warning:
+ description:
+ - If C(ignore), it ignores the certificate warnings.
+ - If C(showerror), it shows the certificate warnings.
+ - I(ignore_certificate_warning) is considered only when I(share_name) is of type HTTPS and is
+ supported only on iDRAC9.
+ type: str
+ choices: [ignore, showerror]
+ default: ignore
+ version_added: 7.3.0
+ include_in_export:
+ description:
+ - This option is applicable when I(command) is C(export).
+ - If C(default), it exports the default Server Configuration Profile.
+ - If C(readonly), it exports the SCP with readonly attributes.
+ - If C(passwordhashvalues), it exports the SCP with password hash values.
+      - If C(customtelemetry), it exports the SCP with custom telemetry attributes; this is supported only on iDRAC9.
+ type: str
+ choices: [default, readonly, passwordhashvalues, customtelemetry]
+ default: default
+ version_added: 7.3.0
+ import_buffer:
+ description:
+      - Used to import the XML or JSON buffer input into the iDRAC.
+      - This option is applicable when I(command) is C(import) or C(preview).
+ - I(import_buffer) is mutually exclusive with I(share_name).
+ type: str
+ version_added: 7.3.0
+ proxy_support:
+ description:
+ - Proxy to be enabled or disabled.
+ - I(proxy_support) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: bool
+ default: false
+ version_added: 7.3.0
+ proxy_type:
+ description:
+ - C(http) to select HTTP type proxy.
+ - C(socks4) to select SOCKS4 type proxy.
+ - I(proxy_type) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ choices: [http, socks4]
+ default: http
+ version_added: 7.3.0
+ proxy_server:
+ description:
+ - I(proxy_server) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_server) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ version_added: 7.3.0
+ proxy_port:
+ description:
+ - Proxy port to authenticate.
+ - I(proxy_port) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_port) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ default: "80"
+ version_added: 7.3.0
+ proxy_username:
+ description:
+ - Proxy username to authenticate.
+ - I(proxy_username) is considered only when I(share_name) is of type HTTP or HTTPS
+ and is supported only on iDRAC9.
+ type: str
+ version_added: 7.3.0
+ proxy_password:
+ description:
+ - Proxy password to authenticate.
+ - I(proxy_password) is considered only when I(share_name) is of type HTTP or HTTPS
+ and is supported only on iDRAC9.
+ type: str
+ version_added: 7.3.0
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.14"
author:
- "Jagadeesh N V(@jagadeeshnv)"
- "Felix Stephen (@felixs88)"
+ - "Jennifer John (@Jennifer-John)"
+ - "Shivam Sharma (@ShivamSh3)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
- - To import Server Configuration Profile (SCP) on the iDRAC7 and iDRAC8-based servers,
+ - To import Server Configuration Profile (SCP) on the iDRAC8-based servers,
the servers must have iDRAC Enterprise license or later.
+ - For C(import) operation, C(check_mode) is supported only when I(target) is C(ALL).
+ - This module supports IPv4 and IPv6 addresses.
'''
EXAMPLES = r'''
@@ -117,11 +213,12 @@ EXAMPLES = r'''
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
- scp_components: IDRAC
+ scp_components:
+ - IDRAC
scp_file: example_file
export_format: JSON
export_use: Clone
- job_wait: True
+ job_wait: true
- name: Import SCP with IDRAC components in JSON format from a local path
dellemc.openmanage.idrac_server_config_profile:
@@ -131,11 +228,12 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
command: import
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.json
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: False
+ job_wait: false
- name: Export SCP with BIOS components in XML format to a NFS share path with auto-generated file name
dellemc.openmanage.idrac_server_config_profile:
@@ -144,10 +242,11 @@ EXAMPLES = r'''
idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
- scp_components: "BIOS"
+ scp_components:
+ - BIOS
export_format: XML
export_use: Default
- job_wait: True
+ job_wait: true
- name: Import SCP with BIOS components in XML format from a NFS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -157,11 +256,12 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
command: import
- scp_components: "BIOS"
+ scp_components:
+ - BIOS
scp_file: 192.168.0.1_20210618_162856.xml
shutdown_type: NoReboot
end_host_power_state: "Off"
- job_wait: False
+ job_wait: false
- name: Export SCP with RAID components in XML format to a CIFS share path with share user domain name
dellemc.openmanage.idrac_server_config_profile:
@@ -172,12 +272,12 @@ EXAMPLES = r'''
share_name: "\\\\192.168.0.2\\share"
share_user: share_username@domain
share_password: share_password
- share_mnt: /mnt/cifs
scp_file: example_file.xml
- scp_components: "RAID"
+ scp_components:
+ - RAID
export_format: XML
export_use: Default
- job_wait: True
+ job_wait: true
- name: Import SCP with RAID components in XML format from a CIFS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -188,13 +288,13 @@ EXAMPLES = r'''
share_name: "\\\\192.168.0.2\\share"
share_user: share_username
share_password: share_password
- share_mnt: /mnt/cifs
command: import
- scp_components: "RAID"
+ scp_components:
+ - RAID
scp_file: example_file.xml
shutdown_type: Forced
end_host_power_state: "On"
- job_wait: True
+ job_wait: true
- name: Export SCP with ALL components in JSON format to a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
@@ -206,9 +306,10 @@ EXAMPLES = r'''
share_user: share_username
share_password: share_password
scp_file: example_file.json
- scp_components: ALL
+ scp_components:
+ - ALL
export_format: JSON
- job_wait: False
+ job_wait: false
- name: Import SCP with ALL components in JSON format from a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
@@ -223,7 +324,7 @@ EXAMPLES = r'''
scp_file: example_file.json
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: True
+ job_wait: true
- name: Export SCP with ALL components in XML format to a HTTPS share path without SCP file name
dellemc.openmanage.idrac_server_config_profile:
@@ -234,10 +335,11 @@ EXAMPLES = r'''
share_name: "https://192.168.0.4/share"
share_user: share_username
share_password: share_password
- scp_components: ALL
+ scp_components:
+ - ALL
export_format: XML
export_use: Replace
- job_wait: True
+ job_wait: true
- name: Import SCP with ALL components in XML format from a HTTPS share path
dellemc.openmanage.idrac_server_config_profile:
@@ -252,9 +354,9 @@ EXAMPLES = r'''
scp_file: 192.168.0.1_20160618_164647.xml
shutdown_type: Graceful
end_host_power_state: "On"
- job_wait: False
+ job_wait: false
-- name: Preview SCP with ALL components in XML format from a CIFS share path
+- name: Preview SCP with IDRAC components in XML format from a CIFS share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -264,11 +366,12 @@ EXAMPLES = r'''
share_user: share_username
share_password: share_password
command: preview
- scp_components: "ALL"
+ scp_components:
+ - ALL
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
-- name: Preview SCP with ALL components in JSON format from a NFS share path
+- name: Preview SCP with IDRAC components in JSON format from a NFS share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -276,11 +379,12 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "192.168.0.2:/share"
command: preview
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
-- name: Preview SCP with ALL components in XML format from a HTTP share path
+- name: Preview SCP with IDRAC components in XML format from a HTTP share path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -290,11 +394,12 @@ EXAMPLES = r'''
share_user: share_username
share_password: share_password
command: preview
- scp_components: "ALL"
+ scp_components:
+ - ALL
scp_file: example_file.xml
- job_wait: True
+ job_wait: true
-- name: Preview SCP with ALL components in XML format from a local path
+- name: Preview SCP with IDRAC components in XML format from a local path
dellemc.openmanage.idrac_server_config_profile:
idrac_ip: "{{ idrac_ip }}"
idrac_user: "{{ idrac_user }}"
@@ -302,9 +407,72 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
share_name: "/scp_folder"
command: preview
- scp_components: "IDRAC"
+ scp_components:
+ - IDRAC
scp_file: example_file.json
- job_wait: False
+ job_wait: false
+
+- name: Import SCP with IDRAC components in XML format from the XML content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+
+- name: Export SCP with ALL components in XML format using HTTP proxy.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ scp_components:
+ - ALL
+ share_name: "http://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.5
+ proxy_port: 8080
+ proxy_username: proxy_username
+ proxy_password: proxy_password
+ proxy_type: http
+ include_in_export: passwordhashvalues
+ job_wait: true
+
+- name: Import SCP with IDRAC and BIOS components in XML format using SOCKS4 proxy
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ - BIOS
+ share_name: "https://192.168.0.1/http-share"
+ proxy_support: true
+ proxy_server: 192.168.0.6
+ proxy_port: 8080
+ proxy_type: socks4
+ scp_file: filename.xml
+ job_wait: true
+
+- name: Import SCP with IDRAC components in JSON format from the JSON content.
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "/path/to/ca_cert.pem"
+ command: import
+ scp_components:
+ - IDRAC
+ job_wait: true
+ import_buffer: "{\"SystemConfiguration\": {\"Components\": [{\"FQDD\": \"iDRAC.Embedded.1\",\"Attributes\":
+ [{\"Name\": \"SNMP.1#AgentCommunity\",\"Value\": \"public1\"}]}]}}"
'''
RETURN = r'''
@@ -357,13 +525,12 @@ error_info:
import os
import json
-import re
-import copy
from datetime import datetime
from os.path import exists
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
-from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import idrac_redfish_job_tracking, \
+ strip_substr_dict
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.parse import urlparse
@@ -374,6 +541,21 @@ CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
INVALID_FILE = "Invalid file path provided."
JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/Jobs/{job_id}"
+IGNORE_WARNING = {"ignore": "Enabled", "showerror": "Disabled"}
+IN_EXPORTS = {"default": "Default", "readonly": "IncludeReadOnly", "passwordhashvalues": "IncludePasswordHashValues",
+ "customtelemetry": "IncludeCustomTelemetry"}
+SCP_ALL_ERR_MSG = "The option ALL cannot be used with options IDRAC, BIOS, NIC, or RAID."
+MUTUALLY_EXCLUSIVE = "import_buffer is mutually exclusive with {0}."
+PROXY_ERR_MSG = "proxy_support is enabled but all of the following are missing: proxy_server"
+iDRAC_JOB_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/{job_id}"
+FAIL_MSG = "Failed to {0} scp."
+TARGET_INVALID_MSG = "Unable to {command} the {invalid_targets} from the SCP file\
+ because the values {invalid_targets} are invalid.\
+ The valid values are {valid_targets}. Enter the valid values and retry the operation."
+DOMAIN_LIST = ["\\", "@"]
+ERROR_CODES = ["SYS041", "SYS044", "SYS045", "SYS046", "SYS047", "SYS048", "SYS050", "SYS051", "SYS062",
+ "SYS063", "SYS064", "SYS065", "SYS067", "SYS068", "SYS070", "SYS071", "SYS072",
+ "SYS073", "SYS075", "SYS076", "SYS077", "SYS078", "SYS079", "SYS080"]
def get_scp_file_format(module):
@@ -394,7 +576,8 @@ def get_scp_file_format(module):
def response_format_change(response, params, file_name):
resp = {}
if params["job_wait"]:
- response = response.json_data
+ if hasattr(response, "json_data"):
+ response = response.json_data
response.pop("Description", None)
response.pop("Name", None)
response.pop("EndTime", None)
@@ -404,8 +587,7 @@ def response_format_change(response, params, file_name):
if response.get("Oem") is not None:
response.update(response["Oem"]["Dell"])
response.pop("Oem", None)
- sep = "/" if "/" in params["share_name"] else "\\"
- response["file"] = "{0}{1}{2}".format(params["share_name"], sep, file_name)
+ response = get_file(params, response, file_name)
response["retval"] = True
else:
location = response.headers.get("Location")
@@ -417,13 +599,41 @@ def response_format_change(response, params, file_name):
resp["Status"] = "Success"
resp["Message"] = "none"
resp["StatusCode"] = response.status_code
- sep = "/" if "/" in params["share_name"] else "\\"
- resp["file"] = "{0}{1}{2}".format(params["share_name"], sep, file_name)
+ resp = get_file(params, resp, file_name)
resp["retval"] = True
response = resp
return response
+def get_file(params, response, file_name):
+ if params.get("share_name") is not None:
+ sep = "/" if "/" in params.get("share_name") else "\\"
+ response["file"] = "{0}{1}{2}".format(params.get("share_name"), sep, file_name)
+ return response
+
+
+def get_proxy_share(module):
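+    # Collect the proxy_* module options into the share payload; fail if proxy support is enabled without a proxy server.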
+ proxy_share = {}
+ proxy_support = module.params.get("proxy_support")
+ proxy_type = module.params["proxy_type"]
+ proxy_server = module.params.get("proxy_server")
+ proxy_port = module.params["proxy_port"]
+ proxy_username = module.params.get("proxy_username")
+ proxy_password = module.params.get("proxy_password")
+ if proxy_support is True and proxy_server is None:
+ module.fail_json(msg=PROXY_ERR_MSG)
+ if proxy_support is True:
+ proxy_share["proxy_server"] = proxy_server
+ proxy_share["proxy_username"] = proxy_username
+ proxy_share["proxy_password"] = proxy_password
+ proxy_share["proxy_port"] = proxy_port
+ proxy_share["proxy_type"] = proxy_type.upper()
+ proxy_share["proxy_support"] = "Enabled"
+ else:
+ proxy_share["proxy_support"] = "Disabled"
+ return proxy_share
+
+
def run_export_import_scp_http(idrac, module):
share_url = urlparse(module.params["share_name"])
share = {}
@@ -435,40 +645,69 @@ def run_export_import_scp_http(idrac, module):
scp_file_name_format = scp_file
share["username"] = module.params.get("share_user")
share["password"] = module.params.get("share_password")
+ scp_target = ",".join(module.params["scp_components"])
command = module.params["command"]
+ if share["share_type"] == "HTTPS":
+ share["ignore_certificate_warning"] = IGNORE_WARNING[module.params["ignore_certificate_warning"]]
if command == "import":
- scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
- host_powerstate=module.params["end_host_power_state"],
- job_wait=module.params["job_wait"],
- target=module.params["scp_components"], share=share, )
+ perform_check_mode(module, idrac)
+ if share["share_type"] in ["HTTP", "HTTPS"]:
+ proxy_share = get_proxy_share(module)
+ share.update(proxy_share)
+ idrac_import_scp_params = {
+ "target": scp_target, "share": share, "job_wait": module.params["job_wait"],
+ "host_powerstate": module.params["end_host_power_state"], "shutdown_type": module.params["shutdown_type"]
+ }
+ scp_response = idrac.import_scp_share(**idrac_import_scp_params)
+ scp_response = wait_for_job_tracking_redfish(module, idrac, scp_response)
elif command == "export":
scp_file_name_format = get_scp_file_format(module)
share["file_name"] = scp_file_name_format
+ include_in_export = IN_EXPORTS[module.params["include_in_export"]]
+ if share["share_type"] in ["HTTP", "HTTPS"]:
+ proxy_share = get_proxy_share(module)
+ share.update(proxy_share)
scp_response = idrac.export_scp(export_format=module.params["export_format"],
export_use=module.params["export_use"],
- target=module.params["scp_components"],
- job_wait=module.params["job_wait"], share=share, )
+ target=scp_target,
+ job_wait=False, share=share, # Hardcoding it as false because job tracking is done in idrac_redfish.py as well.
+ include_in_export=include_in_export)
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response
+ )
scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
+ exit_on_failure(module, scp_response, command)
return scp_response
+def perform_check_mode(module, idrac, http_share=True):
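+    # In check mode, run an import preview and map the preview message ID to changed (SYS081/SYS082) or unchanged (SYS069).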
+ if module.check_mode:
+ module.params["job_wait"] = True
+ scp_resp = preview_scp_redfish(module, idrac, http_share, import_job_wait=True)
+ if "SYS081" in scp_resp["MessageId"] or "SYS082" in scp_resp["MessageId"]:
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif "SYS069" in scp_resp["MessageId"]:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ else:
+ module.fail_json(msg=scp_resp)
+
+
def get_scp_share_details(module):
share_name = module.params.get("share_name")
command = module.params["command"]
scp_file_name_format = get_scp_file_format(module)
- if ":" in share_name:
- nfs_split = share_name.split(":")
- share = {"share_ip": nfs_split[0], "share_name": nfs_split[1], "share_type": "NFS"}
+ if ":/" in share_name:
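+        # NFS shares are specified as host:/path; split on the first ':/' to separate the host from the exported path.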
+ nfs_split = share_name.split(":/", 1)
+ share = {"share_ip": nfs_split[0], "share_name": "/{0}".format(nfs_split[1]), "share_type": "NFS"}
if command == "export":
share["file_name"] = scp_file_name_format
elif "\\" in share_name:
- ip_pattern = re.compile(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}")
- share_path = re.split(ip_pattern, share_name)
- share_ip = re.findall(ip_pattern, share_name)
- share_path_name = "\\".join(list(filter(None, share_path[-1].split("\\"))))
- share = {"share_ip": share_ip[0], "share_name": share_path_name, "share_type": "CIFS",
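+        # CIFS shares are specified as \\host\share; the third element of the backslash split is the host and the
+        # remainder is the share path. A local-domain prefix is added to share_user when no domain is given.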
+ cifs_share = share_name.split("\\", 3)
+ share_ip = cifs_share[2]
+ share_path_name = cifs_share[-1]
+ if not any(domain in module.params.get("share_user") for domain in DOMAIN_LIST):
+ module.params["share_user"] = ".\\{0}".format(module.params.get("share_user"))
+ share = {"share_ip": share_ip, "share_name": share_path_name, "share_type": "CIFS",
"username": module.params.get("share_user"), "password": module.params.get("share_password")}
if command == "export":
share["file_name"] = scp_file_name_format
@@ -482,20 +721,24 @@ def get_scp_share_details(module):
def export_scp_redfish(module, idrac):
command = module.params["command"]
share, scp_file_name_format = get_scp_share_details(module)
+ scp_components = ",".join(module.params["scp_components"])
+ include_in_export = IN_EXPORTS[module.params["include_in_export"]]
if share["share_type"] == "LOCAL":
scp_response = idrac.export_scp(export_format=module.params["export_format"],
export_use=module.params["export_use"],
- target=module.params["scp_components"],
+ target=scp_components, include_in_export=include_in_export,
job_wait=False, share=share, )
scp_response = wait_for_response(scp_response, module, share, idrac)
else:
scp_response = idrac.export_scp(export_format=module.params["export_format"],
export_use=module.params["export_use"],
- target=module.params["scp_components"],
- job_wait=module.params["job_wait"], share=share, )
+ target=scp_components, include_in_export=include_in_export,
+ job_wait=False, share=share, ) # Assigning it as false because job tracking is done in idrac_redfish.py as well.
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response
+ )
scp_response = response_format_change(scp_response, module.params, scp_file_name_format)
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
+ exit_on_failure(module, scp_response, command)
return scp_response
@@ -516,136 +759,198 @@ def wait_for_response(scp_resp, module, share, idrac):
def preview_scp_redfish(module, idrac, http_share, import_job_wait=False):
+ import_buffer = module.params.get("import_buffer")
command = module.params["command"]
- scp_target = module.params["scp_components"]
+ scp_targets = 'ALL' # Assigning it as ALL because it is the only target for preview.
job_wait_option = module.params["job_wait"]
if command == "import":
job_wait_option = import_job_wait
- if http_share:
- share_url = urlparse(module.params["share_name"])
- share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
- "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
- "username": module.params.get("share_user"), "password": module.params.get("share_password")}
+ share = {}
+ if not import_buffer:
+ if http_share:
+ share_url = urlparse(module.params["share_name"])
+ share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
+ "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
+ "username": module.params.get("share_user"), "password": module.params.get("share_password")}
+ if http_share == "HTTPS":
+ share["ignore_certificate_warning"] = IGNORE_WARNING[module.params["ignore_certificate_warning"]]
+ else:
+ share, _scp_file_name_format = get_scp_share_details(module)
+ share["file_name"] = module.params.get("scp_file")
+ buffer_text = get_buffer_text(module, share)
+ scp_response = idrac.import_preview(import_buffer=buffer_text, target=scp_targets,
+ share=share, job_wait=False) # Assigning it as false because job tracking is done in idrac_redfish.py as well
+ scp_response = wait_for_job_tracking_redfish(
+ module, idrac, scp_response)
else:
- share, scp_file_name_format = get_scp_share_details(module)
- share["file_name"] = module.params.get("scp_file")
+ scp_response = idrac.import_preview(import_buffer=import_buffer, target=scp_targets, job_wait=job_wait_option)
+ scp_response = response_format_change(scp_response, module.params, share.get("file_name"))
+ exit_on_failure(module, scp_response, command)
+ return scp_response
+
+
+def exit_on_failure(module, scp_response, command):
+ if isinstance(scp_response, dict) and (scp_response.get("TaskStatus") == "Critical" or
+ scp_response.get("JobState") in ("Failed", "CompletedWithErrors")):
+ module.fail_json(msg=FAIL_MSG.format(command), scp_status=scp_response)
+
+
+def get_buffer_text(module, share):
buffer_text = None
if share["share_type"] == "LOCAL":
- scp_target = "ALL"
file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
if not exists(file_path):
module.fail_json(msg=INVALID_FILE)
with open(file_path, "r") as file_obj:
buffer_text = file_obj.read()
- scp_response = idrac.import_preview(import_buffer=buffer_text, target=scp_target,
- share=share, job_wait=job_wait_option)
- scp_response = response_format_change(scp_response, module.params, share["file_name"])
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
- return scp_response
+ return buffer_text
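A self-contained sketch of the LOCAL-share branch of get_buffer_text above, with a hypothetical file path (illustrative only):

# Illustrative sketch: read a local SCP file into a string buffer.
import os
from os.path import exists

share_name, file_name = "/tmp", "scp_export.xml"             # hypothetical values
file_path = "{0}{1}{2}".format(share_name, os.sep, file_name)
buffer_text = None
if exists(file_path):
    with open(file_path, "r") as file_obj:
        buffer_text = file_obj.read()
print(buffer_text is not None)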
def import_scp_redfish(module, idrac, http_share):
+ import_buffer = module.params.get("import_buffer")
command = module.params["command"]
- scp_target = module.params["scp_components"]
- job_wait = copy.copy(module.params["job_wait"])
- if module.check_mode:
- module.params["job_wait"] = True
- scp_resp = preview_scp_redfish(module, idrac, http_share, import_job_wait=True)
- if "SYS081" in scp_resp["MessageId"] or "SYS082" in scp_resp["MessageId"]:
- module.exit_json(msg=CHANGES_FOUND, changed=True)
- else:
- module.fail_json(msg=scp_resp)
- if http_share:
- share_url = urlparse(module.params["share_name"])
- share = {"share_ip": share_url.netloc, "share_name": share_url.path.strip('/'),
- "share_type": share_url.scheme.upper(), "file_name": module.params.get("scp_file"),
- "username": module.params.get("share_user"), "password": module.params.get("share_password")}
- else:
- share, scp_file_name_format = get_scp_share_details(module)
+ scp_targets = ",".join(module.params["scp_components"])
+ perform_check_mode(module, idrac, http_share)
+ share = {}
+ if not import_buffer:
+ share, _scp_file_name_format = get_scp_share_details(module)
share["file_name"] = module.params.get("scp_file")
- buffer_text = None
- share_dict = share
- if share["share_type"] == "LOCAL":
- scp_target = "ALL"
- file_path = "{0}{1}{2}".format(share["share_name"], os.sep, share["file_name"])
- if not exists(file_path):
- module.fail_json(msg=INVALID_FILE)
- with open(file_path, "r") as file_obj:
- buffer_text = file_obj.read()
- share_dict = {}
- module.params["job_wait"] = job_wait
- scp_response = idrac.import_scp_share(shutdown_type=module.params["shutdown_type"],
- host_powerstate=module.params["end_host_power_state"],
- job_wait=module.params["job_wait"],
- target=scp_target,
- import_buffer=buffer_text, share=share_dict, )
- scp_response = response_format_change(scp_response, module.params, share["file_name"])
- if isinstance(scp_response, dict) and scp_response.get("TaskStatus") == "Critical":
- module.fail_json(msg="Failed to {0} scp.".format(command), scp_status=scp_response)
+ buffer_text = get_buffer_text(module, share)
+ share_dict = share
+ if share["share_type"] == "LOCAL":
+ share_dict = {}
+ idrac_import_scp_params = {
+ "import_buffer": buffer_text, "target": scp_targets, "share": share_dict, "job_wait": module.params["job_wait"],
+ "host_powerstate": module.params["end_host_power_state"], "shutdown_type": module.params["shutdown_type"]
+ }
+ scp_response = idrac.import_scp_share(**idrac_import_scp_params)
+ scp_response = wait_for_job_tracking_redfish(module, idrac, scp_response)
+ else:
+ scp_response = idrac.import_scp(import_buffer=import_buffer, target=scp_targets, job_wait=module.params["job_wait"])
+ scp_response = response_format_change(scp_response, module.params, share.get("file_name"))
+ exit_on_failure(module, scp_response, command)
return scp_response
-def main():
- specs = {
- "command": {"required": False, "type": 'str',
- "choices": ['export', 'import', 'preview'], "default": 'export'},
- "job_wait": {"required": True, "type": 'bool'},
- "share_name": {"required": True, "type": 'str'},
- "share_user": {"required": False, "type": 'str'},
- "share_password": {"required": False, "type": 'str',
- "aliases": ['share_pwd'], "no_log": True},
- "scp_components": {"required": False,
- "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'],
- "default": 'ALL'},
- "scp_file": {"required": False, "type": 'str'},
- "shutdown_type": {"required": False,
- "choices": ['Graceful', 'Forced', 'NoReboot'],
- "default": 'Graceful'},
- "end_host_power_state": {"required": False,
- "choices": ['On', 'Off'],
- "default": 'On'},
- "export_format": {"required": False, "type": 'str',
- "choices": ['JSON', 'XML'], "default": 'XML'},
- "export_use": {"required": False, "type": 'str',
- "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'}
+def wait_for_job_tracking_redfish(module, idrac, scp_response):
+ job_id = scp_response.headers["Location"].split("/")[-1]
+ if module.params["job_wait"]:
+ job_failed, _msg, job_dict, _wait_time = idrac_redfish_job_tracking(
+ idrac, iDRAC_JOB_URI.format(job_id=job_id))
+ if job_failed or job_dict.get("MessageId", "") in ERROR_CODES:
+ module.exit_json(failed=True, status_msg=job_dict, job_id=job_id, msg=FAIL_MSG.format(module.params["command"]))
+ scp_response = job_dict
+ return scp_response
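A small sketch of the job-id extraction used above: the Location response header points at the created job resource, and its last path segment is the job id. The polling itself is delegated to idrac_redfish_job_tracking from the collection's module_utils, so only the header parsing is shown, with a hypothetical header value (illustrative only):

# Illustrative sketch: pull the job id out of a Location header.
headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_123456789012"}  # hypothetical
job_id = headers["Location"].split("/")[-1]
print(job_id)  # JID_123456789012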
+
+
+def validate_input(module, scp_components):
+ if len(scp_components) != 1 and "ALL" in scp_components:
+ module.fail_json(msg=SCP_ALL_ERR_MSG)
+ if module.params["command"] in ["import", "preview"]:
+ if module.params.get("import_buffer") is not None:
+ if module.params.get("scp_file") is not None:
+ module.fail_json(msg=MUTUALLY_EXCLUSIVE.format("scp_file"))
+ if module.params.get("share_name") is not None:
+ module.fail_json(msg=MUTUALLY_EXCLUSIVE.format("share_name"))
+
+
+def validate_scp_components(module, idrac):
+ components = idrac.invoke_request(REDFISH_SCP_BASE_URI, "GET")
+ all_components = strip_substr_dict(components.json_data)
+ scp_components = module.params.get("scp_components")
+ command = module.params.get("command")
+ oem = all_components['Actions']['Oem']
+ operation_dict = {
+ "export": "ExportSystemConfiguration",
+ "import": "ImportSystemConfiguration",
+ "preview": "ImportSystemConfigurationPreview"
}
+ for each in oem:
+ if each.endswith(operation_dict.get(command.lower())):
+ allowable = oem.get(each).get('ShareParameters').get('Target@Redfish.AllowableValues')
+ invalid_comp = list(set(scp_components) - set(allowable))
+ if invalid_comp:
+ msg = TARGET_INVALID_MSG.format(command=command, invalid_targets=invalid_comp, valid_targets=allowable)
+ module.exit_json(msg=msg, failed=True)
+
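validate_scp_components above compares the requested components against the Target@Redfish.AllowableValues advertised for the matching OEM action. A standalone sketch of that set arithmetic, with hypothetical values (illustrative only):

# Illustrative sketch: reject components the iDRAC does not advertise as allowable.
allowable = ["ALL", "IDRAC", "BIOS", "NIC", "RAID"]   # hypothetical AllowableValues
requested = ["BIOS", "InfiniBand"]                    # hypothetical scp_components input
invalid_comp = sorted(set(requested) - set(allowable))
if invalid_comp:
    print("Invalid targets:", invalid_comp)           # ['InfiniBand']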
+
+class ImportCommand():
+ def __init__(self, idrac, http_share, module):
+ self.idrac = idrac
+ self.http_share = http_share
+ self.module = module
+
+ def execute(self):
+ changed = False
+ if self.http_share:
+ scp_status = run_export_import_scp_http(self.idrac, self.module)
+ if "SYS069" in scp_status.get("MessageId", ""):
+ changed = False
+ elif "SYS053" in scp_status.get("MessageId", ""):
+ changed = True
+ else:
+ scp_status = import_scp_redfish(self.module, self.idrac, self.http_share)
+ if "No changes were applied" not in scp_status.get('Message', ""):
+ changed = True
+ elif "SYS043" in scp_status.get("MessageId", ""):
+ changed = True
+ elif "SYS069" in scp_status.get("MessageId", ""):
+ changed = False
+ return scp_status, changed
+
+
+class ExportCommand():
+ def __init__(self, idrac, http_share, module):
+ self.idrac = idrac
+ self.http_share = http_share
+ self.module = module
+
+ def execute(self):
+ if self.http_share:
+ scp_status = run_export_import_scp_http(self.idrac, self.module)
+ else:
+ scp_status = export_scp_redfish(self.module, self.idrac)
+ return scp_status, False
+
+
+class PreviewCommand():
+ def __init__(self, idrac, http_share, module):
+ self.idrac = idrac
+ self.http_share = http_share
+ self.module = module
+
+ def execute(self):
+ scp_status = preview_scp_redfish(self.module, self.idrac, self.http_share, import_job_wait=False)
+ return scp_status, False
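The three command classes above let main() pick a handler and call execute() uniformly. A minimal sketch of that dispatch with stub classes standing in for the real ones (illustrative only):

# Illustrative sketch: dispatch a command string to a command object and run it.
class _StubCommand:
    def __init__(self, name):
        self.name = name
    def execute(self):
        return {"command": self.name}, False

handlers = {"import": _StubCommand, "export": _StubCommand, "preview": _StubCommand}
command = "export"                                   # hypothetical module input
scp_status, changed = handlers[command](command).execute()
print(scp_status, changed)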
+
+
+def main():
+ specs = get_argument_spec()
specs.update(idrac_auth_params)
module = AnsibleModule(
argument_spec=specs,
required_if=[
- ["command", "import", ["scp_file"]],
- ["command", "preview", ["scp_file"]],
+ ["command", "export", ["share_name"]],
+ ["proxy_support", True, ["proxy_server"]]
],
supports_check_mode=True)
+ validate_input(module, module.params.get("scp_components"))
try:
- changed = False
- http_share = module.params["share_name"].lower().startswith(('http://', 'https://'))
+ http_share = False
+ if module.params.get("share_name") is not None:
+ http_share = module.params["share_name"].lower().startswith(('http://', 'https://'))
with iDRACRedfishAPI(module.params) as idrac:
+ validate_scp_components(module, idrac)
command = module.params['command']
if command == 'import':
- if http_share:
- scp_status = run_export_import_scp_http(idrac, module)
- if "SYS069" in scp_status.get("MessageId", ""):
- changed = False
- elif "SYS053" in scp_status.get("MessageId", ""):
- changed = True
- else:
- scp_status = import_scp_redfish(module, idrac, http_share)
- if "No changes were applied" not in scp_status.get('Message', ""):
- changed = True
- elif "SYS043" in scp_status.get("MessageId", ""):
- changed = True
- elif "SYS069" in scp_status.get("MessageId", ""):
- changed = False
- elif command == "export":
- if http_share:
- scp_status = run_export_import_scp_http(idrac, module)
- else:
- scp_status = export_scp_redfish(module, idrac)
+ command_obj = ImportCommand(idrac, http_share, module)
+ elif command == 'export':
+ command_obj = ExportCommand(idrac, http_share, module)
else:
- scp_status = preview_scp_redfish(module, idrac, http_share, import_job_wait=False)
+ command_obj = PreviewCommand(idrac, http_share, module)
+ scp_status, changed = command_obj.execute()
+
if module.params.get('job_wait'):
scp_status = strip_substr_dict(scp_status)
msg = "Successfully {0}ed the Server Configuration Profile."
@@ -654,7 +959,7 @@ def main():
msg = "Successfully triggered the job to {0} the Server Configuration Profile."
module.exit_json(msg=msg.format(command), scp_status=scp_status)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (ImportError, ValueError, RuntimeError, SSLValidationError,
@@ -662,5 +967,42 @@ def main():
module.fail_json(msg=str(e))
+def get_argument_spec():
+ return {
+ "command": {"required": False, "type": 'str',
+ "choices": ['export', 'import', 'preview'], "default": 'export'},
+ "job_wait": {"required": True, "type": 'bool'},
+ "share_name": {"required": False, "type": 'str'},
+ "share_user": {"required": False, "type": 'str'},
+ "share_password": {"required": False, "type": 'str',
+ "aliases": ['share_pwd'], "no_log": True},
+ "scp_components": {"type": "list", "required": False, "elements": "str",
+ "choices": ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID', 'FC', 'InfiniBand', 'SupportAssist',
+ 'EventFilters', 'System', 'LifecycleController', 'AHCI', 'PCIeSSD'],
+ "default": ['ALL'], "aliases": ["target"]},
+ "scp_file": {"required": False, "type": 'str'},
+ "shutdown_type": {"required": False,
+ "choices": ['Graceful', 'Forced', 'NoReboot'],
+ "default": 'Graceful'},
+ "end_host_power_state": {"required": False,
+ "choices": ['On', 'Off'],
+ "default": 'On'},
+ "export_format": {"required": False, "type": 'str',
+ "choices": ['JSON', 'XML'], "default": 'XML'},
+ "export_use": {"required": False, "type": 'str',
+ "choices": ['Default', 'Clone', 'Replace'], "default": 'Default'},
+ "ignore_certificate_warning": {"required": False, "choices": ["ignore", "showerror"], "default": "ignore"},
+ "include_in_export": {"required": False, "type": "str", "default": "default",
+ "choices": ["default", "readonly", "passwordhashvalues", "customtelemetry"]},
+ "import_buffer": {"type": "str", "required": False},
+ "proxy_support": {"type": "bool", "required": False, "default": False},
+ "proxy_type": {"type": "str", "required": False, "choices": ["http", "socks4"], "default": "http"},
+ "proxy_server": {"type": "str", "required": False},
+ "proxy_port": {"type": "str", "required": False, "default": "80"},
+ "proxy_username": {"type": "str", "required": False},
+ "proxy_password": {"type": "str", "required": False, "no_log": True},
+ }
+
+
if __name__ == '__main__':
main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
index d078b0851..562ccc1ff 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_syslog.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,13 +31,14 @@ options:
default: Enabled
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -45,27 +46,27 @@ EXAMPLES = """
---
- name: Enable iDRAC syslog
dellemc.openmanage.idrac_syslog:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- share_password: "share_user_pwd"
- share_user: "share_user_name"
- share_mnt: "/mnt/share"
- syslog: "Enabled"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Enabled"
- name: Disable iDRAC syslog
dellemc.openmanage.idrac_syslog:
- idrac_ip: "192.168.0.1"
- idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_ip: "192.168.0.1"
+ idrac_user: "user_name"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
- share_name: "192.168.0.2:/share"
- share_password: "share_user_pwd"
- share_user: "share_user_name"
- share_mnt: "/mnt/share"
- syslog: "Disabled"
+ share_name: "192.168.0.2:/share"
+ share_password: "share_user_pwd"
+ share_user: "share_user_name"
+ share_mnt: "/mnt/share"
+ syslog: "Disabled"
"""
RETURN = r'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
index 61827f2df..21dbb105f 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_system_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -25,10 +25,11 @@ extends_documentation_fragment:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author: "Rajeev Arakkal (@rajeevarakkal)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
index 6227571c0..82864340b 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_timezone_ntp.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -70,13 +70,14 @@ options:
requirements:
- "omsdk >= 1.2.488"
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Anooja Vardhineni (@anooja-vardhineni)"
notes:
- This module requires 'Administrator' privilege for I(idrac_user).
- - Run this module from a system that has direct access to Dell EMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
+    - This module supports both IPv4 and IPv6 addresses for I(idrac_ip).
- This module supports C(check_mode).
"""
@@ -84,9 +85,9 @@ EXAMPLES = """
---
- name: Configure time zone and NTP on iDRAC
dellemc.openmanage.idrac_timezone_ntp:
- idrac_ip: "190.168.0.1"
+ idrac_ip: "190.168.0.1"
idrac_user: "user_name"
- idrac_password: "user_password"
+ idrac_password: "user_password"
ca_path: "/path/to/ca_cert.pem"
setup_idrac_timezone: "UTC"
enable_ntp: Enabled
@@ -158,7 +159,6 @@ import json
try:
from omdrivers.enums.iDRAC.iDRAC import NTPEnable_NTPConfigGroupTypes
from omsdk.sdkfile import file_share_manager
- from omsdk.sdkcreds import UserCredentials
except ImportError:
pass
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
index df9f9adbe..bcd16b872 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -31,13 +31,11 @@ options:
description:
- Select C(present) to create or modify a user account.
- Select C(absent) to remove a user account.
- - Ensure Lifecycle Controller is available because the user operation
- uses the capabilities of Lifecycle Controller.
choices: [present, absent]
default: present
user_name:
type: str
- required: True
+ required: true
description: Provide the I(user_name) of the account to be created, deleted or modified.
user_password:
type: str
@@ -59,7 +57,13 @@ options:
access virtual console, access virtual media, and execute debug commands.
- A user with C(ReadOnly) privilege can only log in to iDRAC.
- A user with C(None), no privileges assigned.
+      - This parameter is ignored if I(custom_privilege) is provided.
choices: [Administrator, ReadOnly, Operator, None]
+ custom_privilege:
+ type: int
+ description:
+ - The privilege level assigned to the user.
+ version_added: "8.1.0"
ipmi_lan_privilege:
type: str
description: The Intelligent Platform Management Interface LAN privilege level assigned to the user.
@@ -100,7 +104,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC iDRAC.
+ - Run this module from a system that has direct access to Dell iDRAC.
- This module supports C(check_mode).
"""
@@ -211,8 +215,11 @@ from ansible.module_utils.basic import AnsibleModule
ACCOUNT_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/"
ATTRIBUTE_URI = "/redfish/v1/Managers/iDRAC.Embedded.1/Attributes/"
-PRIVILEGE = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}
+USER_ROLES = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}
ACCESS = {0: "Disabled", 1: "Enabled"}
+INVALID_PRIVILAGE_MSG = "custom_privilege value should be from 0 to 511."
+INVALID_PRIVILAGE_MIN = 0
+INVALID_PRIVILAGE_MAX = 511
def compare_payload(json_payload, idrac_attr):
@@ -270,10 +277,13 @@ def get_payload(module, slot_id, action=None):
:param slot_id: slot id for user slot
:return: json data with slot id
"""
+ user_privilege = module.params["custom_privilege"] if "custom_privilege" in module.params and \
+ module.params["custom_privilege"] is not None else USER_ROLES.get(module.params["privilege"])
+
slot_payload = {"Users.{0}.UserName": module.params["user_name"],
"Users.{0}.Password": module.params["user_password"],
"Users.{0}.Enable": ACCESS.get(module.params["enable"]),
- "Users.{0}.Privilege": PRIVILEGE.get(module.params["privilege"]),
+ "Users.{0}.Privilege": user_privilege,
"Users.{0}.IpmiLanPrivilege": module.params["ipmi_lan_privilege"],
"Users.{0}.IpmiSerialPrivilege": module.params["ipmi_serial_privilege"],
"Users.{0}.SolEnable": ACCESS.get(module.params["sol_enable"]),
@@ -378,6 +388,14 @@ def remove_user_account(module, idrac, slot_uri, slot_id):
return response, msg
+def validate_input(module):
+ if module.params["state"] == "present":
+ user_privilege = module.params["custom_privilege"] if "custom_privilege" in module.params and \
+ module.params["custom_privilege"] is not None else USER_ROLES.get(module.params["privilege"], 0)
+ if INVALID_PRIVILAGE_MIN > user_privilege or user_privilege > INVALID_PRIVILAGE_MAX:
+ module.fail_json(msg=INVALID_PRIVILAGE_MSG)
+
+
def main():
specs = {
"state": {"required": False, "choices": ['present', 'absent'], "default": "present"},
@@ -385,6 +403,7 @@ def main():
"user_name": {"required": True},
"user_password": {"required": False, "no_log": True},
"privilege": {"required": False, "choices": ['Administrator', 'ReadOnly', 'Operator', 'None']},
+ "custom_privilege": {"required": False, "type": "int"},
"ipmi_lan_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
"ipmi_serial_privilege": {"required": False, "choices": ['Administrator', 'Operator', 'User', 'No Access']},
"enable": {"required": False, "type": "bool"},
@@ -398,6 +417,7 @@ def main():
argument_spec=specs,
supports_check_mode=True)
try:
+ validate_input(module)
with iDRACRedfishAPI(module.params, req_session=True) as idrac:
user_attr, slot_uri, slot_id, empty_slot_id, empty_slot_uri = get_user_account(module, idrac)
if module.params["state"] == "present":
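A standalone sketch of how the new custom_privilege option is resolved and range-checked above: a supplied integer wins over the named role, and values outside 0-511 are rejected (illustrative only):

# Illustrative sketch: resolve the effective iDRAC privilege value.
USER_ROLES = {"Administrator": 511, "Operator": 499, "ReadOnly": 1, "None": 0}

def resolve_privilege(privilege=None, custom_privilege=None):
    value = custom_privilege if custom_privilege is not None else USER_ROLES.get(privilege, 0)
    if not 0 <= value <= 511:
        raise ValueError("custom_privilege value should be from 0 to 511.")
    return value

print(resolve_privilege(privilege="Operator"))                          # 499
print(resolve_privilege(privilege="Operator", custom_privilege=511))    # 511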
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py
new file mode 100644
index 000000000..6d06a60be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_user_info.py
@@ -0,0 +1,232 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: idrac_user_info
+short_description: Retrieve details of all users or a specific user on iDRAC.
+version_added: "7.0.0"
+description:
+ - "This module retrieves the list and basic details of all users or details of a specific user on
+ iDRAC"
+extends_documentation_fragment:
+ - dellemc.openmanage.idrac_auth_options
+options:
+ user_id:
+ description:
+      - Sequential user ID numbers from 1 to 16 are supported.
+      - I(user_id) is mutually exclusive with I(username).
+ type: int
+ username:
+ type: str
+ description:
+ - Username of the account that is created in iDRAC local users.
+      - I(username) is mutually exclusive with I(user_id).
+requirements:
+ - "python >= 3.8.6"
+author: "Husniya Hameed(@husniya_hameed)"
+notes:
+ - Run this module on a system that has direct access to Dell iDRAC.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve basic details of all user accounts.
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve user details using user_id
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ user_id: 1
+
+- name: Retrieve user details using username
+ dellemc.openmanage.idrac_user_info:
+ idrac_ip: 198.162.0.1
+ idrac_user: idrac_user
+ idrac_password: idrac_password
+ ca_path: "/path/to/ca_cert.pem"
+ username: user_name
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of user information retrieval.
+ returned: always
+ type: str
+ sample: "Successfully retrieved the user information."
+user_info:
+ description: Information about the user.
+ returned: success
+ type: list
+ sample: [{
+ "Description": "User Account",
+ "Enabled": false,
+ "Id": "1",
+ "Locked": false,
+ "Name": "User Account",
+ "Password": null,
+ "RoleId": "None",
+ "UserName": ""
+ }]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+
+import json
+from ssl import SSLError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, idrac_auth_params
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+
+ACCOUNT = "/redfish/v1"
+SUCCESS_MSG = "Successfully retrieved the information of {0} user(s)."
+UNSUCCESS_MSG = "Unable to retrieve the user information."
+INVALID_USERID = "'user_id' is not valid."
+INVALID_USERNAME = "'username' is not valid."
+SUCCESSFUL_MSG = "Successfully retrieved the user information."
+
+
+def get_accounts_uri(idrac):
+ try:
+ account_path = idrac.invoke_request(ACCOUNT, 'GET')
+ account_service = account_path.json_data.get("AccountService").get("@odata.id")
+ accounts = idrac.invoke_request(account_service, "GET")
+ accounts_uri = accounts.json_data.get("Accounts").get("@odata.id")
+ except HTTPError:
+ accounts_uri = "/redfish/v1/AccountService/Accounts"
+ return accounts_uri
+
+
+def fetch_all_accounts(idrac, accounts_uri):
+ all_accounts = idrac.invoke_request("{0}?$expand=*($levels=1)".format(accounts_uri), 'GET')
+ all_accs = all_accounts.json_data.get("Members")
+ return all_accs
+
+
+def get_user_id_accounts(idrac, module, accounts_uri, user_id):
+ acc_dets_json_data = {}
+ try:
+ acc_uri = accounts_uri + "/{0}".format(user_id)
+ acc_dets = idrac.invoke_request(acc_uri, "GET")
+ acc_dets_json_data = strip_substr_dict(acc_dets.json_data)
+ if acc_dets_json_data.get("Oem") is not None:
+ acc_dets_json_data["Oem"]["Dell"] = strip_substr_dict(acc_dets_json_data["Oem"]["Dell"])
+ acc_dets_json_data.pop("Links", None)
+ except HTTPError:
+ module.exit_json(msg=INVALID_USERID, failed=True)
+ return acc_dets_json_data
+
+
+def get_user_name_accounts(idrac, module, accounts_uri, user_name):
+ all_accs = fetch_all_accounts(idrac, accounts_uri)
+ acc_dets_json_data = {}
+ for acc in all_accs:
+ if acc.get("UserName") == user_name:
+ acc.pop("Links", None)
+ acc_dets_json_data = strip_substr_dict(acc)
+ if acc_dets_json_data.get("Oem") is not None:
+ acc_dets_json_data["Oem"]["Dell"] = strip_substr_dict(acc_dets_json_data["Oem"]["Dell"])
+ break
+ if not bool(acc_dets_json_data):
+ module.fail_json(msg=INVALID_USERNAME, failed=True)
+ return acc_dets_json_data
+
+
+def get_all_accounts(idrac, account_uri):
+ all_accs = fetch_all_accounts(idrac, account_uri)
+ idrac_list = []
+ for acc in all_accs:
+ if acc.get("UserName") != "":
+ acc.pop("Links", None)
+ acc_dets_json_data = strip_substr_dict(acc)
+ if acc_dets_json_data.get("Oem") is not None:
+ acc_dets_json_data["Oem"]["Dell"] = strip_substr_dict(acc_dets_json_data["Oem"]["Dell"])
+ idrac_list.append(acc_dets_json_data)
+ return idrac_list
+
+
+def main():
+ specs = {
+ "user_id": {"type": 'int'},
+ "username": {"type": 'str'}
+ }
+ specs.update(idrac_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[
+ ('user_id', 'username')
+ ],
+ supports_check_mode=True
+ )
+ try:
+ with iDRACRedfishAPI(module.params, req_session=True) as idrac:
+ resp = []
+ msg = SUCCESSFUL_MSG
+ accounts_uri = get_accounts_uri(idrac)
+ user_id = module.params.get("user_id")
+ user_name = module.params.get("username")
+ if user_id is not None:
+ resp.append(get_user_id_accounts(idrac, module, accounts_uri, user_id))
+ elif user_name is not None:
+ resp.append(get_user_name_accounts(idrac, module, accounts_uri, user_name))
+ else:
+ resp.extend(get_all_accounts(idrac, accounts_uri))
+ resp_len = len(resp)
+ msg = SUCCESS_MSG.format(resp_len)
+ if resp:
+ module.exit_json(msg=msg, user_info=resp)
+ else:
+ module.fail_json(msg=UNSUCCESS_MSG, failed=True)
+ except HTTPError as err:
+ module.fail_json(msg=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
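The new idrac_user_info module expands the Accounts collection in a single request and then filters the members locally. A small sketch of the username filter over a hypothetical expanded payload (illustrative only):

# Illustrative sketch: pick one account out of an expanded Accounts payload by UserName.
members = [                                           # hypothetical $expand response members
    {"Id": "1", "UserName": "", "RoleId": "None"},
    {"Id": "2", "UserName": "root", "RoleId": "Administrator"},
]

def find_account(members, user_name):
    for acc in members:
        if acc.get("UserName") == user_name:
            return acc
    return None

print(find_account(members, "root"))                  # the Id "2" entry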
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
index ac22541eb..4c5fb10db 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/idrac_virtual_media.py
@@ -33,22 +33,22 @@ options:
required: true
type: bool
description:
- - C(True) connects the remote image file.
- - C(False) ejects the remote image file if connected.
+ - C(true) connects the remote image file.
+ - C(false) ejects the remote image file if connected.
image:
type: path
description:
- The path of the image file. The supported file types are .img and .iso.
- The file name with .img extension is redirected as a virtual floppy and a file name with .iso extension is
redirected as a virtual CDROM.
- - This option is required when I(insert) is C(True).
+ - This option is required when I(insert) is C(true).
- "The following are the examples of the share location:
CIFS share: //192.168.0.1/file_path/image_name.iso,
NFS share: 192.168.0.2:/file_path/image_name.img,
HTTP share: http://192.168.0.3/file_path/image_name.iso,
HTTPS share: https://192.168.0.4/file_path/image_name.img"
- - CIFS share is not supported by iDRAC7 and iDRAC8.
- - HTTPS share with credentials is not supported by iDRAC7 and iDRAC8.
+ - CIFS share is not supported by iDRAC8.
+ - HTTPS share with credentials is not supported by iDRAC8.
index:
type: int
description:
@@ -67,12 +67,12 @@ options:
- This module always reports as the changes found when I(password) is provided.
media_type:
type: str
- description: Type of the image file. This is applicable when I(insert) is C(True).
+ description: Type of the image file. This is applicable when I(insert) is C(true).
choices: [CD, DVD, USBStick]
force:
type: bool
- description: C(True) ejects the image file if already connected and inserts the file provided in I(image).
- This is applicable when I(insert) is C(True).
+ description: C(true) ejects the image file if already connected and inserts the file provided in I(image).
+ This is applicable when I(insert) is C(true).
default: false
resource_id:
type: str
@@ -162,7 +162,7 @@ EXAMPLES = """
ca_path: "/path/to/ca_cert.pem"
force: true
virtual_media:
- insert: false
+ insert: false
- name: Insertion and ejection of image file in single task.
dellemc.openmanage.idrac_virtual_media:
@@ -313,7 +313,7 @@ def _validate_params(module, vr_members, rd_version):
def virtual_media_operation(idrac, module, payload, vr_id):
- err_payload, inserted = [], []
+ err_payload = []
force = module.params["force"]
for i in payload:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
index 98235b9d3..6f420bec7 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_active_directory.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -92,34 +92,34 @@ options:
- Enables testing the connection to the domain controller.
- The connection to the domain controller is tested with the provided Active Directory service details.
- If test fails, module will error out.
- - If C(yes), I(domain_username) and I(domain_password) has to be provided.
- default: no
+ - If C(true), I(domain_username) and I(domain_password) has to be provided.
+ default: false
domain_password:
type: str
description:
- Provide the domain password.
- - This is applicable when I(test_connection) is C(yes).
+ - This is applicable when I(test_connection) is C(true).
domain_username:
type: str
description:
- Provide the domain username either in the UPN (username@domain) or NetBIOS (domain\\\\username) format.
- - This is applicable when I(test_connection) is C(yes).
+ - This is applicable when I(test_connection) is C(true).
validate_certificate:
type: bool
description:
- Enables validation of SSL certificate of the domain controller.
- - The module will always report change when this is C(yes).
- default: no
+ - The module will always report change when this is C(true).
+ default: false
certificate_file:
type: path
description:
- Provide the full path of the SSL certificate.
- The certificate should be a Root CA Certificate encoded in Base64 format.
- - This is applicable when I(validate_certificate) is C(yes).
+ - This is applicable when I(validate_certificate) is C(true).
requirements:
- "python >= 3.8.6"
notes:
- - The module will always report change when I(validate_certificate) is C(yes).
+ - The module will always report change when I(validate_certificate) is C(true).
- Run this module from a system that has direct access to OpenManage Enterprise.
- This module supports C(check_mode).
"""
@@ -136,7 +136,7 @@ EXAMPLES = """
domain_server:
- domainname.com
group_domain: domainname.com
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
@@ -151,7 +151,7 @@ EXAMPLES = """
domain_server:
- 192.68.20.181
group_domain: domainname.com
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
- name: Modify domain controller IP address, network_timeout and group_domain
@@ -183,10 +183,10 @@ EXAMPLES = """
password: "password"
ca_path: "/path/to/ca_cert.pem"
name: my_ad2
- test_connection: yes
+ test_connection: true
domain_username: user@domainname
domain_password: domain_password
- validate_certificate: yes
+ validate_certificate: true
certificate_file: "/path/to/certificate/file.cer"
"""
@@ -397,7 +397,7 @@ def delete_ad(module, rest_obj, ad):
ad = rest_obj.strip_substr_dict(ad)
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, active_directory=ad, changed=True)
- resp = rest_obj.invoke_request('POST', DELETE_AD, data={"AccountProviderIds": [int(ad['Id'])]})
+ rest_obj.invoke_request('POST', DELETE_AD, data={"AccountProviderIds": [int(ad['Id'])]})
module.exit_json(msg=DELETE_SUCCESS, active_directory=ad, changed=True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py
new file mode 100644
index 000000000..9e8a17fd2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies.py
@@ -0,0 +1,1114 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies
+short_description: Manage OME alert policies.
+version_added: "8.3.0"
+description: This module allows you to create, modify, or delete alert policies on OpenManage Enterprise or OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ name:
+ description:
+ - Name of an alert policy or a list of alert policies.
+      - More than one policy name is applicable when I(state) is C(absent), or when I(state) is C(present) with only I(enable) provided.
+ type: list
+ elements: str
+ required: true
+ state:
+ description:
+ - C(present) allows you to create an alert policy or update if the policy name already exists.
+ - C(absent) allows you to delete an alert policy.
+ default: present
+ choices: [present, absent]
+ type: str
+ enable:
+ description:
+ - C(true) allows you to enable an alert policy.
+ - C(false) allows you to disable an alert policy.
+ - This is applicable only when I(state) is C(present).
+ type: bool
+ new_name:
+ description:
+ - New name for the alert policy.
+ - This is applicable only when I(state) is C(present), and an alert policy exists.
+ type: str
+ description:
+ description:
+ - Description for the alert policy.
+ - This is applicable only when I(state) is C(present)
+ type: str
+ device_service_tag:
+ description:
+ - List of device service tags on which the alert policy will be applicable.
+ - This option is mutually exclusive with I(device_group), I(specific_undiscovered_devices), I(any_undiscovered_devices) and I(all_devices).
+ - This is applicable only when I(state) is C(present)
+ type: list
+ elements: str
+ device_group:
+ description:
+ - List of device group names on which the alert policy is applicable.
+      - This option is mutually exclusive with I(device_service_tag), I(specific_undiscovered_devices), I(any_undiscovered_devices) and I(all_devices).
+ - This is applicable only when I(state) is C(present)
+ type: list
+ elements: str
+ specific_undiscovered_devices:
+ description:
+ - List of undiscovered IPs, hostnames, or range of IPs of devices on which the alert policy is applicable.
+      - This option is mutually exclusive with I(device_service_tag), I(device_group), I(any_undiscovered_devices) and I(all_devices).
+ - This is applicable only when I(state) is C(present)
+ - "Examples of valid IP range format:"
+ - " 10.35.0.0"
+ - " 10.36.0.0-10.36.0.255"
+ - " 10.37.0.0/24"
+ - " 2607:f2b1:f083:135::5500/118"
+ - " 2607:f2b1:f083:135::a500-2607:f2b1:f083:135::a600"
+ - " hostname.domain.com"
+ - "Examples of invalid IP range format:"
+ - " 10.35.0.*"
+ - " 10.36.0.0-255"
+ - " 10.35.0.0/255.255.255.0"
+ - These values will not be validated.
+ type: list
+ elements: str
+ any_undiscovered_devices:
+ description:
+ - This option indicates whether the alert policy is applicable to any undiscovered devices or not.
+ - This option is mutually exclusive with I(device_service_tag), I(specific_undiscovered_devices), I(device_group) and I(all_devices).
+ - This is applicable only when I(state) is C(present).
+ type: bool
+ all_devices:
+ description:
+ - This option indicates whether the alert policy is applicable to all the discovered and undiscovered devices or not.
+ - This option is mutually exclusive with I(device_service_tag), I(specific_undiscovered_devices), I(any_undiscovered_devices) and I(device_group).
+ - This is applicable only when I(state) is C(present).
+ type: bool
+ category:
+ description:
+ - Category of the alerts received.
+ - This is mutually exclusive with the I(message_ids), I(message_file).
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_category_info).
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: dict
+ suboptions:
+ catalog_name:
+ description: Name of the catalog.
+ type: str
+ required: true
+ catalog_category:
+ description: Category of the catalog.
+ type: list
+ elements: dict
+ suboptions:
+ category_name:
+ description: Name of the category.
+ type: str
+ sub_category_names:
+ description: List of sub-categories.
+ type: list
+ elements: str
+ message_ids:
+ description:
+ - List of Message ids
+ - This is mutually exclusive with the I(category), I(message_file)
+ - This is applicable only when I(state) is C(present)
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_message_id_info).
+ type: list
+ elements: str
+ message_file:
+ description:
+ - Local path of a CSV formatted file with message IDs
+ - This is mutually exclusive with the I(category), I(message_ids)
+ - This is applicable only when I(state) is C(present)
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_message_id_info).
+ type: path
+ date_and_time:
+ description:
+ - Specifies the schedule for when the alert policy is applicable.
+ - I(date_and_time) is mandatory for creating a policy and optional when updating a policy.
+ - This is applicable only when I(state) is C(present).
+ type: dict
+ suboptions:
+ date_from:
+ description:
+ - "Start date in the format YYYY-MM-DD."
+        - This parameter must be provided in quotes.
+ type: str
+ required: true
+ date_to:
+ description:
+ - "End date in the format YYYY-MM-DD."
+        - This parameter must be provided in quotes.
+ type: str
+ time_from:
+ description:
+ - "Interval start time in the format HH:MM"
+        - This parameter must be provided in quotes.
+ - This is mandatory when I(time_interval) is C(true).
+ type: str
+ time_to:
+ description:
+ - "Interval end time in the format HH:MM"
+        - This parameter must be provided in quotes.
+        - This is mandatory when I(time_interval) is C(true).
+ type: str
+ days:
+ description: Required days of the week on which alert policy operation must be scheduled.
+ type: list
+ elements: str
+ choices: [monday, tuesday, wednesday, thursday, friday, saturday, sunday]
+ time_interval:
+ description: Enable the time interval for which alert policy must be scheduled.
+ type: bool
+ severity:
+ description:
+ - Severity of the alert policy.
+ - This is mandatory for creating a policy and optional for updating a policy.
+ - This is applicable only when I(state) is C(present).
+ type: list
+ elements: str
+ choices: [all, unknown, info, normal, warning, critical]
+ actions:
+ description:
+ - Actions to be triggered for the alert policy.
+ - This parameter is case-sensitive.
+ - This is mandatory for creating a policy and optional for updating a policy.
+ - This is applicable only when I(state) is C(present)
+ type: list
+ elements: dict
+ suboptions:
+ action_name:
+ description:
+ - Name of the action.
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_action_info).
+ - This is mandatory for creating a policy and optional for updating a policy.
+ - This parameter is case-sensitive.
+ type: str
+ required: true
+ parameters:
+ description:
+ - Predefined parameters required to set for I(action_name).
+ type: list
+ elements: dict
+ default: []
+ suboptions:
+ name:
+ description:
+ - Name of the predefined parameter.
+ - This is fetched from the M(dellemc.openmanage.ome_alert_policies_action_info).
+ type: str
+ value:
+ description:
+ - Value of the predefined parameter.
+ - These values will not be validated.
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author: "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: "Create an alert policy"
+  dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Alert Policy One"
+ device_service_tag:
+ - ABCD123
+ - SVC7845
+ category:
+ - catalog_name: Application
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - Generic
+ - Devices
+ - catalog_name: iDRAC
+ catalog_category:
+ - category_name: Audit
+ sub_category_names:
+ - BIOS Management
+ - iDRAC Service Module
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ severity:
+ - unknown
+ - critical
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ - name: "traphostname.domain.com:162"
+ value: true
+ tags: create_alert_policy
+
+- name: "Update an alert Policy"
+  dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ new_name: "Update Policy Name"
+ device_group: "Group Name"
+ message_ids:
+ - AMP400
+ - CTL201
+ - BIOS101
+ date_and_time:
+ date_from: "2023-10-10"
+ date_to: "2023-10-11"
+ time_from: "11:00"
+ time_to: "12:00"
+ time_interval: true
+ actions:
+ - action_name: Trap
+ parameters:
+ - name: "192.1.2.3:162"
+ value: true
+ tags: update_alert_policy
+
+- name: "Enable an alert policy"
+  dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Policy Name"
+ enable: true
+ tags: enable_alert_policy
+
+- name: "Disable multiple alert policies"
+  dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name 1"
+ - "Policy Name 2"
+ enable: false
+ tags: disable_alert_policy
+
+- name: "Delete an alert policy"
+  dellemc.openmanage.ome_alert_policies:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ name:
+ - "Policy Name"
+ state: absent
+ tags: delete_alert_policy
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the alert policies operation.
+ returned: always
+ sample: "Successfully created the alert policy."
+status:
+ type: dict
+ description: The policy which was created or modified.
+ returned: when state is present
+ sample: {
+ "Id": 12345,
+ "Name": "Policy",
+ "Description": "Details of the Policy",
+ "Enabled": true,
+ "DefaultPolicy": false,
+ "Editable": true,
+ "Visible": true,
+ "PolicyData": {
+ "Catalogs": [
+ {
+ "CatalogName": "iDRAC",
+ "Categories": [
+ 4
+ ],
+ "SubCategories": [
+ 41
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Categories": [
+ 0
+ ],
+ "SubCategories": [
+ 0
+ ]
+ }
+ ],
+ "Severities": [
+ 16,
+ 1,
+ 2,
+ 4,
+ 8
+ ],
+ "Devices": [
+ 10086,
+ 10088
+ ],
+ "DeviceTypes": [
+ 1000,
+ 2000
+ ],
+ "Groups": [],
+ "Schedule": {
+ "StartTime": "2023-06-06 15:02:46.000",
+ "EndTime": "2023-06-06 18:02:46.000",
+ "CronString": "* * * ? * * *"
+ },
+ "Actions": [
+ {
+ "Id": 8,
+ "Name": "Email",
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "subject",
+ "Value": "Device Name: $name, Device IP Address: $ip, Severity: $severity",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "to",
+ "Value": "test@org.com",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "from",
+ "Value": "abc@corp.com",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "message",
+ "Value": "Event occurred for Device Name: $name, Device IP Address: $ip",
+ "Type": "string",
+ "TypeParams": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ }
+ ],
+ "UndiscoveredTargets": [],
+ "State": true,
+ "Owner": 10069
+ }
+}
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CMON7011",
+ "RelatedProperties": [],
+ "Message": "Unable to create or modify the alert policy because an invalid value [To Email] is entered for the action Email.",
+ "MessageArgs": [
+ "[To Email]",
+ "Email"
+ ],
+ "Severity": "Warning",
+ "Resolution": "Enter a valid value for the action identified in the message and retry the operation."
+ }
+ ]
+ }
+}
+'''
+
+import csv
+import os
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination, strip_substr_dict
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.common.dict_transformations import recursive_diff
+from datetime import datetime
+
+
+POLICIES_URI = "AlertService/AlertPolicies"
+MESSAGES_URI = "AlertService/AlertMessageDefinitions"
+ACTIONS_URI = "AlertService/AlertActionTemplates"
+SEVERITY_URI = "AlertService/AlertSeverities"
+DEVICES_URI = "DeviceService/Devices"
+GROUPS_URI = "GroupService/Groups"
+REMOVE_URI = "AlertService/Actions/AlertService.RemoveAlertPolicies"
+ENABLE_URI = "AlertService/Actions/AlertService.EnableAlertPolicies"
+DISABLE_URI = "AlertService/Actions/AlertService.DisableAlertPolicies"
+CATEGORY_URI = "AlertService/AlertCategories"
+SUCCESS_MSG = "Successfully {0}d the alert policy."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_MSG = "Changes found to be applied."
+INVALID_TIME = "The specified {0} date or {0} time `{1}` to schedule the policy is not valid. Enter a valid date and time."
+END_START_TIME = "The end time `{0}` to schedule the policy must be greater than the start time `{1}`."
+CATEGORY_FETCH_FAILED = "Unable to retrieve the category details from OpenManage Enterprise."
+INVALID_TARGETS = "Specify target devices to apply the alert policy."
+INVALID_CATEGORY_MESSAGE = "Specify categories or message to create the alert policy."
+INVALID_SCHEDULE = "Specify a date and time to schedule the alert policy."
+INVALID_ACTIONS = "Specify alert actions for the alert policy."
+INVALID_SEVERITY = "Specify the severity to create the alert policy."
+MULTIPLE_POLICIES = "Unable to update the alert policies because the number of alert policies entered is more than " \
+ "one. The update policy operation supports only one alert policy at a time."
+DISABLED_ACTION = "Action {0} is disabled. Enable it before applying to the alert policy."
+ACTION_INVALID_PARAM = "The Action {0} attribute contains invalid parameter name {1}. The valid values are {2}."
+ACTION_INVALID_VALUE = "The Action {0} attribute contains invalid value for {1} for parameter name {2}. The valid " \
+ "values are {3}."
+ACTION_DIS_EXIST = "Action {0} does not exist."
+SUBCAT_IN_CATEGORY = "The subcategory {0} does not exist in the category {1}."
+CATEGORY_IN_CATALOG = "The category {0} does not exist in the catalog {1}."
+OME_DATA_MSG = "The {0} with the following {1} do not exist: {2}."
+CATALOG_DIS_EXIST = "The catalog {0} does not exist."
+CSV_PATH = "The message file {0} does not exist."
+DEFAULT_POLICY_DELETE = "The following default policies cannot be deleted: {0}."
+POLICY_ENABLE_MISSING = "Unable to {0} the alert policies {1} because the policy names are invalid. Enter the valid " \
+ "alert policy names and retry the operation."
+NO_POLICY_EXIST = "The alert policy does not exist."
+SEPARATOR = ", "
+
+
+def get_alert_policies(rest_obj, name_list):
+ report = get_all_data_with_pagination(rest_obj, POLICIES_URI)
+ all_policies = report.get("report_list", [])
+ policies = []
+ nameset = set(name_list)
+ for policy in all_policies:
+ if policy.get("Name") in nameset:
+ policies.append(policy)
+ return policies
+
+
+def get_items_to_remove(filter_param, return_param_tuple, return_dict, all_items, mset):
+ collector = set()
+ for dev in all_items:
+ k = dev.get(filter_param)
+ if k in mset:
+ for v in return_param_tuple:
+ return_dict[v].append(dev.get(v))
+ collector.add(k)
+ return collector
+
+
+def validate_ome_data(module, rest_obj, item_list, filter_param, return_param_tuple, ome_uri, item_name="Items"):
+ mset = set(item_list)
+ return_dict = {v: [] for v in return_param_tuple}
+ # can be further optimized if len(mset) == 1
+ resp = rest_obj.invoke_request("GET", ome_uri)
+ all_items = resp.json_data.get("value", [])
+ dvdr = len(all_items) if len(all_items) else 100
+ collector = get_items_to_remove(filter_param, return_param_tuple, return_dict, all_items, mset)
+ mset = mset - collector
+ all_item_count = resp.json_data.get("@odata.count")
+ next_link = resp.json_data.get("@odata.nextLink")
+ if mset and next_link:
+ if len(mset) < (all_item_count // dvdr):
+ for item_id in mset:
+ query_param = {"$filter": f"{filter_param} eq '{item_id}'"}
+ resp = rest_obj.invoke_request('GET', ome_uri, query_param=query_param)
+ one_item = resp.json_data.get("value", [])
+ collector = collector | get_items_to_remove(filter_param, return_param_tuple, return_dict, one_item, mset)
+ mset = mset - collector
+ else:
+ while next_link and mset:
+ resp = rest_obj.invoke_request('GET', next_link.lstrip("/api"))
+ all_items = resp.json_data.get("value", [])
+ collector = get_items_to_remove(filter_param, return_param_tuple, return_dict, all_items, mset)
+ mset = mset - collector
+ next_link = resp.json_data.get("@odata.nextLink", None)
+ if mset:
+ module.exit_json(failed=True,
+ msg=OME_DATA_MSG.format(item_name, filter_param, SEPARATOR.join(mset)))
+ ret_list = [(return_dict[id]) for id in return_param_tuple]
+ return tuple(ret_list)
+
+
+def get_target_payload(module, rest_obj):
+ target_payload = {'AllTargets': False,
+ 'DeviceTypes': [],
+ 'Devices': [],
+ 'Groups': [],
+ 'UndiscoveredTargets': []}
+ mparams = module.params
+ target_provided = False
+ if mparams.get('all_devices'):
+ target_payload['AllTargets'] = True
+ target_provided = True
+ elif mparams.get('any_undiscovered_devices'):
+ target_payload['UndiscoveredTargets'] = ["ALL_UNDISCOVERED_TARGETS"]
+ target_provided = True
+ elif mparams.get('specific_undiscovered_devices'):
+ target_payload['UndiscoveredTargets'] = list(set(module.params.get('specific_undiscovered_devices')))
+ target_payload['UndiscoveredTargets'].sort()
+ target_provided = True
+ elif mparams.get('device_service_tag'):
+ devicetype, deviceids = validate_ome_data(module, rest_obj, mparams.get('device_service_tag'),
+ 'DeviceServiceTag', ('Type', 'Id'), DEVICES_URI, 'devices')
+ target_payload['Devices'] = deviceids
+ target_payload['Devices'].sort()
+ target_payload['DeviceTypes'] = list(set(devicetype))
+ target_payload['DeviceTypes'].sort()
+ target_provided = True
+ elif mparams.get('device_group'):
+ groups = validate_ome_data(module, rest_obj, mparams.get('device_group'), 'Name', ('Id',), GROUPS_URI, 'groups')
+ target_payload['Groups'] = groups[0]
+ target_payload['Groups'].sort()
+ target_provided = True
+ if not target_provided:
+ target_payload = {}
+ return target_payload
+
+
+def get_category_data_tree(rest_obj):
+ resp = rest_obj.invoke_request("GET", CATEGORY_URI)
+ cat_raw = resp.json_data.get("value", [])
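+ # Build a nested lookup of the form:
+ # {catalog_name: {category_name: {category_id: {subcategory_name: subcategory_id}}}}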
+ cat_dict = dict(
+ (category.get("Name"),
+ dict((y.get("Name"),
+ {y.get("Id"): dict((z.get('Name'), z.get('Id')
+ ) for z in y.get("SubCategoryDetails"))}
+ ) for y in category.get("CategoriesDetails")
+ )
+ ) for category in cat_raw
+ )
+ return cat_dict
+
+
+def get_all_actions(rest_obj):
+ resp = rest_obj.invoke_request("GET", ACTIONS_URI)
+ actions = resp.json_data.get("value", [])
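+ # Map each action template name to its Id, Disabled flag, default parameter
+ # values and, per parameter, the allowed values: "true"/"false" for booleans,
+ # the TemplateParameterTypeDetails values otherwise (empty for plain strings).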
+ cmp_actions = dict((x.get("Name"), {"Id": x.get("Id"),
+ "Disabled": x.get("Disabled"),
+ "Parameters": dict((y.get("Name"), y.get("Value")) for y in x.get("ParameterDetails")),
+ "Type": dict((y.get("Name"),
+ ["true", "false"]
+ if y.get("Type") == "boolean"
+ else [z.get("Value") for z in y.get("TemplateParameterTypeDetails")
+ if y.get("Type") != "string"]) for y in x.get("ParameterDetails"))
+ }
+ ) for x in actions)
+ return cmp_actions
+
+
+def validate_time(module, time, time_format, time_type):
+ try:
+ ftime = datetime.strptime(time, time_format)
+ except ValueError:
+ module.exit_json(failed=True, msg=INVALID_TIME.format(time_type, time))
+ return ftime
+
+
+def get_ftime(module, inp_schedule, time_type, time_interval):
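+ # Combine the date_from/date_to and time_from/time_to inputs into the
+ # "%Y-%m-%d %H:%M:%S.%f" string expected by the schedule payload, returning
+ # both the formatted string and the parsed datetime (None when no date is given).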
+ def_time = "00:00"
+ time_format = "%Y-%m-%d %H:%M:%S.%f"
+ hhmm = inp_schedule.get(f"time_{time_type}") if time_interval else def_time
+ date_x = inp_schedule.get(f"date_{time_type}")
+ time_x = None
+ if date_x:
+ dtime = f"{date_x} {hhmm}:00.000"
+ time_x = validate_time(module, dtime, time_format, time_type)
+ elif time_interval:
+ dtime = f"{hhmm}:00.000"
+ else:
+ dtime = ""
+ return dtime, time_x
+
+
+def get_schedule_payload(module):
+ schedule_payload = {}
+ inp_schedule = module.params.get('date_and_time')
+ if inp_schedule:
+ time_interval = bool(inp_schedule.get('time_interval'))
+ schedule_payload['Interval'] = time_interval
+ schedule_payload["StartTime"], start_time_x = get_ftime(module, inp_schedule, "from", time_interval)
+ schedule_payload["EndTime"], end_time_x = get_ftime(module, inp_schedule, "to", time_interval)
+ if inp_schedule.get('date_to') and end_time_x < start_time_x:
+ module.exit_json(failed=True, msg=END_START_TIME.format(end_time_x, start_time_x))
+ weekdays = {'monday': 'mon', 'tuesday': 'tue', 'wednesday': 'wed', 'thursday': 'thu', 'friday': 'fri',
+ 'saturday': 'sat', 'sunday': 'sun'}
+ inp_week_list = ['*']
+ cron_sep = ","
+ if inp_schedule.get('days'):
+ week_order = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']
+ inp_week_list = sorted(list(set(inp_schedule.get('days'))), key=week_order.index)
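+ # Build the cron expression; only the day-of-week field is populated from
+ # the 'days' input, every other field is left as a wildcard ('?' marks the
+ # unused day-of-month field).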
+ schedule_payload["CronString"] = f"* * * ? * {cron_sep.join([weekdays.get(x, '*') for x in inp_week_list])} *"
+ return {"Schedule": schedule_payload} if schedule_payload else {}
+
+
+def create_action_payload(inp_k, inp_val, ref_actions, module):
+ if ref_actions.get(inp_k).get('Disabled'):
+ module.exit_json(failed=True, msg=DISABLED_ACTION.format(inp_k))
+ pld = {
+ 'TemplateId': ref_actions.get(inp_k).get('Id'),
+ 'Name': inp_k,
+ 'ParameterDetails': {}
+ }
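+ # Reject any parameter names that are not defined by the action template.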
+ diff = set(inp_val.keys()) - set(ref_actions.get(inp_k).get('Parameters').keys())
+ if diff:
+ module.exit_json(failed=True,
+ msg=ACTION_INVALID_PARAM.format(
+ inp_k, SEPARATOR.join(diff), SEPARATOR.join(ref_actions.get(inp_k).get('Parameters').keys())))
+ for sub_k, sub_val in inp_val.items():
+ valid_values = ref_actions.get(inp_k).get('Type').get(sub_k)
+ if valid_values:
+ if str(sub_val).lower() not in valid_values:
+ module.exit_json(failed=True, msg=ACTION_INVALID_VALUE.format(inp_k, sub_val, sub_k, SEPARATOR.join(valid_values)))
+ else:
+ inp_val[sub_k] = str(sub_val).lower() if str(sub_val).lower() in ("true", "false") else sub_val
+ pld['ParameterDetails'] = inp_val
+ return pld
+
+
+def get_actions_payload(module, rest_obj):
+ action_payload = {}
+ inp_actions = module.params.get('actions')
+ if inp_actions:
+ ref_actions = get_all_actions(rest_obj)
+ inp_dict = {x.get("action_name"): {y.get("name"): y.get("value")
+ for y in x.get("parameters", [])} for x in inp_actions}
+ if 'Ignore' in inp_dict:
+ action_payload['Ignore'] = {'TemplateId': ref_actions.get('Ignore').get('Id'),
+ 'Name': "Ignore",
+ 'ParameterDetails': {}}
+ else:
+ for inp_k, inp_val in inp_dict.items():
+ if inp_k in ref_actions:
+ action_payload[inp_k] = create_action_payload(inp_k, inp_val, ref_actions, module)
+ else:
+ module.exit_json(failed=True, msg=ACTION_DIS_EXIST.format(inp_k))
+ return {"Actions": action_payload} if action_payload else {}
+
+
+def load_subcategory_data(module, inp_sub_cat_list, sub_cat_dict, key_id, payload_cat, payload_subcat, inp_category):
+ if inp_sub_cat_list:
+ for sub_cat in inp_sub_cat_list:
+ if sub_cat in sub_cat_dict:
+ payload_cat.append(key_id)
+ payload_subcat.append(
+ sub_cat_dict.get(sub_cat))
+ else:
+ module.exit_json(failed=True, msg=SUBCAT_IN_CATEGORY.format(sub_cat, inp_category.get('category_name')))
+ else:
+ payload_cat.append(key_id)
+ payload_subcat.append(0)
+
+
+def load_category_data(module, catalog_name, category_list, category_det, payload_cat, payload_subcat):
+ if category_list:
+ for inp_category in category_list:
+ if inp_category.get('category_name') in category_det:
+ resp_category_dict = category_det.get(inp_category.get('category_name'))
+ key_id = list(resp_category_dict.keys())[0]
+ sub_cat_dict = resp_category_dict.get(key_id)
+ inp_sub_cat_list = inp_category.get('sub_category_names')
+ load_subcategory_data(module, inp_sub_cat_list, sub_cat_dict, key_id, payload_cat, payload_subcat, inp_category)
+ else:
+ module.exit_json(failed=True, msg=CATEGORY_IN_CATALOG.format(inp_category.get('category_name'), catalog_name))
+ else:
+ payload_cat.append(0)
+ payload_subcat.append(0)
+
+
+def get_category_payloadlist(module, inp_catalog_list, cdict_ref):
+ payload_cat_list = []
+ for inp_catalog in inp_catalog_list:
+ new_dict = {}
+ catalog_name = inp_catalog.get('catalog_name')
+ if catalog_name in cdict_ref:
+ new_dict["CatalogName"] = catalog_name
+ payload_cat = []
+ category_det = cdict_ref.get(catalog_name)
+ payload_subcat = []
+ category_list = inp_catalog.get('catalog_category')
+ load_category_data(module, catalog_name, category_list, category_det, payload_cat, payload_subcat)
+ new_dict["Categories"] = payload_cat
+ new_dict['SubCategories'] = payload_subcat
+ else:
+ module.exit_json(failed=True, msg=CATALOG_DIS_EXIST.format(catalog_name))
+ payload_cat_list.append(new_dict)
+ return payload_cat_list
+
+
+def get_category_payload(module, rest_obj):
+ inp_catalog_list = module.params.get('category')
+ cdict_ref = get_category_data_tree(rest_obj)
+ if not cdict_ref:
+ module.exit_json(failed=True, msg=CATEGORY_FETCH_FAILED)
+ payload_cat_list = get_category_payloadlist(module, inp_catalog_list, cdict_ref)
+ return payload_cat_list
+
+
+def get_message_payload(module):
+ mlist = []
+ if module.params.get('message_file'):
+ csvpath = module.params.get('message_file')
+ if not os.path.isfile(csvpath):
+ module.exit_json(
+ failed=True, msg=CSV_PATH.format(csvpath))
+ with open(csvpath) as csvfile:
+ spamreader = csv.reader(csvfile)
+ for row in spamreader:
+ mlist.extend(row)
+ if mlist and mlist[0].lower().startswith('message'):
+ mlist.pop(0)
+ elif module.params.get('message_ids'):
+ mlist = module.params.get('message_ids')
+ return mlist
+
+
+def get_category_or_message(module, rest_obj):
+ cat_payload = {"Catalogs": {},
+ "MessageIds": []}
+ cat_msg_provided = False
+ if module.params.get('category'):
+ payload_cat_list = get_category_payload(module, rest_obj)
+ cat_dict = dict((x.get('CatalogName'), x) for x in payload_cat_list)
+ cat_msg_provided = True
+ cat_payload['Catalogs'] = cat_dict
+ else:
+ mlist = get_message_payload(module)
+ if mlist:
+ validate_ome_data(module, rest_obj, mlist, 'MessageId', ('MessageId',), MESSAGES_URI, 'messages')
+ cat_msg_provided = True
+ cat_payload['MessageIds'] = list(set(mlist))
+ cat_payload['MessageIds'].sort()
+ if not cat_msg_provided:
+ cat_payload = {}
+ return cat_payload
+
+
+def get_severity_payload(module, rest_obj):
+ try:
+ resp = rest_obj.invoke_request("GET", SEVERITY_URI)
+ severity_dict = dict((x.get('Name').lower(), x.get('Id'))
+ for x in resp.json_data.get("Value"))
+ except Exception:
+ severity_dict = {"unknown": 1, "info": 2,
+ "normal": 4, "warning": 8, "critical": 16}
+ inp_sev_list = module.params.get('severity')
+ sev_payload = {}
+ if inp_sev_list:
+ if 'all' in inp_sev_list:
+ sev_payload = {"Severities": list(severity_dict.values())}
+ else:
+ sev_payload = {"Severities": [
+ severity_dict.get(x) for x in inp_sev_list]}
+ sev_payload['Severities'].sort()
+ return sev_payload
+
+
+def transform_existing_policy_data(policy):
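+ # Normalize the existing policy's PolicyData into the same shape that the
+ # payload builder functions produce (dicts keyed by name, sorted lists) so
+ # that recursive_diff can compare the two structures field by field.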
+ pdata = policy.get('PolicyData')
+ undiscovered = pdata.get('UndiscoveredTargets')
+ if undiscovered:
+ pdata['UndiscoveredTargets'] = [x.get('TargetAddress') for x in undiscovered]
+ actions = pdata.get('Actions')
+ if actions:
+ for action in actions:
+ if action.get('Name') == "RemoteCommand":
+ # Special-case handling for RemoteCommand: OME appends "1" to the "remotecommandaction" parameter name after every POST call, so strip the trailing "1" here
+ action['ParameterDetails'] = dict((str(act_param.get('Name')).rstrip('1'), act_param.get('Value'))
+ for act_param in action.get('ParameterDetails', []))
+ else:
+ action['ParameterDetails'] = dict((act_param.get('Name'), act_param.get('Value'))
+ for act_param in action.get('ParameterDetails', []))
+ action.pop('Id', None)
+ pdata['Actions'] = dict((x.get('Name'), x) for x in actions)
+ catalogs = pdata.get('Catalogs')
+ pdata['Catalogs'] = dict((x.get('CatalogName'), x) for x in catalogs)
+ # for Devices, DeviceTypes, Groups, Severities
+ for pol_data in pdata.values():
+ if isinstance(pol_data, list):
+ pol_data.sort()
+ messages = pdata.get('MessageIds', [])
+ pdata['MessageIds'] = [m.strip("'") for m in messages]
+
+
+def format_payload(policy):
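+ # Convert the internal dict-based representation back into the list-based
+ # structures the OME alert-policy API expects before the POST/PUT call.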
+ pdata = policy.get('PolicyData')
+ undiscovered = pdata.get('UndiscoveredTargets')
+ if undiscovered:
+ pdata['UndiscoveredTargets'] = [{"TargetAddress": x} for x in undiscovered]
+ actions = pdata.get('Actions')
+ if actions:
+ for action in actions.values():
+ action['ParameterDetails'] = [
+ {"Name": k, "Value": v} for k, v in action.get('ParameterDetails', {}).items()]
+ pdata['Actions'] = list(actions.values())
+ catalogs = pdata.get('Catalogs')
+ pdata['Catalogs'] = list(catalogs.values())
+
+
+def compare_policy_payload(module, rest_obj, policy):
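+ # Return the number of sections that differ between the requested
+ # configuration and the existing policy; the policy dict is updated in
+ # place so it can be reused as the PUT payload.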
+ diff = 0
+ new_payload = {}
+ new_policy_data = {}
+ new_payload["PolicyData"] = new_policy_data
+ transform_existing_policy_data(policy)
+ payload_items = []
+ payload_items.append(get_target_payload(module, rest_obj))
+ payload_items.append(get_category_or_message(module, rest_obj))
+ payload_items.append(get_actions_payload(module, rest_obj))
+ payload_items.append(get_schedule_payload(module))
+ payload_items.append(get_severity_payload(module, rest_obj))
+ for payload in payload_items:
+ if payload:
+ new_policy_data.update(payload)
+ diff_tuple = recursive_diff(new_payload['PolicyData'], policy['PolicyData'])
+ if diff_tuple and diff_tuple[0]:
+ diff = diff + 1
+ policy['PolicyData'].update(payload)
+ if module.params.get('new_name'):
+ new_payload['Name'] = module.params.get('new_name')
+ if module.params.get('description'):
+ new_payload['Description'] = module.params.get('description')
+ if module.params.get('enable') is not None:
+ new_payload['Enabled'] = module.params.get('enable')
+ policy = strip_substr_dict(policy)
+ new_payload.pop('PolicyData', None)
+ diff_tuple = recursive_diff(new_payload, policy)
+ if diff_tuple and diff_tuple[0]:
+ diff = diff + 1
+ policy.update(diff_tuple[0])
+ return diff
+
+
+def get_policy_data(module, rest_obj):
+ policy_data = {}
+ target = get_target_payload(module, rest_obj)
+ if not target:
+ module.exit_json(failed=True, msg=INVALID_TARGETS)
+ policy_data.update(target)
+ cat_msg = get_category_or_message(module, rest_obj)
+ if not cat_msg:
+ module.exit_json(failed=True, msg=INVALID_CATEGORY_MESSAGE)
+ policy_data.update(cat_msg)
+ schedule = get_schedule_payload(module)
+ if not schedule:
+ module.exit_json(failed=True, msg=INVALID_SCHEDULE)
+ policy_data.update(schedule)
+ actions = get_actions_payload(module, rest_obj)
+ if not actions:
+ module.exit_json(failed=True, msg=INVALID_ACTIONS)
+ policy_data.update(actions)
+ sev_payload = get_severity_payload(module, rest_obj)
+ if not sev_payload.get('Severities'):
+ module.exit_json(failed=True, msg=INVALID_SEVERITY)
+ policy_data.update(sev_payload)
+ return policy_data
+
+
+def remove_policy(module, rest_obj, policies):
+ id_list = [x.get("Id")
+ for x in policies if x.get("DefaultPolicy") is False]
+ if len(id_list) != len(policies):
+ module.exit_json(failed=True,
+ msg=DEFAULT_POLICY_DELETE.format(SEPARATOR.join([x.get('Name') for x in policies if x.get('DefaultPolicy')])))
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ rest_obj.invoke_request("POST", REMOVE_URI, data={
+ "AlertPolicyIds": id_list})
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format("delete"))
+
+
+def enable_toggle_policy(module, rest_obj, policies):
+ enabler = module.params.get('enable')
+ id_list = [x.get("Id") for x in policies if x.get("Enabled") is not enabler]
+ if not id_list:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ uri = ENABLE_URI if enabler else DISABLE_URI
+ rest_obj.invoke_request("POST", uri, data={"AlertPolicyIds": id_list})
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format("enable" if enabler else "disable"))
+
+
+def update_policy(module, rest_obj, policy):
+ diff = compare_policy_payload(module, rest_obj, policy)
+ if not diff:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ format_payload(policy)
+ resp = rest_obj.invoke_request("PUT", f"{POLICIES_URI}({policy.get('Id')})", data=policy)
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format("update"),
+ status=resp.json_data)
+
+
+def create_policy(module, rest_obj):
+ create_payload = {}
+ policy_data = get_policy_data(module, rest_obj)
+ create_payload['PolicyData'] = policy_data
+ create_payload['Name'] = module.params.get('name')[0]
+ create_payload['Description'] = module.params.get('description')
+ create_payload['Enabled'] = module.params.get('enable') if module.params.get('enable') is not None else True
+ if module.check_mode:
+ module.exit_json(msg=CHANGES_MSG, changed=True)
+ format_payload(create_payload)
+ resp = rest_obj.invoke_request("POST", POLICIES_URI, data=create_payload)
+ module.exit_json(changed=True, msg=SUCCESS_MSG.format(
+ "create"), status=resp.json_data)
+
+
+def handle_policy_enable(module, rest_obj, policies, name_list):
+ if len(policies) == len(name_list):
+ enable_toggle_policy(module, rest_obj, policies)
+ else:
+ invalid_policies = set(name_list) - set(x.get("Name") for x in policies)
+ enabler = module.params.get('enable')
+ module.exit_json(failed=True, msg=POLICY_ENABLE_MISSING.format("enable" if enabler else "disable", SEPARATOR.join(invalid_policies)))
+
+
+def handle_absent_state(module, rest_obj, policies):
+ if policies:
+ remove_policy(module, rest_obj, policies)
+ else:
+ module.exit_json(msg=NO_POLICY_EXIST)
+
+
+def handle_present_state(module, rest_obj, policies, name_list, present_args):
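+ # When 'enable' is the only present-state option supplied, only toggle the
+ # named policies; otherwise a single policy name is required and the policy
+ # is either updated (if it exists) or created.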
+ present_args.remove('enable')
+ enable = module.params.get('enable')
+ if not any(module.params.get(prm) is not None for prm in present_args) and enable is not None:
+ handle_policy_enable(module, rest_obj, policies, name_list)
+ if len(name_list) > 1:
+ module.exit_json(failed=True, msg=MULTIPLE_POLICIES)
+ if policies:
+ update_policy(module, rest_obj, policies[0])
+ else:
+ create_policy(module, rest_obj)
+
+
+def main():
+ specs = {
+ "name": {'type': 'list', 'elements': 'str', 'required': True},
+ "state": {'default': 'present', 'choices': ['present', 'absent'], 'type': 'str'},
+ "enable": {'type': 'bool'},
+ "new_name": {'type': 'str'},
+ "description": {'type': 'str'},
+ "device_service_tag": {'type': 'list', 'elements': 'str'},
+ "device_group": {'type': 'list', 'elements': 'str'},
+ "specific_undiscovered_devices": {'type': 'list', 'elements': 'str'},
+ "any_undiscovered_devices": {'type': 'bool'},
+ "all_devices": {'type': 'bool'},
+ "category": {'type': 'list', 'elements': 'dict',
+ 'options': {'catalog_name': {'type': 'str', 'required': True},
+ 'catalog_category': {'type': 'list', 'elements': 'dict',
+ 'options': {'category_name': {'type': 'str'},
+ 'sub_category_names': {'type': 'list', 'elements': 'str'}
+ },
+ }
+ }
+ },
+ "message_ids": {'type': 'list', 'elements': 'str'},
+ "message_file": {'type': 'path'},
+ "date_and_time": {'type': 'dict',
+ 'options': {'date_from': {'type': 'str', 'required': True},
+ 'date_to': {'type': 'str'},
+ 'time_from': {'type': 'str'},
+ 'time_to': {'type': 'str'},
+ 'days': {'type': 'list', 'elements': 'str',
+ 'choices': ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday']},
+ 'time_interval': {'type': 'bool'}
+ },
+ 'required_if': [['time_interval', True, ('time_from', 'time_to')]]
+ },
+ "severity": {'type': 'list', 'elements': 'str', 'choices': ['info', 'normal', 'warning', 'critical', 'unknown', 'all']},
+ "actions": {'type': 'list', 'elements': 'dict',
+ 'options': {'action_name': {'type': 'str', 'required': True},
+ 'parameters': {'type': 'list', 'elements': 'dict', 'default': [],
+ 'options': {'name': {'type': 'str'},
+ 'value': {'type': 'str'}}
+ }
+ }
+ }
+ }
+ specs.update(ome_auth_params)
+ present_args = ['enable', 'new_name', 'description', 'device_service_tag', 'device_group',
+ 'specific_undiscovered_devices', 'any_undiscovered_devices', 'all_devices',
+ 'category', 'message_ids', 'message_file', 'date_and_time', 'severity', 'actions']
+ module = AnsibleModule(
+ argument_spec=specs,
+ required_if=[['state', 'present', present_args, True]],
+ mutually_exclusive=[('device_service_tag', 'device_group', 'any_undiscovered_devices', 'specific_undiscovered_devices', 'all_devices',),
+ ('message_ids', 'message_file', 'category',)],
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ state = module.params.get('state')
+ name_list = list(set(module.params.get('name')))
+ policies = get_alert_policies(rest_obj, name_list)
+ if state == 'absent':
+ handle_absent_state(module, rest_obj, policies)
+ else:
+ handle_present_state(module, rest_obj, policies, name_list, present_args)
+ except HTTPError as err:
+ module.exit_json(failed=True, msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(failed=True, msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py
new file mode 100644
index 000000000..0d1f0c726
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_actions_info.py
@@ -0,0 +1,290 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_actions_info
+short_description: Get information on actions of alert policies.
+version_added: "8.2.0"
+description:
+ - This module retrieves the information on actions of alert policies for OpenManage Enterprise
+ and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Get action details of all alert policies.
+ dellemc.openmanage.ome_alert_policies_actions_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+'''
+
+RETURN = r'''
+---
+actions:
+ type: list
+ description: Returns the alert policy action information collected from the device.
+ returned: success
+ sample: [
+ {
+ "Name": "Email",
+ "Description": "Email",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "subject",
+ "Value": "Device Name: $name, Device IP Address: $ip, Severity: $severity",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 2,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 3,
+ "Name": "from",
+ "Value": "admin1@dell.com",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 4,
+ "Name": "message",
+ "Value": "Event occurred for Device Name: $name,
+ Device IP Address: $ip, Service Tag: $identifier, UTC Time: $time, Severity: $severity, Message ID: $messageId, $message",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 60,
+ "Name": "Trap",
+ "Description": "Trap",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "localhost:162",
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "Id": 90,
+ "Name": "Syslog",
+ "Description": "Syslog",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "localhost.scomdev.com:555",
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ },
+ {
+ "Id": 2,
+ "Name": "localhost.scomdev.com:555",
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "Id": 100,
+ "Name": "Ignore",
+ "Description": "Ignore",
+ "Disabled": false,
+ "ParameterDetails": []
+ },
+ {
+ "Id": 70,
+ "Name": "SMS",
+ "Description": "SMS",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 110,
+ "Name": "PowerControl",
+ "Description": "Power Control Action Template",
+ "Disabled": false,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "powercontrolaction",
+ "Value": "poweroff",
+ "Type": "singleSelect",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "option",
+ "Value": "powercycle"
+ },
+ {
+ "Name": "option",
+ "Value": "poweroff"
+ },
+ {
+ "Name": "option",
+ "Value": "poweron"
+ },
+ {
+ "Name": "option",
+ "Value": "gracefulshutdown"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 111,
+ "Name": "RemoteCommand",
+ "Description": "RemoteCommand",
+ "Disabled": true,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "remotecommandaction",
+ "Value": null,
+ "Type": "singleSelect",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "Id": 112,
+ "Name": "Mobile",
+ "Description": "Mobile",
+ "Disabled": false,
+ "ParameterDetails": []
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+msg:
+ description: Status of the alert policies actions fetch operation.
+ returned: always
+ type: str
+ sample: Successfully retrieved alert policies actions information.
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
+
+ACTIONS_URI = "AlertService/AlertActionTemplates"
+SUCCESSFUL_MSG = "Successfully retrieved alert policies actions information."
+EMPTY_ALERT_POLICY_ACTION_MSG = "No alert policy action information was found."
+
+
+def main():
+ """ function to retrieve the information on actions of alert policies """
+ specs = ome_auth_params
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ actions_info = get_all_data_with_pagination(rest_obj, ACTIONS_URI)
+ if not actions_info.get("report_list", []):
+ module.exit_json(msg=EMPTY_ALERT_POLICY_ACTION_MSG, actions=[])
+ actions = remove_key(actions_info['report_list'])
+ module.exit_json(msg=SUCCESSFUL_MSG, actions=actions)
+ except HTTPError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py
new file mode 100644
index 000000000..6d3151fe9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_category_info.py
@@ -0,0 +1,316 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_category_info
+short_description: Retrieves information of all OME alert policy categories.
+version_added: "8.2.0"
+description: This module retrieves all the alert policy categories for OpenManage Enterprise and OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Jagadeesh N V(@jagadeeshnv)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve information about all the OME alert policy categories
+ dellemc.openmanage.ome_alert_policies_category_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of the alert policies category fetch operation.
+ returned: always
+ sample: "Successfully retrieved alert policies category information."
+categories:
+ type: list
+ description: Information about the alert categories.
+ returned: always
+ sample: [{
+ "CategoriesDetails": [
+ {
+ "CatalogName": "Application",
+ "Id": 5,
+ "Name": "Configuration",
+ "SubCategoryDetails": [
+ {
+ "Description": "Application",
+ "Id": 85,
+ "Name": "Application"
+ },
+ {
+ "Description": "Users",
+ "Id": 35,
+ "Name": "Users"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Id": 7,
+ "Name": "Miscellaneous",
+ "SubCategoryDetails": [
+ {
+ "Description": "Miscellaneous",
+ "Id": 20,
+ "Name": "Miscellaneous"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Id": 2,
+ "Name": "Storage",
+ "SubCategoryDetails": [
+ {
+ "Description": "Devices",
+ "Id": 90,
+ "Name": "Devices"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Application",
+ "Id": 3,
+ "Name": "Updates",
+ "SubCategoryDetails": [
+ {
+ "Description": "Application",
+ "Id": 85,
+ "Name": "Application"
+ },
+ {
+ "Description": "Firmware",
+ "Id": 112,
+ "Name": "Firmware"
+ }
+ ]
+ }
+ ],
+ "IsBuiltIn": true,
+ "Name": "Application"
+ },
+ {
+ "CategoriesDetails": [
+ {
+ "CatalogName": "Dell Storage",
+ "Id": 2,
+ "Name": "Storage",
+ "SubCategoryDetails": [
+ {
+ "Description": "Other",
+ "Id": 7700,
+ "Name": "Other"
+ }
+ ]
+ },
+ {
+ "CatalogName": "Dell Storage",
+ "Id": 1,
+ "Name": "System Health",
+ "SubCategoryDetails": [
+ {
+ "Description": "Other",
+ "Id": 7700,
+ "Name": "Other"
+ },
+ {
+ "Description": "Storage",
+ "Id": 18,
+ "Name": "Storage"
+ }
+ ]
+ }
+ ],
+ "IsBuiltIn": true,
+ "Name": "Dell Storage"
+ },
+ {
+ "CategoriesDetails": [
+ {
+ "CatalogName": "iDRAC",
+ "Id": 4,
+ "Name": "Audit",
+ "SubCategoryDetails": [
+ {
+ "Description": "Auto System Reset",
+ "Id": 41,
+ "Name": "Auto System Reset"
+ },
+ {
+ "Description": "UEFI Event",
+ "Id": 55,
+ "Name": "UEFI Event"
+ },
+ {
+ "Description": "User Tracking",
+ "Id": 56,
+ "Name": "User Tracking"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 5,
+ "Name": "Configuration",
+ "SubCategoryDetails": [
+ {
+ "Description": "Auto-Discovery",
+ "Id": 49,
+ "Name": "Auto-Discovery"
+ },
+ {
+ "Description": "vFlash Event",
+ "Id": 66,
+ "Name": "vFlash Event"
+ },
+ {
+ "Description": "Virtual Console",
+ "Id": 7,
+ "Name": "Virtual Console"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 2,
+ "Name": "Storage",
+ "SubCategoryDetails": [
+ {
+ "Description": "Battery Event",
+ "Id": 108,
+ "Name": "Battery Event"
+ },
+ {
+ "Description": "Virtual Disk",
+ "Id": 46,
+ "Name": "Virtual Disk"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 1,
+ "Name": "System Health",
+ "SubCategoryDetails": [
+ {
+ "Description": "Amperage",
+ "Id": 67,
+ "Name": "Amperage"
+ },
+ {
+ "Description": "Auto System Reset",
+ "Id": 41,
+ "Name": "Auto System Reset"
+ },
+ {
+ "Description": "Voltage",
+ "Id": 40,
+ "Name": "Voltage"
+ }
+ ]
+ },
+ {
+ "CatalogName": "iDRAC",
+ "Id": 6,
+ "Name": "Work Notes",
+ "SubCategoryDetails": [
+ {
+ "Description": "BIOS Management",
+ "Id": 54,
+ "Name": "BIOS Management"
+ }
+ ]
+ }
+ ],
+ "IsBuiltIn": true,
+ "Name": "iDRAC"
+ }
+]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
+
+ALERT_CATEGORY_URI = "AlertService/AlertCategories"
+SUCCESS_MSG = "Successfully retrieved alert policies category information."
+
+
+def get_formatted_categories(rest_obj):
+ report = get_all_data_with_pagination(rest_obj, ALERT_CATEGORY_URI)
+ categories = remove_key(report.get("report_list", []))
+ return categories
+
+
+def main():
+ specs = ome_auth_params
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ categories = get_formatted_categories(rest_obj)
+ module.exit_json(msg=SUCCESS_MSG, categories=categories)
+ except HTTPError as err:
+ module.exit_json(failed=True, msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ module.exit_json(failed=True, msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py
new file mode 100644
index 000000000..d9a97c070
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_info.py
@@ -0,0 +1,201 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_info
+short_description: Retrieves information of one or more OME alert policies.
+version_added: "8.2.0"
+description:
+ - This module retrieves the information of alert policies for OpenManage Enterprise
+ and OpenManage Enterprise Modular.
+ - A list of information about a specific OME alert policy is returned when the policy name is provided.
+ - A list of all the OME alert policies with their information is returned when the policy name is not provided.
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ policy_name:
+ description: Name of the policy.
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author: "Abhishek Sinha(@ABHISHEK-SINHA10)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve information about all OME alert policies.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve information about a specific OME alert policy using the policy name.
+ dellemc.openmanage.ome_alert_policies_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ policy_name: "Mobile Push Notification - Critical Alerts"
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Status of the alert policies info fetch operation.
+ returned: always
+ sample: "Successfully retrieved all the OME alert policies information."
+policies:
+ type: list
+ description: Information about the OME alert policies.
+ returned: success
+ sample: [
+ {
+ "Id": 10006,
+ "Name": "Mobile Push Notification - Critical Alerts",
+ "Description": "This policy is applicable to critical alerts. Associated actions will be taken when a critical alert is received.",
+ "Enabled": true,
+ "DefaultPolicy": true,
+ "PolicyData": {
+ "Catalogs": [],
+ "Severities": [
+ 16
+ ],
+ "MessageIds": [],
+ "Devices": [],
+ "DeviceTypes": [],
+ "Groups": [],
+ "AllTargets": false,
+ "Schedule": {
+ "StartTime": null,
+ "EndTime": null,
+ "CronString": null,
+ "Interval": false
+ },
+ "Actions": [
+ {
+ "Id": 5,
+ "Name": "Mobile",
+ "ParameterDetails": [],
+ "TemplateId": 112
+ }
+ ],
+ "UndiscoveredTargets": []
+ },
+ "State": true,
+ "Visible": true,
+ "Owner": null,
+ }
+]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+
+ALERT_POLICY_URI = "AlertService/AlertPolicies"
+MODULE_SUCCESS_MESSAGE_ALL = "Successfully retrieved all the OME alert policies information."
+MODULE_SUCCESS_MESSAGE_SPECIFIC = "Successfully retrieved {0} OME alert policy information."
+POLICY_NAME_NOT_FOUND_OR_EMPTY = "The OME alert policy name {0} provided does not exist or is empty."
+
+
+class OMEAlertPolicyInfo:
+
+ def __init__(self) -> None:
+ self.module = get_module_parameters()
+
+ def get_all_alert_policy_info(self, rest_obj) -> dict:
+ resp = rest_obj.invoke_request("GET", ALERT_POLICY_URI)
+ value = resp.json_data["value"]
+ output_all = {'msg': MODULE_SUCCESS_MESSAGE_ALL, 'value': remove_key(value)}
+ return output_all
+
+ def get_alert_policy_info(self, rest_obj) -> dict:
+ policy_name = self.module.params.get("policy_name")
+ if policy_name is not None:
+ output_not_found_or_empty = {'msg': POLICY_NAME_NOT_FOUND_OR_EMPTY.format(policy_name),
+ 'value': []}
+ if policy_name == "":
+ return output_not_found_or_empty
+ policies = self.get_all_alert_policy_info(rest_obj)
+ for each_element in policies["value"]:
+ if each_element["Name"] == policy_name:
+ output_specific = {'msg': MODULE_SUCCESS_MESSAGE_SPECIFIC.format(policy_name),
+ 'value': [each_element]}
+ return output_specific
+ return output_not_found_or_empty
+ return self.get_all_alert_policy_info(rest_obj)
+
+ def perform_module_operation(self) -> None:
+ try:
+ with RestOME(self.module.params, req_session=True) as rest_obj:
+ result = self.get_alert_policy_info(rest_obj)
+ self.module.exit_json(msg=result['msg'], policies=result['value'])
+ except HTTPError as err:
+ self.module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ self.module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
+ self.module.exit_json(msg=str(err), failed=True)
+
+
+def get_module_parameters() -> AnsibleModule:
+ specs = {
+ "policy_name": {"type": 'str'}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(argument_spec=specs,
+ supports_check_mode=True)
+ return module
+
+
+def main():
+ obj = OMEAlertPolicyInfo()
+ obj.perform_module_operation()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py
new file mode 100644
index 000000000..577eac7d0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_alert_policies_message_id_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_alert_policies_message_id_info
+short_description: Get message ID information of alert policies.
+version_added: "8.2.0"
+description:
+ - "This module retrieves the message ID information of alert policies for OpenManage Enterprise
+ and OpenManage Enterprise Modular."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+requirements:
+ - "python >= 3.9.6"
+author: "Shivam Sharma (@ShivamSh3)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
+ or OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+ - This module supports IPv4 and IPv6 addresses.
+'''
+
+EXAMPLES = r'''
+---
+- name: Get message ID details of all alert policies
+ dellemc.openmanage.ome_alert_policies_message_id_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+'''
+
+RETURN = r'''
+---
+msg:
+ description: "Status of the alert policies message ids fetch operation."
+ returned: always
+ type: str
+ sample: "Successfully retrieved alert policies message ids information."
+message_ids:
+ type: list
+ description: Details of the message IDs.
+ returned: success
+ sample: [
+ {
+ "Category": "System Health",
+ "DetailedDescription": "The current sensor identified in the message has failed. This condition
+ can cause system performance issues and degradation in the monitoring capability of the system.",
+ "Message": "The ${0} sensor has failed, and the last recorded value by the sensor was ${1} A.",
+ "MessageId": "AMP400",
+ "Prefix": "AMP",
+ "RecommendedAction": "Check the Embedded System Management (ESM) Log for any sensor related faults.
+ If there is a failed sensor, replace the system board. For more information, contact your service provider.",
+ "SequenceNo": 400,
+ "Severity": "Critical",
+ "SubCategory": "Amperage"
+ },
+ {
+ "Category": "System Health",
+ "DetailedDescription": "The current sensor identified in the message has failed. This condition can cause
+ system performance issues and degradation in the monitoring capability of the system.",
+ "Message": "Unable to read the ${0} sensor value.",
+ "MessageId": "AMP401",
+ "Prefix": "AMP",
+ "RecommendedAction": "Check the Embedded System Management (ESM) Log for any sensor related faults. If
+ there is a failed sensor, replace the system board. For more information, contact your service provider.",
+ "SequenceNo": 401,
+ "Severity": "Warning",
+ "SubCategory": "Amperage"
+ }
+]
+error_info:
+ type: dict
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key, get_all_data_with_pagination
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+
+ALERT_MESSAGE_URI = "AlertService/AlertMessageDefinitions"
+SUCCESSFUL_MSG = "Successfully retrieved alert policies message ids information."
+EMPTY_MSG = "No alert policies message id information were found."
+
+
+def main():
+ specs = ome_auth_params
+ module = AnsibleModule(
+ argument_spec=specs,
+ supports_check_mode=True
+ )
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ message_id_info = get_all_data_with_pagination(rest_obj, ALERT_MESSAGE_URI)
+ if not message_id_info.get("report_list", []):
+ module.exit_json(msg=EMPTY_MSG, message_ids=[])
+ message_ids = remove_key(message_id_info['report_list'])
+ module.exit_json(msg=SUCCESSFUL_MSG, message_ids=message_ids)
+ except HTTPError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.exit_json(msg=str(err), failed=True)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
index 66a8b26c0..58572bae0 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_smtp.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -38,8 +38,8 @@ options:
enable_authentication:
description:
- Enable or disable authentication to access the SMTP server.
- - The I(credentials) are mandatory if I(enable_authentication) is C(True).
- - The module will always report change when this is C(True).
+ - The I(credentials) are mandatory if I(enable_authentication) is C(true).
+ - The module will always report change when this is C(true).
type: bool
required: true
credentials:
@@ -59,8 +59,8 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - The module will always report change when I(enable_authentication) is C(True).
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise
+ - The module will always report change when I(enable_authentication) is C(true).
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
or OpenManage Enterprise Modular.
- This module support C(check_mode).
author:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
index 12c212450..a72093752 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_alerts_syslog.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,14 +31,14 @@ options:
description: The ID of the syslog server.
type: int
choices: [1, 2, 3, 4]
- required: True
+ required: true
enabled:
description: Enable or disable syslog forwarding.
type: bool
destination_address:
description:
- The IP address, FQDN or hostname of the syslog server.
- - This is required if I(enabled) is C(True).
+ - This is required if I(enabled) is C(true).
type: str
port_number:
description: The UDP port number of the syslog server.
@@ -48,7 +48,7 @@ requirements:
author:
- Jagadeesh N V(@jagadeeshnv)
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise or Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
index 3c9b26994..60f170f76 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_certificate.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -54,13 +54,22 @@ options:
email:
description: Email associated with the issuer. This option is applicable for C(generate_csr).
type: str
+ subject_alternative_names:
+ description:
+ - Subject alternative name required for the certificate signing request generation.
+ - Supports up to 4 comma separated values starting from primary, secondary, Tertiary and Quaternary values.
+ type: str
+ version_added: 8.1.0
upload_file:
type: str
description: Local path of the certificate file to be uploaded. This option is applicable for C(upload).
Once the certificate is uploaded, OpenManage Enterprise cannot be accessed for a few seconds.
requirements:
- - "python >= 3.8.6"
-author: "Felix Stephen (@felixs88)"
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
+ - "Jennifer John (@Jennifer-John)"
'''
EXAMPLES = r'''
@@ -80,6 +89,22 @@ EXAMPLES = r'''
country: "US"
email: "support@dell.com"
+- name: Generate a certificate signing request with subject alternative names
+ dellemc.openmanage.ome_application_certificate:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ distinguished_name: "hostname.com"
+ subject_alternative_names: "hostname1.chassis.com,hostname2.chassis.com"
+ department_name: "Remote Access Group"
+ business_name: "Dell Inc."
+ locality: "Round Rock"
+ country_state: "Texas"
+ country: "US"
+ email: "support@dell.com"
+
- name: Upload the certificate
dellemc.openmanage.ome_application_certificate:
hostname: "192.168.0.1"
@@ -134,7 +159,6 @@ error_info:
import json
import os
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -151,7 +175,8 @@ def get_resource_parameters(module):
"DepartmentName": module.params["department_name"],
"BusinessName": module.params["business_name"],
"Locality": module.params["locality"], "State": module.params["country_state"],
- "Country": module.params["country"], "Email": module.params["email"]}
+ "Country": module.params["country"], "Email": module.params["email"],
+ "San": get_san(module.params["subject_alternative_names"])}
else:
file_path = module.params["upload_file"]
uri = csr_uri.format("UploadCertificate")
@@ -163,6 +188,13 @@ def get_resource_parameters(module):
return method, uri, payload
+def get_san(subject_alternative_names):
+ if not subject_alternative_names:
+ return subject_alternative_names
+
+ return subject_alternative_names.replace(" ", "")
+
+
def main():
specs = {
"command": {"type": "str", "required": False,
@@ -175,6 +207,7 @@ def main():
"country": {"required": False, "type": "str"},
"email": {"required": False, "type": "str"},
"upload_file": {"required": False, "type": "str"},
+ "subject_alternative_names": {"required": False, "type": "str"}
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -202,7 +235,7 @@ def main():
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, SSLError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
module.fail_json(msg=str(err))
except Exception as err:
module.fail_json(msg=str(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
index 67b00dc8b..65b1ae271 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_console_preferences.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -158,7 +158,7 @@ EXAMPLES = r'''
common_mac_addresses: "::"
server_initiated_discovery:
device_discovery_approval_policy: Automatic
- set_trap_destination: True
+ set_trap_destination: true
mx7000_onboarding_preferences: all
builtin_appliance_share:
share_options: CIFS
@@ -213,7 +213,7 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
server_initiated_discovery:
device_discovery_approval_policy: Automatic
- set_trap_destination: True
+ set_trap_destination: true
mx7000_onboarding_preferences: chassis
email_sender_settings: "admin@dell.com"
trap_forwarding_format: Original
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
index 03eef19ed..ab8814a42 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_address.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -428,7 +428,7 @@ import socket
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
IP_CONFIG = "ApplicationService/Network/AddressConfiguration"
@@ -598,7 +598,7 @@ def get_network_config_data(rest_obj, module):
return int_adp, "POST", POST_IP_CONFIG
else:
return pri_adp, "POST", POST_IP_CONFIG
- except HTTPError as err:
+ except HTTPError:
pass
except Exception as err:
raise err
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
index 3659d8a3d..0ca58de09 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_proxy.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -60,7 +60,7 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support C(check_mode).
'''
@@ -147,7 +147,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
PROXY_CONFIG = "ApplicationService/Network/ProxyConfiguration"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
index 2dfd13a58..91a0de1d7 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_settings.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -83,7 +83,7 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise
or OpenManage Enterprise Modular.
- To configure other network settings such as network address, web server, and so on, refer to the respective
OpenManage Enterprise application network setting modules.
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
index 381ef3191..baf533c0a 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_time.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,7 +59,7 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -141,7 +141,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
TIME_CONFIG = "ApplicationService/Network/TimeConfiguration"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
index adee29dc6..9e6cdffd5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_network_webserver.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -39,7 +39,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
index d2b23c256..af869fb16 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_application_security_settings.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -75,7 +75,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 120
fips_mode_enable:
@@ -96,7 +96,7 @@ author:
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
@@ -148,7 +148,7 @@ EXAMPLES = r'''
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
- fips_mode_enable: yes
+ fips_mode_enable: true
'''
RETURN = r'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
index 6b89fea16..adcc53566 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_chassis_slots.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -44,7 +44,7 @@ options:
slot_name:
type: str
description: Provide name for the slot.
- required: True
+ required: true
slot_options:
type: list
elements: dict
@@ -55,7 +55,7 @@ options:
chassis_service_tag:
type: str
description: Service tag of the chassis.
- required: True
+ required: true
slots:
type: list
elements: dict
@@ -66,17 +66,17 @@ options:
slot_number:
type: int
description: The slot number of the slot to be renamed.
- required: True
+ required: true
slot_name:
type: str
description: Provide name for the slot.
- required: True
+ required: true
requirements:
- "python >= 3.8.6"
notes:
- "This module initiates the refresh inventory task. It may take a minute for new names to be reflected.
If the task exceeds 300 seconds to refresh, the task times out."
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
index 5cac7352d..fa0f2a90a 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_baseline.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.6.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -102,12 +102,14 @@ options:
job_wait_timeout:
description:
       - The maximum wait time of I(job_wait) in seconds. The job will only be tracked for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 10800
requirements:
- "python >= 3.8.6"
-author: "Sajna Shetty(@Sajna-Shetty)"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Abhishek Sinha(@Abhishek-Dell)"
notes:
- This module supports C(check_mode).
- Ensure that the devices have the required licenses to perform the baseline compliance operations.
@@ -288,12 +290,12 @@ error_info:
import json
import time
-import re
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.compat.version import LooseVersion
COMPLIANCE_BASELINE = "TemplateService/Baselines"
REMEDIATE_BASELINE = "TemplateService/Actions/TemplateService.Remediate"
@@ -744,11 +746,10 @@ def create_remediate_payload(noncomplaint_devices, baseline_info, rest_obj):
"RunLater": False
}
}
- pattern = re.compile(r'(1|2|3)\.(0|1|2|3|4)\.?')
- if pattern.match(ome_version):
- payload["TargetIds"] = noncomplaint_devices
- else:
+ if LooseVersion(ome_version) >= "3.5":
payload["DeviceIds"] = noncomplaint_devices
+ else:
+ payload["TargetIds"] = noncomplaint_devices
return payload
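
The hunk above replaces the hand-rolled version regex with a LooseVersion comparison when choosing between the TargetIds and DeviceIds keys of the remediation payload. The snippet below is a minimal sketch of that comparison, not part of the patch; it assumes only that ansible-core's vendored LooseVersion (ansible.module_utils.compat.version) is importable, and the version strings are illustrative.

    from ansible.module_utils.compat.version import LooseVersion

    def remediation_target_key(ome_version):
        # OME 3.5 and later expect "DeviceIds"; earlier releases expect "TargetIds".
        return "DeviceIds" if LooseVersion(ome_version) >= LooseVersion("3.5") else "TargetIds"

    # A plain string comparison would sort "3.10.0" below "3.5"; LooseVersion compares
    # release components numerically, so 3.10 is correctly treated as newer than 3.5.
    print(remediation_target_key("3.10.0"))  # DeviceIds
    print(remediation_target_key("3.4.1"))   # TargetIds
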
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
index d96cd3769..8132ffe9d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_configuration_compliance_info.py
@@ -25,18 +25,18 @@ extends_documentation_fragment:
- dellemc.openmanage.oment_auth_options
options:
baseline:
- required: True
+ required: true
description:
- The name of the created baseline.
- A compliance report is generated even when the template is not associated with the baseline.
type: str
device_id:
- required: False
+ required: false
description:
- The ID of the target device which is associated with the I(baseline).
type: int
device_service_tag:
- required: False
+ required: false
description:
- The device service tag of the target device associated with the I(baseline).
- I(device_service_tag) is mutually exclusive with I(device_id).
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
index 56c1def60..f6a085cd9 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_group.py
@@ -232,7 +232,6 @@ EXAMPLES = """
- fe80::ffff:ffff:ffff:ffff
- ::ffff:192.0.2.0/125
- fe80::ffff:ffff:ffff:1111-fe80::ffff:ffff:ffff:ffff
-
"""
@@ -281,7 +280,7 @@ from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
try:
from netaddr import IPAddress, IPNetwork, IPRange
@@ -511,7 +510,7 @@ def main():
group_id=group_id, changed=True)
else:
current_device_list = get_current_member_of_group(rest_obj, group_id)
- resp = remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list)
+ remove_member_from_group(module, rest_obj, group_id, device_id, current_device_list)
module.exit_json(msg="Successfully removed member(s) from the device group.", changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
index 846dd5e82..62430402c 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_info.py
@@ -2,16 +2,13 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2019-2022 Dell Inc.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
-# Other trademarks may be trademarks of their respective owners.
#
-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
@@ -63,9 +60,11 @@ options:
requirements:
- "python >= 3.8.6"
-author: "Sajna Shetty(@Sajna-Shetty)"
+author:
+ - "Sajna Shetty (@Sajna-Shetty)"
+ - "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -138,7 +137,6 @@ EXAMPLES = """
device_service_tag:
- MXL1234
- MXL4567
-
"""
RETURN = '''
@@ -199,6 +197,7 @@ device_info:
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import get_all_data_with_pagination
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -392,9 +391,13 @@ def main():
if device_facts.get("basic_inventory"):
query_param = _get_query_parameters(module.params)
if query_param is not None:
- resp = rest_obj.invoke_request('GET', device_facts["basic_inventory"], query_param=query_param)
- device_facts = resp.json_data
- resp_status.append(resp.status_code)
+ device_report = get_all_data_with_pagination(rest_obj, device_facts["basic_inventory"], query_param)
+ if not device_report.get("report_list", []):
+ module.exit_json(msg="No devices present.", device_info=[])
+ device_facts = {"@odata.context": device_report["resp_obj"].json_data["@odata.context"],
+ "@odata.count": len(device_report["report_list"]),
+ "value": device_report["report_list"]}
+ resp_status.append(device_report["resp_obj"].status_code)
else:
device_report = rest_obj.get_all_report_details(DEVICE_RESOURCE_COLLECTION[DEVICE_LIST]["resource"])
device_facts = {"@odata.context": device_report["resp_obj"].json_data["@odata.context"],
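
In the ome_device_info hunk above, a single GET against the filtered device URI is replaced by the collection's get_all_data_with_pagination helper so that results spanning several pages are aggregated before the facts are built. The helper's real implementation lives in the collection's module_utils and is not shown in this diff; the loop below is only a hypothetical sketch of such an OData paging walk. The returned keys mirror how the hunk consumes the result, but the body, the "@odata.nextLink" handling, and the "/api/" prefix stripping are assumptions, not the collection's code.

    def get_all_data_with_pagination(rest_obj, uri, query_param=None):
        # Hypothetical sketch: collect every page of an OData collection resource.
        resp_obj = rest_obj.invoke_request("GET", uri, query_param=query_param)
        report_list = list(resp_obj.json_data.get("value", []))
        next_link = resp_obj.json_data.get("@odata.nextLink")
        while next_link:
            # Assumption: RestOME expects the resource path without the "/api/" prefix.
            page = rest_obj.invoke_request("GET", next_link.replace("/api/", "", 1))
            report_list.extend(page.json_data.get("value", []))
            next_link = page.json_data.get("@odata.nextLink")
        return {"resp_obj": resp_obj, "report_list": report_list}
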
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
index 9b48e33dd..7de50f0fb 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_local_access_configuration.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -50,18 +50,19 @@ options:
type: bool
description:
- Enables or disables the chassis power button.
-      - If C(False), the chassis cannot be turned on or turned off using the power button.
+      - If C(false), the chassis cannot be turned on or turned off using the power button.
enable_lcd_override_pin:
type: bool
description:
- Enables or disables the LCD override pin.
- - This is required when I(enable_chassis_power_button) is C(False).
+ - This is required when I(enable_chassis_power_button) is C(false).
disabled_button_lcd_override_pin:
- type: int
+ type: str
description:
- The six digit LCD override pin to change the power state of the chassis.
- - This is required when I(enable_lcd_override_pin) is C(True).
- - The module will always report change when I(disabled_button_lcd_override_pin) is C(True).
+ - This is required when I(enable_lcd_override_pin) is C(true).
+ - The module will always report change when I(disabled_button_lcd_override_pin) is C(true).
+ - 'The value must be specified in quotes. ex: "001100".'
quick_sync:
type: dict
description:
@@ -84,7 +85,7 @@ options:
description:
- Inactivity timeout in seconds or minutes.
- The range is 120 to 3600 in seconds, or 2 to 60 in minutes.
- - This option is required when I(enable_inactivity_timeout) is C(True).
+ - This option is required when I(enable_inactivity_timeout) is C(true).
timeout_limit_unit:
type: str
choices: [SECONDS, MINUTES]
@@ -92,7 +93,7 @@ options:
- Inactivity timeout limit unit.
- C(SECONDS) to set I(timeout_limit) in seconds.
- C(MINUTES) to set I(timeout_limit) in minutes.
- - This option is required when I(enable_inactivity_timeout) is C(True).
+ - This option is required when I(enable_inactivity_timeout) is C(true).
enable_read_authentication:
type: bool
description: Enables or disables the option to log in using your user credentials and to read the
@@ -132,10 +133,11 @@ requirements:
- "python >= 3.8.6"
author:
- "Felix Stephen (@felixs88)"
+ - "Shivam Sharma (@ShivamSh3)"
notes:
- Run this module from a system that has direct access to OpenManage Enterprise Modular.
- This module supports C(check_mode).
- - The module will always report change when I(enable_chassis_power_button) is C(True).
+ - The module will always report change when I(enable_chassis_power_button) is C(true).
"""
EXAMPLES = """
@@ -152,7 +154,7 @@ EXAMPLES = """
chassis_power_button:
enable_chassis_power_button: false
enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
+ disabled_button_lcd_override_pin: "123456"
- name: Configure Quick sync and LCD settings of the chassis using device service tag.
dellemc.openmanage.ome_device_local_access_configuration:
@@ -184,7 +186,7 @@ EXAMPLES = """
chassis_power_button:
enable_chassis_power_button: false
enable_lcd_override_pin: true
- disabled_button_lcd_override_pin: 123456
+ disabled_button_lcd_override_pin: "123456"
quick_sync:
quick_sync_access: READ_WRITE
enable_read_authentication: true
@@ -417,7 +419,7 @@ def main():
chassis_power = {
"enable_chassis_power_button": {"type": "bool", "required": True},
"enable_lcd_override_pin": {"type": "bool", "required": False},
- "disabled_button_lcd_override_pin": {"type": "int", "required": False, "no_log": True}}
+ "disabled_button_lcd_override_pin": {"type": "str", "required": False, "no_log": True}}
quick_sync_options = {
"quick_sync_access": {"type": "str", "required": False, "choices": ["DISABLED", "READ_ONLY", "READ_WRITE"]},
"enable_inactivity_timeout": {"type": "bool", "required": False},
@@ -470,7 +472,7 @@ def main():
resp_data["QuickSync"]["TimeoutLimitUnit"] = "MINUTES"
module.exit_json(msg=SUCCESS_MSG, local_access_settings=resp_data, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, SSLError, TypeError, ConnectionError, AttributeError, IndexError, KeyError, OSError) as err:
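
The type change of disabled_button_lcd_override_pin from int to str in the hunks above exists because an integer cannot carry the leading zeros of a six-digit pin such as "001100". A two-line plain-Python illustration, not collection code:

    print(int("001100"))   # 1100   -- the leading zeros of the pin are silently lost
    print(str("001100"))   # 001100 -- a string keeps all six digits, hence type: str and the quoted examples
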
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
index 96a61a29b..9c73b7c46 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_location.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -59,7 +59,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
index e895472ea..0d4b0a483 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_mgmt_network.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -62,7 +62,7 @@ options:
description:
- "Enable or disable the automatic request to obtain an IPv4 address from the IPv4 Dynamic Host Configuration
Protocol (DHCP) server."
- - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_subnet_mask),
+ - "C(NOTE) If this option is C(true), the values provided for I(static_ip_address), I(static_subnet_mask),
and I(static_gateway) are not applied for these fields. However, the module may report changes."
type: bool
static_ip_address:
@@ -84,7 +84,7 @@ options:
description:
- This option allows to automatically request and obtain IPv4 address for the DNS Server from the DHCP server.
- This option is applicable when I(enable_dhcp) is true.
- - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and
+ - "C(NOTE) If this option is C(true), the values provided for I(static_preferred_dns_server) and
I(static_alternate_dns_server) are not applied for these fields. However, the module may report changes."
type: bool
static_preferred_dns_server:
@@ -114,7 +114,7 @@ options:
advertisements(RA)"
- "If I(enable_auto_configuration) is C(true), OpenManage Enterprise Modular retrieves IP configuration
(IPv6 address, prefix, and gateway address) from a DHCPv6 server on the existing network."
- - "C(NOTE) If this option is C(True), the values provided for I(static_ip_address), I(static_prefix_length),
+ - "C(NOTE) If this option is C(true), the values provided for I(static_ip_address), I(static_prefix_length),
and I(static_gateway) are not applied for these fields. However, the module may report changes."
type: bool
static_ip_address:
@@ -136,7 +136,7 @@ options:
description:
- This option allows to automatically request and obtain a IPv6 address for the DNS server from the DHCP server.
- This option is applicable when I(enable_auto_configuration) is true
- - "C(NOTE) If this option is C(True), the values provided for I(static_preferred_dns_server) and I(static_alternate_dns_server)
+ - "C(NOTE) If this option is C(true), the values provided for I(static_preferred_dns_server) and I(static_alternate_dns_server)
are not applied for these fields. However, the module may report changes."
type: bool
static_preferred_dns_server:
@@ -231,7 +231,7 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
@@ -267,7 +267,7 @@ EXAMPLES = """
use_dhcp_for_dns_domain_name: false
dns_name: "MX-SVCTAG"
dns_domain_name: "dnslocaldomain"
- auto_negotiation: no
+ auto_negotiation: false
network_speed: 100_MB
- name: Network settings for server
@@ -325,7 +325,7 @@ EXAMPLES = """
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
- device_id : 12345
+ device_id: 12345
management_vlan:
enable_vlan: true
vlan_id: 2345
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
index 81475d48b..6d1518b34 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_network_services.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -83,7 +83,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
index ec99e693a..f0587791e 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_power_settings.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -81,7 +81,7 @@ requirements:
author:
- "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
index 183b7f67e..f12cf7078 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_device_quick_deploy.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.7.0
+# Copyright (C) 2022-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -36,7 +36,7 @@ options:
- I(device_service_tag) is mutually exclusive with I(device_id).
setting_type:
type: str
- required: True
+ required: true
choices: [ServerQuickDeploy, IOMQuickDeploy]
description:
- The type of the Quick Deploy settings to be applied.
@@ -45,16 +45,16 @@ options:
job_wait:
type: bool
description: Determines whether to wait for the job completion or not.
- default: True
+ default: true
job_wait_timeout:
type: int
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
default: 120
quick_deploy_options:
type: dict
- required: True
+ required: true
description: The Quick Deploy settings for server and IOM quick deploy.
suboptions:
password:
@@ -70,7 +70,7 @@ options:
choices: [Static, DHCP]
description:
- IPv4 network type.
- - I(ipv4_network_type) is required if I(ipv4_enabled) is C(True).
+ - I(ipv4_network_type) is required if I(ipv4_enabled) is C(true).
- C(Static) to configure the static IP settings.
- C(DHCP) to configure the Dynamic IP settings.
ipv4_subnet_mask:
@@ -91,7 +91,7 @@ options:
choices: [Static, DHCP]
description:
- IPv6 network type.
- - I(ipv6_network_type) is required if I(ipv6_enabled) is C(True).
+ - I(ipv6_network_type) is required if I(ipv6_enabled) is C(true).
- C(Static) to configure the static IP settings.
- C(DHCP) to configure the Dynamic IP settings.
ipv6_prefix_length:
@@ -111,7 +111,7 @@ options:
suboptions:
slot_id:
type: int
- required: True
+ required: true
description: The ID of the slot.
slot_ipv4_address:
type: str
@@ -123,9 +123,10 @@ options:
type: int
description: The ID of the VLAN.
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
+ - "Shivam Sharma (@ShivamSh3)"
notes:
- Run this module from a system that has direct access to OpenManage Enterprise Modular.
- This module supports C(check_mode).
@@ -147,11 +148,11 @@ EXAMPLES = """
ca_path: "/path/to/ca_cert.pem"
quick_deploy_options:
password: "password"
- ipv4_enabled: True
+ ipv4_enabled: true
ipv4_network_type: Static
ipv4_subnet_mask: 255.255.255.0
ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
+ ipv6_enabled: true
ipv6_network_type: Static
ipv6_prefix_length: 1
ipv6_gateway: "::"
@@ -175,11 +176,11 @@ EXAMPLES = """
ca_path: "/path/to/ca_cert.pem"
quick_deploy_options:
password: "password"
- ipv4_enabled: True
+ ipv4_enabled: true
ipv4_network_type: Static
ipv4_subnet_mask: 255.255.255.0
ipv4_gateway: 192.168.0.1
- ipv6_enabled: True
+ ipv6_enabled: true
ipv6_network_type: Static
ipv6_prefix_length: 1
ipv6_gateway: "::"
@@ -395,7 +396,6 @@ def ip_address_field(module, field, deploy_options, slot=False):
valid = validate_ip_address(module_params.get(val[0]), val[1])
if valid is False:
module.fail_json(msg=IP_FAIL_MSG.format(field_value, val[0]))
- return
def check_domain_service(module, rest_obj):
@@ -405,7 +405,6 @@ def check_domain_service(module, rest_obj):
err_message = json.load(err)
if err_message["error"]["@Message.ExtendedInfo"][0]["MessageId"] == "CGEN1006":
module.fail_json(msg=DOMAIN_FAIL_MSG)
- return
def get_ip_from_host(hostname):
@@ -446,33 +445,9 @@ def check_mode_validation(module, deploy_data):
ipv6_enabled_deploy = deploy_data["ProtocolTypeV6"]
ipv4_nt_deploy = deploy_data.get("NetworkTypeV4")
ipv6_nt_deploy = deploy_data.get("NetworkTypeV6")
- if ipv4_enabled is not None and ipv4_enabled is True or \
- ipv4_enabled_deploy is not None and ipv4_enabled_deploy is True:
- req_data["ProtocolTypeV4"] = None
- if ipv4_enabled is not None:
- req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
- ipv4_network_type = deploy_options.get("ipv4_network_type")
- req_data["NetworkTypeV4"] = ipv4_network_type
- if ipv4_network_type == "Static" or ipv4_nt_deploy is not None and ipv4_nt_deploy == "Static":
- req_data["IpV4SubnetMask"] = deploy_options.get("ipv4_subnet_mask")
- req_data["IpV4Gateway"] = deploy_options.get("ipv4_gateway")
- elif ipv4_enabled is not None and ipv4_enabled is False:
- req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
ipv6_enabled = deploy_options.get("ipv6_enabled")
- if ipv6_enabled is not None and ipv6_enabled is True or \
- ipv6_enabled_deploy is not None and ipv6_enabled_deploy is True:
- req_data["ProtocolTypeV6"] = None
- if ipv6_enabled is not None:
- req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
- ipv6_network_type = deploy_options.get("ipv6_network_type")
- req_data["NetworkTypeV6"] = ipv6_network_type
- if ipv6_network_type == "Static" or ipv6_nt_deploy is not None and ipv6_nt_deploy == "Static":
- req_data["PrefixLength"] = deploy_options.get("ipv6_prefix_length")
- if deploy_options.get("ipv6_prefix_length") is not None:
- req_data["PrefixLength"] = str(deploy_options.get("ipv6_prefix_length"))
- req_data["IpV6Gateway"] = deploy_options.get("ipv6_gateway")
- elif ipv6_enabled is not None and ipv6_enabled is False:
- req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
+ update_ipv4_data(req_data, ipv4_enabled, ipv4_enabled_deploy, ipv4_nt_deploy, deploy_options)
+ update_ipv6_data(req_data, ipv6_enabled, ipv6_enabled_deploy, ipv6_nt_deploy, deploy_options)
resp_data = {
"ProtocolTypeV4": str(ipv4_enabled_deploy).lower(), "NetworkTypeV4": deploy_data.get("NetworkTypeV4"),
"IpV4SubnetMask": deploy_data.get("IpV4SubnetMask"), "IpV4Gateway": deploy_data.get("IpV4Gateway"),
@@ -480,7 +455,9 @@ def check_mode_validation(module, deploy_data):
"PrefixLength": deploy_data.get("PrefixLength"), "IpV6Gateway": deploy_data.get("IpV6Gateway")}
resp_filter_data = dict([(k, v) for k, v in resp_data.items() if v is not None])
req_data_filter = dict([(k, v) for k, v in req_data.items() if v is not None])
- diff_changes = [bool(set(resp_filter_data.items()) ^ set(req_data_filter.items()))]
+ copy_resp_filter_data = copy.deepcopy(resp_filter_data)
+ copy_resp_filter_data.update(req_data_filter)
+ diff_changes = [bool(set(resp_filter_data.items()) ^ set(copy_resp_filter_data.items()))]
req_slot_payload, invalid_slot = [], []
slots = deploy_options.get("slots")
if slots is not None:
@@ -492,11 +469,16 @@ def check_mode_validation(module, deploy_data):
"SlotIPV6Address": each.get("slot_ipv6_address"), "VlanId": each.get("vlan_id")}
if each.get("vlan_id") is not None:
req_slot_1.update({"VlanId": str(each.get("vlan_id"))})
+ else:
+ req_slot_1.update({"VlanId": ""})
req_filter_slot = dict([(k, v) for k, v in req_slot_1.items() if v is not None])
exist_slot_1 = {"SlotId": exist_filter_slot[0]["SlotId"],
"SlotIPV4Address": exist_filter_slot[0]["SlotIPV4Address"],
- "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"],
- "VlanId": exist_filter_slot[0]["VlanId"]}
+ "SlotIPV6Address": exist_filter_slot[0]["SlotIPV6Address"]}
+ if "VlanId" in exist_filter_slot[0]:
+ exist_slot_1.update({"VlanId": exist_filter_slot[0]["VlanId"]})
+ else:
+ exist_slot_1.update({"VlanId": ""})
exist_filter_slot = dict([(k, v) for k, v in exist_slot_1.items() if v is not None])
cp_exist_filter_slot = copy.deepcopy(exist_filter_slot)
cp_exist_filter_slot.update(req_filter_slot)
@@ -513,9 +495,48 @@ def check_mode_validation(module, deploy_data):
module.exit_json(msg=NO_CHANGES_FOUND, quick_deploy_settings=deploy_data)
req_payload.update(resp_filter_data)
req_payload.update(req_data_filter)
+ update_prefix_length(req_payload)
return req_payload, req_slot_payload
+def update_ipv4_data(req_data, ipv4_enabled, ipv4_enabled_deploy, ipv4_nt_deploy, deploy_options):
+ if ipv4_enabled is not None and ipv4_enabled is True or \
+ ipv4_enabled_deploy is not None and ipv4_enabled_deploy is True:
+ req_data["ProtocolTypeV4"] = None
+ if ipv4_enabled is not None:
+ req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
+ ipv4_network_type = deploy_options.get("ipv4_network_type")
+ req_data["NetworkTypeV4"] = ipv4_network_type
+ if ipv4_network_type == "Static" or ipv4_nt_deploy is not None and ipv4_nt_deploy == "Static":
+ req_data["IpV4SubnetMask"] = deploy_options.get("ipv4_subnet_mask")
+ req_data["IpV4Gateway"] = deploy_options.get("ipv4_gateway")
+ elif ipv4_enabled is not None and ipv4_enabled is False:
+ req_data["ProtocolTypeV4"] = str(ipv4_enabled).lower()
+
+
+def update_ipv6_data(req_data, ipv6_enabled, ipv6_enabled_deploy, ipv6_nt_deploy, deploy_options):
+ if ipv6_enabled is not None and ipv6_enabled is True or \
+ ipv6_enabled_deploy is not None and ipv6_enabled_deploy is True:
+ req_data["ProtocolTypeV6"] = None
+ if ipv6_enabled is not None:
+ req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
+ ipv6_network_type = deploy_options.get("ipv6_network_type")
+ req_data["NetworkTypeV6"] = ipv6_network_type
+ if ipv6_network_type == "Static" or ipv6_nt_deploy is not None and ipv6_nt_deploy == "Static":
+ req_data["PrefixLength"] = deploy_options.get("ipv6_prefix_length")
+ if deploy_options.get("ipv6_prefix_length") is not None:
+ req_data["PrefixLength"] = str(deploy_options.get("ipv6_prefix_length"))
+ req_data["IpV6Gateway"] = deploy_options.get("ipv6_gateway")
+ elif ipv6_enabled is not None and ipv6_enabled is False:
+ req_data["ProtocolTypeV6"] = str(ipv6_enabled).lower()
+
+
+def update_prefix_length(req_payload):
+ prefix_length = req_payload.get("PrefixLength")
+ if prefix_length == '0':
+ req_payload["PrefixLength"] = ""
+
+
def job_payload_submission(rest_obj, payload, slot_payload, settings_type, device_id, resp_data):
job_params = []
job_params.append({"Key": "protocolTypeV4", "Value": payload["ProtocolTypeV4"]})
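
The refactored check_mode_validation above, together with the new slot handling, decides whether a change is needed by overlaying the requested values on a deep copy of the current settings and testing the symmetric difference of the item sets. A self-contained sketch of that idempotency pattern with invented example data:

    import copy

    current = {"ProtocolTypeV4": "true", "NetworkTypeV4": "Static", "IpV4Gateway": "192.168.0.1"}
    requested = {"NetworkTypeV4": "DHCP"}

    merged = copy.deepcopy(current)
    merged.update(requested)
    # A non-empty symmetric difference means at least one value would change.
    changed = bool(set(current.items()) ^ set(merged.items()))
    print(changed)  # True -- NetworkTypeV4 differs, so the module would report a change
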
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
index 954395280..876e5b235 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_devices.py
@@ -63,7 +63,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 1200
job_schedule:
@@ -233,8 +233,7 @@ from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import \
- get_rest_items, strip_substr_dict, job_tracking, apply_diff_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict, job_tracking
from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import CHANGES_MSG, NO_CHANGES_MSG
DEVICE_URI = "DeviceService/Devices"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
index 71b0e0960..b16604c3c 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_diagnostics.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -60,7 +60,7 @@ options:
- Select this option to mask the personal identification information such as IPAddress,
DNS, alert destination, email, gateway, inet6, MacAddress, netmask etc.
- This option is applicable for C(application) of I(log_type).
- default: False
+ default: false
log_selectors:
type: list
description:
@@ -75,17 +75,17 @@ options:
elements: str
share_address:
type: str
- required: True
+ required: true
description: Network share IP address.
share_name:
type: str
- required: True
+ required: true
description:
- Network share path.
- Filename is auto generated and should not be provided as part of I(share_name).
share_type:
type: str
- required: True
+ required: true
description: Network share type
choices: [NFS, CIFS]
share_user:
@@ -108,7 +108,7 @@ options:
description:
- Whether to wait for the Job completion or not.
- The maximum wait time is I(job_wait_timeout).
- default: True
+ default: true
job_wait_timeout:
type: int
description:
@@ -120,13 +120,13 @@ options:
description:
- Test the availability of the network share location.
- I(job_wait) and I(job_wait_timeout) options are not applicable for I(test_connection).
- default: False
+ default: false
lead_chassis_only:
type: bool
description:
- Extract the logs from Lead chassis only.
- I(lead_chassis_only) is only applicable when I(log_type) is C(application) on OpenManage Enterprise Modular.
- default: False
+ default: false
requirements:
- "python >= 3.8.6"
author:
@@ -505,7 +505,7 @@ def main():
resp = response.json_data
if resp:
resp = rest_obj.strip_substr_dict(resp)
- module.exit_json(msg=message, job_status=resp)
+ module.exit_json(msg=message, job_status=resp, changed=True)
except HTTPError as err:
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
index a4fde99f9..f50d8f25e 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_discovery.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -80,16 +80,16 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 10800
ignore_partial_failure:
description:
- "Provides the option to ignore partial failures. Partial failures occur when there is a combination of both
discovered and undiscovered IPs."
- - If C(False), then the partial failure is not ignored, and the module will error out.
- - If C(True), then the partial failure is ignored.
- - This option is only applicable if I(job_wait) is C(True).
+ - If C(false), then the partial failure is not ignored, and the module will error out.
+ - If C(true), then the partial failure is ignored.
+ - This option is only applicable if I(job_wait) is C(true).
type: bool
default: false
discovery_config_targets:
@@ -370,8 +370,9 @@ requirements:
author:
- "Jagadeesh N V (@jagadeeshnv)"
- "Sajna Shetty (@Sajna-Shetty)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support C(check_mode).
- If I(state) is C(present), then Idempotency is not supported.
'''
@@ -496,9 +497,9 @@ EXAMPLES = r'''
password: ipmi_pwd
schedule: RunLater
cron: "0 0 9 ? * MON,WED,FRI *"
- ignore_partial_failure: True
- trap_destination: True
- community_string: True
+ ignore_partial_failure: true
+ trap_destination: true
+ community_string: true
email_recipient: test_email@company.com
- name: Discover servers with ca check enabled
@@ -516,7 +517,7 @@ EXAMPLES = r'''
wsman:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "{{ lookup('ansible.builtin.file', '/path/to/certificate_data_file') }}"
- name: Discover chassis with ca check enabled data
@@ -534,7 +535,7 @@ EXAMPLES = r'''
redfish:
username: user
password: password
- ca_check: True
+ ca_check: true
certificate_data: "-----BEGIN CERTIFICATE-----\r\n
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
ABCDEFGHIJKLMNOPQRSTUVWXYZaqwertyuiopasdfghjklzxcvbnmasdasagasvv\r\n
@@ -598,6 +599,27 @@ discovery_ids:
returned: when discoveries with duplicate name exist for I(state) is C(present)
type: list
sample: [1234, 5678]
+job_detailed_status:
+ description: Detailed last execution history of a job.
+ returned: All time.
+ type: list
+ sample: [
+ {
+ "ElapsedTime": "00:00:00",
+ "EndTime": null,
+ "ExecutionHistoryId": 564873,
+ "Id": 656893,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2050,
+ "Name": "Running"
+ },
+ "Key": "192.96.24.1",
+ "Progress": "0",
+ "StartTime": "2023-07-04 06:23:54.008",
+ "Value": "Running\nDiscovery of target 192.96.24.1 started.\nDiscovery target resolved to IP 192.96.24.1 ."
+ }
+ ]
error_info:
description: Details of the HTTP Error.
returned: on HTTP error
@@ -622,10 +644,10 @@ error_info:
import json
import time
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.common.dict_transformations import snake_dict_to_camel_dict
@@ -650,6 +672,11 @@ DISCOVERY_PARTIAL = "Some IPs are not discovered."
ATLEAST_ONE_PROTOCOL = "Protocol not applicable for given device types."
INVALID_DISCOVERY_ID = "Invalid discovery ID provided."
SETTLING_TIME = 5
+JOB_STATUS_MAP = {
+ 2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully",
+ 2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
+ 2103: "Canceled"
+}
def check_existing_discovery(module, rest_obj):
@@ -720,39 +747,37 @@ def get_schedule(module):
return schedule_payload
-def get_execution_details(module, rest_obj, job_id):
+def get_execution_details(rest_obj, job_id):
try:
+ ips = {"Completed": [], "Failed": []}
+ job_detail_status = []
resp = rest_obj.invoke_request('GET', JOB_EXEC_HISTORY.format(job_id=job_id))
ex_hist = resp.json_data.get('value')
# Sorting based on startTime and to get latest execution instance.
tmp_dict = dict((x["StartTime"], x["Id"]) for x in ex_hist)
sorted_dates = sorted(tmp_dict.keys())
ex_url = JOB_EXEC_HISTORY.format(job_id=job_id) + "({0})/ExecutionHistoryDetails".format(tmp_dict[sorted_dates[-1]])
- ips = {"Completed": [], "Failed": []}
all_exec = rest_obj.get_all_items_with_pagination(ex_url)
for jb_ip in all_exec.get('value'):
+ jb_ip = strip_substr_dict(jb_ip)
+ jb_ip.get('JobStatus', {}).pop('@odata.type', None)
+ job_detail_status.append(jb_ip)
jobstatus = jb_ip.get('JobStatus', {}).get('Name', 'Unknown')
jlist = ips.get(jobstatus, [])
jlist.append(jb_ip.get('Key'))
ips[jobstatus] = jlist
except Exception:
- ips = {"Completed": [], "Failed": []}
- return ips
+ pass
+ return ips, job_detail_status
def discovery_job_tracking(rest_obj, job_id, job_wait_sec):
- job_status_map = {
- 2020: "Scheduled", 2030: "Queued", 2040: "Starting", 2050: "Running", 2060: "completed successfully",
- 2070: "Failed", 2090: "completed with errors", 2080: "New", 2100: "Aborted", 2101: "Paused", 2102: "Stopped",
- 2103: "Canceled"
- }
sleep_interval = 30
max_retries = job_wait_sec // sleep_interval
failed_job_status = [2070, 2100, 2101, 2102, 2103]
success_job_status = [2060, 2020, 2090]
job_url = (DISCOVERY_JOBS_URI + "({job_id})").format(job_id=job_id)
loop_ctr = 0
- job_failed = True
time.sleep(SETTLING_TIME)
while loop_ctr < max_retries:
loop_ctr += 1
@@ -761,17 +786,15 @@ def discovery_job_tracking(rest_obj, job_id, job_wait_sec):
job_dict = job_resp.json_data
job_status = job_dict['JobStatusId']
if job_status in success_job_status:
- job_failed = False
- return job_failed, JOB_TRACK_SUCCESS.format(job_status_map[job_status])
+ return JOB_TRACK_SUCCESS.format(JOB_STATUS_MAP[job_status])
elif job_status in failed_job_status:
- job_failed = True
- return job_failed, JOB_TRACK_FAIL.format(job_status_map[job_status])
+ return JOB_TRACK_FAIL.format(JOB_STATUS_MAP[job_status])
time.sleep(sleep_interval)
except HTTPError:
- return job_failed, JOB_TRACK_UNABLE.format(job_id)
+ return JOB_TRACK_UNABLE.format(job_id)
except Exception as err:
- return job_failed, str(err)
- return job_failed, JOB_TRACK_INCOMPLETE.format(job_id, max_retries)
+ return str(err)
+ return JOB_TRACK_INCOMPLETE.format(job_id, max_retries)
def get_job_data(discovery_json, rest_obj):
@@ -879,19 +902,22 @@ def exit_discovery(module, rest_obj, job_id):
msg = DISCOVERY_SCHEDULED
time.sleep(SETTLING_TIME)
djob = get_discovery_job(rest_obj, job_id)
+ detailed_job = []
if module.params.get("job_wait") and module.params.get('schedule') == 'RunNow':
- job_failed, job_message = discovery_job_tracking(rest_obj, job_id,
- job_wait_sec=module.params["job_wait_timeout"])
- if job_failed is True:
- djob.update({"Completed": [], "Failed": []})
- module.fail_json(msg=job_message, discovery_status=djob)
+ job_message = discovery_job_tracking(rest_obj, job_id, job_wait_sec=module.params["job_wait_timeout"])
msg = job_message
- ip_details = get_execution_details(module, rest_obj, job_id)
+ ip_details, detailed_job = get_execution_details(rest_obj, job_id)
djob = get_discovery_job(rest_obj, job_id)
djob.update(ip_details)
- if ip_details.get("Failed") and module.params.get("ignore_partial_failure") is False:
- module.fail_json(msg=DISCOVERY_PARTIAL, discovery_status=djob)
- module.exit_json(msg=msg, discovery_status=djob, changed=True)
+ if djob["JobStatusId"] == 2090 and not module.params.get("ignore_partial_failure"):
+ module.fail_json(msg=DISCOVERY_PARTIAL, discovery_status=djob, job_detailed_status=detailed_job)
+ if djob["JobStatusId"] == 2090 and module.params.get("ignore_partial_failure"):
+ module.exit_json(msg=JOB_TRACK_SUCCESS.format(JOB_STATUS_MAP[djob["JobStatusId"]]), discovery_status=djob,
+ job_detailed_status=detailed_job, changed=True)
+ if ip_details.get("Failed"):
+ module.fail_json(msg=JOB_TRACK_FAIL.format(JOB_STATUS_MAP[djob["JobStatusId"]]), discovery_status=djob,
+ job_detailed_status=detailed_job)
+ module.exit_json(msg=msg, discovery_status=djob, job_detailed_status=detailed_job, changed=True)
def create_discovery(module, rest_obj):
@@ -997,27 +1023,27 @@ def main():
"timeout": {"type": 'int', "default": 60},
"kgkey": {"type": 'str', "no_log": True}
}
- DiscoveryConfigModel = {"device_types": {"required": True, 'type': 'list', "elements": 'str'},
- "network_address_detail": {"required": True, "type": 'list', "elements": 'str'},
- "wsman": {"type": 'dict', "options": http_creds,
- "required_if": [['ca_check', True, ('certificate_data',)]]},
- "storage": {"type": 'dict', "options": http_creds,
- "required_if": [['ca_check', True, ('certificate_data',)]]},
- "redfish": {"type": 'dict', "options": http_creds,
+ discovery_config_model = {"device_types": {"required": True, 'type': 'list', "elements": 'str'},
+ "network_address_detail": {"required": True, "type": 'list', "elements": 'str'},
+ "wsman": {"type": 'dict', "options": http_creds,
"required_if": [['ca_check', True, ('certificate_data',)]]},
- "vmware": {"type": 'dict', "options": http_creds,
- "required_if": [['ca_check', True, ('certificate_data',)]]},
- "snmp": {"type": 'dict', "options": snmp_creds},
- "ssh": {"type": 'dict', "options": ssh_creds},
- "ipmi": {"type": 'dict', "options": ipmi_creds},
- }
+ "storage": {"type": 'dict', "options": http_creds,
+ "required_if": [['ca_check', True, ('certificate_data',)]]},
+ "redfish": {"type": 'dict', "options": http_creds,
+ "required_if": [['ca_check', True, ('certificate_data',)]]},
+ "vmware": {"type": 'dict', "options": http_creds,
+ "required_if": [['ca_check', True, ('certificate_data',)]]},
+ "snmp": {"type": 'dict', "options": snmp_creds},
+ "ssh": {"type": 'dict', "options": ssh_creds},
+ "ipmi": {"type": 'dict', "options": ipmi_creds},
+ }
specs = {
"discovery_job_name": {"type": 'str'},
"discovery_id": {"type": 'int'},
"state": {"default": "present", "choices": ['present', 'absent']},
"new_name": {"type": 'str'},
"discovery_config_targets":
- {"type": 'list', "elements": 'dict', "options": DiscoveryConfigModel,
+ {"type": 'list', "elements": 'dict', "options": discovery_config_model,
"required_one_of": [
('wsman', 'storage', 'redfish', 'vmware', 'snmp', 'ssh', 'ipmi')
]},
@@ -1059,7 +1085,7 @@ def main():
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
module.fail_json(msg=str(err))
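
The ome_discovery rework above promotes the job-status table to the module-level JOB_STATUS_MAP constant and lets discovery_job_tracking return just a message while polling the job URI at a fixed interval. Below is a stripped-down sketch of that polling pattern; fetch_status and the shortened status table are illustrative stand-ins, not the module's own API.

    import time

    JOB_STATUS_MAP = {2020: "Scheduled", 2050: "Running", 2060: "completed successfully", 2070: "Failed"}
    SUCCESS_STATUSES = {2060, 2020, 2090}
    FAILED_STATUSES = {2070, 2100, 2101, 2102, 2103}

    def track_job(fetch_status, job_wait_sec, sleep_interval=30):
        # Poll until the job reaches a terminal status or the wait budget is exhausted.
        for _ in range(job_wait_sec // sleep_interval):
            status = fetch_status()
            if status in SUCCESS_STATUSES:
                return "Job completed: {0}.".format(JOB_STATUS_MAP.get(status, status))
            if status in FAILED_STATUSES:
                return "Job failed: {0}.".format(JOB_STATUS_MAP.get(status, status))
            time.sleep(sleep_interval)
        return "Job incomplete after the configured wait time."
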
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
index 7b74c306e..bd15dccc6 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_domain_user_groups.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -16,10 +16,10 @@ __metaclass__ = type
DOCUMENTATION = r"""
---
module: ome_domain_user_groups
-short_description: Create, modify, or delete an Active Directory user group on
+short_description: Create, modify, or delete an Active Directory/LDAP user group on
OpenManage Enterprise and OpenManage Enterprise Modular
version_added: "4.0.0"
-description: This module allows to create, modify, or delete an Active Directory user group on
+description: This module allows to create, modify, or delete an Active Directory/LDAP user group on
OpenManage Enterprise and OpenManage Enterprise Modular.
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
@@ -27,21 +27,21 @@ options:
state:
type: str
description:
- - C(present) imports or modifies the Active Directory user group.
- - C(absent) deletes an existing Active Directory user group.
+ - C(present) imports or modifies the Active Directory/LDAP user group.
+ - C(absent) deletes an existing Active Directory/LDAP user group.
choices: [present, absent]
default: present
group_name:
type: str
- required: True
+ required: true
description:
- - The desired Active Directory user group name to be imported or removed.
+ - The desired Active Directory/LDAP user group name to be imported or removed.
- "Examples for user group name: Administrator or Account Operators or Access Control Assistance Operator."
- I(group_name) value is case insensitive.
role:
type: str
description:
- - The desired roles and privilege for the imported Active Directory user group.
+ - The desired roles and privilege for the imported Active Directory/LDAP user group.
- "OpenManage Enterprise Modular Roles: CHASSIS ADMINISTRATOR, COMPUTE MANAGER, STORAGE MANAGER,
FABRIC MANAGER, VIEWER."
- "OpenManage Enterprise Roles: ADMINISTRATOR, DEVICE MANAGER, VIEWER."
@@ -49,26 +49,33 @@ options:
directory_name:
type: str
description:
- - The directory name set while adding the Active Directory.
+ - The directory name set while adding the Active Directory/LDAP.
- I(directory_name) is mutually exclusive with I(directory_id).
+ directory_type:
+ type: str
+ description:
+ - Type of the account.
+ choices: ['AD', 'LDAP']
+ default: 'AD'
directory_id:
type: int
description:
- - The ID of the Active Directory.
+ - The ID of the Active Directory/LDAP.
- I(directory_id) is mutually exclusive with I(directory_name).
domain_username:
type: str
description:
- - Active directory domain username.
+ - Active Directory/LDAP domain username.
- "Example: username@domain or domain\\username."
domain_password:
type: str
description:
- - Active directory domain password.
+ - Active Directory/LDAP domain password.
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- This module supports C(check_mode) and idempotency.
- Run this module from a system that has direct access to OpenManage Enterprise
@@ -108,15 +115,38 @@ EXAMPLES = r"""
ca_path: "/path/to/ca_cert.pem"
state: absent
group_name: administrators
+
+- name: Import LDAP directory group.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ directory_type: LDAP
+ state: present
+ group_name: account operators
+ directory_name: directory_name
+ role: administrator
+ domain_username: username@domain
+ domain_password: domain_password
+
+- name: Remove LDAP directory group.
+ dellemc.openmanage.ome_domain_user_groups:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ state: absent
+ group_name: account operators
"""
RETURN = r"""
---
msg:
type: str
- description: Overall status of the Active Directory user group operation.
+ description: Overall status of the Active Directory/LDAP user group operation.
returned: always
- sample: Successfully imported the active directory user group.
+ sample: Successfully imported the Active Directory/LDAP user group.
domain_user_status:
description: Details of the domain user operation, when I(state) is C(present).
returned: When I(state) is C(present).
@@ -171,8 +201,9 @@ from ansible.module_utils.urls import ConnectionError, SSLValidationError
ROLE_URI = "AccountService/Roles"
ACCOUNT_URI = "AccountService/Accounts"
GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider"
+GET_LDAP_ACC = "AccountService/ExternalAccountProvider/LDAPAccountProvider"
IMPORT_ACC_PRV = "AccountService/Actions/AccountService.ImportExternalAccountProvider"
-SEARCH_AD = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"
+SEARCH_GROUPS = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
@@ -185,7 +216,8 @@ def get_directory(module, rest_obj):
dir_id = None
if user_dir_name is None and user_dir_id is None:
module.fail_json(msg="missing required arguments: directory_name or directory_id")
- directory_resp = rest_obj.invoke_request("GET", GET_AD_ACC)
+ URI = GET_AD_ACC if module.params.get("directory_type") == "AD" else GET_LDAP_ACC
+ directory_resp = rest_obj.invoke_request("GET", URI)
for dire in directory_resp.json_data["value"]:
if user_dir_name is not None and dire["Name"] == user_dir_name:
dir_id = dire["Id"]
@@ -201,16 +233,17 @@ def get_directory(module, rest_obj):
def search_directory(module, rest_obj, dir_id):
group_name, obj_gui_id, common_name = module.params["group_name"], None, None
- payload = {"DirectoryServerId": dir_id, "Type": "AD",
+ payload = {"DirectoryServerId": dir_id,
+ "Type": module.params["directory_type"],
"UserName": module.params["domain_username"],
"Password": module.params["domain_password"],
"CommonName": group_name}
try:
- resp = rest_obj.invoke_request("POST", SEARCH_AD, data=payload)
- for ad in resp.json_data:
- if ad["CommonName"].lower() == group_name.lower():
- obj_gui_id = ad["ObjectGuid"]
- common_name = ad["CommonName"]
+ resp = rest_obj.invoke_request("POST", SEARCH_GROUPS, data=payload)
+ for key in resp.json_data:
+ if key["CommonName"].lower() == group_name.lower():
+ obj_gui_id = key["ObjectGuid"]
+ common_name = key["CommonName"]
break
else:
module.fail_json(msg="Unable to complete the operation because the entered "
@@ -227,7 +260,7 @@ def directory_user(module, rest_obj):
user = get_directory_user(module, rest_obj)
new_role_id = get_role(module, rest_obj)
dir_id = get_directory(module, rest_obj)
- domain_resp, msg = None, ''
+ domain_resp, local_msg, msg = None, '', ''
if user is None:
obj_gui_id, common_name = search_directory(module, rest_obj, dir_id)
if module.check_mode:
@@ -238,7 +271,7 @@ def directory_user(module, rest_obj):
"IsBuiltin": False, "Enabled": True, "ObjectGuid": obj_gui_id}
]
domain_resp = rest_obj.invoke_request("POST", IMPORT_ACC_PRV, data=payload)
- msg = 'imported'
+ local_msg, msg = 'import', 'imported'
else:
if (int(user["RoleId"]) == new_role_id):
user = rest_obj.strip_substr_dict(user)
@@ -250,9 +283,9 @@ def directory_user(module, rest_obj):
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True, domain_user_status=payload)
domain_resp = rest_obj.invoke_request("PUT", update_uri, data=payload)
- msg = 'updated'
+ local_msg, msg = 'update', 'updated'
if domain_resp is None:
- module.fail_json(msg="Unable to complete the Active Directory user account.")
+ module.fail_json(msg="Unable to {0} the domain user group.".format(local_msg))
return domain_resp.json_data, msg
@@ -293,11 +326,11 @@ def get_directory_user(module, rest_obj):
def delete_directory_user(rest_obj, user_id):
delete_uri, changed = "{0}('{1}')".format(ACCOUNT_URI, user_id), False
- msg = "Invalid active directory user group name provided."
+ msg = "Invalid domain user group name provided."
resp = rest_obj.invoke_request('DELETE', delete_uri)
if resp.status_code == 204:
changed = True
- msg = "Successfully deleted the active directory user group."
+ msg = "Successfully deleted the domain user group."
return msg, changed
@@ -308,6 +341,7 @@ def main():
"group_name": {"required": True, "type": 'str'},
"role": {"required": False, "type": 'str'},
"directory_name": {"required": False, "type": 'str'},
+ "directory_type": {"type": 'str', "choices": ['AD', 'LDAP'], "default": "AD"},
"directory_id": {"required": False, "type": 'int'},
"domain_username": {"required": False, "type": 'str'},
"domain_password": {"required": False, "type": 'str', "no_log": True},
@@ -324,10 +358,10 @@ def main():
if isinstance(resp, list):
resp = resp[0]
module.exit_json(
- msg="Successfully {0} the active directory user group.".format(msg),
+ msg="Successfully {0} the domain user group.".format(msg),
domain_user_status=resp, changed=True
)
- if module.params["state"] == "absent":
+ else:
user = get_directory_user(module, rest_obj)
msg, changed = delete_directory_user(rest_obj, int(user["Id"]))
user = rest_obj.strip_substr_dict(user)
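
The hunks above replace the AD-only endpoint with a pair of account-provider URIs selected by the new directory_type option, and the SearchGroups payload now carries that type instead of a hard-coded "AD". The following standalone sketch illustrates that selection; the URI strings mirror the constants in the patch, while the helper names and the simplified parameter dictionary are assumptions made for illustration, not code from the module.

# Sketch of the directory_type handling shown above. URI constants mirror the
# patch; provider_uri / build_search_payload are illustrative helpers only.
GET_AD_ACC = "AccountService/ExternalAccountProvider/ADAccountProvider"
GET_LDAP_ACC = "AccountService/ExternalAccountProvider/LDAPAccountProvider"
SEARCH_GROUPS = "AccountService/ExternalAccountProvider/Actions/ExternalAccountProvider.SearchGroups"


def provider_uri(params):
    """Pick the account-provider URI that matches directory_type ('AD' or 'LDAP')."""
    return GET_AD_ACC if params.get("directory_type", "AD") == "AD" else GET_LDAP_ACC


def build_search_payload(params, dir_id):
    """Build the SearchGroups payload; Type is no longer hard-coded to 'AD'."""
    return {
        "DirectoryServerId": dir_id,
        "Type": params["directory_type"],
        "UserName": params["domain_username"],
        "Password": params["domain_password"],
        "CommonName": params["group_name"],
    }


if __name__ == "__main__":
    sample = {"directory_type": "LDAP", "domain_username": "username@domain",
              "domain_password": "secret", "group_name": "account operators"}
    print(provider_uri(sample))                 # .../LDAPAccountProvider
    print(build_search_payload(sample, 10101))  # payload posted to SEARCH_GROUPS
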
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
index a3bfff955..5e83a07d5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -60,6 +60,7 @@ options:
- This option is case sensitive.
- This is applicable to I(device_service_tag), I(device_id), and I(baseline_name).
type: list
+ default: []
elements: str
devices:
description:
@@ -81,6 +82,7 @@ options:
components:
description: The target components to be updated. If not specified, all applicable device components are considered.
type: list
+ default: []
elements: str
schedule:
type: str
@@ -93,13 +95,28 @@ options:
- RebootNow
- StageForNextReboot
default: RebootNow
+ reboot_type:
+ version_added: '8.3.0'
+ type: str
+ description:
+ - This option provides the choices to reboot the server immediately after the firmware update.
+ - This is applicable when I(schedule) is C(RebootNow).
+ - C(GracefulRebootForce) performs a graceful reboot with forced shutdown.
+ - C(GracefulReboot) performs a graceful reboot without forced shutdown.
+ - C(PowerCycle) performs a power cycle for a hard reset on the device.
+ choices:
+ - GracefulReboot
+ - GracefulRebootForce
+ - PowerCycle
+ default: GracefulRebootForce
requirements:
- - "python >= 3.8.6"
+ - "python >= 3.9.6"
author:
- "Felix Stephen (@felixs88)"
- "Jagadeesh N V (@jagadeeshnv)"
+ - "Abhishek Sinha (@ABHISHEK-SINHA10)"
notes:
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -200,7 +217,7 @@ EXAMPLES = r'''
devices:
- id: 12345
components:
- - Lifecycle Controller
+ - Lifecycle Controller
- id: 12346
components:
- Enterprise UEFI Diagnostics
@@ -237,6 +254,17 @@ EXAMPLES = r'''
- id: 12345
components:
- iDRAC with Lifecycle Controller
+
+- name: "Update firmware using baseline name and components and perform Powercycle."
+ dellemc.openmanage.ome_firmware:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: baseline_devices
+ components:
+ - BIOS
+ reboot_type: PowerCycle
'''
RETURN = r'''
@@ -325,7 +353,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -361,8 +389,11 @@ def job_payload_for_update(rest_obj, module, target_data, baseline=None):
{"Key": "signVerify", "Value": "true"}]
# reboot applicable only if staging false
if schedule == "RebootNow":
- params.append({"Key": "rebootType", "Value": "3"})
- # reboot_dict = {"GracefulReboot": "2", "GracefulRebootForce": "3", "PowerCycle": "1"}
+ reboot_dict = {"PowerCycle": "1",
+ "GracefulReboot": "2",
+ "GracefulRebootForce": "3"}
+ reboot_type = module.params["reboot_type"]
+ params.append({"Key": "rebootType", "Value": reboot_dict[reboot_type]})
payload = {
"Id": 0, "JobName": "Firmware Update Task",
"JobDescription": FW_JOB_DESC, "Schedule": "startnow",
@@ -504,7 +535,7 @@ def single_dup_update(rest_obj, module):
device_id_tags = _validate_device_attributes(module)
device_ids, id_tag_map = get_device_ids(rest_obj, module, device_id_tags)
if module.check_mode:
- module.exit_json(msg=CHANGES_FOUND)
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
upload_status, token = upload_dup_file(rest_obj, module)
if upload_status:
report_payload = get_dup_applicability_payload(token, device_ids=device_ids, group_ids=group_ids,
@@ -549,7 +580,7 @@ def baseline_based_update(rest_obj, module, baseline, dev_comp_map):
if not compliance_report_list:
module.exit_json(msg=NO_CHANGES_MSG)
if module.check_mode:
- module.exit_json(msg=CHANGES_FOUND)
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
return compliance_report_list
@@ -605,6 +636,9 @@ def main():
"components": {"type": "list", "elements": 'str', "default": []},
"baseline_name": {"type": "str"},
"schedule": {"type": 'str', "choices": ['RebootNow', 'StageForNextReboot'], "default": 'RebootNow'},
+ "reboot_type": {"type": 'str',
+ "choices": ['PowerCycle', 'GracefulReboot', 'GracefulRebootForce'],
+ "default": 'GracefulRebootForce'},
"devices": {
"type": 'list', "elements": 'dict',
"options": {
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
index d6282db3a..6c2c6a1c5 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,8 @@ short_description: Create, modify, or delete a firmware baseline on OpenManage E
description: This module allows to create, modify, or delete a firmware baseline on OpenManage Enterprise or OpenManage Enterprise Modular.
version_added: "2.0.0"
author:
- - Jagadeesh N V(@jagadeeshnv)
+ - "Jagadeesh N V(@jagadeeshnv)"
+ - "Kritika Bhateja (@Kritika-Bhateja-03)"
extends_documentation_fragment:
- dellemc.openmanage.ome_auth_options
options:
@@ -61,12 +62,12 @@ options:
type: bool
description:
- Indicates whether firmware downgrade is allowed for the devices in the baseline.
- - This value will be set to C(True) by default, if not provided during baseline creation.
+ - This value will be set to C(true) by default, if not provided during baseline creation.
is_64_bit:
type: bool
description:
- Indicates if the repository contains 64-bit DUPs.
- - This value will be set to C(True) by default, if not provided during baseline creation.
+ - This value will be set to C(true) by default, if not provided during baseline creation.
device_ids:
type: list
elements: int
@@ -95,14 +96,22 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 600
version_added: 3.4.0
+ filter_no_reboot_required:
+ description:
+      - Select only the components that do not require a reboot. This creates a
+        firmware/driver baseline that consists of only those components of the
+        target devices that do not require a reboot.
+ type: bool
+ version_added: 8.1.0
+
requirements:
- "python >= 3.8.6"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
- I(device_group_names) option is not applicable for OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
@@ -122,6 +131,20 @@ EXAMPLES = r'''
- 1010
- 2020
+- name: Create baseline for device IDs with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_ids:
+ - 1010
+ - 2020
+
- name: Create baseline for servicetags
dellemc.openmanage.ome_firmware_baseline:
hostname: "192.168.0.1"
@@ -135,6 +158,20 @@ EXAMPLES = r'''
- "SVCTAG1"
- "SVCTAG2"
+- name: Create baseline for servicetags with no reboot required
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "baseline_name"
+ baseline_description: "baseline_description"
+ catalog_name: "catalog_name"
+ filter_no_reboot_required: true
+ device_service_tags:
+ - "SVCTAG1"
+ - "SVCTAG2"
+
- name: Create baseline for device groups without job tracking
dellemc.openmanage.ome_firmware_baseline:
hostname: "192.168.0.1"
@@ -147,7 +184,7 @@ EXAMPLES = r'''
device_group_names:
- "Group1"
- "Group2"
- job_wait: no
+ job_wait: false
- name: Modify an existing baseline
dellemc.openmanage.ome_firmware_baseline:
@@ -163,8 +200,18 @@ EXAMPLES = r'''
- "Group3"
- "Group4"
- "Group5"
- downgrade_enabled: no
- is_64_bit: yes
+ downgrade_enabled: false
+ is_64_bit: true
+
+- name: Modify no reboot filter in existing baseline
+ dellemc.openmanage.ome_firmware_baseline:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ baseline_name: "existing_baseline_name"
+ new_baseline_name: "new_baseline_name"
+ filter_no_reboot_required: true
- name: Delete a baseline
dellemc.openmanage.ome_firmware_baseline:
@@ -192,6 +239,7 @@ baseline_status:
"Description": "BASELINE DESCRIPTION",
"DeviceComplianceReports": [],
"DowngradeEnabled": true,
+ "FilterNoRebootRequired": true,
"Id": 23,
"Is64Bit": true,
"Name": "my_baseline",
@@ -267,7 +315,6 @@ GROUP_ID = 6000
import json
import time
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
@@ -329,7 +376,7 @@ def get_dev_ids(module, rest_obj, param, devkey):
targets = []
if values:
devlist = values
- device_resp = dict([(device[devkey], device) for device in devlist])
+ device_resp = {device[devkey]: device for device in devlist}
for st in paramlist:
if st in device_resp:
djson = device_resp[st]
@@ -353,7 +400,7 @@ def get_group_ids(module, rest_obj):
targets = []
if values:
grplist = values
- device_resp = dict([(str(grp['Name']), grp) for grp in grplist])
+ device_resp = {str(grp['Name']): grp for grp in grplist}
for st in grp_name_list:
if st in device_resp:
djson = device_resp[st]
@@ -413,6 +460,7 @@ def _get_baseline_payload(module, rest_obj):
"Targets": targets
}
baseline_payload['Description'] = module.params.get("baseline_description")
+ baseline_payload['FilterNoRebootRequired'] = module.params.get("filter_no_reboot_required")
de = module.params.get("downgrade_enabled")
baseline_payload['DowngradeEnabled'] = de if de is not None else True
sfb = module.params.get("is_64_bit")
@@ -434,26 +482,29 @@ def create_baseline(module, rest_obj):
def update_modify_payload(module, rest_obj, modify_payload, current_baseline):
- paylist = ['Name', "CatalogId", "RepositoryId", 'Description', 'DowngradeEnabled', 'Is64Bit']
+ paylist = ['Name', "CatalogId", "RepositoryId", 'Description', 'DowngradeEnabled', 'Is64Bit',
+ 'FilterNoRebootRequired']
diff_tuple = recursive_diff(modify_payload, current_baseline)
diff = 0
- payload = dict([(item, current_baseline.get(item)) for item in paylist])
- if diff_tuple:
- if diff_tuple[0]:
+ payload = {item: current_baseline.get(item) for item in paylist}
+ try:
+ if diff_tuple and diff_tuple[0]:
diff += 1
payload.update(diff_tuple[0])
- payload['Targets'] = current_baseline.get('Targets', [])
- inp_targets_list = get_target_list(module, rest_obj)
- if inp_targets_list:
- inp_target_dict = dict([(item['Id'], item['Type']['Id']) for item in inp_targets_list])
- cur_target_dict = dict([(item['Id'], item['Type']['Id']) for item in current_baseline.get('Targets', [])])
- diff_tuple = recursive_diff(inp_target_dict, cur_target_dict)
- if diff_tuple:
- diff += 1
- payload['Targets'] = inp_targets_list
- if diff == 0:
- module.exit_json(msg=NO_CHANGES_MSG)
- payload['Id'] = current_baseline['Id']
+ payload['Targets'] = current_baseline.get('Targets', [])
+ inp_targets_list = get_target_list(module, rest_obj)
+ if inp_targets_list:
+ inp_target_dict = {item['Id']: item['Type']['Id'] for item in inp_targets_list}
+ cur_target_dict = {item['Id']: item['Type']['Id'] for item in current_baseline.get('Targets', [])}
+ diff_tuple = recursive_diff(inp_target_dict, cur_target_dict)
+ if diff_tuple:
+ diff += 1
+ payload['Targets'] = inp_targets_list
+ if diff == 0:
+ module.exit_json(msg=NO_CHANGES_MSG)
+ payload['Id'] = current_baseline['Id']
+ except (IndexError, TypeError) as err:
+ module.fail_json(msg=str(err))
return payload
@@ -478,6 +529,8 @@ def modify_baseline(module, rest_obj, baseline_list):
modify_payload['DowngradeEnabled'] = module.params.get("downgrade_enabled")
if module.params.get("is_64_bit") is not None:
modify_payload['Is64Bit'] = module.params.get("is_64_bit")
+ if module.params.get("filter_no_reboot_required") is not None:
+ modify_payload['FilterNoRebootRequired'] = module.params.get("filter_no_reboot_required")
payload = update_modify_payload(module, rest_obj, modify_payload, current_baseline)
if module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True)
@@ -512,7 +565,8 @@ def main():
"device_service_tags": {"type": 'list', "elements": 'str'},
"device_group_names": {"type": 'list', "elements": 'str'},
"job_wait": {"type": 'bool', "default": True},
- "job_wait_timeout": {"type": 'int', "default": 600}
+ "job_wait_timeout": {"type": 'int', "default": 600},
+ "filter_no_reboot_required": {"type": 'bool'}
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -542,7 +596,7 @@ def main():
module.fail_json(msg=str(err), error_info=json.load(err))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, OSError) as err:
module.fail_json(msg=str(err))
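
With filter_no_reboot_required, the baseline payload gains a FilterNoRebootRequired key and update_modify_payload now compares it along with the other fields when deciding whether anything changed. The sketch below is a simplified model of how the modify payload is assembled from the current baseline plus the requested changes; the real module uses recursive_diff and also reconciles the Targets list, so treat this as an approximation under those assumptions.

# Simplified model of update_modify_payload after the change above.
PAYLIST = ['Name', 'CatalogId', 'RepositoryId', 'Description',
           'DowngradeEnabled', 'Is64Bit', 'FilterNoRebootRequired']


def build_modify_payload(requested, current_baseline):
    """Overlay the requested changes on the current baseline values for PAYLIST keys."""
    payload = {key: current_baseline.get(key) for key in PAYLIST}
    changed = {k: v for k, v in requested.items()
               if k in PAYLIST and current_baseline.get(k) != v}
    payload.update(changed)
    payload['Id'] = current_baseline['Id']
    return payload, bool(changed)


if __name__ == "__main__":
    current = {'Id': 23, 'Name': 'my_baseline', 'DowngradeEnabled': True,
               'Is64Bit': True, 'FilterNoRebootRequired': False}
    payload, changed = build_modify_payload({'FilterNoRebootRequired': True}, current)
    print(changed, payload['FilterNoRebootRequired'])  # True True
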
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
index 9e138a002..af48fc151 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_compliance_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -64,7 +64,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -77,8 +77,8 @@ EXAMPLES = r'''
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_ids:
- - 11111
- - 22222
+ - 11111
+ - 22222
- name: Retrieves device based compliance report for specified service Tags
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -87,8 +87,8 @@ EXAMPLES = r'''
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_service_tags:
- - MXL1234
- - MXL4567
+ - MXL1234
+ - MXL4567
- name: Retrieves device based compliance report for specified group names
dellemc.openmanage.ome_firmware_baseline_compliance_info:
@@ -97,8 +97,8 @@ EXAMPLES = r'''
password: "password"
ca_path: "/path/to/ca_cert.pem"
device_group_names:
- - "group1"
- - "group2"
+ - "group1"
+ - "group2"
- name: Retrieves device compliance report for a specified baseline
dellemc.openmanage.ome_firmware_baseline_compliance_info:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
index a98359169..261d67030 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_baseline_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -31,7 +31,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -104,7 +104,7 @@ from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
def get_specific_baseline(module, baseline_name, resp_data):
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
index 29b7ed905..b437db3ae 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_firmware_catalog.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -96,9 +96,9 @@ options:
check_certificate:
type: bool
description:
- - The certificate warnings are ignored when I(repository_type) is HTTPS. If C(True). If not, certificate warnings
+ - The certificate warnings are ignored when I(repository_type) is HTTPS. If C(true). If not, certificate warnings
are not ignored.
- default: False
+ default: false
job_wait:
description:
- Provides the option to wait for job completion.
@@ -109,7 +109,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 600
version_added: 3.4.0
@@ -120,7 +120,8 @@ author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- If I(repository_password) is provided, then the module always reports the changed status.
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise or OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise or OpenManage Enterprise Modular.
+ - This module supports IPv4 and IPv6 addresses.
- This module supports C(check_mode).
'''
@@ -138,7 +139,7 @@ EXAMPLES = r'''
source: "downloads.dell.com"
source_path: "catalog"
file_name: "catalog.gz"
- check_certificate: True
+ check_certificate: true
- name: Create a catalog from HTTP repository
dellemc.openmanage.ome_firmware_catalog:
@@ -191,7 +192,7 @@ EXAMPLES = r'''
catalog_name: "catalog_name"
catalog_description: "catalog_description"
repository_type: "DELL_ONLINE"
- check_certificate: True
+ check_certificate: true
- name: Modify a catalog using a repository from CIFS share
dellemc.openmanage.ome_firmware_catalog:
@@ -255,7 +256,7 @@ catalog_status:
"BaseLocation": null,
"BundlesCount": 0,
"Filename": "catalog.gz",
- "Id": 0,
+ "Id": 12,
"LastUpdated": null,
"ManifestIdentifier": null,
"ManifestVersion": null,
@@ -351,9 +352,11 @@ SETTLING_TIME = 3
import json
import time
+import os
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import remove_key
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -408,7 +411,7 @@ def exit_catalog(module, rest_obj, catalog_resp, operation, msg):
msg = CATALOG_UPDATED.format(operation=operation)
time.sleep(SETTLING_TIME)
catalog = get_updated_catalog_info(module, rest_obj, catalog_resp)
- module.exit_json(msg=msg, catalog_status=catalog, changed=True)
+ module.exit_json(msg=msg, catalog_status=remove_key(catalog), changed=True)
def _get_catalog_payload(params, name):
@@ -521,11 +524,21 @@ def modify_catalog(module, rest_obj, catalog_list, all_catalog):
new_catalog_current_setting = catalog_payload.copy()
repo_id = new_catalog_current_setting["Repository"]["Id"]
del new_catalog_current_setting["Repository"]["Id"]
+ fname = modify_payload.get('Filename')
+ # Special case handling for .gz catalog files
+ if fname and fname.lower().endswith('.gz'):
+ modify_payload['Filename'] = new_catalog_current_setting.get('Filename')
+ src_path = modify_payload.get('SourcePath')
+ if src_path is None:
+ src_path = new_catalog_current_setting.get('SourcePath', "")
+ if src_path.lower().endswith('.gz'):
+ src_path = os.path.dirname(src_path)
+ modify_payload['SourcePath'] = os.path.join(src_path, fname)
diff = compare_payloads(modify_payload, new_catalog_current_setting)
- if module.check_mode and diff:
- module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
if not diff:
module.exit_json(msg=CHECK_MODE_CHANGE_NOT_FOUND_MSG, changed=False)
+ if module.check_mode:
+ module.exit_json(msg=CHECK_MODE_CHANGE_FOUND_MSG, changed=True)
new_catalog_current_setting["Repository"].update(modify_payload["Repository"])
catalog_payload.update(modify_payload)
catalog_payload["Repository"] = new_catalog_current_setting["Repository"]
@@ -637,7 +650,7 @@ def main():
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
except (IOError, ValueError, TypeError, SSLError, ConnectionError, SSLValidationError, OSError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
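
Two behavioural changes stand out in the ome_firmware_catalog hunks: catalogs whose file name ends in .gz keep the stored Filename while SourcePath is rebuilt from the directory of the existing source path, and the no-difference check now runs before the check_mode exit so check mode stays idempotent. A self-contained sketch of the .gz normalisation follows; the function name and argument order are assumptions of this example.

import os


def normalize_gz_source(requested_filename, current_filename,
                        requested_source_path, current_source_path):
    """Keep the stored Filename for .gz catalogs and rebuild SourcePath from the
    directory of the existing source path, as in the hunk above."""
    if not (requested_filename and requested_filename.lower().endswith('.gz')):
        return requested_filename, requested_source_path
    src_path = requested_source_path
    if src_path is None:
        src_path = current_source_path or ""
    if src_path.lower().endswith('.gz'):
        src_path = os.path.dirname(src_path)
    return current_filename, os.path.join(src_path, requested_filename)


if __name__ == "__main__":
    print(normalize_gz_source("catalog.gz", "catalog.gz", None, "catalog/catalog.gz"))
    # -> ('catalog.gz', 'catalog/catalog.gz')
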
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
index 411a6221a..3daf178cf 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_groups.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -73,10 +73,10 @@ options:
requirements:
- "python >= 3.8.6"
notes:
- - This module manages only static device groups on Dell EMC OpenManage Enterprise.
+ - This module manages only static device groups on Dell OpenManage Enterprise.
- If a device group with the name I(parent_group_name) does not exist, a new device group with the same name is created.
- Make sure the entered parent group is not the descendant of the provided group.
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
"""
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
index 4906dcf55..9a627b234 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_identity_pool.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -32,7 +32,7 @@ options:
choices: [present, absent]
pool_name:
type: str
- required: True
+ required: true
description:
- This option is mandatory for I(state) when creating, modifying and deleting an identity pool.
new_pool_name:
@@ -129,7 +129,7 @@ author:
- "Sajna Shetty(@Sajna-Shetty)"
- "Deepak Joshi(@Dell-Deepak-Joshi))"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -145,25 +145,25 @@ EXAMPLES = r'''
pool_name: "pool1"
pool_description: "Identity pool with Ethernet, FCoE, iSCSI and FC settings"
ethernet_settings:
- starting_mac_address: "50:50:50:50:50:00"
- identity_count: 60
+ starting_mac_address: "50:50:50:50:50:00"
+ identity_count: 60
fcoe_settings:
- starting_mac_address: "70:70:70:70:70:00"
- identity_count: 75
+ starting_mac_address: "70:70:70:70:70:00"
+ identity_count: 75
iscsi_settings:
- starting_mac_address: "60:60:60:60:60:00"
- identity_count: 30
- initiator_config:
- iqn_prefix: "iqn.myprefix."
- initiator_ip_pool_settings:
- ip_range: "10.33.0.1-10.33.0.255"
- subnet_mask: "255.255.255.0"
- gateway: "192.168.4.1"
- primary_dns_server : "10.8.8.8"
- secondary_dns_server : "8.8.8.8"
+ starting_mac_address: "60:60:60:60:60:00"
+ identity_count: 30
+ initiator_config:
+ iqn_prefix: "iqn.myprefix."
+ initiator_ip_pool_settings:
+ ip_range: "10.33.0.1-10.33.0.255"
+ subnet_mask: "255.255.255.0"
+ gateway: "192.168.4.1"
+ primary_dns_server: "10.8.8.8"
+ secondary_dns_server: "8.8.8.8"
fc_settings:
- starting_address: "30:30:30:30:30:00"
- identity_count: 45
+ starting_address: "30:30:30:30:30:00"
+ identity_count: 45
- name: Create an identity pool using only ethernet settings
dellemc.openmanage.ome_identity_pool:
@@ -174,8 +174,8 @@ EXAMPLES = r'''
pool_name: "pool2"
pool_description: "create identity pool with ethernet"
ethernet_settings:
- starting_mac_address: "aa-bb-cc-dd-ee-aa"
- identity_count: 80
+ starting_mac_address: "aa-bb-cc-dd-ee-aa"
+ identity_count: 80
- name: Modify an identity pool
dellemc.openmanage.ome_identity_pool:
@@ -187,11 +187,11 @@ EXAMPLES = r'''
new_pool_name: "pool3"
pool_description: "modifying identity pool with ethernet and fcoe settings"
ethernet_settings:
- starting_mac_address: "90-90-90-90-90-90"
- identity_count: 61
+ starting_mac_address: "90-90-90-90-90-90"
+ identity_count: 61
fcoe_settings:
- starting_mac_address: "aabb.ccdd.5050"
- identity_count: 77
+ starting_mac_address: "aabb.ccdd.5050"
+ identity_count: 77
- name: Modify an identity pool using iSCSI and FC settings
dellemc.openmanage.ome_identity_pool:
@@ -265,7 +265,7 @@ import binascii
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
IDENTITY_URI = "IdentityPoolService/IdentityPools"
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
index 26b0d545e..8a875c756 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_job_info.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -37,11 +37,21 @@ options:
filter:
description: Filter records by the values supported.
type: str
+ fetch_execution_history:
+ description:
+ - Fetches the execution history of the job.
+      - I(fetch_execution_history) is only applicable when a valid I(job_id) is given.
+ - When C(true), fetches all the execution history details.
+ - When C(false), fetches only the job info and last execution details.
+ type: bool
+ default: false
requirements:
- - "python >= 3.8.6"
-author: "Jagadeesh N V(@jagadeeshnv)"
+ - "python >= 3.8.6"
+author:
+ - "Jagadeesh N V (@jagadeeshnv)"
+ - "Abhishek Sinha (@Abhishek-Dell)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -73,6 +83,14 @@ EXAMPLES = r'''
skip: 1
filter: "JobType/Id eq 8"
+- name: Get detailed job execution history with the last execution detail for a job.
+ dellemc.openmanage.ome_job_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ job_id: 12345
+ fetch_execution_history: true
'''
RETURN = r'''
@@ -89,57 +107,200 @@ job_info:
sample: {
"value": [
{
- "Builtin": false,
- "CreatedBy": "system",
- "Editable": true,
+ "Id": 10429,
+ "JobName": "Discovery-201",
+ "JobDescription": "Discovery-201",
+ "NextRun": null,
+ "LastRun": "2023-06-07 09:33:07.161",
+ "StartTime": null,
"EndTime": null,
- "Id": 12345,
- "JobDescription": "Refresh Inventory for Device",
- "JobName": "Refresh Inventory for Device",
- "JobStatus": {
- "Id": 2080,
- "Name": "New"
+ "Schedule": "startnow",
+ "State": "Enabled",
+ "CreatedBy": "admin",
+ "UpdatedBy": "admin",
+ "Visible": true,
+ "Editable": true,
+ "Builtin": false,
+ "UserGenerated": true,
+ "Targets": [],
+ "Params": [],
+ "LastRunStatus": {
+ "Id": 2070,
+ "Name": "Failed"
},
"JobType": {
- "Id": 8,
- "Internal": false,
- "Name": "Inventory_Task"
+ "Id": 101,
+ "Name": "Discovery_Task",
+ "Internal": false
},
- "LastRun": "2000-01-29 10:51:34.776",
- "LastRunStatus": {
- "Id": 2060,
- "Name": "Completed"
+ "JobStatus": {
+ "Id": 2080,
+ "Name": "New"
},
- "NextRun": null,
- "Params": [],
- "Schedule": "",
- "StartTime": null,
- "State": "Enabled",
- "Targets": [
+ "ExecutionHistories": [
{
- "Data": "''",
- "Id": 123123,
- "JobId": 12345,
- "TargetType": {
- "Id": 1000,
- "Name": "DEVICE"
- }
+ "Id": 1243224,
+ "JobName": "Discovery-201",
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.148",
+ "EndTime": "2023-06-07 09:33:08.403",
+ "LastUpdateTime": "2023-06-07 09:33:08.447185",
+ "ExecutedBy": "admin",
+ "JobId": 10429,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ },
+ "ExecutionHistoryDetails": [
+ {
+ "Id": 1288519,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.525",
+ "EndTime": "2023-06-07 09:33:08.189",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.1",
+ "Value": "Running\nDiscovery of target 198.168.0.1 started
+ .\nDiscovery target resolved to IP 198.168.0.1 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the
+ operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243224,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ },
+ {
+ "Id": 1288518,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.521",
+ "EndTime": "2023-06-07 09:33:08.313",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.2",
+ "Value": "Running\nDiscovery of target 198.168.0.2 started.
+ \nDiscovery target resolved to IP 198.168.0.2 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the
+ operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243224,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ }
+ ]
+ },
+ {
+ "Id": 1243218,
+ "JobName": "Discovery-201",
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:30:55.064",
+ "EndTime": "2023-06-07 09:30:56.338",
+ "LastUpdateTime": "2023-06-07 09:30:56.365294",
+ "ExecutedBy": "admin",
+ "JobId": 10429,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ },
+ "ExecutionHistoryDetails": [
+ {
+ "Id": 1288512,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:30:55.441",
+ "EndTime": "2023-06-07 09:30:56.085",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.1",
+ "Value": "Running\nDiscovery of target 198.168.0.1 started.
+ \nDiscovery target resolved to IP 198.168.0.1 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the
+ operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243218,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ },
+ {
+ "Id": 1288511,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:30:55.439",
+ "EndTime": "2023-06-07 09:30:56.21",
+ "ElapsedTime": "00:00:00",
+ "Key": "198.168.0.2",
+ "Value": "Running\nDiscovery of target 198.168.0.2 started.
+ \nDiscovery target resolved to IP 198.168.0.2 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry
+ the operation.\n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243218,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ }
+ ]
}
],
- "UpdatedBy": null,
- "Visible": true
+ "LastExecutionDetail": {
+ "Id": 1288519,
+ "Progress": "100",
+ "StartTime": "2023-06-07 09:33:07.525",
+ "EndTime": "2023-06-07 09:33:08.189",
+ "ElapsedTime": null,
+ "Key": "198.168.0.1",
+ "Value": "Running\nDiscovery of target 198.168.0.1 started.
+ \nDiscovery target resolved to IP 198.168.0.1 .\n:
+ ========== EEMI Code: CGEN1009 ==========\nMessage:
+ Unable to perform the requested action because the device
+ management endpoint authentication over WSMAN, REDFISH failed.
+ \nRecommended actions: Make sure the credentials associated
+ with the device management endpoint are valid and retry the operation.
+ \n=======================================\nTask Failed.
+ Completed With Errors.",
+ "ExecutionHistoryId": 1243224,
+ "IdBaseEntity": 0,
+ "JobStatus": {
+ "Id": 2070,
+ "Name": "Failed"
+ }
+ }
}
- ]}
+]
+}
'''
import json
-from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict, remove_key
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
JOBS_URI = "JobService/Jobs"
+EXECUTION_HISTORIES_URI = "JobService/Jobs({0})/ExecutionHistories"
+LAST_EXECUTION_DETAIL_URI = "JobService/Jobs({0})/LastExecutionDetail"
def _get_query_parameters(module_params):
@@ -154,6 +315,45 @@ def _get_query_parameters(module_params):
return query_parameter
+def get_uri_detail(rest_obj, uri):
+ try:
+ result = []
+ resp = rest_obj.invoke_request('GET', uri)
+ json_data = resp.json_data
+ if value := json_data.get('value'):
+ for each_element in value:
+ each_element.get('JobStatus', {}).pop('@odata.type', None)
+ execution_history_detail_uri = each_element.get('ExecutionHistoryDetails@odata.navigationLink', '')[5:]
+ if execution_history_detail_uri:
+ execution_history_detail = get_uri_detail(rest_obj, execution_history_detail_uri)
+ each_element.update({"ExecutionHistoryDetails": execution_history_detail})
+ result.append(strip_substr_dict(each_element))
+ else:
+ json_data.get('JobStatus', {}).pop('@odata.type', None)
+ result = strip_substr_dict(json_data)
+ except Exception:
+ pass
+ return result
+
+
+def get_execution_history_of_a_job(rest_obj, job_id):
+ try:
+ execution_histories = get_uri_detail(
+ rest_obj, EXECUTION_HISTORIES_URI.format(job_id))
+ except Exception:
+ pass
+ return execution_histories
+
+
+def last_execution_detail_of_a_job(rest_obj, job_id):
+ try:
+ last_execution_detail = get_uri_detail(
+ rest_obj, LAST_EXECUTION_DETAIL_URI.format(job_id))
+ except Exception:
+ pass
+ return last_execution_detail
+
+
def main():
specs = {
"job_id": {"required": False, "type": 'int'},
@@ -162,6 +362,7 @@ def main():
"skip": {"type": 'int', "required": False},
"filter": {"type": 'str', "required": False},
}},
+ "fetch_execution_history": {"type": 'bool', "default": False},
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -177,7 +378,13 @@ def main():
job_id = module.params.get("job_id")
jpath = "{0}({1})".format(JOBS_URI, job_id)
resp = rest_obj.invoke_request('GET', jpath)
- job_facts = resp.json_data
+ job_facts = remove_key(resp.json_data)
+ execution_detail = []
+ if module.params.get("fetch_execution_history"):
+ execution_detail = get_execution_history_of_a_job(rest_obj, job_id)
+ last_execution = last_execution_detail_of_a_job(rest_obj, job_id)
+ job_facts.update({'ExecutionHistories': execution_detail,
+ 'LastExecutionDetail': last_execution})
resp_status.append(resp.status_code)
else:
# query applicable only for all jobs list fetching
@@ -185,20 +392,25 @@ def main():
if query_param:
resp = rest_obj.invoke_request('GET', JOBS_URI, query_param=query_param)
job_facts = resp.json_data
+ job_facts = remove_key(job_facts)
resp_status.append(resp.status_code)
else:
# Fetch all jobs, filter and pagination options
job_report = rest_obj.get_all_report_details(JOBS_URI)
- job_facts = {"@odata.context": job_report["resp_obj"].json_data["@odata.context"],
- "@odata.count": len(job_report["report_list"]),
- "value": job_report["report_list"]}
- if job_facts["@odata.count"] > 0:
+ job_facts = {"value": job_report["report_list"]}
+ job_facts = remove_key(job_facts)
+ if len(job_facts["value"]) > 0:
resp_status.append(200)
+ for each_value in job_facts["value"]:
+ job_id = each_value["Id"] if "Id" in each_value else None
+ last_execution = last_execution_detail_of_a_job(rest_obj, job_id)
+ each_value.update({'ExecutionHistories': [],
+ 'LastExecutionDetail': last_execution})
except HTTPError as httperr:
module.fail_json(msg=str(httperr), job_info=json.load(httperr))
except URLError as err:
module.exit_json(msg=str(err), unreachable=True)
- except (SSLValidationError, ConnectionError, TypeError, ValueError, SSLError, OSError) as err:
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError) as err:
module.fail_json(msg=str(err))
if 200 in resp_status:
module.exit_json(msg="Successfully fetched the job info", job_info=job_facts)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
index 08e307c78..044601d0d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_port_breakout.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -24,12 +24,12 @@ extends_documentation_fragment:
- dellemc.openmanage.omem_auth_options
options:
target_port:
- required: True
+ required: true
description: "The ID of the port in the switch to breakout. Enter the port ID in the format: service tag:port.
For example, 2HB7NX2:ethernet1/1/13."
type: str
breakout_type:
- required: True
+ required: true
description:
- The preferred breakout type. For example, 4X10GE.
- To revoke the default breakout configuration, enter 'HardwareDefault'.
@@ -38,7 +38,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
index 90ac7a837..393f44a71 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -67,7 +67,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -185,7 +185,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
VLAN_CONFIG = "NetworkConfigurationService/Networks"
@@ -254,7 +254,7 @@ def create_vlan(module, rest_obj, vlans):
def delete_vlan(module, rest_obj, vlan_id):
if module.check_mode:
module.exit_json(changed=True, msg=CHECK_MODE_MSG)
- resp = rest_obj.invoke_request("DELETE", VLAN_ID_CONFIG.format(Id=vlan_id))
+ rest_obj.invoke_request("DELETE", VLAN_ID_CONFIG.format(Id=vlan_id))
module.exit_json(msg="Successfully deleted the VLAN.", changed=True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
index f1de512be..ea9861a3b 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_network_vlan_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -40,7 +40,7 @@ requirements:
- "python >= 3.8.6"
author: "Deepak Joshi(@deepakjoshishri)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
index 7ead69f70..0122848c4 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_powerstate.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -25,7 +25,7 @@ options:
power_state:
description: Desired end power state.
type: str
- required: True
+ required: true
choices: ['on', 'off', 'coldboot', 'warmboot', 'shutdown']
device_service_tag:
description:
@@ -41,7 +41,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
index d2f7a87c8..eed9a45fd 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -165,7 +165,7 @@ requirements:
- "python >= 3.8.6"
author: "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
- C(assign) operation on a already assigned profile will not redeploy.
'''
@@ -193,7 +193,7 @@ EXAMPLES = r'''
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "path/to/my_iso.iso"
@@ -210,7 +210,7 @@ EXAMPLES = r'''
name_prefix: "omam_profile"
number_of_profiles: 1
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: CIFS
share_ip: "192.168.0.2"
share_user: "username"
@@ -230,7 +230,7 @@ EXAMPLES = r'''
new_name: "modified profile"
description: "new description"
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.3"
iso_path: "path/to/my_iso.iso"
@@ -266,7 +266,7 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
command: "delete"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00002')
- name: Delete profiles using profile list filter
@@ -291,7 +291,7 @@ EXAMPLES = r'''
name: "Profile 00001"
device_id: 12456
boot_to_network_iso:
- boot_to_network: True
+ boot_to_network: true
share_type: NFS
share_ip: "192.168.0.1"
iso_path: "path/to/my_iso.iso"
@@ -305,10 +305,10 @@ EXAMPLES = r'''
ShutdownType: 0
TimeToWaitBeforeShutdown: 300
EndHostPowerState: 1
- StrictCheckingVlan: True
+ StrictCheckingVlan: true
Schedule:
- RunNow: True
- RunLater: False
+ RunNow: true
+ RunLater: false
- name: Unassign a profile using profile name
dellemc.openmanage.ome_profile:
@@ -327,7 +327,7 @@ EXAMPLES = r'''
ca_path: "/path/to/ca_cert.pem"
command: "unassign"
filters:
- SelectAll: True
+ SelectAll: true
Filters: =contains(ProfileName,'Profile 00003')
- name: Unassign profiles using profile list filter
@@ -600,7 +600,7 @@ def assign_profile(module, rest_obj):
ad_opts = mparam.get("attributes")
for opt in ad_opts_list:
if ad_opts and ad_opts.get(opt):
- diff = attributes_check(module, rest_obj, ad_opts, prof['Id'])
+ attributes_check(module, rest_obj, ad_opts, prof['Id'])
payload[opt] = ad_opts.get(opt)
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
@@ -710,7 +710,7 @@ def modify_profile(module, rest_obj):
if diff:
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
- resp = rest_obj.invoke_request('PUT', PROFILE_VIEW + "({0})".format(payload['Id']), data=payload)
+ rest_obj.invoke_request('PUT', PROFILE_VIEW + "({0})".format(payload['Id']), data=payload)
module.exit_json(msg="Successfully modified the profile.", changed=True)
module.exit_json(msg=NO_CHANGES_MSG)
@@ -724,7 +724,7 @@ def delete_profile(module, rest_obj):
module.fail_json(msg="Profile has to be in an unassigned state for it to be deleted.")
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
- resp = rest_obj.invoke_request('DELETE', PROFILE_VIEW + "({0})".format(prof['Id']))
+ rest_obj.invoke_request('DELETE', PROFILE_VIEW + "({0})".format(prof['Id']))
module.exit_json(msg="Successfully deleted the profile.", changed=True)
else:
module.exit_json(msg=PROFILE_NOT_FOUND.format(name=mparam.get('name')))
@@ -732,7 +732,7 @@ def delete_profile(module, rest_obj):
payload = mparam.get('filters')
if module.check_mode:
module.exit_json(msg=CHANGES_MSG, changed=True)
- resp = rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='Delete'), data=payload)
+ rest_obj.invoke_request('POST', PROFILE_ACTION.format(action='Delete'), data=payload)
module.exit_json(msg="Successfully completed the delete operation.", changed=True)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py
new file mode 100644
index 000000000..4c5f07dd2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_profile_info.py
@@ -0,0 +1,410 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_profile_info
+short_description: Retrieve profiles with attribute details
+version_added: "7.2.0"
+description:
+  - "This module retrieves profiles with attributes on OpenManage Enterprise or OpenManage Enterprise Modular."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ profile_id:
+ description:
+ - Id of the profile.
+ - This is mutually exclusive with I(profile_name), I(system_query_options), I(template_id), and I(template_name).
+ type: int
+ profile_name:
+ description:
+ - Name of the profile.
+ - This is mutually exclusive with I(template_id), I(profile_id), I(system_query_options), and I(template_name).
+ type: str
+ template_id:
+ description:
+ - Provide the ID of the template to retrieve the list of profile(s) linked to it.
+ - This is mutually exclusive with I(profile_name), I(profile_id), I(system_query_options), and I(template_name).
+ type: int
+ template_name:
+ description:
+ - Provide the name of the template to retrieve the list of profile(s) linked to it.
+ - This is mutually exclusive with I(profile_name), I(profile_id), I(template_id), and I(system_query_options).
+ type: str
+ system_query_options:
+ description:
+ - Option for providing supported odata filters.
+ - "The profile list can be fetched and sorted based on ProfileName, TemplateName, TargetTypeId, TargetName,
+ ChassisName, ProfileState, LastRunStatus, or ProfileModified."
+ - This is mutually exclusive with I(profile_name), I(profile_id), I(template_id), and I(template_name).
+ - "C(Note) If I(profile_name), I(profile_id), I(template_id), or I(template_name) option is not provided, the
+ module retrieves all the profiles."
+ type: dict
+requirements:
+ - "python >= 3.9.6"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+notes:
+  - Run this module from a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve all profiles
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve profile using the name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_name: eprof 00001
+
+- name: Retrieve profile using the id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ profile_id: 10129
+
+- name: Retrieve the profiles using the template name
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: t2
+
+- name: Retrieve the profiles using the template id
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 11
+
+- name: Retrieve the profiles based on the odata filters
+ dellemc.openmanage.ome_profile_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ system_query_options:
+ filter: TemplateName eq 'mytemplate'
+ orderby: ProfileState
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of profile information retrieval.
+ returned: always
+ type: str
+ sample: "Successfully retrieved the profile information."
+profile_info:
+ description: Information about the profile.
+ returned: success
+ type: list
+ elements: dict
+ sample:
+ [
+ {
+ "Id": 71460,
+ "ProfileName": "Profile 00001",
+ "ProfileDescription": "from source template: (Template)",
+ "TemplateId": 8,
+ "TemplateName": "Template",
+ "DataSchemaId": 8,
+ "TargetId": 0,
+ "TargetName": null,
+ "TargetTypeId": 0,
+ "DeviceIdInSlot": 0,
+ "ChassisId": 0,
+ "ChassisName": null,
+ "GroupId": 0,
+ "GroupName": null,
+ "NetworkBootToIso": null,
+ "ProfileState": 0,
+ "DeploymentTaskId": 0,
+ "LastRunStatus": 2200,
+ "ProfileModified": 0,
+ "CreatedBy": "admin",
+ "EditedBy": null,
+ "CreatedDate": "2019-09-26 13:56:41.924966",
+ "LastEditDate": "2020-12-11 08:27:20.500564",
+ "LastDeployDate": "",
+ "AttributeIdMap": {
+ "4965": {
+ "Value": "hostname",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4963": {
+ "Value": "second floor",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4960": {
+ "Value": "10A",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4959": {
+ "Value": "OMAMDEV",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4957": {
+ "Value": "Dell LAB",
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4958": {
+ "Value": null,
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4066": {
+ "Value": null,
+ "IsReadOnly": false,
+ "IsIgnored": true
+ },
+ "4231": {
+ "Value": "1",
+ "IsReadOnly": false,
+ "IsIgnored": false
+ },
+ "4229": {
+ "Value": "Disabled",
+ "IsReadOnly": false,
+ "IsIgnored": false
+ }
+ },
+ "AttributeDetails": {
+ "System": {
+ "Server Operating System": {
+ "ServerOS 1 Server Host Name": 4965
+ },
+ "Server Topology": {
+ "ServerTopology 1 Room Name": 4963,
+ "ServerTopology 1 Rack Slot": 4960,
+ "ServerTopology 1 Rack Name": 4959,
+ "ServerTopology 1 Data Center Name": 4957,
+ "ServerTopology 1 Aisle Name": 4958
+ }
+ },
+ "iDRAC": {
+ "Active Directory": {
+ "ActiveDirectory 1 Active Directory RAC Name": 4066
+ },
+ "NIC Information": {
+ "NIC 1 VLAN ID": 4231,
+ "NIC 1 Enable VLAN": 4229
+ }
+ }
+ }
+ }
+ ]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+
+PROFILE_VIEW = "ProfileService/Profiles"
+TEMPLATE_VIEW = "TemplateService/Templates"
+SUCCESS_MSG = "Successfully retrieved the profile information."
+NO_PROFILES_MSG = "Profiles with {0} {1} not found."
+SEPRTR = ','
+
+
+def get_template_details(module, rest_obj):
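+    # Resolve the template by template_id when provided, otherwise by
+    # template_name; exits with a failure message if no matching template exists.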
+ id = module.params.get('template_id')
+ query_param = {"$filter": "Id eq {0}".format(id)}
+ srch = 'Id'
+ t_id = 'template_id'
+ if not id:
+ id = module.params.get('template_name')
+ query_param = {"$filter": "Name eq '{0}'".format(id)}
+ srch = 'Name'
+ t_id = 'template_name'
+ resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
+ if resp.success and resp.json_data.get('value'):
+ tlist = resp.json_data.get('value', [])
+ for xtype in tlist:
+ if xtype.get(srch) == id:
+ return xtype, id, t_id
+ module.exit_json(failed=True, msg="Template with {0} '{1}' not found.".format(srch.lower(), id))
+
+
+def get_profile_query(rest_obj, query, url_prm):
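+    # Fetch profiles either through a paginated OData query or via a direct
+    # profile URL parameter; any request failure yields an empty list so the
+    # caller can report that no matching profiles were found.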
+ prof_list = []
+ try:
+ if query:
+ resp = rest_obj.get_all_items_with_pagination(PROFILE_VIEW, query_param=query)
+ prof_list = resp.get("value")
+ if url_prm:
+ url_resp = rest_obj.invoke_request("GET", "{0}{1}".format(PROFILE_VIEW, url_prm))
+ prof_list = [url_resp.json_data]
+ except Exception:
+ prof_list = []
+ return prof_list
+
+
+def construct_tree_str(nprfx, attr_detailed):
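+    # Walk the comma-separated display-name path, creating nested dicts as
+    # needed, and return the innermost branch so leaf attributes can be attached.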
+ str_lst = nprfx.split(SEPRTR)
+ br = attr_detailed
+ for xs in str_lst:
+ if xs not in br:
+ br[xs] = {}
+ br = br.get(xs)
+ return br
+
+
+def recurse_subattr_list(subgroup, prefix, attr_detailed, attr_map):
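+    # Recursively flatten the SubAttributeGroups hierarchy into two views:
+    # attr_detailed, a display-name tree whose leaves map attribute names to
+    # their AttributeId, and attr_map, keyed by AttributeId with the
+    # Value/IsReadOnly/IsIgnored fields of each attribute.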
+ rq_attr = ["Value", "IsReadOnly", "IsIgnored"]
+ if isinstance(subgroup, list):
+ for each_sub in subgroup:
+ nprfx = "{0}{1}{2}".format(prefix, SEPRTR, each_sub.get("DisplayName"))
+ if each_sub.get("SubAttributeGroups"):
+ recurse_subattr_list(each_sub.get("SubAttributeGroups"), nprfx, attr_detailed, attr_map)
+ else:
+ for attr in each_sub.get('Attributes'):
+ nd = construct_tree_str(nprfx, attr_detailed)
+ nd[attr['DisplayName']] = attr['AttributeId']
+ vlist = dict((xf, attr.get(xf)) for xf in rq_attr)
+ attr_map[attr['AttributeId']] = vlist
+
+
+def get_subattr_all(attr_dtls):
+ attr_detailed = {}
+ attr_map = {}
+ for each in attr_dtls:
+ recurse_subattr_list(each.get('SubAttributeGroups'), each.get('DisplayName'), attr_detailed, attr_map)
+ return attr_detailed, attr_map
+
+
+def get_attribute_detail_tree(rest_obj, prof_id):
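+    # Build both attribute views for a single profile from its AttributeDetails
+    # endpoint; failures fall back to empty dictionaries.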
+ try:
+ resp = rest_obj.invoke_request('GET', "{0}({1})/AttributeDetails".format(PROFILE_VIEW, prof_id))
+ attr_list = resp.json_data.get("AttributeGroups")
+ attr_detailed, attr_map = get_subattr_all(attr_list)
+ except Exception:
+ attr_detailed, attr_map = {}, {}
+ return attr_detailed, attr_map
+
+
+def main():
+ argument_spec = {
+ "profile_id": {"type": 'int'},
+ "profile_name": {"type": 'str'},
+ "template_id": {"type": 'int'},
+ "template_name": {"type": 'str'},
+ "system_query_options": {"type": 'dict'}
+ }
+ argument_spec.update(ome_auth_params)
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[('profile_id', 'profile_name', 'template_name', 'template_id',
+ 'system_query_options')],
+ supports_check_mode=True)
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ query = {}
+ url_prm = None
+ prof_list = []
+ if module.params.get("template_id") or module.params.get("template_name"):
+ tmplt, value, name = get_template_details(module, rest_obj)
+ query["$filter"] = "TemplateName eq '{0}'".format(tmplt.get('Name'))
+ elif module.params.get("profile_id"):
+ url_prm = "({0})".format(module.params.get("profile_id"))
+ name = "profile_id"
+ value = module.params.get("profile_id")
+ elif module.params.get("profile_name"):
+ query["$filter"] = "ProfileName eq '{0}'".format(module.params.get("profile_name"))
+ name = "profile_name"
+ value = module.params.get("profile_name")
+ elif module.params.get("system_query_options"):
+ name = "provided"
+ value = "system_query_options"
+ for k, v in module.params.get("system_query_options").items():
+ query["${0}".format(k)] = v
+ if query or url_prm:
+ prof_list = get_profile_query(rest_obj, query, url_prm)
+ if module.params.get("profile_name"):
+ xprofs = []
+ pname = module.params.get("profile_name")
+ for xp in prof_list:
+ if xp.get("ProfileName") == pname:
+ xprofs.append(xp)
+ break
+ prof_list = xprofs
+ else:
+ resp = rest_obj.get_all_items_with_pagination(PROFILE_VIEW)
+ prof_list = resp.get("value")
+ if not bool(prof_list):
+ module.exit_json(msg=SUCCESS_MSG, profile_info=prof_list)
+ for xp in prof_list:
+ attr_tree, attr_map = get_attribute_detail_tree(rest_obj, xp["Id"])
+ xp["AttributeIdMap"] = attr_map
+ xp["AttributeDetails"] = attr_tree
+ strip_substr_dict(xp)
+ if prof_list:
+ module.exit_json(msg=SUCCESS_MSG, profile_info=prof_list) # ,xcount=len(prof_list))
+ else:
+ module.exit_json(msg=NO_PROFILES_MSG.format(name, value), failed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, SSLError, TypeError, ConnectionError,
+ AttributeError, IndexError, KeyError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
index 81e3cb2ca..12286bff3 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profile_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
index d30e7f382..60b436a29 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_server_interface_profiles.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -53,7 +53,7 @@ options:
- ID of the NIC or port number.
- C(Note) This will not be validated.
type: str
- required: True
+ required: true
team:
description:
- Group two or more ports. The ports must be connected to the same pair of Ethernet switches.
@@ -86,7 +86,7 @@ options:
- The I(names) can be retrieved using the M(dellemc.openmanage.ome_network_vlan_info)
type: list
elements: str
- required: True
+ required: true
job_wait:
description:
- Provides the option to wait for job completion.
@@ -95,7 +95,7 @@ options:
job_wait_timeout:
description:
- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
- - This option is applicable when I(job_wait) is C(True).
+ - This option is applicable when I(job_wait) is C(true).
type: int
default: 120
requirements:
@@ -103,7 +103,7 @@ requirements:
author: "Jagadeesh N V (@jagadeeshnv)"
notes:
- This module supports C(check_mode).
- - Run this module from a system that has direct access to Dell EMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
'''
EXAMPLES = r'''
@@ -120,13 +120,13 @@ EXAMPLES = r'''
nic_teaming: LACP
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan1
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
@@ -144,13 +144,13 @@ EXAMPLES = r'''
nic_teaming: NoTeaming
nic_configuration:
- nic_identifier: NIC.Mezzanine.1A-1-1
- team: no
+ team: false
untagged_network: 2
tagged_networks:
names:
- vlan2
- nic_identifier: NIC.Mezzanine.1A-2-1
- team: yes
+ team: true
untagged_network: 3
tagged_networks:
names:
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
index b4cd907eb..2e790fc08 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -79,7 +79,7 @@ requirements:
author:
- "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py
new file mode 100644
index 000000000..9ce352d5e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_info.py
@@ -0,0 +1,699 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_smart_fabric_info
+short_description: Retrieves the information of smart fabrics inventoried by OpenManage Enterprise Modular
+version_added: "7.1.0"
+description:
+ - This module retrieves the list of smart fabrics in the inventory of OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ fabric_id:
+ description:
+ - Unique Id of the fabric.
+ - I(fabric_id) is mutually exclusive with I(fabric_name).
+ type: str
+ fabric_name:
+ description:
+ - Name of the fabric.
+ - I(fabric_name) is mutually exclusive with I(fabric_id).
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Kritika Bhateja(@Kritka-Bhateja)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = """
+---
+- name: Retrieve details of all smart fabrics
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve details of a specific smart fabric identified by its fabric ID
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+
+- name: Retrieve details of a specific smart fabric identified by its fabric name
+ dellemc.openmanage.ome_smart_fabric_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+"""
+
+RETURN = '''
+---
+msg:
+ type: str
+ description: Status of smart fabric information retrieval.
+ returned: always
+ sample: "Successfully retrieved the smart fabric information."
+smart_fabric_info:
+ type: list
+ description: Returns the information about smart fabric.
+ returned: success
+ sample: [
+ {
+ "Description": "Fabric f1",
+ "FabricDesign": [
+ {
+ "Actions": {
+ "#NetworkService.GetApplicableNodes": {
+ "target": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/FabricDesign/NetworkService.GetApplicableNodes"
+ },
+ "Oem": {}
+ },
+ "FabricDesignNode": [
+ {
+ "ChassisName": "Chassis-X",
+ "NodeName": "Switch-B",
+ "Slot": "Slot-A2",
+ "Type": "WeaverSwitch"
+ },
+ {
+ "ChassisName": "Chassis-X",
+ "NodeName": "Switch-A",
+ "Slot": "Slot-A1",
+ "Type": "WeaverSwitch"
+ }
+ ],
+ "Name": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis",
+ "NetworkLink": [
+ {
+ "DestinationInterface": "ethernet1/1/38",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/38",
+ "SourceNode": "Switch-A"
+ },
+ {
+ "DestinationInterface": "ethernet1/1/37",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/37",
+ "SourceNode": "Switch-A"
+ },
+ {
+ "DestinationInterface": "ethernet1/1/39",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/39",
+ "SourceNode": "Switch-A"
+ },
+ {
+ "DestinationInterface": "ethernet1/1/40",
+ "DestinationNode": "Switch-B",
+ "SourceInterface": "ethernet1/1/40",
+ "SourceNode": "Switch-A"
+ }
+ ]
+ }
+ ],
+ "FabricDesignMapping": [
+ {
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "NODEID1"
+ },
+ {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "NODEID2"
+ }
+ ],
+ "Health": {
+ "Issues": [
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because the interface for an uplink
+ mentioned in the message is not in operational status.",
+ "Message": "The SmartFabric is not healthy because the interface JRWSV43:ethernet1/1/35 for uplink
+ 1ad54420-b145-49a1-9779-21a579ef6f2d is not in operational status.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0016",
+ "Resolution": "Make sure that all the uplink interfaces are in operational status.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because one or more VLTi links are not connected.",
+ "Message": "The SmartFabric is not healthy because all InterSwitch Links are not connected.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0017",
+ "Resolution": "Make sure that the VLTi cables for all ISLs are connected and operational as per the selected fabric design.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because the interface for an uplink
+ mentioned in the message is not in operational status.",
+ "Message": "The SmartFabric is not healthy because the interface 6H7J6Z2:ethernet1/1/35 for uplink
+ 1ad54420-b145-49a1-9779-21a579ef6f2d is not in operational status.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0016",
+ "Resolution": "Make sure that all the uplink interfaces are in operational status.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because one or more of the uplink interfaces are not bonded.",
+ "Message": "The SmartFabric is not healthy because the uplink 1ad54420-b145-49a1-9779-21a579ef6f2d
+ interface 6H7J6Z2:ethernet1/1/35 is not bonded to the other interfaces in the uplink.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0019",
+ "Resolution": "Make sure that the Link Aggregation Control Protocol (LACP) is enabled on all ports on the remote
+ switch to which the uplink ports from the fabric are connected.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ },
+ {
+ "Category": "Audit",
+ "DetailedDescription": "The SmartFabric is not healthy because one or more of the uplink interfaces are not bonded.",
+ "Message": "The SmartFabric is not healthy because the uplink 1ad54420-b145-49a1-9779-21a579ef6f2d
+ interface JRWSV43:ethernet1/1/35 is not bonded to the other interfaces in the uplink.",
+ "MessageArgs": [],
+ "MessageId": "NFAB0019",
+ "Resolution": "Make sure that the Link Aggregation Control Protocol (LACP) is enabled on all ports
+ on the remote switch to which the uplink ports from the fabric are connected.",
+ "Severity": "Warning",
+ "TimeStamp": "2019-09-25T11:50:06Z"
+ }
+ ],
+ "Status": "4000"
+ },
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "LifeCycleStatus": [
+ {
+ "Activity": "Create",
+ "Status": "2060"
+ }
+ ],
+ "Multicast": [
+ {
+ "FloodRestrict": true,
+ "IgmpVersion": "3",
+ "MldVersion": "2"
+ }
+ ],
+ "Name": "f1",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Servers": [
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": true,
+ "ConnectionStateReason": 101,
+ "DeviceCapabilities": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 7,
+ 8,
+ 9,
+ 41,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 208,
+ 16,
+ 17,
+ 18,
+ 212,
+ 30,
+ 31
+ ],
+ "DeviceManagement": [
+ {
+ "DnsName": "iDRAC-6GZK6Z2",
+ "InstrumentationName": "",
+ "MacAddress": "4c:d9:8f:7a:7c:43",
+ "ManagementId": 135185,
+ "ManagementProfile": [
+ {
+ "AgentName": "iDRAC",
+ "HasCreds": 0,
+ "ManagementId": 135185,
+ "ManagementProfileId": 135185,
+ "ManagementURL": "https://[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]:443/",
+ "ProfileId": "WSMAN_OOB",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:38.552",
+ "Version": "3.20.21.20"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "100.96.24.28"
+ },
+ {
+ "DnsName": "iDRAC-6GZK6Z2",
+ "InstrumentationName": "",
+ "MacAddress": "4c:d9:8f:7a:7c:43",
+ "ManagementId": 135186,
+ "ManagementProfile": [
+ {
+ "AgentName": "iDRAC",
+ "HasCreds": 0,
+ "ManagementId": 135186,
+ "ManagementProfileId": 135186,
+ "ManagementURL": "https://[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]:443/",
+ "ProfileId": "WSMAN_OOB",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:38.552",
+ "Version": "3.20.21.20"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "[2607:f2b1:f081:9:4ed9:8fff:fe7a:7c43]"
+ }
+ ],
+ "DeviceName": "MX-6H5S6Z2:Sled-1",
+ "DeviceServiceTag": "6GZK6Z2",
+ "Enabled": true,
+ "Id": 10071,
+ "Identifier": "6GZK6Z2",
+ "LastInventoryTime": "2019-10-29 09:30:38.552",
+ "LastStatusTime": "2019-10-29 09:41:51.051",
+ "ManagedState": 3000,
+ "Model": "PowerEdge MX840c",
+ "PowerState": 17,
+ "SlotConfiguration": {
+ "ChassisId": "10072",
+ "ChassisName": "MX-6H5S6Z2",
+ "ChassisServiceTag": "6H5S6Z2",
+ "DeviceType": "1000",
+ "SledBlockPowerOn": "None blocking",
+ "SlotId": "10084",
+ "SlotName": "Sled-1",
+ "SlotNumber": "1",
+ "SlotType": "2000"
+ },
+ "Status": 1000,
+ "SystemId": 1894,
+ "Type": 1000
+ }
+ ],
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ },
+ "Switches": [
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": true,
+ "ConnectionStateReason": 101,
+ "DeviceCapabilities": [
+ 1,
+ 2,
+ 3,
+ 5,
+ 7,
+ 8,
+ 9,
+ 207,
+ 18,
+ 602,
+ 603,
+ 604,
+ 605,
+ 606,
+ 607,
+ 608,
+ 609,
+ 610,
+ 611,
+ 612,
+ 613,
+ 614,
+ 615,
+ 616,
+ 617,
+ 618,
+ 619,
+ 620,
+ 621,
+ 622
+ ],
+ "DeviceManagement": [
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "20:04:0F:4F:4E:04",
+ "ManagementId": 135181,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135181,
+ "ManagementProfileId": 135181,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:36.273"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "100.96.24.36"
+ },
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "20:04:0F:4F:4E:04",
+ "ManagementId": 135182,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135182,
+ "ManagementProfileId": 135182,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:36.273"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": ""
+ }
+ ],
+ "DeviceName": "MX-6H5S6Z2:IOM-A2",
+ "DeviceServiceTag": "6H7J6Z2",
+ "Enabled": true,
+ "Id": 10074,
+ "Identifier": "6H7J6Z2",
+ "LastInventoryTime": "2019-10-29 09:30:36.332",
+ "LastStatusTime": "2019-10-29 09:31:00.931",
+ "ManagedState": 3000,
+ "Model": "MX9116n Fabric Engine",
+ "PowerState": 17,
+ "SlotConfiguration": {
+ "ChassisId": "10072",
+ "ChassisName": "MX-6H5S6Z2",
+ "ChassisServiceTag": "6H5S6Z2",
+ "DeviceType": "4000",
+ "SledBlockPowerOn": "null",
+ "SlotId": "10079",
+ "SlotName": "IOM-A2",
+ "SlotNumber": "2",
+ "SlotType": "4000"
+ },
+ "Status": 1000,
+ "SystemId": 2031,
+ "Type": 4000
+ },
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": true,
+ "ConnectionStateReason": 101,
+ "DeviceCapabilities": [
+ 1,
+ 2,
+ 3,
+ 5,
+ 7,
+ 8,
+ 9,
+ 207,
+ 18,
+ 602,
+ 603,
+ 604,
+ 605,
+ 606,
+ 607,
+ 608,
+ 609,
+ 610,
+ 611,
+ 612,
+ 613,
+ 614,
+ 615,
+ 616,
+ 617,
+ 618,
+ 619,
+ 620,
+ 621,
+ 622
+ ],
+ "DeviceManagement": [
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "E8:B5:D0:52:61:46",
+ "ManagementId": 135183,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135183,
+ "ManagementProfileId": 135183,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:37.115"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": "100.96.24.37"
+ },
+ {
+ "DnsName": "",
+ "InstrumentationName": "MX9116n Fabric Engine",
+ "MacAddress": "E8:B5:D0:52:61:46",
+ "ManagementId": 135184,
+ "ManagementProfile": [
+ {
+ "HasCreds": 0,
+ "ManagementId": 135184,
+ "ManagementProfileId": 135184,
+ "ManagementURL": "",
+ "ProfileId": "",
+ "Status": 1000,
+ "StatusDateTime": "2019-10-29 09:30:37.115"
+ }
+ ],
+ "ManagementType": 2,
+ "NetworkAddress": ""
+ }
+ ],
+ "DeviceName": "MX-6H5S6Z2:IOM-A1",
+ "DeviceServiceTag": "JRWSV43",
+ "Enabled": true,
+ "Id": 20881,
+ "Identifier": "JRWSV43",
+ "LastInventoryTime": "2019-10-29 09:30:37.172",
+ "LastStatusTime": "2019-10-29 09:31:00.244",
+ "ManagedState": 3000,
+ "Model": "MX9116n Fabric Engine",
+ "PowerState": 17,
+ "SlotConfiguration": {
+ "ChassisId": "10072",
+ "ChassisName": "MX-6H5S6Z2",
+ "ChassisServiceTag": "6H5S6Z2",
+ "DeviceType": "4000",
+ "SledBlockPowerOn": "null",
+ "SlotId": "10078",
+ "SlotName": "IOM-A1",
+ "SlotNumber": "1",
+ "SlotType": "4000"
+ },
+ "Status": 1000,
+ "SystemId": 2031,
+ "Type": 4000
+ }
+ ],
+ "Uplinks": [
+ {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "MediaType": "Ethernet",
+ "Name": "u1",
+ "NativeVLAN": 1,
+ "Summary": {
+ "NetworkCount": 1,
+ "PortCount": 2
+ },
+ "UfdEnable": "Disabled"
+ }
+ ]
+ }
+ ]
+
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+
+FABRIC_URI = "NetworkService/Fabrics"
+# messages
+SUCCESS_MSG = "Successfully retrieved the smart fabric information."
+UNSUCCESS_MSG = "Unable to retrieve smart fabric information."
+INVALID_FABRIC_ID = "Unable to retrieve smart fabric information with fabric ID {0}."
+INVALID_FABRIC_NAME = "Unable to retrieve smart fabric information with fabric name {0}."
+
+
+def get_smart_fabric_details_via_id(module, rest_obj, fabric_id):
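+    # Fetch a single fabric by its ID; an HTTP error is reported as an invalid
+    # fabric ID instead of surfacing a traceback.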
+ resp = []
+ try:
+ fabric_path = "{0}('{1}')".format(FABRIC_URI, fabric_id)
+ resp_det = rest_obj.invoke_request("GET", fabric_path)
+ resp = [resp_det.json_data]
+ except HTTPError:
+ module.exit_json(msg=INVALID_FABRIC_ID.format(fabric_id), failed=True)
+ return resp
+
+
+def fetch_smart_fabric_link_details(module, rest_obj, fabric_details_dict):
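+    # Expand the fabric's related resources: Multicast and FabricDesign through
+    # their @odata.id references, and Switches/Servers/ISLLinks/Uplinks through
+    # their navigation links, cleaning response metadata from every item.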
+ info_dict = {"Switches": "Switches@odata.navigationLink", "Servers": "Servers@odata.navigationLink",
+ "ISLLinks": "ISLLinks@odata.navigationLink", "Uplinks": "Uplinks@odata.navigationLink",
+ "Multicast": None, "FabricDesign": None}
+ info_list = ["Multicast", "FabricDesign"]
+ try:
+ for key in info_dict:
+ link = info_dict[key]
+ if key in info_list:
+ fabric_info_dict = fabric_details_dict[key]["@odata.id"]
+ uri = fabric_info_dict.strip("/api")
+ response = rest_obj.invoke_request('GET', uri)
+ if response.json_data:
+ details = [response.json_data]
+ else:
+ fabric_info_dict = fabric_details_dict.get(link)
+ uri = fabric_info_dict.strip("/api")
+ response = rest_obj.invoke_request('GET', uri)
+ if response.json_data:
+ details = response.json_data.get("value")
+ for item in details:
+ item = strip_substr_dict(item)
+ item = clean_data(item)
+ fabric_details_dict[key] = details
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ return fabric_details_dict
+
+
+def strip_smart_fabric_info(module, rest_obj, smart_fabric_info):
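+    # Expand the linked resources of each fabric and remove response metadata
+    # before the details are returned to the user.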
+ for i in range(len(smart_fabric_info)):
+ fabrics_details = smart_fabric_info[i]
+ fabrics_details = fetch_smart_fabric_link_details(module, rest_obj, fabrics_details)
+ fabrics_details = strip_substr_dict(fabrics_details)
+ fabrics_details = clean_data(fabrics_details)
+ smart_fabric_info[i] = fabrics_details
+ return smart_fabric_info
+
+
+def clean_data(data):
+ """
+ data: A dictionary.
+ return: A data dictionary after removing items that are not required for end user.
+ """
+ for k in data.copy():
+ if isinstance(data[k], dict):
+ if data[k].get("@odata.id"):
+ del data[k]["@odata.id"]
+ if not data[k]:
+ del data[k]
+ return data
+
+
+def main():
+
+ specs = {
+ "fabric_id": {"type": 'str', "required": False},
+ "fabric_name": {"type": 'str', "required": False}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[
+ ('fabric_id', 'fabric_name')
+ ],
+ supports_check_mode=True
+ )
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ if module.params.get("fabric_id") is not None:
+ fabric_id = module.params.get("fabric_id")
+ smart_fabric_info = get_smart_fabric_details_via_id(module, rest_obj, fabric_id)
+ smart_fabric_info = strip_smart_fabric_info(module, rest_obj, smart_fabric_info)
+ module.exit_json(msg=SUCCESS_MSG, smart_fabric_info=smart_fabric_info)
+ else:
+ resp = rest_obj.invoke_request('GET', FABRIC_URI)
+ if resp.json_data:
+ smart_fabric_info = resp.json_data.get("value")
+ if module.params.get("fabric_name") is not None:
+ fabric_name_found = False
+ for fabric in smart_fabric_info:
+ fabric_name = module.params.get("fabric_name")
+ if fabric['Name'] == fabric_name:
+ smart_fabric_info = [fabric]
+ fabric_name_found = True
+ if not fabric_name_found:
+ module.exit_json(msg=INVALID_FABRIC_NAME.format(fabric_name), failed=True)
+ smart_fabric_info = strip_smart_fabric_info(module, rest_obj, smart_fabric_info)
+ module.exit_json(msg=SUCCESS_MSG, smart_fabric_info=smart_fabric_info)
+ else:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, TypeError, ValueError, OSError, SSLError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
index cae5d8d69..0ac1f2557 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -93,7 +93,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise Modular.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py
new file mode 100644
index 000000000..d6bb0683e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_smart_fabric_uplink_info.py
@@ -0,0 +1,346 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+---
+module: ome_smart_fabric_uplink_info
+short_description: Retrieve details of fabric uplinks on OpenManage Enterprise Modular
+version_added: "7.1.0"
+description: This module retrieves details of fabric uplinks on OpenManage Enterprise Modular.
+extends_documentation_fragment:
+ - dellemc.openmanage.omem_auth_options
+options:
+ fabric_id:
+ type: str
+ description:
+ - Unique id of the fabric.
+ - I(fabric_id) is mutually exclusive with I(fabric_name).
+ fabric_name:
+ type: str
+ description:
+ - Unique name of the fabric.
+ - I(fabric_name) is mutually exclusive with I(fabric_id).
+ uplink_id:
+ type: str
+ description:
+ - Unique id of the uplink.
+ - I(uplink_id) is mutually exclusive with I(uplink_name).
+ - I(fabric_id) or I(fabric_name) is required along with I(uplink_id).
+ uplink_name:
+ type: str
+ description:
+ - Unique name of the uplink.
+ - I(uplink_name) is mutually exclusive with I(uplink_id).
+ - I(fabric_id) or I(fabric_name) is required along with I(uplink_name).
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Husniya Hameed(@husniya_hameed)"
+notes:
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise Modular.
+ - This module supports C(check_mode).
+'''
+
+EXAMPLES = r'''
+---
+- name: Retrieve all fabric uplink information using fabric_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+
+- name: Retrieve all fabric uplink information using fabric_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_name: "f1"
+
+- name: Retrieve specific fabric information using uplink_id.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_id: "1ad54420-b145-49a1-9779-21a579ef6f2d"
+
+- name: Retrieve specific fabric information using uplink_name.
+ dellemc.openmanage.ome_smart_fabric_uplink_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ fabric_id: "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ uplink_name: "u1"
+'''
+
+RETURN = r'''
+---
+msg:
+ type: str
+ description: Status of fabric uplink information retrieval.
+ returned: always
+ sample: "Successfully retrieved the fabric uplink information."
+uplink_info:
+ type: list
+ description: Information about the fabric uplink.
+ returned: on success
+ sample: [{
+ "Description": "",
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "MediaType": "Ethernet",
+ "Name": "u1",
+ "NativeVLAN": 1,
+ "Networks": [{
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "Description": null,
+ "Id": 10155,
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d",
+ "Name": "testvlan",
+ "Type": 1,
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143
+ }],
+ "Ports": [{
+ "AdminStatus": "Enabled",
+ "BlinkStatus": "OFF",
+ "ConfiguredSpeed": "0",
+ "CurrentSpeed": "0",
+ "Description": "",
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "MaxSpeed": "0",
+ "MediaType": "Ethernet",
+ "Name": "",
+ "NodeServiceTag": "SVCTAG1",
+ "OpticsType": "NotPresent",
+ "PortNumber": "ethernet1/1/35",
+ "Role": "Uplink",
+ "Status": "Down",
+ "Type": "PhysicalEthernet"
+ },
+ {
+ "AdminStatus": "Enabled",
+ "BlinkStatus": "OFF",
+ "ConfiguredSpeed": "0",
+ "CurrentSpeed": "0",
+ "Description": "",
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "MaxSpeed": "0",
+ "MediaType": "Ethernet",
+ "Name": "",
+ "NodeServiceTag": "SVCTAG1",
+ "OpticsType": "NotPresent",
+ "PortNumber": "ethernet1/1/35",
+ "Role": "Uplink",
+ "Status": "Down",
+ "Type": "PhysicalEthernet"
+ }],
+ "Summary": {
+ "NetworkCount": 1,
+ "PortCount": 2
+ },
+ "UfdEnable": "Disabled"
+ }]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to complete the request because the resource URI does not exist or is not implemented.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Check the request resource URI. Refer to the OpenManage Enterprise-Modular User's Guide
+ for more information about resource URI and its properties."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import strip_substr_dict
+
+ALL_UPLINKS_URI = "NetworkService/Fabrics('{0}')/Uplinks?$expand=Networks,Ports"
+FABRIC_URI = "NetworkService/Fabrics"
+UPLINK_URI = "NetworkService/Fabrics('{0}')/Uplinks('{1}')?$expand=Networks,Ports"
+# Messages
+SUCCESS_MSG = "Successfully retrieved the fabric uplink information."
+UNSUCCESS_MSG = "Unable to retrieve smart fabric uplink information."
+INVALID_FABRIC_ID = "Unable to retrieve smart fabric uplink information with fabric ID {0}."
+INVALID_FABRIC_NAME = "Unable to retrieve smart fabric uplink information with fabric name {0}."
+INVALID_UPLINK_ID = "Unable to retrieve smart fabric uplink information with uplink ID {0}."
+INVALID_UPLINK_NAME = "Unable to retrieve smart fabric uplink information with uplink name {0}."
+ID_UNAVAILABLE = "fabric_id or fabric_name is required along with uplink_id."
+NAME_UNAVAILABLE = "fabric_id or fabric_name is required along with uplink_name."
+
+
+def get_all_uplink_details(module, rest_obj):
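+    # For every fabric, follow its Uplinks navigation link (dropping the
+    # leading '/api/' segment) and expand Networks and Ports for each uplink.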
+ resp = []
+ try:
+ fabric_det = rest_obj.invoke_request("GET", FABRIC_URI)
+ fabric_resp = fabric_det.json_data.get("value")
+ for each in fabric_resp:
+ if each.get("Uplinks@odata.navigationLink"):
+ uplink_det = each.get("Uplinks@odata.navigationLink")
+ uplink = uplink_det[5:] + "?$expand=Networks,Ports"
+ uplink_details = rest_obj.invoke_request("GET", uplink)
+ for val in uplink_details.json_data.get("value"):
+ resp.append(val)
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ return resp
+
+
+def get_uplink_details_from_fabric_id(module, rest_obj, fabric_id):
+ resp = []
+ try:
+ resp_det = rest_obj.invoke_request("GET", ALL_UPLINKS_URI.format(fabric_id))
+ resp = resp_det.json_data.get("value")
+ except HTTPError:
+ module.exit_json(msg=INVALID_FABRIC_ID.format(fabric_id), failed=True)
+ return resp
+
+
+def get_fabric_id_from_name(module, rest_obj, fabric_name):
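+    # Look up the fabric ID for the given name; exits with an error when the
+    # name does not match any fabric.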
+ fabric_id = ""
+ try:
+ resp_det = rest_obj.invoke_request("GET", FABRIC_URI)
+ resp = resp_det.json_data.get("value")
+ for each in resp:
+ if each["Name"] == fabric_name:
+ fabric_id = each["Id"]
+ break
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ if not fabric_id:
+ module.exit_json(msg=INVALID_FABRIC_NAME.format(fabric_name), failed=True)
+ return fabric_id
+
+
+def get_uplink_details(module, rest_obj, fabric_id, uplink_id):
+ resp = []
+ try:
+ resp_det = rest_obj.invoke_request("GET", UPLINK_URI.format(fabric_id, uplink_id))
+ resp = [resp_det.json_data]
+ except HTTPError:
+ module.exit_json(msg=INVALID_UPLINK_ID.format(uplink_id), failed=True)
+ return resp
+
+
+def get_uplink_id_from_name(module, rest_obj, uplink_name, fabric_id):
+ uplink_id = ""
+ try:
+ resp_det = rest_obj.invoke_request("GET", ALL_UPLINKS_URI.format(fabric_id))
+ resp = resp_det.json_data.get("value")
+ for each in resp:
+ if each["Name"] == uplink_name:
+ uplink_id = each["Id"]
+ break
+ except HTTPError:
+ module.exit_json(msg=UNSUCCESS_MSG, failed=True)
+ if not uplink_id:
+ module.exit_json(msg=INVALID_UPLINK_NAME.format(uplink_name), failed=True)
+ return uplink_id
+
+
+def strip_uplink_info(uplink_info):
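+    # Remove response bookkeeping keys (strip_substr_dict) from each uplink and
+    # from its nested Networks and Ports entries.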
+ for item in uplink_info:
+ item = strip_substr_dict(item)
+ if item["Networks"]:
+ for net in item["Networks"]:
+ net = strip_substr_dict(net)
+ if item["Ports"]:
+ for port in item["Ports"]:
+ port = strip_substr_dict(port)
+ return uplink_info
+
+
+def main():
+ specs = {
+ "fabric_id": {"type": "str"},
+ "fabric_name": {"type": "str"},
+ "uplink_id": {"type": "str"},
+ "uplink_name": {"type": "str"}
+ }
+ specs.update(ome_auth_params)
+ module = AnsibleModule(
+ argument_spec=specs,
+ mutually_exclusive=[('fabric_id', 'fabric_name'), ('uplink_id', 'uplink_name')],
+ supports_check_mode=True
+ )
+ try:
+ with RestOME(module.params, req_session=True) as rest_obj:
+ uplink_info = []
+ fabric_id = module.params["fabric_id"]
+ fabric_name = module.params["fabric_name"]
+ uplink_id = module.params["uplink_id"]
+ uplink_name = module.params["uplink_name"]
+
+ if fabric_id:
+ uplink_info = get_uplink_details_from_fabric_id(module, rest_obj, fabric_id)
+ elif fabric_name:
+ fabric_id = get_fabric_id_from_name(module, rest_obj, fabric_name)
+ if fabric_id:
+ uplink_info = get_uplink_details_from_fabric_id(module, rest_obj, fabric_id)
+
+ if uplink_id and not (fabric_id or fabric_name):
+ module.exit_json(msg=ID_UNAVAILABLE, failed=True)
+ elif uplink_id:
+ uplink_info = get_uplink_details(module, rest_obj, fabric_id, uplink_id)
+ elif uplink_name and not (fabric_id or fabric_name):
+ module.exit_json(msg=NAME_UNAVAILABLE, failed=True)
+ elif uplink_name:
+ uplink_id = get_uplink_id_from_name(module, rest_obj, uplink_name, fabric_id)
+ if uplink_id:
+ uplink_info = get_uplink_details(module, rest_obj, fabric_id, uplink_id)
+
+ if fabric_id is None and fabric_name is None and uplink_id is None and uplink_name is None:
+ uplink_info = get_all_uplink_details(module, rest_obj)
+ if not bool(uplink_info):
+ module.exit_json(msg=SUCCESS_MSG, uplink_info=uplink_info)
+
+ uplink_info_strip = strip_uplink_info(uplink_info)
+ module.exit_json(msg=SUCCESS_MSG, uplink_info=uplink_info_strip)
+
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, TypeError, ConnectionError, SSLValidationError, SSLError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
index 8c5fa98b3..6bf77ad02 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.5.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -39,13 +39,13 @@ options:
template_id:
description:
- ID of the existing template.
- - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export).
+ - This option is applicable when I(command) is C(modify), C(deploy), C(delete), C(clone) and C(export).
- This option is mutually exclusive with I(template_name).
type: int
template_name:
description:
- Name of the existing template.
- - This option is applicable when I(command) is C(modify), C(deploy), C(delete) and C(export).
+ - This option is applicable when I(command) is C(modify), C(deploy), C(delete), C(clone) and C(export).
- This option is mutually exclusive with I(template_id).
type: str
device_id:
@@ -120,11 +120,26 @@ options:
and servers. This is applicable when I(command) is C(create).
- >-
Refer OpenManage Enterprise API Reference Guide for more details.
+ job_wait:
+ type: bool
+ description:
+ - Provides the option to wait for job completion.
+      - This option is applicable when I(command) is C(create) or C(deploy).
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+ default: 1200
requirements:
- "python >= 3.8.6"
-author: "Jagadeesh N V (@jagadeeshnv)"
+author:
+ - "Jagadeesh N V (@jagadeeshnv)"
+ - "Husniya Hameed (@husniya_hameed)"
+ - "Kritika Bhateja (@Kritika-Bhateja)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -184,7 +199,7 @@ EXAMPLES = r'''
- name: Deploy template on multiple devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -199,7 +214,7 @@ EXAMPLES = r'''
- name: Deploy template on groups
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -211,7 +226,7 @@ EXAMPLES = r'''
- name: Deploy template on multiple devices along with the attributes values to be modified on the target devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -232,18 +247,18 @@ EXAMPLES = r'''
# Service tags not allowed.
- DeviceId: 12765
Attributes:
- - Id : 15645
- Value : "0.0.0.0"
- IsIgnored : false
+ - Id: 15645
+ Value: "0.0.0.0"
+ IsIgnored: false
- DeviceId: 10173
Attributes:
- - Id : 18968,
- Value : "hostname-1"
- IsIgnored : false
+        - Id: 18968
+ Value: "hostname-1"
+ IsIgnored: false
- name: Deploy template and Operating System (OS) on multiple devices
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -277,7 +292,7 @@ EXAMPLES = r'''
- name: "Deploy template on multiple devices and changes the device-level attributes. After the template is deployed,
install OS using its image"
dellemc.openmanage.ome_template:
- hostname: "192.168.0.1"
+ hostname: "192.168.0.1"
username: "username"
password: "password"
ca_path: "/path/to/ca_cert.pem"
@@ -293,14 +308,14 @@ install OS using its image"
Attributes:
- DeviceId: 12765
Attributes:
- - Id : 15645
- Value : "0.0.0.0"
- IsIgnored : false
+ - Id: 15645
+ Value: "0.0.0.0"
+ IsIgnored: false
- DeviceId: 10173
Attributes:
- - Id : 18968,
- Value : "hostname-1"
- IsIgnored : false
+        - Id: 18968
+ Value: "hostname-1"
+ IsIgnored: false
NetworkBootIsoModel:
BootToNetwork: true
ShareType: "NFS"
@@ -456,6 +471,19 @@ install OS using its image"
Name: "Configuration Compliance"
Content: "{{ lookup('ansible.builtin.file', './test.xml') }}"
Type: 2
+
+- name: Create a template from a reference device with Job wait as false
+ dellemc.openmanage.ome_template:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ device_id: 25123
+ attributes:
+ Name: "New Template"
+ Description: "New Template description"
+ Fqdds: iDRAC,BIOS,
+ job_wait: false
'''
RETURN = r'''
@@ -516,12 +544,13 @@ error_info:
'''
import json
+import time
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import apply_diff_key
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import apply_diff_key, job_tracking
TEMPLATES_URI = "TemplateService/Templates"
@@ -531,12 +560,24 @@ TEMPLATE_ATTRIBUTES = "TemplateService/Templates({template_id})/AttributeDetails
DEVICE_URI = "DeviceService/Devices"
GROUP_URI = "GroupService/Groups"
PROFILE_URI = "ProfileService/Profiles"
+JOB_URI = "JobService/Jobs({job_id})"
SEPRTR = ','
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_FOUND = "Changes found to be applied."
TEMPLATE_NAME_EXISTS = "Template with name '{name}' already exists."
DEPLOY_DEV_ASSIGNED = "The device(s) '{dev}' have been assigned the template(s) '{temp}' " \
"respectively. Please unassign the profiles from the devices."
+MSG_DICT = {'create_when_job_wait_true': "Successfully created a template with ID {0}",
+ 'create_when_job_wait_false': "Successfully submitted a template creation with job ID {0}",
+ 'modify': "Successfully modified the template with ID {0}",
+ 'deploy_when_job_wait_false': "Successfully submitted a template deployment with job ID {0}",
+ 'deploy_when_job_wait_true': "Successfully deployed the template with ID {0}",
+ 'fail': 'Failed to {command} template.',
+ 'delete': "Deleted successfully",
+ 'export': "Exported successfully",
+ 'import': "Imported successfully",
+ 'clone': "Cloned successfully",
+            'timed_out': "Template operation is in progress. Task exited after 'job_wait_timeout'."}
def get_profiles(rest_obj):
@@ -915,31 +956,49 @@ def fail_module(module, **failmsg):
module.fail_json(**failmsg)
-def exit_module(module, response):
+def exit_module(rest_obj, module, response, time_out=False):
password_no_log(module.params.get("attributes"))
resp = None
- my_change = True
+ changed_flag = True
command = module.params.get('command')
result = {}
if command in ["create", "modify", "deploy", "import", "clone"]:
result["return_id"] = response.json_data
resp = result["return_id"]
- if command == 'deploy' and result["return_id"] == 0:
- result["failed"] = True
- command = 'deploy_fail'
- my_change = False
+ if command == 'deploy':
+ if time_out:
+ command = 'timed_out'
+ changed_flag = False
+ elif not result["return_id"]:
+ result["failed"] = True
+ command = 'deploy_fail'
+ changed_flag = False
+ elif module.params["job_wait"]:
+ command = 'deploy_when_job_wait_true'
+ else:
+ command = 'deploy_when_job_wait_false'
+ elif command == 'create':
+ if time_out:
+ resp = get_job_id(rest_obj, resp)
+ command = 'timed_out'
+ changed_flag = False
+ elif module.params["job_wait"]:
+ command = 'create_when_job_wait_true'
+ else:
+ time.sleep(5)
+ resp = get_job_id(rest_obj, resp)
+ command = 'create_when_job_wait_false'
if command == 'export':
- my_change = False
+ changed_flag = False
result = response.json_data
- msg_dict = {'create': "Successfully created a template with ID {0}".format(resp),
- 'modify': "Successfully modified the template with ID {0}".format(resp),
- 'deploy': "Successfully created the template-deployment job with ID {0}".format(resp),
- 'deploy_fail': 'Failed to deploy template.',
- 'delete': "Deleted successfully",
- 'export': "Exported successfully",
- 'import': "Imported successfully",
- 'clone': "Cloned successfully"}
- module.exit_json(msg=msg_dict.get(command), changed=my_change, **result)
+ message = MSG_DICT.get(command).format(resp)
+ module.exit_json(msg=message, changed=changed_flag, **result)
+
+
+def get_job_id(rest_obj, template_id):
+ template = rest_obj.invoke_request("GET", TEMPLATE_PATH.format(template_id=template_id))
+ job_id = template.json_data.get("TaskId")
+ return job_id
def main():
@@ -954,6 +1013,8 @@ def main():
"device_service_tag": {"required": False, "type": 'list', "default": [], "elements": 'str'},
"device_group_names": {"required": False, "type": 'list', "default": [], "elements": 'str'},
"attributes": {"required": False, "type": 'dict'},
+ "job_wait": {"required": False, "type": "bool", "default": True},
+ "job_wait_timeout": {"required": False, "type": "int", "default": 1200}
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -976,10 +1037,38 @@ def main():
_validate_inputs(module)
with RestOME(module.params, req_session=True) as rest_obj:
path, payload, rest_method = _get_resource_parameters(module, rest_obj)
- # module.exit_json(payload=payload, path=path)
resp = rest_obj.invoke_request(rest_method, path, data=payload)
+ job_wait = module.params["job_wait"]
+ job_id = None
+ if job_wait:
+ if module.params["command"] == "create":
+ template_id = resp.json_data
+ count = 30
+ sleep_time = 5
+ while count > 0:
+ try:
+ job_id = get_job_id(rest_obj, template_id)
+ if job_id:
+ break
+ time.sleep(sleep_time)
+ count = count - sleep_time
+ except HTTPError:
+ time.sleep(sleep_time)
+ count = count - sleep_time
+ continue
+ elif module.params["command"] == "deploy":
+ job_id = resp.json_data
+ if job_id:
+ job_uri = JOB_URI.format(job_id=job_id)
+ job_failed, msg, job_dict, wait_time = job_tracking(rest_obj, job_uri, max_job_wait_sec=module.params["job_wait_timeout"])
+ if job_failed:
+ if job_dict.get('LastRunStatus').get('Name') == "Running":
+ exit_module(rest_obj, module, resp, True)
+ else:
+ message = MSG_DICT.get('fail').format(command=module.params["command"])
+ module.fail_json(msg=message)
if resp.success:
- exit_module(module, resp)
+ exit_module(rest_obj, module, resp)
except HTTPError as err:
fail_module(module, msg=str(err), error_info=json.load(err))
except URLError as err:
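
For reference, a minimal sketch (not part of the patch) of the template job-tracking flow this change introduces: poll the template resource until OME attaches a TaskId, then hand off to the shared job_tracking helper with the call signature used above. The TEMPLATE_PATH value, retry count, and interval are illustrative assumptions.

    import time
    from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import job_tracking

    TEMPLATE_PATH = "TemplateService/Templates({template_id})"  # assumed single-template URI
    JOB_URI = "JobService/Jobs({job_id})"

    def track_template_job(rest_obj, template_id, job_wait_timeout=1200, interval=5, retries=6):
        # Poll the template resource until a TaskId shows up, as get_job_id() does above.
        job_id = None
        for _ in range(retries):
            resp = rest_obj.invoke_request("GET", TEMPLATE_PATH.format(template_id=template_id))
            job_id = resp.json_data.get("TaskId")
            if job_id:
                break
            time.sleep(interval)
        if not job_id:
            return None
        # Delegate the wait to job_tracking, using the same keyword argument as main() above.
        job_failed, msg, job_dict, wait_time = job_tracking(
            rest_obj, JOB_URI.format(job_id=job_id), max_job_wait_sec=job_wait_timeout)
        return job_failed, msg, job_dict
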
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
index 701874f70..88a09ae95 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_identity_pool.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -37,7 +37,7 @@ requirements:
- "python >= 3.8.6"
author: "Felix Stephen (@felixs88)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -93,7 +93,7 @@ error_info:
import json
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ssl import SSLError
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
index e233c5ac5..9e91a5fb3 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -37,7 +37,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
index 987a8b610..c9d0bd97d 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -95,7 +95,7 @@ requirements:
author:
- "Jagadeesh N V(@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
@@ -194,7 +194,7 @@ import json
from ssl import SSLError
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
-from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
NETWORK_HIERARCHY_VIEW = 4 # For Network hierarchy View in a Template
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py
new file mode 100644
index 000000000..b91a6a946
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_template_network_vlan_info.py
@@ -0,0 +1,251 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = """
+---
+module: ome_template_network_vlan_info
+short_description: Retrieves network configuration of a template.
+version_added: "7.2.0"
+description:
+ - "This module retrieves the network configuration of a template on OpenManage Enterprise or OpenManage Enterprise Modular."
+extends_documentation_fragment:
+ - dellemc.openmanage.ome_auth_options
+options:
+ template_id:
+ description:
+ - Id of the template.
+ - This is mutually exclusive with I(template_name).
+ type: int
+ template_name:
+ description:
+ - Name of the template.
+ - This is mutually exclusive with I(template_id).
+ - "C(Note) If I(template_id) or I(template_name) option is not provided, the module retrieves network VLAN info of
+ all templates."
+ type: str
+requirements:
+ - "python >= 3.9.6"
+author:
+ - Jagadeesh N V(@jagadeeshnv)
+notes:
+ - Run this module on a system that has direct access to Dell OpenManage Enterprise.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Retrieve network details of all templates.
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+
+- name: Retrieve network details using template ID
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_id: 1234
+
+- name: Retrieve network details using template name
+ dellemc.openmanage.ome_template_network_vlan_info:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ template_name: template1
+"""
+
+RETURN = r'''
+---
+msg:
+ description: Status of template VLAN information retrieval.
+ returned: always
+ type: str
+ sample: "Successfully retrieved the template network VLAN information."
+vlan_info:
+ description: Information about the template network VLAN.
+ returned: success
+ type: list
+ elements: dict
+ sample: [{
+ "TemplateId": 58,
+ "TemplateName": "t2",
+ "NicBondingTechnology" : "LACP",
+ "NicModel": {
+ "NIC in Mezzanine 1B" : {
+ '1' : {"Port" : 1,
+ "Vlan Tagged" : ["25367", "32656", "32658", "26898"],
+ "Vlan UnTagged" : "21474",
+ "NICBondingEnabled" : "false"},
+ '2' : {"Port" : 2,
+ "Vlan Tagged" : [],
+ "Vlan UnTagged" : "32658",
+ "NIC Bonding Enabled" : "true"}
+ },
+ "NIC in Mezzanine 1A" : {
+ '1' : {"Port" : 1,
+ "Vlan Tagged" : ["32656", "32658"],
+ "Vlan UnTagged" : "25367",
+ "NIC Bonding Enabled" : "true"},
+ '2' : {"Port" : 2,
+ "Vlan Tagged" : ["21474"],
+ "Vlan UnTagged" : "32656",
+ "NIC Bonding Enabled" : "false"}
+ }
+ }}]
+error_info:
+ description: Details of the HTTP Error.
+ returned: on HTTP error
+ type: dict
+ sample: {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "GEN1234",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ }
+'''
+
+import json
+from ssl import SSLError
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, ome_auth_params
+
+NETWORK_HIERARCHY_VIEW = 4 # For Network hierarchy View in a Template
+TEMPLATE_ATTRIBUTE_VIEW = "TemplateService/Templates({0})/Views({1})/AttributeViewDetails"
+TEMPLATE_VIEW = "TemplateService/Templates" # Add ?$top=9999 if not query
+KEY_ATTR_NAME = 'DisplayName'
+SUB_GRP_ATTR_NAME = 'SubAttributeGroups'
+GRP_ATTR_NAME = 'Attributes'
+GRP_NAME_ID_ATTR_NAME = 'GroupNameId'
+CUSTOM_ID_ATTR_NAME = 'CustomId'
+SUCCESS_MSG = "Successfully retrieved the template network VLAN information."
+NO_TEMPLATES_MSG = "No templates with network info were found."
+
+
+def get_template_details(module, rest_obj):
+ id = module.params.get('template_id')
+ query_param = {"$filter": "Id eq {0}".format(id)}
+ srch = 'Id'
+ if not id:
+ id = module.params.get('template_name')
+ query_param = {"$filter": "Name eq '{0}'".format(id)}
+ srch = 'Name'
+ resp = rest_obj.invoke_request('GET', TEMPLATE_VIEW, query_param=query_param)
+ if resp.success and resp.json_data.get('value'):
+ tlist = resp.json_data.get('value', [])
+ for xtype in tlist:
+ if xtype.get(srch) == id:
+ return xtype
+ module.exit_json(failed=True, msg="Template with {0} '{1}' not found.".format(srch.lower(), id))
+
+
+def get_template_vlan_info(rest_obj, template_id):
+ result = {}
+ try:
+ resp = rest_obj.invoke_request('GET', TEMPLATE_ATTRIBUTE_VIEW.format(template_id, NETWORK_HIERARCHY_VIEW))
+ if resp.json_data.get('AttributeGroups', []):
+ nic_model = resp.json_data.get('AttributeGroups', [])
+ for xnic in nic_model:
+ if xnic.get(KEY_ATTR_NAME) == "NICModel":
+ nic_group = xnic.get('SubAttributeGroups', [])
+ nic_group_dict = {}
+ for nic in nic_group:
+ nic_dict = {}
+ for port in nic.get(SUB_GRP_ATTR_NAME): # ports
+ port_number = port.get(GRP_NAME_ID_ATTR_NAME)
+ port_dict = {"Port": port_number}
+ for partition in port.get(SUB_GRP_ATTR_NAME): # partitions
+ for attribute in partition.get(GRP_ATTR_NAME): # attributes
+ if attribute.get(CUSTOM_ID_ATTR_NAME) != 0:
+ if attribute.get(KEY_ATTR_NAME).lower() == "vlan untagged":
+ port_dict[attribute.get(KEY_ATTR_NAME)] = int(attribute.get("Value"))
+ if attribute.get(KEY_ATTR_NAME).lower() == "vlan tagged":
+ port_dict[attribute.get(KEY_ATTR_NAME)] = []
+ if attribute.get("Value"):
+ port_dict[attribute.get(KEY_ATTR_NAME)] = \
+ list(map(int, (attribute.get("Value")).replace(" ", "").split(",")))
+ if attribute.get(KEY_ATTR_NAME).lower() == "nic bonding enabled":
+ port_dict[attribute.get(KEY_ATTR_NAME)] = attribute.get("Value")
+ nic_dict[port_number] = port_dict
+ nic_group_dict[nic.get(KEY_ATTR_NAME)] = nic_dict
+ result[xnic.get(KEY_ATTR_NAME)] = nic_group_dict
+ if xnic.get(KEY_ATTR_NAME) == "NicBondingTechnology":
+ nic_bonding_list = xnic.get("Attributes", [])
+ for xbnd in nic_bonding_list:
+ if xbnd.get(KEY_ATTR_NAME).lower() == "nic bonding technology":
+ result[xnic.get(KEY_ATTR_NAME)] = xbnd.get('Value')
+ except Exception:
+ result = {}
+ return result
+
+
+def main():
+ argument_spec = {
+ "template_id": {"type": 'int'},
+ "template_name": {"type": 'str'}
+ }
+ argument_spec.update(ome_auth_params)
+ module = AnsibleModule(argument_spec=argument_spec,
+ mutually_exclusive=[('template_id', 'template_name')],
+ supports_check_mode=True)
+ try:
+ templates = []
+ with RestOME(module.params, req_session=True) as rest_obj:
+ # all_templates = True
+ if module.params.get("template_id") or module.params.get("template_name"):
+ tmplt = get_template_details(module, rest_obj)
+ templates.append(tmplt)
+ # all_templates = False
+ else:
+ resp = rest_obj.get_all_items_with_pagination(TEMPLATE_VIEW)
+ templates = resp.get("value")
+ vlan_info = []
+ for xtmp in templates:
+ if xtmp.get("ViewTypeId") != 4:
+ result = get_template_vlan_info(rest_obj, xtmp['Id'])
+ result["TemplateId"] = xtmp['Id']
+ result["TemplateName"] = xtmp['Name']
+ vlan_info.append(result)
+ # if vlan_info is not None and not all_templates:
+ module.exit_json(msg=SUCCESS_MSG, vlan_info=vlan_info)
+ # else:
+ # module.exit_json(msg=NO_TEMPLATES_MSG, failed=all_templates)
+ except HTTPError as err:
+ module.fail_json(msg=str(err), error_info=json.load(err))
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (IOError, ValueError, SSLError, TypeError, ConnectionError,
+ AttributeError, IndexError, KeyError, OSError) as err:
+ module.fail_json(msg=str(err))
+
+
+if __name__ == '__main__':
+ main()
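
A hypothetical consumer of the vlan_info structure returned by this new module, flattening the nested NicModel dictionary documented in the RETURN sample; the key names follow that sample and are otherwise assumptions about the OME attribute display names.

    def untagged_vlans_by_port(vlan_info):
        # Flatten the module output into {(template, nic, port): untagged VLAN}.
        flat = {}
        for template in vlan_info:
            for nic_name, ports in (template.get("NicModel") or {}).items():
                for port in ports.values():
                    flat[(template.get("TemplateName"), nic_name, port.get("Port"))] = port.get("Vlan UnTagged")
        return flat
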
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
index c768b4ca5..27092a036 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -56,7 +56,7 @@ requirements:
- "python >= 3.8.6"
author: "Sajna Shetty(@Sajna-Shetty)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module does not support C(check_mode).
'''
@@ -72,7 +72,7 @@ EXAMPLES = r'''
UserName: "user1"
Password: "UserPassword"
RoleId: "10"
- Enabled: True
+ Enabled: true
- name: Create user with all parameters
dellemc.openmanage.ome_user:
@@ -85,10 +85,10 @@ EXAMPLES = r'''
Description: "user2 description"
Password: "UserPassword"
RoleId: "10"
- Enabled: True
+ Enabled: true
DirectoryServiceId: 0
UserTypeId: 1
- Locked: False
+ Locked: false
Name: "user2"
- name: Modify existing user
@@ -101,7 +101,7 @@ EXAMPLES = r'''
attributes:
UserName: "user3"
RoleId: "10"
- Enabled: True
+ Enabled: true
Description: "Modify user Description"
- name: Delete existing user using id
@@ -236,7 +236,7 @@ def main():
"choices": ['present', 'absent']},
"user_id": {"required": False, "type": 'int'},
"name": {"required": False, "type": 'str'},
- "attributes": {"required": False, "type": 'dict'},
+ "attributes": {"required": False, "type": 'dict', "default": {}},
}
specs.update(ome_auth_params)
module = AnsibleModule(
@@ -247,8 +247,6 @@ def main():
try:
_validate_inputs(module)
- if module.params.get("attributes") is None:
- module.params["attributes"] = {}
with RestOME(module.params, req_session=True) as rest_obj:
method, path, payload = _get_resource_parameters(module, rest_obj)
resp = rest_obj.invoke_request(method, path, data=payload)
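
The ome_user change above drops the explicit None check because AnsibleModule substitutes the declared default when an option is omitted; a minimal, self-contained illustration of that pattern (the module body is hypothetical):

    from ansible.module_utils.basic import AnsibleModule

    def main():
        specs = {
            # With "default": {}, module.params["attributes"] is {} when the user omits the
            # option, so no post-hoc "if attributes is None" normalisation is needed.
            "attributes": {"required": False, "type": "dict", "default": {}},
        }
        module = AnsibleModule(argument_spec=specs, supports_check_mode=False)
        module.exit_json(attributes=module.params["attributes"])

    if __name__ == "__main__":
        main()
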
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
index b42f180fe..488444694 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/ome_user_info.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -35,9 +35,9 @@ options:
type: str
requirements:
- "python >= 3.8.6"
-author: "Jagadeesh N V(@jagadeeshnv)"
+author: "Jagadeesh N V (@jagadeeshnv)"
notes:
- - Run this module from a system that has direct access to DellEMC OpenManage Enterprise.
+ - Run this module from a system that has direct access to Dell OpenManage Enterprise.
- This module supports C(check_mode).
'''
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
index c0a0fc475..c974aaccc 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_event_subscription.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+
@@ -30,7 +30,7 @@ options:
- The HTTPS URI of the destination to send events.
- HTTPS is required.
type: str
- required: True
+ required: true
event_type:
description:
- Specifies the event type to be subscribed.
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
index a03ba0407..98f64f780 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -30,19 +30,38 @@ options:
- Firmware Image location URI or local path.
- For example- U(http://<web_address>/components.exe) or /home/firmware_repo/component.exe.
type: str
- required: True
+ required: true
transfer_protocol:
description: Protocol used to transfer the firmware image file. Applicable for URI based update.
type: str
default: HTTP
choices: ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]
+ job_wait:
+ description: Provides the option to wait for job completion.
+ type: bool
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+      - "Note: If a firmware update needs a reboot, the job is scheduled and waits for the
+        number of seconds specified in I(job_wait_timeout). To reduce the wait time, either set
+        I(job_wait_timeout) to a lower value or set I(job_wait) to false and retrigger."
+ default: 3600
requirements:
- "python >= 3.8.6"
- "urllib3"
author:
- "Felix Stephen (@felixs88)"
+ - "Husniya Hameed (@husniya_hameed)"
+ - "Shivam Sharma (@Shivam-Sharma)"
+ - "Kritika Bhateja (@Kritika_Bhateja)"
+ - "Abhishek Sinha (@ABHISHEK-SINHA10)"
notes:
- Run this module from a system that has direct access to Redfish APIs.
+ - This module supports both IPv4 and IPv6 addresses.
+ - This module supports only iDRAC9 and above.
- This module does not support C(check_mode).
"""
@@ -57,6 +76,17 @@ EXAMPLES = """
image_uri: "http://192.168.0.2/firmware_repo/component.exe"
transfer_protocol: "HTTP"
+- name: Update the firmware from a single executable file available over HTTP with job_wait
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+ transfer_protocol: "HTTP"
+ job_wait: true
+ job_wait_timeout: 600
+
- name: Update the firmware from a single executable file available in a local path
dellemc.openmanage.redfish_firmware:
baseuri: "192.168.0.1"
@@ -72,7 +102,7 @@ msg:
description: Overall status of the firmware update task.
returned: always
type: str
- sample: Successfully submitted the firmware update task.
+ sample: "Successfully updated the firmware."
task:
description: Returns ID and URI of the created task.
returned: success
@@ -112,6 +142,7 @@ error_info:
import json
import os
+import time
from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
@@ -126,7 +157,16 @@ except ImportError:
HAS_LIB = False
UPDATE_SERVICE = "UpdateService"
-JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
+JOB_URI = "JobService/Jobs/{job_id}"
+JOB_WAIT_MSG = 'Job wait timed out after {0} seconds.'
+FAIL_JOB_MSG = "Firmware update failed."
+SUCCESS_JOB_MSG = "Successfully updated the firmware."
+SCHEDULE_JOB_MSG = "Successfully scheduled the firmware job."
+JOBSTATUS_SUCCESS = "success"
+JOBSTATUS_FAILED = "failed"
+JOBSTATUS_TIMED_OUT = "timed_out"
+JOBSTATUS_SCHEDULED = "scheduled"
+JOBSTATUS_ERRORED = "errored"
def _encode_form_data(payload_file):
@@ -176,8 +216,7 @@ def firmware_update(obj, module):
data, ctype = _encode_form_data(binary_payload)
headers = {"If-Match": resp_inv.headers.get("etag")}
headers.update({"Content-Type": ctype})
- upload_status = obj.invoke_request("POST", push_uri, data=data, headers=headers, dump=False,
- api_timeout=100)
+ upload_status = obj.invoke_request("POST", push_uri, data=data, headers=headers, dump=False, api_timeout=module.params["timeout"])
if upload_status.status_code == 201:
payload = {"ImageURI": upload_status.headers.get("location")}
update_status = obj.invoke_request("POST", update_uri, data=payload)
@@ -186,11 +225,51 @@ def firmware_update(obj, module):
return update_status
+def wait_for_job_completion(module, job_uri, job_wait_timeout=900, interval=30):
+ try:
+ with Redfish(module.params, req_session=False) as obj:
+ track_counter = 0
+ final_jobstatus = ""
+ job_msg = ""
+ while track_counter <= job_wait_timeout:
+ try:
+ response = obj.invoke_request("GET", "{0}{1}".format(obj.root_uri, job_uri))
+ if response.json_data.get("PercentComplete") == 100 and response.json_data.get("JobState") == "Completed":
+ if response.json_data.get("JobStatus") == "OK":
+ final_jobstatus = JOBSTATUS_SUCCESS
+ job_msg = SUCCESS_JOB_MSG
+ else:
+ final_jobstatus = JOBSTATUS_FAILED
+ job_msg = FAIL_JOB_MSG
+ break
+ track_counter += interval
+ time.sleep(interval)
+ except (HTTPError, URLError):
+ track_counter += interval
+ time.sleep(interval)
+ # TIMED OUT
+ # when job is scheduled
+ if not final_jobstatus:
+ if response.json_data.get("PercentComplete") == 0 and response.json_data.get("JobState") == "Starting":
+ final_jobstatus = JOBSTATUS_SCHEDULED
+ job_msg = SCHEDULE_JOB_MSG
+ # when job timed out
+ else:
+ job_msg = JOB_WAIT_MSG.format(job_wait_timeout)
+ final_jobstatus = JOBSTATUS_TIMED_OUT
+ except Exception as error_message:
+ job_msg = str(error_message)
+ module.exit_json(msg=str(job_msg))
+ final_jobstatus = JOBSTATUS_ERRORED
+ return final_jobstatus, job_msg
+
+
def main():
specs = {
"image_uri": {"required": True, "type": "str"},
- "transfer_protocol": {"type": "str", "default": "HTTP",
- "choices": ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]},
+ "transfer_protocol": {"type": "str", "default": "HTTP", "choices": ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]},
+ "job_wait": {"required": False, "type": 'bool', "default": True},
+ "job_wait_timeout": {"required": False, "type": "int", "default": 3600}
}
specs.update(redfish_auth_params)
module = AnsibleModule(
@@ -206,10 +285,21 @@ def main():
message = "Successfully submitted the firmware update task."
task_uri = status.headers.get("Location")
job_id = task_uri.split("/")[-1]
- module.exit_json(msg=message, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
- module.fail_json(msg=message, error_info=json.loads(status))
+ else:
+ module.fail_json(msg=message, error_info=json.loads(status))
+ job_wait = module.params['job_wait']
+ job_wait_timeout = module.params['job_wait_timeout']
+ if job_wait and job_wait_timeout > 0:
+ job_uri = JOB_URI.format(job_id=job_id)
+ job_resp, job_msg = wait_for_job_completion(module, job_uri, job_wait_timeout=module.params['job_wait_timeout'])
+ if job_resp == JOBSTATUS_FAILED:
+ module.exit_json(msg=job_msg, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, failed=True)
+ else:
+ module.exit_json(msg=job_msg, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
+ else:
+ module.exit_json(msg=message, task={"id": job_id, "uri": JOB_URI.format(job_id=job_id)}, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
except (RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
module.fail_json(msg=str(e))
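
A short sketch (not part of the patch) of how the new wait_for_job_completion loop classifies a job that stops before reaching "Completed": a job still at 0% in the "Starting" state is reported as scheduled, anything else as timed out. The constants mirror those defined above; the response field names are as used in the module.

    JOB_WAIT_MSG = 'Job wait timed out after {0} seconds.'
    SCHEDULE_JOB_MSG = "Successfully scheduled the firmware job."
    JOBSTATUS_SCHEDULED = "scheduled"
    JOBSTATUS_TIMED_OUT = "timed_out"

    def classify_unfinished_job(json_data, job_wait_timeout):
        # A job that never progressed past "Starting" is typically waiting on a reboot.
        if json_data.get("PercentComplete") == 0 and json_data.get("JobState") == "Starting":
            return JOBSTATUS_SCHEDULED, SCHEDULE_JOB_MSG
        return JOBSTATUS_TIMED_OUT, JOB_WAIT_MSG.format(job_wait_timeout)
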
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py
new file mode 100644
index 000000000..ef93d669f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_firmware_rollback.py
@@ -0,0 +1,358 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = r"""
+---
+module: redfish_firmware_rollback
+short_description: Perform a component firmware rollback using the component name
+version_added: "8.2.0"
+description:
+  - This module allows you to roll back the firmware of different server components.
+ - Depending on the component, the firmware update is applied after an automatic or manual reboot.
+extends_documentation_fragment:
+ - dellemc.openmanage.redfish_auth_options
+options:
+ name:
+ type: str
+ required: true
+    description: The name or regular expression of the component to match. The value is case-sensitive.
+ reboot:
+ description:
+ - Reboot the server to apply the previous version of the firmware.
+ - C(true) reboots the server to rollback the firmware to the available version.
+ - C(false) schedules the rollback of firmware until the next restart.
+ - When I(reboot) is C(false), some components update immediately, and the server may reboot.
+ So, the module must wait till the server is accessible.
+ type: bool
+ default: true
+ reboot_timeout:
+ type: int
+ description: Wait time in seconds. The module waits for this duration till the server reboots.
+ default: 900
+requirements:
+ - "python >= 3.9.6"
+author:
+ - "Felix Stephen (@felixs88)"
+notes:
+ - Run this module from a system that has direct access to Redfish APIs.
+ - For components that do not require a reboot, firmware rollback proceeds irrespective of
+    whether I(reboot) is C(true) or C(false).
+ - This module supports IPv4 and IPv6 addresses.
+ - This module supports C(check_mode).
+"""
+
+EXAMPLES = """
+---
+- name: Rollback a BIOS component firmware
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "BIOS"
+
+- name: Rollback all NIC cards with a name starting with 'Broadcom Gigabit'.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "Broadcom Gigabit Ethernet.*"
+
+- name: Rollback all the component firmware except BIOS component.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: "(?!BIOS).*"
+
+- name: Rollback all the available firmware components.
+ dellemc.openmanage.redfish_firmware_rollback:
+ baseuri: "192.168.0.1:443"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ name: ".*"
+"""
+
+RETURN = """
+---
+msg:
+ type: str
+ description: Overall firmware rollback status.
+ returned: always
+ sample: "Successfully completed the job for firmware rollback."
+status:
+ type: list
+ description: Firmware rollback job and progress details from the iDRAC.
+ returned: success
+ sample: [{
+ "ActualRunningStartTime": "2023-08-04T12:26:55",
+ "ActualRunningStopTime": "2023-08-04T12:32:35",
+ "CompletionTime": "2023-08-04T12:32:35",
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_911698303631",
+ "JobState": "Completed",
+ "JobType": "FirmwareUpdate",
+ "Message": "Job completed successfully.",
+ "MessageArgs": [],
+ "MessageId": "PR19",
+ "Name": "Firmware Rollback: Firmware",
+ "PercentComplete": 100,
+ "StartTime": "2023-08-04T12:23:50",
+ "TargetSettingsURI": null
+ }]
+error_info:
+ type: dict
+ description: Details of the HTTP error.
+ returned: on http error
+ sample: {
+ "error": {
+ "@Message.ExtendedInfo": [{
+ "Message": "InstanceID value provided for the update operation is invalid",
+ "MessageArgs": [],
+ "MessageArgs@odata.count": 0,
+ "MessageId": "IDRAC.2.8.SUP024",
+ "RelatedProperties": [],
+ "RelatedProperties@odata.count": 0,
+ "Resolution": "Enumerate inventory, copy the InstanceID value and provide that value for the update operation.",
+ "Severity": "Warning"
+ }],
+ "code": "Base.1.12.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information"
+ }
+ }
+"""
+
+
+import json
+import re
+import time
+from ssl import SSLError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params, \
+ SESSION_RESOURCE_COLLECTION
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import wait_for_redfish_reboot_job, \
+ wait_for_redfish_job_complete, strip_substr_dict, MANAGER_JOB_ID_URI, RESET_UNTRACK, MANAGERS_URI, RESET_SUCCESS
+from ansible.module_utils.basic import AnsibleModule
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+
+
+UPDATE_SERVICE = "UpdateService"
+SYSTEM_RESOURCE_ID = "System.Embedded.1"
+NO_COMPONENTS = "There were no firmware components to rollback."
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+NOT_SUPPORTED = "The target firmware version does not support the firmware rollback."
+COMPLETED_ERROR = "The job for firmware rollback has been completed with error(s)."
+SCHEDULED_ERROR = "The job for firmware rollback has been scheduled with error(s)."
+ROLLBACK_SUCCESS = "Successfully completed the job for firmware rollback."
+ROLLBACK_SCHEDULED = "Successfully scheduled the job for firmware rollback."
+ROLLBACK_FAILED = "Failed to complete the job for firmware rollback."
+REBOOT_FAIL = "Failed to reboot the server."
+NEGATIVE_TIMEOUT_MESSAGE = "The parameter reboot_timeout value cannot be negative or zero."
+JOB_WAIT_MSG = "Task exited after waiting for {0} seconds. Check console for firmware rollback status."
+REBOOT_COMP = ["Integrated Dell Remote Access Controller"]
+
+
+def get_rollback_preview_target(redfish_obj, module):
+ action_resp = redfish_obj.invoke_request("GET", "{0}{1}".format(redfish_obj.root_uri, UPDATE_SERVICE))
+ action_attr = action_resp.json_data["Actions"]
+ update_uri = None
+ if "#UpdateService.SimpleUpdate" in action_attr:
+ update_service = action_attr.get("#UpdateService.SimpleUpdate")
+ if 'target' not in update_service:
+ module.fail_json(msg=NOT_SUPPORTED)
+ update_uri = update_service.get('target')
+ inventory_uri = action_resp.json_data.get('FirmwareInventory').get('@odata.id')
+ inventory_uri_resp = redfish_obj.invoke_request("GET", "{0}{1}".format(inventory_uri, "?$expand=*($levels=1)"),
+ api_timeout=120)
+ previous_component = list(filter(lambda d: d["Id"].startswith("Previous"), inventory_uri_resp.json_data["Members"]))
+ if not previous_component:
+ module.fail_json(msg=NO_COMPONENTS)
+ component_name = module.params["name"]
+ try:
+ component_compile = re.compile(r"^{0}$".format(component_name))
+ except Exception:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ prev_uri, reboot_uri = {}, []
+ for each in previous_component:
+ available_comp = each["Name"]
+ available_name = re.match(component_compile, available_comp)
+ if not available_name:
+ continue
+ if available_name.group() in REBOOT_COMP:
+ reboot_uri.append(each["@odata.id"])
+ continue
+ prev_uri[each["Version"]] = each["@odata.id"]
+ if module.check_mode and (prev_uri or reboot_uri):
+ module.exit_json(msg=CHANGES_FOUND, changed=True)
+ elif not prev_uri and not reboot_uri:
+ module.exit_json(msg=NO_CHANGES_FOUND)
+ return list(prev_uri.values()), reboot_uri, update_uri
+
+
+def get_job_status(redfish_obj, module, job_ids, job_wait=True):
+ each_status, failed_count, js_job_msg = [], 0, ""
+ wait_timeout = module.params["reboot_timeout"]
+ for each in job_ids:
+ each_job_uri = MANAGER_JOB_ID_URI.format(each)
+ job_resp, js_job_msg = wait_for_redfish_job_complete(redfish_obj, each_job_uri, job_wait=job_wait,
+ wait_timeout=wait_timeout)
+ if job_resp and js_job_msg:
+ module.exit_json(msg=JOB_WAIT_MSG.format(wait_timeout), job_status=[strip_substr_dict(job_resp.json_data)],
+ changed=True)
+ job_status = job_resp.json_data
+ if job_status["JobState"] == "Failed":
+ failed_count += 1
+ strip_odata = strip_substr_dict(job_status)
+ each_status.append(strip_odata)
+ return each_status, failed_count
+
+
+def require_session(idrac, module):
+ session_id, token = "", None
+ payload = {'UserName': module.params["username"], 'Password': module.params["password"]}
+ path = SESSION_RESOURCE_COLLECTION["SESSION"]
+ resp = idrac.invoke_request('POST', path, data=payload, api_timeout=120)
+ if resp and resp.success:
+ session_id = resp.json_data.get("Id")
+ token = resp.headers.get('X-Auth-Token')
+ return session_id, token
+
+
+def wait_for_redfish_idrac_reset(module, redfish_obj, wait_time_sec, interval=30):
+ time.sleep(interval // 2)
+ msg = RESET_UNTRACK
+ wait = wait_time_sec
+ track_failed = True
+ resetting = False
+ while wait > 0 and track_failed:
+ try:
+ redfish_obj.invoke_request("GET", MANAGERS_URI, api_timeout=120)
+ msg = RESET_SUCCESS
+ track_failed = False
+ break
+ except HTTPError as err:
+ if err.getcode() == 401:
+ new_redfish_obj = Redfish(module.params, req_session=True)
+ sid, token = require_session(new_redfish_obj, module)
+ redfish_obj.session_id = sid
+ redfish_obj._headers.update({"X-Auth-Token": token})
+ track_failed = False
+ if not resetting:
+ resetting = True
+ break
+ time.sleep(interval)
+ wait -= interval
+ resetting = True
+ except URLError:
+ time.sleep(interval)
+ wait -= interval
+ if not resetting:
+ resetting = True
+ except Exception:
+ time.sleep(interval)
+ wait -= interval
+ resetting = True
+ return track_failed, resetting, msg
+
+
+def simple_update(redfish_obj, preview_uri, update_uri):
+ job_ids = []
+ for uri in preview_uri:
+ resp = redfish_obj.invoke_request("POST", update_uri, data={"ImageURI": uri})
+ time.sleep(30)
+ task_uri = resp.headers.get("Location")
+ task_id = task_uri.split("/")[-1]
+ job_ids.append(task_id)
+ return job_ids
+
+
+def rollback_firmware(redfish_obj, module, preview_uri, reboot_uri, update_uri):
+ current_job_status, failed_cnt, resetting = [], 0, False
+ job_ids = simple_update(redfish_obj, preview_uri, update_uri)
+ if module.params["reboot"] and preview_uri:
+ payload = {"ResetType": "ForceRestart"}
+ job_resp_status, reset_status, reset_fail = wait_for_redfish_reboot_job(redfish_obj, SYSTEM_RESOURCE_ID,
+ payload=payload)
+ if reset_status and job_resp_status:
+ job_uri = MANAGER_JOB_ID_URI.format(job_resp_status["Id"])
+ job_resp, job_msg = wait_for_redfish_job_complete(redfish_obj, job_uri)
+ job_status = job_resp.json_data
+ if job_status["JobState"] != "RebootCompleted":
+ if job_msg:
+ module.fail_json(msg=JOB_WAIT_MSG.format(module.params["reboot_timeout"]))
+ else:
+ module.fail_json(msg=REBOOT_FAIL)
+ elif not reset_status and reset_fail:
+ module.fail_json(msg=reset_fail)
+
+ current_job_status, failed = get_job_status(redfish_obj, module, job_ids, job_wait=True)
+ failed_cnt += failed
+ if not module.params["reboot"] and preview_uri:
+ current_job_status, failed = get_job_status(redfish_obj, module, job_ids, job_wait=False)
+ failed_cnt += failed
+ if reboot_uri:
+ job_ids = simple_update(redfish_obj, reboot_uri, update_uri)
+ track, resetting, js_job_msg = wait_for_redfish_idrac_reset(module, redfish_obj, 900)
+ if not track and resetting:
+ reboot_job_status, failed = get_job_status(redfish_obj, module, job_ids, job_wait=True)
+ current_job_status.extend(reboot_job_status)
+ failed_cnt += failed
+ return current_job_status, failed_cnt, resetting
+
+
+def main():
+ specs = {
+ "name": {"required": True, "type": "str"},
+ "reboot": {"type": "bool", "default": True},
+ "reboot_timeout": {"type": "int", "default": 900},
+ }
+ specs.update(redfish_auth_params)
+ module = AnsibleModule(argument_spec=specs, supports_check_mode=True)
+ if module.params["reboot_timeout"] <= 0:
+ module.fail_json(msg=NEGATIVE_TIMEOUT_MESSAGE)
+ try:
+ with Redfish(module.params, req_session=True) as redfish_obj:
+ preview_uri, reboot_uri, update_uri = get_rollback_preview_target(redfish_obj, module)
+ job_status, failed_count, resetting = rollback_firmware(redfish_obj, module, preview_uri, reboot_uri, update_uri)
+ if not job_status or (failed_count == len(job_status)):
+ module.exit_json(msg=ROLLBACK_FAILED, status=job_status, failed=True)
+ if module.params["reboot"]:
+ msg, module_fail, changed = ROLLBACK_SUCCESS, False, True
+ if failed_count > 0 and failed_count != len(job_status):
+ msg, module_fail, changed = COMPLETED_ERROR, True, False
+ else:
+ msg, module_fail, changed = ROLLBACK_SCHEDULED, False, True
+ if failed_count > 0 and failed_count != len(job_status):
+ msg, module_fail, changed = SCHEDULED_ERROR, True, False
+ elif resetting and len(job_status) == 1 and failed_count != len(job_status):
+ msg, module_fail, changed = ROLLBACK_SUCCESS, False, True
+ module.exit_json(msg=msg, job_status=job_status, failed=module_fail, changed=changed)
+ except HTTPError as err:
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (RuntimeError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, IOError, AssertionError, OSError, SSLError) as e:
+ module.fail_json(msg=str(e))
+
+
+if __name__ == '__main__':
+ main()
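
The rollback module anchors the user-supplied name as a full regular expression and routes iDRAC components through a separate reboot path; a condensed sketch of that selection logic (the input entries are assumed to be the "Previous*" firmware inventory members with Name, Version and @odata.id keys):

    import re

    REBOOT_COMP = ["Integrated Dell Remote Access Controller"]

    def split_rollback_targets(previous_components, name_pattern):
        pattern = re.compile(r"^{0}$".format(name_pattern))  # full-string, case-sensitive match
        prev_uri, reboot_uri = {}, []
        for entry in previous_components:
            match = pattern.match(entry["Name"])
            if not match:
                continue
            if match.group() in REBOOT_COMP:
                reboot_uri.append(entry["@odata.id"])   # needs the iDRAC reset path
            else:
                prev_uri[entry["Version"]] = entry["@odata.id"]
        return list(prev_uri.values()), reboot_uri
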
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
index 23094b158..085bbc018 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_powerstate.py
@@ -2,8 +2,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.1
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -29,7 +29,7 @@ options:
For example- U(https://<I(baseuri)>/redfish/v1/Systems/<I(resource_id)>).
- This option is mandatory for I(base_uri) with multiple devices.
- To get the device details, use the API U(https://<I(baseuri)>/redfish/v1/Systems).
- required: False
+ required: false
type: str
reset_type:
description:
@@ -46,7 +46,7 @@ options:
- If C(PushPowerButton), Simulates the pressing of a physical power button on the device.
- When a power control operation is performed, which is not supported on the device, an error message is displayed
with the list of operations that can be performed.
- required: True
+ required: true
type: str
choices: ["ForceOff", "ForceOn", "ForceRestart", "GracefulRestart", "GracefulShutdown",
"Nmi", "On", "PowerCycle", "PushPowerButton"]
@@ -121,7 +121,7 @@ from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, redfish_auth_params
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import ConnectionError
powerstate_map = {}
diff --git a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
index ce02b4c00..d8f0c5503 100644
--- a/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/plugins/modules/redfish_storage_volume.py
@@ -2,9 +2,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.5.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -58,17 +58,13 @@ options:
volume_type:
description:
- One of the following volume types must be selected to create a volume.
- - >-
- C(Mirrored) The volume is a mirrored device.
- - >-
- C(NonRedundant) The volume is a non-redundant storage device.
- - >-
- C(SpannedMirrors) The volume is a spanned set of mirrored devices.
- - >-
- C(SpannedStripesWithParity) The volume is a spanned set of devices which uses parity to retain redundant
+ - C(NonRedundant) The volume is a non-redundant storage device.
+ - C(Mirrored) The volume is a mirrored device.
+ - C(StripedWithParity) The volume is a device which uses parity to retain redundant information.
+ - C(SpannedMirrors) The volume is a spanned set of mirrored devices.
+ - C(SpannedStripesWithParity) The volume is a spanned set of devices which uses parity to retain redundant
information.
- - >-
- C(StripedWithParity) The volume is a device which uses parity to retain redundant information.
+ - I(volume_type) is mutually exclusive with I(raid_type).
type: str
choices: [NonRedundant, Mirrored, StripedWithParity, SpannedMirrors, SpannedStripesWithParity]
name:
@@ -76,6 +72,7 @@ options:
- Name of the volume to be created.
- Only applicable when I(state) is C(present).
type: str
+ aliases: ['volume_name']
drives:
description:
- FQDD of the Physical disks.
@@ -125,15 +122,72 @@ options:
type: str
choices: [Fast, Slow]
default: Fast
+ raid_type:
+ description:
+ - C(RAID0) to create a RAID0 type volume.
+ - C(RAID1) to create a RAID1 type volume.
+ - C(RAID5) to create a RAID5 type volume.
+ - C(RAID6) to create a RAID6 type volume.
+ - C(RAID10) to create a RAID10 type volume.
+ - C(RAID50) to create a RAID50 type volume.
+ - C(RAID60) to create a RAID60 type volume.
+ - I(raid_type) is mutually exclusive with I(volume_type).
+ type: str
+ choices: [RAID0, RAID1, RAID5, RAID6, RAID10, RAID50, RAID60]
+ version_added: 8.3.0
+ apply_time:
+ description:
+ - Apply time of the Volume configuration.
+      - C(Immediate) applies the volume configuration on the host server immediately. This is applicable for I(job_wait).
+ - C(OnReset) allows you to apply the changes on the next reboot of the host server.
+ - I(apply_time) has a default value based on the different types of the controller.
+ For example, BOSS-S1 and BOSS-N1 controllers have a default value of I(apply_time) as C(OnReset),
+ and PERC controllers have a default value of I(apply_time) as C(Immediate).
+ type: str
+ choices: [Immediate, OnReset]
+ version_added: 8.5.0
+ reboot_server:
+ description:
+ - Reboot the server to apply the changes.
+      - I(reboot_server) is applicable only when I(apply_time) is C(OnReset) or when the default value for the apply time of the controller is C(OnReset).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ force_reboot:
+ description:
+ - Reboot the server forcefully to apply the changes when the normal reboot fails.
+ - I(force_reboot) is applicable only when I(reboot_server) is C(true).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ job_wait:
+ description:
+ - This parameter provides the option to wait for the job completion.
+ - This is applicable when I(apply_time) is C(Immediate).
+ - This is applicable when I(apply_time) is C(OnReset) and I(reboot_server) is C(true).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ job_wait_timeout:
+ description:
+ - This parameter is the maximum wait time of I(job_wait) in seconds.
+ - This option is applicable when I(job_wait) is C(true).
+ type: int
+ default: 1200
+ version_added: 8.5.0
+
requirements:
- - "python >= 3.8.6"
-author: "Sajna Shetty(@Sajna-Shetty)"
+ - "python >= 3.9.6"
+author:
+ - "Sajna Shetty(@Sajna-Shetty)"
+ - "Kritika Bhateja(@Kritika-Bhateja-03)"
notes:
- Run this module from a system that has direct access to Redfish APIs.
- This module supports C(check_mode).
- This module always reports changes when I(name) and I(volume_id) are not specified.
Either I(name) or I(volume_id) is required to support C(check_mode).
+ - This module supports IPv4 and IPv6 addresses.
'''
EXAMPLES = r'''
@@ -167,7 +221,48 @@ EXAMPLES = r'''
controller_id: "RAID.Slot.1-1"
volume_type: "NonRedundant"
drives:
- - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+
+- name: Create a RAID0 on PERC controller on reset
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+
+- name: Create a RAID0 on BOSS controller with restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ apply_time: OnReset
+ reboot_server: true
+
+- name: Create a RAID0 on BOSS controller with force restart
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ reboot_server: true
+ force_reboot: true
- name: Modify a volume's encryption type settings
dellemc.openmanage.redfish_storage_volume:
@@ -198,6 +293,38 @@ EXAMPLES = r'''
command: "initialize"
volume_id: "Disk.Virtual.6:RAID.Slot.1-1"
initialize_type: "Slow"
+
+- name: Create a RAID6 volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID6"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+
+- name: Create a RAID60 volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID60"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-2
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-3
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-4
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-5
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-6
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-7
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-8
'''
RETURN = r'''
@@ -213,7 +340,7 @@ task:
returned: success
sample: {
"id": "JID_XXXXXXXXXXXXX",
- "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXXX"
+ "uri": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"
}
error_info:
type: dict
@@ -249,6 +376,8 @@ from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.utils import MANAGER_JOB_ID_URI, wait_for_redfish_reboot_job, \
+ strip_substr_dict, wait_for_job_completion
VOLUME_INITIALIZE_URI = "{storage_base_uri}/Volumes/{volume_id}/Actions/Volume.Initialize"
@@ -257,9 +386,26 @@ CONTROLLER_URI = "{storage_base_uri}/{controller_id}"
SETTING_VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}/Settings"
CONTROLLER_VOLUME_URI = "{storage_base_uri}/{controller_id}/Volumes"
VOLUME_ID_URI = "{storage_base_uri}/Volumes/{volume_id}"
+APPLY_TIME_INFO_API = CONTROLLER_URI + "/Volumes"
+REBOOT_API = "Actions/ComputerSystem.Reset"
storage_collection_map = {}
CHANGES_FOUND = "Changes found to be applied."
NO_CHANGES_FOUND = "No changes found to be applied."
+RAID_TYPE_NOT_SUPPORTED_MSG = "RAID Type {raid_type} is not supported."
+APPLY_TIME_NOT_SUPPORTED_MSG = "Apply time {apply_time} is not supported. The supported values \
+are {supported_apply_time_values}. Enter the valid values and retry the operation."
+JOB_COMPLETION = "The job is successfully completed."
+JOB_SUBMISSION = "The job is successfully submitted."
+JOB_FAILURE_PROGRESS_MSG = "Unable to complete the task initiated for creating the storage volume."
+REBOOT_FAIL = "Failed to reboot the server."
+CONTROLLER_NOT_EXIST_ERROR = "Specified Controller {controller_id} does not exist in the System."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The parameter job_wait_timeout value cannot be negative or zero."
+SYSTEM_ID = "System.Embedded.1"
+volume_type_map = {"NonRedundant": "RAID0",
+ "Mirrored": "RAID1",
+ "StripedWithParity": "RAID5",
+ "SpannedMirrors": "RAID10",
+ "SpannedStripesWithParity": "RAID50"}
def fetch_storage_resource(module, session_obj):
@@ -269,6 +415,7 @@ def fetch_storage_resource(module, session_obj):
system_members = system_resp.json_data.get("Members")
if system_members:
system_id_res = system_members[0]["@odata.id"]
+ SYSTEM_ID = system_id_res.split('/')[-1]
system_id_res_resp = session_obj.invoke_request("GET", system_id_res)
system_id_res_data = system_id_res_resp.json_data.get("Storage")
if system_id_res_data:
@@ -294,16 +441,17 @@ def volume_payload(module):
oem = params.get("oem")
encrypted = params.get("encrypted")
encryption_types = params.get("encryption_types")
+ volume_type = params.get("volume_type")
+ raid_type = params.get("raid_type")
+ apply_time = params.get("apply_time")
if capacity_bytes:
capacity_bytes = int(capacity_bytes)
if drives:
storage_base_uri = storage_collection_map["storage_base_uri"]
physical_disks = [{"@odata.id": DRIVES_URI.format(storage_base_uri=storage_base_uri,
driver_id=drive_id)} for drive_id in drives]
-
raid_mapper = {
"Name": params.get("name"),
- "VolumeType": params.get("volume_type"),
"BlockSizeBytes": params.get("block_size_bytes"),
"CapacityBytes": capacity_bytes,
"OptimumIOSizeBytes": params.get("optimum_io_size_bytes"),
@@ -316,7 +464,12 @@ def volume_payload(module):
raid_payload.update({"Encrypted": encrypted})
if encryption_types:
raid_payload.update({"EncryptionTypes": [encryption_types]})
-
+ if volume_type:
+ raid_payload.update({"RAIDType": volume_type_map.get(volume_type)})
+ if raid_type:
+ raid_payload.update({"RAIDType": raid_type})
+ if apply_time is not None:
+ raid_payload.update({"@Redfish.OperationApplyTime": apply_time})
return raid_payload
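
A worked example (not part of the patch) of the payload keys added by this hunk: the legacy volume_type values map onto Redfish RAIDType through volume_type_map, raid_type is passed through unchanged, and apply_time becomes the @Redfish.OperationApplyTime hint.

    volume_type_map = {"NonRedundant": "RAID0", "Mirrored": "RAID1",
                       "StripedWithParity": "RAID5", "SpannedMirrors": "RAID10",
                       "SpannedStripesWithParity": "RAID50"}

    def raid_payload_extras(params):
        payload = {}
        if params.get("volume_type"):
            payload["RAIDType"] = volume_type_map[params["volume_type"]]
        if params.get("raid_type"):
            payload["RAIDType"] = params["raid_type"]
        if params.get("apply_time") is not None:
            payload["@Redfish.OperationApplyTime"] = params["apply_time"]
        return payload

    # raid_payload_extras({"volume_type": "Mirrored", "apply_time": "OnReset"})
    #   -> {"RAIDType": "RAID1", "@Redfish.OperationApplyTime": "OnReset"}
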
@@ -353,9 +506,7 @@ def check_specified_identifier_exists_in_the_system(module, session_obj, uri, er
return resp
except HTTPError as err:
if err.code == 404:
- if module.check_mode:
- return err
- module.fail_json(msg=err_message)
+ module.exit_json(msg=err_message, failed=True)
raise err
except (URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
raise err
@@ -367,8 +518,7 @@ def check_controller_id_exists(module, session_obj):
"""
specified_controller_id = module.params.get("controller_id")
uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=specified_controller_id)
- err_message = "Specified Controller {0} does " \
- "not exist in the System.".format(specified_controller_id)
+ err_message = CONTROLLER_NOT_EXIST_ERROR.format(controller_id=specified_controller_id)
resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, err_message)
if resp.success:
return check_physical_disk_exists(module, resp.json_data["Drives"])
@@ -420,6 +570,7 @@ def check_mode_validation(module, session_obj, action, uri):
encryption_types = module.params.get("encryption_types")
encrypted = module.params.get("encrypted")
volume_type = module.params.get("volume_type")
+ raid_type = module.params.get("raid_type")
drives = module.params.get("drives")
if name is None and volume_id is None and module.check_mode:
module.exit_json(msg=CHANGES_FOUND, changed=True)
@@ -444,12 +595,12 @@ def check_mode_validation(module, session_obj, action, uri):
exist_value = {"Name": resp_data["Name"], "BlockSizeBytes": resp_data["BlockSizeBytes"],
"CapacityBytes": resp_data["CapacityBytes"], "Encrypted": resp_data["Encrypted"],
"EncryptionTypes": resp_data["EncryptionTypes"][0],
- "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "VolumeType": resp_data["VolumeType"]}
+ "OptimumIOSizeBytes": resp_data["OptimumIOSizeBytes"], "RAIDType": resp_data["RAIDType"]}
exit_value_filter = dict([(k, v) for k, v in exist_value.items() if v is not None])
cp_exist_value = copy.deepcopy(exit_value_filter)
req_value = {"Name": name, "BlockSizeBytes": block_size_bytes,
"Encrypted": encrypted, "OptimumIOSizeBytes": optimum_io_size_bytes,
- "VolumeType": volume_type, "EncryptionTypes": encryption_types}
+ "RAIDType": raid_type, "EncryptionTypes": encryption_types}
if capacity_bytes is not None:
req_value["CapacityBytes"] = int(capacity_bytes)
req_value_filter = dict([(k, v) for k, v in req_value.items() if v is not None])
@@ -469,12 +620,63 @@ def check_mode_validation(module, session_obj, action, uri):
return None
+def check_raid_type_supported(module, session_obj):
+ volume_type = module.params.get("volume_type")
+ if volume_type:
+ raid_type = volume_type_map.get(volume_type)
+ else:
+ raid_type = module.params.get("raid_type")
+ if raid_type:
+ try:
+ specified_controller_id = module.params.get("controller_id")
+ uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=specified_controller_id)
+ resp = session_obj.invoke_request("GET", uri)
+ supported_raid_types = resp.json_data['StorageControllers'][0]['SupportedRAIDTypes']
+ if raid_type not in supported_raid_types:
+ module.exit_json(msg=RAID_TYPE_NOT_SUPPORTED_MSG.format(raid_type=raid_type), failed=True)
+ except (HTTPError, URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def get_apply_time(module, session_obj, controller_id):
+ """
+    Gets the apply time from the user if given; otherwise fetches it from the server.
+ """
+ apply_time = module.params.get("apply_time")
+ try:
+ uri = APPLY_TIME_INFO_API.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
+ resp = session_obj.invoke_request("GET", uri)
+ supported_apply_time_values = resp.json_data['@Redfish.OperationApplyTimeSupport']['SupportedValues']
+ if apply_time:
+ if apply_time not in supported_apply_time_values:
+ module.exit_json(msg=APPLY_TIME_NOT_SUPPORTED_MSG.format(apply_time=apply_time, supported_apply_time_values=supported_apply_time_values),
+ failed=True)
+ else:
+ apply_time = supported_apply_time_values[0]
+ return apply_time
+ except (HTTPError, URLError, SSLValidationError, ConnectionError, TypeError, ValueError) as err:
+ raise err
+
+
+def check_apply_time_supported_and_reboot_required(module, session_obj, controller_id):
+ """
+    Checks whether the apply time is supported and whether a reboot operation is required.
+ """
+ apply_time = get_apply_time(module, session_obj, controller_id)
+ reboot_server = module.params.get("reboot_server")
+ if reboot_server and apply_time == "OnReset":
+ return True
+ return False
+
+
def perform_volume_create_modify(module, session_obj):
"""
perform volume creation and modification for state present
"""
specified_controller_id = module.params.get("controller_id")
volume_id = module.params.get("volume_id")
+ check_raid_type_supported(module, session_obj)
+ action, uri, method = None, None, None
if specified_controller_id is not None:
check_controller_id_exists(module, session_obj)
uri = CONTROLLER_VOLUME_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"],
@@ -583,6 +785,76 @@ def validate_inputs(module):
" volume_id must be specified to perform further actions.")
+def perform_force_reboot(module, session_obj):
+ payload = {"ResetType": "ForceRestart"}
+ job_resp_status, reset_status, reset_fail = wait_for_redfish_reboot_job(session_obj, SYSTEM_ID, payload=payload)
+ if reset_status and job_resp_status:
+ job_uri = MANAGER_JOB_ID_URI.format(job_resp_status["Id"])
+ resp, msg = wait_for_job_completion(session_obj, job_uri, wait_timeout=module.params.get("job_wait_timeout"))
+ if resp:
+ job_data = strip_substr_dict(resp.json_data)
+ if job_data["JobState"] == "Failed":
+ module.exit_json(msg=REBOOT_FAIL, job_status=job_data, failed=True)
+ else:
+ resp = session_obj.invoke_request("GET", job_uri)
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=msg, job_status=job_data)
+
+
+def perform_reboot(module, session_obj):
+ payload = {"ResetType": "GracefulRestart"}
+ force_reboot = module.params.get("force_reboot")
+ job_resp_status, reset_status, reset_fail = wait_for_redfish_reboot_job(session_obj, SYSTEM_ID, payload=payload)
+ if reset_status and job_resp_status:
+ job_uri = MANAGER_JOB_ID_URI.format(job_resp_status["Id"])
+ resp, msg = wait_for_job_completion(session_obj, job_uri, wait_timeout=module.params.get("job_wait_timeout"))
+ if resp:
+ job_data = strip_substr_dict(resp.json_data)
+ if force_reboot and job_data["JobState"] == "Failed":
+ perform_force_reboot(module, session_obj)
+ else:
+ resp = session_obj.invoke_request("GET", job_uri)
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=msg, job_status=job_data)
+
+
+def check_job_tracking_required(module, session_obj, reboot_required, controller_id):
+ job_wait = module.params.get("job_wait")
+ apply_time = None
+ if controller_id:
+ apply_time = get_apply_time(module, session_obj, controller_id)
+ if job_wait:
+ if apply_time == "OnReset" and not reboot_required:
+ return False
+ return True
+ return False
+
+
+def track_job(module, session_obj, job_id, job_url):
+ resp, msg = wait_for_job_completion(session_obj, job_url,
+ wait_timeout=module.params.get("job_wait_timeout"))
+ if resp:
+ job_data = strip_substr_dict(resp.json_data)
+ if job_data["JobState"] == "Failed":
+ changed, failed = False, True
+ module.exit_json(msg=JOB_FAILURE_PROGRESS_MSG, task={"id": job_id, "uri": job_url},
+ changed=changed, job_status=job_data, failed=failed)
+ elif job_data["JobState"] == "Scheduled":
+ task_status = {"uri": job_url, "id": job_id}
+ module.exit_json(msg=JOB_SUBMISSION, task=task_status, job_status=job_data, changed=True)
+ else:
+ changed, failed = True, False
+ module.exit_json(msg=JOB_COMPLETION, task={"id": job_id, "uri": job_url},
+ changed=changed, job_status=job_data, failed=failed)
+ else:
+ module.exit_json(msg=msg)
+
+
+def validate_negative_job_time_out(module):
+ if module.params.get("job_wait") and module.params.get("job_wait_timeout") <= 0:
+ module.exit_json(msg=TIMEOUT_NEGATIVE_OR_ZERO_MSG, failed=True)
+
+
def main():
specs = {
"state": {"type": "str", "required": False, "choices": ['present', 'absent']},
@@ -591,7 +863,10 @@ def main():
"choices": ['NonRedundant', 'Mirrored',
'StripedWithParity', 'SpannedMirrors',
'SpannedStripesWithParity']},
- "name": {"required": False, "type": "str"},
+ "raid_type": {"type": "str", "required": False,
+ "choices": ['RAID0', 'RAID1', 'RAID5',
+ 'RAID6', 'RAID10', 'RAID50', 'RAID60']},
+ "name": {"required": False, "type": "str", "aliases": ['volume_name']},
"controller_id": {"required": False, "type": "str"},
"drives": {"elements": "str", "required": False, "type": "list"},
"block_size_bytes": {"required": False, "type": "int"},
@@ -603,13 +878,18 @@ def main():
"volume_id": {"required": False, "type": "str"},
"oem": {"required": False, "type": "dict"},
"initialize_type": {"type": "str", "required": False, "choices": ['Fast', 'Slow'], "default": "Fast"},
+ "apply_time": {"required": False, "type": "str", "choices": ['Immediate', 'OnReset']},
+ "reboot_server": {"required": False, "type": "bool", "default": False},
+ "force_reboot": {"required": False, "type": "bool", "default": False},
+ "job_wait": {"required": False, "type": "bool", "default": False},
+ "job_wait_timeout": {"required": False, "type": "int", "default": 1200}
}
specs.update(redfish_auth_params)
module = AnsibleModule(
argument_spec=specs,
- mutually_exclusive=[['state', 'command']],
+ mutually_exclusive=[['state', 'command'], ['volume_type', 'raid_type']],
required_one_of=[['state', 'command']],
required_if=[['command', 'initialize', ['volume_id']],
['state', 'absent', ['volume_id']], ],
@@ -617,16 +897,42 @@ def main():
try:
validate_inputs(module)
+ validate_negative_job_time_out(module)
with Redfish(module.params, req_session=True) as session_obj:
fetch_storage_resource(module, session_obj)
+ controller_id = module.params.get("controller_id")
+ volume_id = module.params.get("volume_id")
+ reboot_server = module.params.get("reboot_server")
+ reboot_required = module.params.get("reboot_required")
+ if controller_id:
+ uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
+ resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, CONTROLLER_NOT_EXIST_ERROR.format(controller_id=controller_id))
+ reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id)
status_message = configure_raid_operation(module, session_obj)
- task_status = {"uri": status_message.get("task_uri"), "id": status_message.get("task_id")}
- module.exit_json(msg=status_message["msg"], task=task_status, changed=True)
+ if volume_id and reboot_server:
+ controller_id = volume_id.split(":")[-1]
+ uri = CONTROLLER_URI.format(storage_base_uri=storage_collection_map["storage_base_uri"], controller_id=controller_id)
+ resp = check_specified_identifier_exists_in_the_system(module, session_obj, uri, CONTROLLER_NOT_EXIST_ERROR.format(controller_id=controller_id))
+ reboot_required = check_apply_time_supported_and_reboot_required(module, session_obj, controller_id)
+ if reboot_required:
+ perform_reboot(module, session_obj)
+ job_tracking_required = check_job_tracking_required(module, session_obj, reboot_required, controller_id)
+ job_id = status_message.get("task_id")
+ job_url = MANAGER_JOB_ID_URI.format(job_id)
+ if job_tracking_required:
+ track_job(module, session_obj, job_id, job_url)
+ else:
+ task_status = {"uri": job_url, "id": job_id}
+ resp = session_obj.invoke_request("GET", job_url)
+ job_data = strip_substr_dict(resp.json_data)
+ module.exit_json(msg=status_message["msg"], task=task_status, job_status=job_data, changed=True)
except HTTPError as err:
- module.fail_json(msg=str(err), error_info=json.load(err))
- except (URLError, SSLValidationError, ConnectionError, ImportError, ValueError,
+ module.exit_json(msg=str(err), error_info=json.load(err), failed=True)
+ except URLError as err:
+ module.exit_json(msg=str(err), unreachable=True)
+ except (SSLValidationError, ConnectionError, ImportError, ValueError,
RuntimeError, TypeError, OSError, SSLError) as err:
- module.fail_json(msg=str(err))
+ module.exit_json(msg=str(err), failed=True)
if __name__ == '__main__':
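Since this hunk only shows the updated argument spec, a hedged usage sketch of the new options may help reviewers; the task below exercises raid_type, apply_time, reboot_server and job_wait together. Connection options follow the collection's usual redfish_auth_params, and the controller and drive identifiers are placeholders:

```yaml
# Illustrative task only — IDs and credentials are placeholders.
- name: Create a RAID5 volume, apply it on the next reset and reboot the server
  dellemc.openmanage.redfish_storage_volume:
    baseuri: "192.168.0.1"
    username: "user_name"
    password: "user_password"
    validate_certs: false
    state: present
    controller_id: "RAID.Slot.1-1"
    raid_type: "RAID5"
    drives:
      - Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
      - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
      - Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1
    apply_time: "OnReset"
    reboot_server: true
    job_wait: true
    job_wait_timeout: 1200
```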
diff --git a/ansible_collections/dellemc/openmanage/requirements.txt b/ansible_collections/dellemc/openmanage/requirements.txt
index 604f7ba22..30a428ace 100644
--- a/ansible_collections/dellemc/openmanage/requirements.txt
+++ b/ansible_collections/dellemc/openmanage/requirements.txt
@@ -1,2 +1,3 @@
omsdk
-netaddr>=0.7.19 \ No newline at end of file
+netaddr>=0.7.19
+jmespath
diff --git a/ansible_collections/dellemc/openmanage/requirements.yml b/ansible_collections/dellemc/openmanage/requirements.yml
index 6440db74a..28b1714b2 100644
--- a/ansible_collections/dellemc/openmanage/requirements.yml
+++ b/ansible_collections/dellemc/openmanage/requirements.yml
@@ -1,2 +1,4 @@
collections:
- - name: dellemc.openmanage
+ - dellemc.openmanage
+ - ansible.utils
+ - ansible.windows
diff --git a/ansible_collections/dellemc/openmanage/roles/README.md b/ansible_collections/dellemc/openmanage/roles/README.md
new file mode 100644
index 000000000..90f2f97d5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/README.md
@@ -0,0 +1,22 @@
+# dellemc.openmanage roles directory
+
+Here is the list of roles supported by Dell.
+
+```
+.
+├── idrac_attributes
+├── idrac_bios
+├── idrac_boot
+├── idrac_certificate
+├── idrac_export_server_config_profile
+├── idrac_firmware
+├── idrac_gather_facts
+├── idrac_import_server_config_profile
+├── idrac_job_queue
+├── idrac_os_deployment
+├── idrac_reset
+├── idrac_server_powerstate
+├── idrac_storage_controller
+├── redfish_firmware
+└── redfish_storage_volume
+``` \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/README.md
new file mode 100644
index 000000000..2458b1ab5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/README.md
@@ -0,0 +1,308 @@
+# idrac_attributes
+
+Role to configure the iDRAC system, manager and lifecycle attributes for Dell PowerEdge servers.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address or hostname.</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+    <td>- iDRAC username with admin privileges.</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>idrac_attributes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+    <td>- Dictionary of iDRAC attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry.<br>- To view the list of attributes in Attribute Registry for iDRAC9 and above, use the Role idrac_gather_facts with idrac components.<br>- For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.<br>- If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity').</td>
+ </tr>
+ <tr>
+ <td>system_attributes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+    <td>- Dictionary of System attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry.<br>- To view the list of attributes in Attribute Registry for iDRAC9 and above, use the Role idrac_gather_facts with idrac components.<br>- For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.<br>- If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'ThermalSettings.1.ThermalProfile').</td>
+ </tr>
+ <tr>
+ <td>lifecycle_controller_attributes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+    <td>- Dictionary of Lifecycle Controller attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry.<br>- To view the list of attributes in Attribute Registry for iDRAC9 and above, use the Role idrac_gather_facts with idrac components.<br>- For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.<br>- If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName> (for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName> (for Example, 'LCAttributes.1.AutoUpdate').</td>
+  </tr>
+  <tr>
+ <td>manager_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Redfish ID of the resource. If the Redfish ID of the resource is not specified, then the first ID from the Manager IDs list will be picked up.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_attributes_out</td>
+ <td>{"changed": true,
+ "failed": false,
+ "msg": "Successfully updated the attributes."
+}</td>
+<td>Module output of idrac attributes</td>
+</tr>
+</tbody>
+</table>
+
+## Examples
+-----
+
+```
+- name: Configure iDRAC attributes
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ SNMP.1.AgentCommunity: public
+
+- name: Configure System attributes
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ system_attributes:
+ ThermalSettings.1.ThermalProfile: Sound Cap
+
+- name: Configure Lifecycle Controller attributes
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: Enabled
+
+- name: Configure the iDRAC attributes for email alert settings.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ EmailAlert.1.CustomMsg: Display Message
+ EmailAlert.1.Enable: Enabled
+ EmailAlert.1.Address: test@test.com
+
+- name: Configure the iDRAC attributes for SNMP alert settings.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ SNMPAlert.1.Destination: 192.168.0.2
+ SNMPAlert.1.State: Enabled
+ SNMPAlert.1.SNMPv3Username: username
+
+- name: Configure the iDRAC attributes for SMTP alert settings.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ RemoteHosts.1.SMTPServerIPAddress: 192.168.0.3
+ RemoteHosts.1.SMTPAuthentication: Enabled
+ RemoteHosts.1.SMTPPort: 25
+ RemoteHosts.1.SMTPUserName: username
+ RemoteHosts.1.SMTPPassword: password
+
+- name: Configure the iDRAC attributes for webserver settings.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ WebServer.1.SSLEncryptionBitLength: 128-Bit or higher
+ WebServer.1.TLSProtocol: TLS 1.1 and Higher
+
+- name: Configure the iDRAC attributes for SNMP settings.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ SNMP.1.SNMPProtocol: All
+ SNMP.1.AgentEnable: Enabled
+ SNMP.1.TrapFormat: SNMPv1
+ SNMP.1.AlertPort: 162
+ SNMP.1.AgentCommunity: public
+
+- name: Configure the iDRAC LC attributes for collecting system inventory.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ lifecycle_controller_attributes:
+ LCAttributes.1.CollectSystemInventoryOnRestart: Enabled
+
+- name: Configure the iDRAC system attributes for LCD configuration.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ system_attributes:
+ LCD.1.Configuration: Service Tag
+ LCD.1.vConsoleIndication: Enabled
+ LCD.1.FrontPanelLocking: Full-Access
+ LCD.1.UserDefinedString: custom string
+
+- name: Configure the iDRAC attributes for Timezone settings.
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ Time.1.Timezone: CST6CDT
+ NTPConfigGroup.1.NTPEnable: Enabled
+ NTPConfigGroup.1.NTP1: 192.168.0.5
+ NTPConfigGroup.1.NTP2: 192.168.0.6
+ NTPConfigGroup.1.NTP3: 192.168.0.7
+
+- name: Configure all attributes
+ ansible.builtin.include_role:
+ name: idrac_attributes
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ idrac_attributes:
+ SNMP.1.AgentCommunity: test
+ SNMP.1.AgentEnable: Enabled
+ SNMP.1.DiscoveryPort: 161
+ system_attributes:
+ ServerOS.1.HostName: demohostname
+ lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: Disabled
+```
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Kritika Bhateja (Kritika.Bhateja@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/defaults/main.yml
new file mode 100644
index 000000000..60830a744
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+# defaults file for idrac_attributes
+validate_certs: true
+https_timeout: 30
+https_port: 443
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/handlers/main.yml
new file mode 100644
index 000000000..ed09341c4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_attributes
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/argument_specs.yml
new file mode 100644
index 000000000..2109b32a0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/argument_specs.yml
@@ -0,0 +1,71 @@
+---
+argument_specs:
+ main:
+ version_added: "7.6.0"
+ short_description: Role to configure the iDRAC attribute
+ description:
+ - Role to configure the iDRAC system, manager and lifecycle attributes for Dell PowerEdge servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address or hostname.
+ username:
+ type: str
+ description: iDRAC username with admin privileges.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ idrac_attributes:
+ description:
+ - Dictionary of iDRAC attributes and value. The attributes should be part of the
+ Integrated Dell Remote Access Controller Attribute Registry.
+ - To view the list of attributes in Attribute Registry for iDRAC9 and above, use
+ the Role idrac_gather_facts with idrac components.
+ - For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
+ (for Example, 'SNMP.1#AgentCommunity') then the equivalent attribute name for Redfish is <GroupName>.
+ <Instance>.<AttributeName> (for Example, 'SNMP.1.AgentCommunity').
+ type: dict
+ system_attributes:
+ description:
+ - Dictionary of System attributes and value. The attributes should be part of the Integrated Dell Remote Access Controller Attribute Registry.
+ - To view the list of attributes in Attribute Registry for iDRAC9 and above, use the Role idrac_gather_facts with idrac components
+ - For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
+ (for Example, 'ThermalSettings.1#ThermalProfile') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName>
+ (for Example, 'ThermalSettings.1.ThermalProfile').
+ type: dict
+ lifecycle_controller_attributes:
+ description:
+ - Dictionary of Lifecycle Controller attributes and value. The attributes should be part of the
+ Integrated Dell Remote Access Controller Attribute Registry.
+ - To view the list of attributes in Attribute Registry for iDRAC9 and above, use the Role idrac_gather_facts with idrac components
+ - For iDRAC8 based servers, derive the manager attribute name from Server Configuration Profile.
+ - If the manager attribute name in Server Configuration Profile is <GroupName>.<Instance>#<AttributeName>
+ (for Example, 'LCAttributes.1#AutoUpdate') then the equivalent attribute name for Redfish is <GroupName>.<Instance>.<AttributeName>
+          (for Example, 'LCAttributes.1.AutoUpdate').
+ type: dict
+ manager_id:
+ description:
+ Redfish ID of the resource. If the Redfish ID of the resource is not specified, then the first ID from the Manager IDs list
+ will be picked up.
+ type: str
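As a concrete illustration of the SCP-to-Redfish attribute naming described above, an SCP name such as SNMP.1#AgentCommunity becomes SNMP.1.AgentCommunity when passed to this role (the values below are examples only, not defaults of the role):

```yaml
# Illustrative variable values for the role; the SCP name in each comment
# maps to the Redfish-style name actually used.
idrac_attributes:                      # SNMP.1#AgentCommunity -> SNMP.1.AgentCommunity
  SNMP.1.AgentCommunity: public
system_attributes:                     # ThermalSettings.1#ThermalProfile -> ThermalSettings.1.ThermalProfile
  ThermalSettings.1.ThermalProfile: Sound Cap
lifecycle_controller_attributes:       # LCAttributes.1#AutoUpdate -> LCAttributes.1.AutoUpdate
  LCAttributes.1.AutoUpdate: Enabled
```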
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/main.yml
new file mode 100644
index 000000000..737f01569
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/meta/main.yml
@@ -0,0 +1,25 @@
+galaxy_info:
+ role_name: idrac_attributes
+ author: "Kritika Bhateja"
+ description: The role helps to configure the iDRAC system, manager and lifecycle attributes for Dell PowerEdge servers.
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/converge.yml
new file mode 100644
index 000000000..f1ae0f548
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/converge.yml
@@ -0,0 +1,306 @@
+---
+- name: Converge file for default/negative scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Perform update with wrong hostname
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "randomHostname"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with wrong hostname
+ ansible.builtin.assert:
+ that:
+ - "'Incorrect username or password,
+ unreachable iDRAC IP' in '{{ idrac_attributes_out.msg }}' or
+ 'Name or service not known' in '{{ idrac_attributes_out.msg }}'"
+
+ - name: Perform update with wrong username
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "WrongUsername123"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with wrong username
+ ansible.builtin.assert:
+ that:
+ - '"HTTP Error 401" in idrac_attributes_out.msg'
+
+ - name: Perform update with wrong password
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "WrongPassword@123"
+ validate_certs: false
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with wrong password
+ ansible.builtin.assert:
+ that: |-
+ ('"HTTP Error 401" in idrac_attributes_out.msg')
+ or
+ ('"urlopen error timed out" in idrac_attributes_out.msg')
+
+ - name: Perform update with invalid https_port
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ https_port: 9999999
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with invalid https_port
+ ansible.builtin.assert:
+ that:
+ - ('"Connection refused" in idrac_attributes_out.msg')
+ or
+ ('"urlopen error timed out" in idrac_attributes_out.msg')
+
+ - name: Perform update with invalid validate_certs
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: "someStringValue"
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with invalid validate_certs
+ ansible.builtin.assert:
+ that:
+ - '"Valid booleans include" in idrac_attributes_out.msg'
+
+ - name: Perform update with wrong ca_path
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ ca_path: ""
+ validate_certs: true
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with wrong ca_path
+ ansible.builtin.assert:
+ that:
+ - '"certificate verify failed" in idrac_attributes_out.msg'
+
+ - name: Perform update with wrong manager_id
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ idrac_attributes:
+ SSH.1.Timeout: 1800
+ manager_id: idrac.random.5
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with wrong manager_id
+ ansible.builtin.assert:
+ that:
+ - '"HTTP Error 404" in idrac_attributes_out.msg'
+
+ - name: Perform update with read only attributes
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ idrac_attributes:
+ SSH.1.MaxSessions: 5
+ manager_id: "{{ null | default(omit) }}"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with read only attributes
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
+
+ - name: Perform idrac_attributes update with invalid attributes
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ idrac_attributes:
+ invalidAttr: enabled
+ manager_id: "{{ null | default(omit) }}"
+ ignore_errors: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with invalid attributes
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
+
+ - name: Perform idrac_attributes update with
+ valid attributes and invalid values
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ idrac_attributes:
+ SSH.1.Timeout: false
+ manager_id: "{{ null | default(omit) }}"
+ ignore_errors: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation with
+ valid attributes and invalid values
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
+
+ - name: Perform system_attributes update with invalid attributes
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ system_attributes:
+ invalidAttr: enabled
+ idrac_attributes: "{{ null | default(omit) }}"
+ manager_id: "{{ null | default(omit) }}"
+ ignore_errors: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation for system_attributes
+ with invalid attributes
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
+
+ - name: Perform system_attributes update with
+ valid attributes and invalid values
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ system_attributes:
+ Job.1.JobTimeout: stringValue
+ manager_id: "{{ null | default(omit) }}"
+ idrac_attributes: "{{ null | default(omit) }}"
+ ignore_errors: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation for system_attributes
+ with valid attributes and invalid values
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
+
+ - name: Perform lifecycle_controller_attributes update
+ with invalid attributes
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ lifecycle_controller_attributes:
+ invalidAttr: false
+ idrac_attributes: "{{ null | default(omit) }}"
+ system_attributes: "{{ null | default(omit) }}"
+ manager_id: "{{ null | default(omit) }}"
+ ignore_errors: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation for
+ lifecycle_controller_attributes with invalid attributes
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
+
+ - name: Perform lifecycle_controller_attributes update with
+ valid attributes and invalid values
+ ansible.builtin.import_role:
+ name: idrac_attributes
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ https_port: 443
+ validate_certs: false
+ lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: 12345
+ manager_id: "{{ null | default(omit) }}"
+ idrac_attributes: "{{ null | default(omit) }}"
+ system_attributes: "{{ null | default(omit) }}"
+ ignore_errors: true
+ register: idrac_attributes_error_msg
+
+ - name: Asserting after performing operation for
+ lifecycle_controller_attributes with
+ valid attributes and invalid values
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Attributes have invalid values."
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/molecule.yml
new file mode 100644
index 000000000..210914970
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/default/molecule.yml
@@ -0,0 +1,6 @@
+---
+scenario:
+ test_sequence:
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/cleanup.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/cleanup.yml
new file mode 100644
index 000000000..58b3846e3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/cleanup.yml
@@ -0,0 +1,12 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_to_default: Default
+ roles:
+ - role: idrac_reset
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/converge.yml
new file mode 100644
index 000000000..a116aad93
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/converge.yml
@@ -0,0 +1,32 @@
+---
+- name: Converge file for idrac_attributes
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ roles:
+ - role: idrac_attributes
+ vars:
+ idrac_attributes:
+ SSH.1.Timeout: 1700
+ tasks:
+ - name: Asserting idrac_attributes update in check mode
+ ansible.builtin.assert:
+ that: idrac_attributes_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting idrac_attributes update in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Successfully updated the attributes."
+ when: not ansible_check_mode and idrac_attributes_out.changed
+
+ - name: Asserting idrac_attributes update in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_attributes_out.changed
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/molecule.yml
index ed97d539c..ed97d539c 100644
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/main.yml
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/idrac_attr/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/cleanup.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/cleanup.yml
new file mode 100644
index 000000000..58b3846e3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/cleanup.yml
@@ -0,0 +1,12 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_to_default: Default
+ roles:
+ - role: idrac_reset
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/converge.yml
new file mode 100644
index 000000000..b9e7b2124
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/converge.yml
@@ -0,0 +1,32 @@
+---
+- name: Converge file for lifecycle_controller_attributes
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ roles:
+ - role: idrac_attributes
+ vars:
+ lifecycle_controller_attributes:
+ LCAttributes.1.AutoUpdate: Enabled
+ tasks:
+ - name: Asserting lifecycle_controller_attributes update in check mode
+ ansible.builtin.assert:
+ that: idrac_attributes_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting lifecycle_controller_attributes update in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Successfully updated the attributes."
+ when: not ansible_check_mode and idrac_attributes_out.changed
+
+ - name: Asserting lifecycle_controller_attributes update in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_attributes_out.changed
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/molecule.yml
index ed97d539c..ed97d539c 100644
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/lifecycle_controller_attr/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/cleanup.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/cleanup.yml
new file mode 100644
index 000000000..58b3846e3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/cleanup.yml
@@ -0,0 +1,12 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_to_default: Default
+ roles:
+ - role: idrac_reset
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/converge.yml
new file mode 100644
index 000000000..ae59956e7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/converge.yml
@@ -0,0 +1,32 @@
+---
+- name: Converge file for system_attributes
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ roles:
+ - role: idrac_attributes
+ vars:
+ system_attributes:
+ ServerOS.1.HostName: demohostname
+ tasks:
+ - name: Asserting system_attributes update in check mode
+ ansible.builtin.assert:
+ that: idrac_attributes_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting system_attributes update in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "Successfully updated the attributes."
+ when: not ansible_check_mode and idrac_attributes_out.changed
+
+ - name: Asserting system_attributes update in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_attributes_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_attributes_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/molecule/system_attr/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tasks/main.yml
new file mode 100644
index 000000000..80bc1edb4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+- name: Configure attributes
+ dellemc.openmanage.idrac_attributes:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ idrac_attributes: "{{ idrac_attributes | default(omit) }}"
+ system_attributes: "{{ system_attributes | default(omit) }}"
+ lifecycle_controller_attributes: "{{ lifecycle_controller_attributes | default(omit) }}"
+ resource_id: "{{ manager_id | default(omit) }}"
+ timeout: "{{ https_timeout }}"
+ register: idrac_attributes_out
+ delegate_to: "{{ idrac_attributes_delegate }}"
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/inventory
index 878877b07..878877b07 100644
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/inventory
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/inventory
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/test.yml
new file mode 100644
index 000000000..0016e133a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Tests for idrac attributes
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_attributes
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_attributes/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/vars/main.yml
new file mode 100644
index 000000000..9684f674f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_attributes/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# vars file for idrac_attributes
+idrac_attributes_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
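The vars file above resolves idrac_attributes_delegate from the RUNON environment variable (defaulting to localhost), and tasks/main.yml delegates the module call to that host. A minimal sketch of overriding the delegate directly instead of via the environment (host names and credentials are placeholders):

```yaml
# Illustrative play; any host that can reach the iDRAC may serve as the delegate.
- hosts: all
  gather_facts: false
  roles:
    - role: idrac_attributes
      vars:
        idrac_attributes_delegate: localhost
        hostname: "192.168.0.1"
        username: "{{ lookup('env', 'IDRAC_USER') }}"
        password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
        idrac_attributes:
          SNMP.1.AgentCommunity: public
```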
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_bios/README.md
new file mode 100644
index 000000000..25f439dc2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/README.md
@@ -0,0 +1,368 @@
+# idrac_bios
+
+This role allows you to modify BIOS attributes, clear pending BIOS attributes, and reset the BIOS to default settings.
+
+## Requirements
+
+---
+
+Requirements to develop and contribute to the role.
+
+### Development
+
+```text
+ansible
+docker
+molecule
+python
+```
+
+### Production
+
+Requirements to use the role.
+
+```text
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```text
+dellemc.openmanage
+```
+
+## Role Variables
+
+---
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC user password</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>iDRAC port</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>
+ - If C(false), the SSL certificates will not be validated. <br>
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used
+ </td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ </td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>The socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>attributes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>
+ - "Dictionary of BIOS attributes and value pair. Attributes should be part of the Redfish Dell BIOS Attribute Registry. Use U(https://I(idrac_ip)/redfish/v1/Systems/System.Embedded.1/Bios) to view the Redfish URI." <br>
+ - This is mutually exclusive with I(reset_bios).
+ </td>
+ </tr>
+ <tr>
+ <td>apply_time</td>
+ <td>false</td>
+ <td>Immediate</td>
+ <td>Immediate, OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset</td>
+ <td>str</td>
+ <td>
+ - Apply time of the I(attributes). <br>
+ - This is applicable only to I(attributes). <br>
+ - C(Immediate) Allows the user to immediately reboot the host and apply the changes.
+ I(job_wait) is applicable. <br>
+ - C(OnReset) Allows the user to apply the changes on the next reboot of the host server. <br>
+      - C(AtMaintenanceWindowStart) Allows the user to apply the changes at the start of a maintenance window as specified in
+ I(maintenance_window). A reboot job will be scheduled. <br>
+ - C(InMaintenanceWindowOnReset) Allows to apply the changes after a manual reset but within the maintenance window as specified in
+ I(maintenance_window).
+ </td>
+ </tr>
+ <tr>
+ <td>maintenance_window</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>
+ - Option to schedule the maintenance window. <br>
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or
+ C(InMaintenanceWindowOnReset).
+ </td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;start_time</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>
+ - The start time for the maintenance window to be scheduled. <br>
+ - The format is YYYY-MM-DDThh:mm:ss<offset>, <offset> is the time offset from UTC that
+ the current time zone set in iDRAC in the format: +05:30 for IST.
+ </td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;duration</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>int</td>
+ <td>
+ - The duration in seconds for the maintenance window. <br>
+ </td>
+ </tr>
+ <tr>
+ <td>clear_pending</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>
+ - Allows the user to clear all pending BIOS attributes changes. <br>
+ - C(true) discards any pending changes to BIOS attributes or removes the job if in
+ scheduled state. <br>
+ - This operation will not create any job. <br>
+ - C(false) does not perform any operation. <br>
+ - This is mutually exclusive with I(reset_bios). <br>
+ - C(Note) Any BIOS job scheduled will not be cleared because of boot sources configuration. <br>
+ </td>
+</tr>
+ <tr>
+ <td>reset_bios</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>
+ - Resets the BIOS to default settings and triggers a reboot of host system. <br>
+ - This is applied to the host after the restart. <br>
+ - This operation will not create any job. <br>
+ - C(false) does not perform any operation. <br>
+ - This is mutually exclusive with I(attributes), and I(clear_pending). <br>
+ - When C(true), this action will always report as changes found to be applicable.
+ </td>
+ </tr>
+ <tr>
+ <td>reset_type</td>
+ <td>false</td>
+ <td>graceful_restart</td>
+ <td>graceful_restart <br> force_restart</td>
+ <td>str</td>
+ <td>
+ - C(force_restart) Forcefully reboot the host system. <br>
+ - C(graceful_restart) Gracefully reboot the host system. <br>
+ - This is applicable for I(reset_bios), and I(attributes) when I(apply_time) is
+ C(Immediate).
+ </td>
+ </tr>
+ <tr>
+ <td>job_wait</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>
+ - Provides the option to wait for job completion. <br>
+ - This is applicable for I(attributes) when I(apply_time) is C(Immediate). <br>
+ </td>
+ </tr>
+ <tr>
+ <td>job_wait_timeout</td>
+ <td>false</td>
+ <td>1200</td>
+ <td></td>
+ <td>int</td>
+ <td>
+ - The maximum wait time of I(job_wait) in seconds. <br>
+ The job is tracked only for this duration. <br>
+ - This option is applicable when I(job_wait) is C(True). <br>
+ </td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_bios_out</td>
+ <td>{
+ "attributes": {
+ "ansible_facts": {},
+ "changed": true,
+ "failed": false,
+ "job_id": "JID_XXXXXXXXXXXX",
+ "msg": {
+ "ActualRunningStartTime": "2023-05-19T04:55:01",
+ "ActualRunningStopTime": "2023-05-19T04:59:21",
+ "CompletionTime": "2023-05-19T04:59:21",
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_844899049402",
+ "JobState": "Completed",
+ "JobType": "BIOSConfiguration",
+ "Message": "Job completed successfully.",
+ "MessageArgs": [],
+ "MessageId": "PR19",
+ "Name": "Configure: BIOS.Setup.1-1",
+ "PercentComplete": 100,
+ "StartTime": "2023-05-19T04:51:44",
+ "TargetSettingsURI": null
+ },
+ "status_msg": "Successfully applied the BIOS attributes update."
+ },
+ "clear_pending": {
+ "changed": false,
+ "skip_reason": "Conditional result was False",
+ "skipped": true
+ },
+ "reset_bios": {
+ "changed": false,
+ "skip_reason": "Conditional result was False",
+ "skipped": true
+ }
+}</td>
+ <td>Module output of the idrac_bios job.</td>
+ </tr>
+ </tbody>
+</table>
+
+## Examples
+
+---
+
+```yaml
+- name: Configure generic attributes of the BIOS
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ attributes:
+ BootMode : "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+```
+
+```yaml
+- name: Configure BIOS attributes at Maintenance window.
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-09-30T05:15:40-05:00"
+ duration: 600
+ attributes:
+ BootMode : "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+```
+
+```yaml
+- name: Clear pending BIOS attributes.
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ clear_pending: true
+```
+
+```yaml
+- name: Reset BIOS attributes to default settings.
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_bios: true
+```
+
+## Author Information
+
+---
+
+Dell Technologies <br>
+Abhishek Sinha (Abhishek.Sinha10@Dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/defaults/main.yml
new file mode 100644
index 000000000..6b146abc8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for idrac_bios
+https_port: 443
+validate_certs: true
+https_timeout: 30
+apply_time: "Immediate"
+clear_pending: false
+reset_type: "graceful_restart"
+job_wait: true
+job_wait_timeout: 1200
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/handlers/main.yml
new file mode 100644
index 000000000..7cbf8c33d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_bios
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/argument_specs.yml
new file mode 100644
index 000000000..febdd96ca
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/argument_specs.yml
@@ -0,0 +1,122 @@
+---
+argument_specs:
+ main:
+ version_added: "7.6.0"
+ short_description: Modify and clear BIOS attributes, and reset BIOS settings
+ description:
+      - This role allows you to modify BIOS attributes, clear pending BIOS attributes, and reset the BIOS to default settings.
+ options:
+ hostname:
+ required: true
+ type: str
+ description:
+ - iDRAC IP Address.
+ username:
+ type: str
+ description:
+ - iDRAC username.
+ password:
+ type: str
+ description:
+ - iDRAC user password.
+ https_port:
+ type: int
+ description:
+ - iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description:
+ - The socket level timeout in seconds.
+ type: int
+ default: 30
+ attributes:
+ type: dict
+ description:
+ - Dictionary of BIOS attribute and value pairs.
+ Attributes should be part of the Redfish Dell BIOS Attribute Registry.
+ Use the idrac_gather_facts role to fetch the BIOS attributes.
+ - This is mutually exclusive with I(reset_bios).
+ apply_time:
+ description:
+ - Apply time of the I(attributes).
+ - This is applicable only to I(attributes).
+ - C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait) is applicable.
+ - C(OnReset) Allows the user to apply the changes on the next reboot of the host server.
+ - C(AtMaintenanceWindowStart) Allows the user to apply the changes at the start of a maintenance window as specified in
+ I(maintenance_window). A reboot job will be scheduled.
+ - C(InMaintenanceWindowOnReset) Allows to apply the changes after a manual reset but within the maintenance window as specified in
+ I(maintenance_window).
+ choices:
+ [
+ Immediate,
+ OnReset,
+ AtMaintenanceWindowStart,
+ InMaintenanceWindowOnReset,
+ ]
+ default: Immediate
+ maintenance_window:
+ type: dict
+ description:
+ - Option to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ options:
+ start_time:
+ type: str
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - "The format is YYYY-MM-DDThh:mm:ss<offset>"
+ - "<offset> is the time offset from UTC that the current time zone set in iDRAC in the format: +05:30 for IST."
+ required: true
+ duration:
+ type: int
+ description:
+ - The duration in seconds for the maintenance window.
+ required: true
+ clear_pending:
+ type: bool
+ description:
+ - Allows the user to clear all pending BIOS attributes changes.
+ - C(true) discards any pending changes to BIOS attributes or removes the job if in scheduled state.
+ - This operation will not create any job.
+ - C(false) does not perform any operation.
+ - This is mutually exclusive with I(boot_sources), I(attributes), and I(reset_bios).
+ - C(Note) Any BIOS job scheduled due to boot sources configuration will not be cleared.
+ reset_bios:
+ type: bool
+ description:
+ - Resets the BIOS to default settings and triggers a reboot of the host system.
+ - This is applied to the host after the restart.
+ - This operation will not create any job.
+ - C(false) does not perform any operation.
+ - This is mutually exclusive with I(boot_sources), I(attributes), and I(clear_pending).
+ - When C(true), this action always reports that changes are found to be applicable.
+ reset_type:
+ type: str
+ description:
+ - C(force_restart) Forcefully reboot the host system.
+ - C(graceful_restart) Gracefully reboot the host system.
+ - This is applicable for I(reset_bios), and I(attributes) when I(apply_time) is C(Immediate).
+ choices: [graceful_restart, force_restart]
+ default: graceful_restart
+ job_wait:
+ type: bool
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable for I(attributes) when I(apply_time) is C(Immediate).
+ default: true
+ job_wait_timeout:
+ type: int
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(true).
+ default: 1200
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/main.yml
new file mode 100644
index 000000000..e660452c3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/meta/main.yml
@@ -0,0 +1,25 @@
+galaxy_info:
+ author: Abhishek Sinha ('Abhishek-Dell')
+ description: The role performs iDRAC BIOS operations.
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: '2.13'
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/__get_data.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/__get_data.yml
new file mode 100644
index 000000000..a7ee9c67e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/__get_data.yml
@@ -0,0 +1,16 @@
+---
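+# Reusable task file: callers include it with a "url" variable and read the response from the registered idrac_bios_uri_data fact.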
+- name: Get uri data
+ ansible.builtin.uri:
+ url: "{{ url }}"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ body: {}
+ body_format: json
+ force_basic_auth: true
+ return_content: true
+ status_code: 200
+ headers: 'Accept=application/json'
+ check_mode: false
+ no_log: true
+ register: idrac_bios_uri_data
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/converge.yml
new file mode 100644
index 000000000..d81646ccb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge file for clear pending attributes
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Clear pending attributes
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ clear_pending: true
+
+ - name: Verify clear pending attributes in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.clear_pending.status_msg == "Successfully cleared
+ the pending BIOS attributes."
+ - idrac_bios_out.reset_bios.skipped
+ - idrac_bios_out.attributes.skipped
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verify clear pending attributes in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.clear_pending.status_msg == "Changes found
+ to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verify clear pending attributes in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.clear_pending.status_msg == "No changes found
+ to be applied."
+ when: not ansible_check_mode and not idrac_bios_out.clear_pending.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/prepare.yml
new file mode 100644
index 000000000..46d74222a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/clear_pending_attributes/prepare.yml
@@ -0,0 +1,62 @@
+---
+- name: Prepare file to schedule a pending BIOS attributes job
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_port: "{{ lookup('env', 'IDRAC_PORT') }}"
+ tasks:
+ - name: Fetch Jobs Data
+ ansible.builtin.include_tasks:
+ file: ../__get_data.yml
+ vars:
+ url: "https://{{ idrac_ip }}:{{ idrac_port }}/redfish/v1/Managers\
+ /iDRAC.Embedded.1/Jobs?$expand=*($levels=1)"
+
+ - name: Fetch Bios Jobs Data
+ when: idrac_bios_uri_data.json.Members | length > 0
+ ansible.builtin.set_fact:
+ idrac_bios_jobs_items: "{{ idrac_bios_uri_data.json.Members
+ | json_query(query) }}"
+ vars:
+ query: "[?JobType=='BIOSConfiguration' && JobState=='Scheduled'
+ || JobState=='Scheduling' ]"
+ no_log: true
+
+ - name: Block for creating a bios job as a pre-requisite
+ when: idrac_bios_jobs_items | length == 0
+ block:
+ - name: Fetch IDRAC Data
+ ansible.builtin.include_tasks:
+ file: ../__get_data.yml
+ vars:
+ url: "https://{{ idrac_ip }}:{{ idrac_port }}/redfish/v1/\
+ Systems/System.Embedded.1/Bios"
+
+ - name: Fetch the existing boot mode
+ ansible.builtin.set_fact:
+ boot_mode: "{{ idrac_bios_uri_data.json.Attributes.BootMode }}"
+
+ - name: Set the boot mode
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ 'Bios' if (boot_mode == 'Uefi') else 'Uefi' }}"
+ when: set_boot_mode is not defined
+
+ - name: Update attributes with apply time as OnReset
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ idrac_ip }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: "OnReset"
+ attributes:
+ BootMode: "{{ set_boot_mode }}"
+ register: idrac_bios_change_bios_setting
+
+ - name: Verify job is scheduled
+ ansible.builtin.assert:
+ that:
+ - "'Successfully committed changes. The job is in pending state'
+ in idrac_bios_out.attributes.status_msg"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/converge.yml
new file mode 100644
index 000000000..6f8488153
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/converge.yml
@@ -0,0 +1,135 @@
+---
+- name: Converge file for negative scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ ########## Below snippet is commented because of Issue: JIT-285533 ########
+ # - name: Perform reset bios with invalid hostname
+ # ansible.builtin.import_role:
+ # name: idrac_bios
+ # vars:
+ # hostname: "randomHostname"
+ # username: "{{ lookup('env', 'IDRAC_USER') }}"
+ # password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ # validate_certs: false
+ # reset_bios: true
+ # ignore_unreachable: true
+
+ # - name: Assert reset bios with invalid hostname
+ # ansible.builtin.assert:
+ # that:
+ # - "'Unable to communicate with iDRAC randomHostname' in
+ # idrac_bios_out.reset_bios.msg"
+ ###########################################################################
+
+ - name: Block to reset bios with invalid username
+ block:
+ - name: Perform reset bios with invalid username
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "randomusername"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_bios: true
+ clear_pending: false
+ rescue:
+ - name: Verify reset bios with invalid username
+ ansible.builtin.assert:
+ that:
+ - "'HTTP Error 401' in ansible_failed_result.msg"
+
+ - name: Block for clear pending attributes with invalid value
+ block:
+ - name: Clear pending attributes with invalid value
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ clear_pending: yess
+ rescue:
+ - name: Assert clear pending attributes with invalid value
+ ansible.builtin.assert:
+ that: ansible_failed_result.msg is
+ search('unable to convert to bool')
+
+ - name: Block for reset bios with invalid value
+ block:
+ - name: Perform reset bios with invalid value
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_bios: truee
+ rescue:
+ - name: Assert perform reset bios with invalid value
+ ansible.builtin.assert:
+ that: ansible_failed_result.msg is
+ search('unable to convert to bool')
+
+ - name: Block for reset bios with invalid password
+ block:
+ - name: Perform reset bios with invalid password
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "randompassword"
+ validate_certs: false
+ clear_pending: false
+ reset_bios: true
+ rescue:
+ - name: Assert reset bios with invalid password
+ ansible.builtin.assert:
+ that: |-
+ ("'HTTP Error 401' in ansible_failed_result.msg")
+ or
+ ("'urlopen error timed out' in ansible_failed_result.msg")
+
+ - name: Block for reset type with invalid value
+ block:
+ - name: Perform reset type with invalid value
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ clear_pending: false
+ reset_bios: true
+ reset_type: graceful_restartt
+ rescue:
+ - name: Assert reset type with invalid value
+ ansible.builtin.assert:
+ that:
+ - ansible_failed_result.msg is
+ search('value of reset_type must be one of')
+ - ansible_failed_result.msg is
+ search('graceful_restart, force_restart')
+
+ - name: Clear pending attributes with reset
+ block:
+ - name: Clear pending attributes with reset
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ clear_pending: true
+ reset_bios: true
+ rescue:
+ - name: Assert clear pending attributes with reset
+ ansible.builtin.assert:
+ that: "ansible_failed_result.msg is
+ search('clear_pending and reset_bios is mutually exclusive')"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/molecule.yml
new file mode 100644
index 000000000..210914970
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/default/molecule.yml
@@ -0,0 +1,6 @@
+---
+scenario:
+ test_sequence:
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/converge.yml
new file mode 100644
index 000000000..44439ab07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/converge.yml
@@ -0,0 +1,159 @@
+---
+- name: Converge file for negative scenarios with maintenance window
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Block to update attributes with maintenance window and no start_time
+ block:
+ - name: Update attributes with maintenance window and no start_time
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ duration: 600
+ attributes:
+ BootMode: "Bios"
+ rescue:
+ - name: Assert update attributes with maintenance window
+ and no start_time
+ ansible.builtin.assert:
+ that: "ansible_failed_result.msg is
+ search('missing required arguments')"
+
+ - name: Block to update attributes with maintenance window
+ with invalid start_time
+ block:
+ - name: Update attributes with maintenance window with
+ invalid start_time
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022"
+ duration: 600
+ attributes:
+ BootMode: "Bios"
+ rescue:
+ - name: Assert update attributes with maintenance window
+ with invalid start_time
+ ansible.builtin.assert:
+ that: "idrac_bios_out.attributes.status_msg is search('The
+ maintenance time must be post-fixed with local offset
+ to -06:00.')"
+
+ - name: Block to update attributes with maintenance window
+ with invalid duration
+ block:
+ - name: Update attributes with maintenance window with invalid duration
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2022-30-09T05:15:40-21"
+ duration: "10 minutes"
+ attributes:
+ BootMode: "Bios"
+ rescue:
+ - name: Assert update attributes with maintenance window
+ with invalid duration
+ ansible.builtin.assert:
+ that:
+ - "ansible_failed_result.msg is
+ search('Validation of arguments failed')"
+
+ - name: Block to update attributes with maintenance window
+ with invalid apply time
+ block:
+ - name: Update attributes with maintenance window
+ with invalid apply time
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowBegin
+ maintenance_window:
+ start_time: "2022-30-09T05:15:40-21"
+ duration: 10
+ attributes:
+ BootMode: "Bios"
+ rescue:
+ - name: Assert update attributes with maintenance window
+ with invalid apply time
+ ansible.builtin.assert:
+ that:
+ - "ansible_failed_result.msg is search('value of
+ apply_time must be one of')"
+ - "ansible_failed_result.msg is search('Immediate,
+ OnReset, AtMaintenanceWindowStart, InMaintenanceWindowOnReset')"
+
+ - name: Block to update attributes with maintenance
+ window with invalid job wait
+ block:
+ - name: Update attributes with maintenance window with invalid job wait
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2023-05-18T05:17:40-05:00"
+ duration: 600
+ attributes:
+ BootMode: "Bios"
+ job_wait: truee
+ rescue:
+ - name: Assert update attributes with maintenance
+ window with invalid job wait
+ ansible.builtin.assert:
+ that: "ansible_failed_result.msg is
+ search('unable to convert to bool')"
+
+ - name: Block to update attributes with maintenance window
+ with invalid job wait timeout
+ block:
+ - name: Update attributes with maintenance window
+ with invalid job wait timeout
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "2023-05-18T05:17:40-05:00"
+ duration: 600
+ attributes:
+ BootMode: "Bios"
+ OneTimeBootMode: "Enabled"
+ BootSeqRetry: "Enabled"
+ job_wait_timeout: -10
+ rescue:
+ - name: Assert attributes with maintenance window
+ with invalid job wait timeout
+ ansible.builtin.assert:
+ that:
+ - "'The parameter job_wait_timeout value cannot
+ be negative or zero' in idrac_bios_out.attributes.msg"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/molecule.yml
new file mode 100644
index 000000000..210914970
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/negative_scenarios_with_maintenance_window/molecule.yml
@@ -0,0 +1,6 @@
+---
+scenario:
+ test_sequence:
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/converge.yml
new file mode 100644
index 000000000..6c2e8098f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/converge.yml
@@ -0,0 +1,32 @@
+---
+- name: Converge file for reset bios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Perform reset bios operation
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_bios: true
+
+ - name: Verify reset bios operation in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.reset_bios.status_msg == "BIOS reset to defaults has
+ been completed successfully."
+ - idrac_bios_out.clear_pending.skipped
+ - idrac_bios_out.attributes.skipped
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verify reset bios operation in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.reset_bios.status_msg == "Changes found
+ to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/molecule.yml
new file mode 100644
index 000000000..608be28b1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios/molecule.yml
@@ -0,0 +1,7 @@
+---
+scenario:
+ test_sequence:
+ - create
+ - check
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/converge.yml
new file mode 100644
index 000000000..03b93a73f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/converge.yml
@@ -0,0 +1,35 @@
+---
+- name: Converge file for reset type as force restart
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Perform reset operation with reset type as force restart
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_bios: true
+ reset_type: force_restart
+
+ - name: Verify reset bios operation with reset type as force
+ restart in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.reset_bios.status_msg == "BIOS reset to defaults
+ has been completed successfully."
+ - idrac_bios_out.clear_pending.skipped
+ - idrac_bios_out.attributes.skipped
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verify reset bios operation with reset type as force restart
+ in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.reset_bios.status_msg == "Changes found to
+ be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/molecule.yml
new file mode 100644
index 000000000..608be28b1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/reset_bios_with_reset_type_as_force_restart/molecule.yml
@@ -0,0 +1,7 @@
+---
+scenario:
+ test_sequence:
+ - create
+ - check
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/cleanup.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/cleanup.yml
new file mode 100644
index 000000000..a107545a8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/cleanup.yml
@@ -0,0 +1,17 @@
+---
+- name: CleanUp file for update attributes with apply time as Immediate
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Checking if the file boot_mode.txt exists
+ ansible.builtin.stat:
+ path: "/tmp/boot_mode.txt"
+ delegate_to: localhost
+ register: boot_mode_file
+
+ - name: Deleting the file if it exists
+ ansible.builtin.file:
+ path: "/tmp/boot_mode.txt"
+ state: absent
+ delegate_to: localhost
+ when: boot_mode_file.stat.exists
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/prepare.yml
new file mode 100644
index 000000000..a8bf8042b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/resources/prepare.yml
@@ -0,0 +1,30 @@
+---
+- name: Prepare file to update attributes with apply time as Immediate
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_port: "{{ lookup('env', 'IDRAC_PORT') }}"
+ tasks:
+ - name: Fetch IDRAC Data
+ ansible.builtin.include_tasks:
+ file: ../__get_data.yml
+ vars:
+ url: "https://{{ idrac_ip }}:{{ idrac_port }}/redfish/v1/\
+ Systems/System.Embedded.1/Bios"
+
+ - name: Fetch the existing boot mode
+ ansible.builtin.set_fact:
+ boot_mode: "{{ idrac_bios_uri_data.json.Attributes.BootMode }}"
+
+ - name: Set the boot mode
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ 'Bios' if (boot_mode == 'Uefi') else 'Uefi' }}"
+ when: set_boot_mode is not defined
+
+ - name: Copy the variable 'set_boot_mode' to a file
+ ansible.builtin.copy:
+ content: "{{ set_boot_mode }}"
+ dest: "/tmp/boot_mode.txt"
+ mode: "0755"
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/converge.yml
new file mode 100644
index 000000000..9c247b7a1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/converge.yml
@@ -0,0 +1,54 @@
+---
+- name: Converge file to update attributes with apply time as Immediate
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Read file content for set_boot_mode variable
+ ansible.builtin.command: cat /tmp/boot_mode.txt
+ register: file_content
+ check_mode: false
+ delegate_to: localhost
+ changed_when: true
+
+ - name: Set set_boot_mode variable
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ file_content.stdout }}"
+ delegate_to: localhost
+
+ - name: Update attributes with apply time as Immediate
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: Immediate
+ attributes:
+ BootMode: "{{ set_boot_mode }}"
+
+ - name: Assert update attributes with apply time as Immediate - normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Successfully applied the
+ BIOS attributes update."
+ - idrac_bios_out.reset_bios.skipped
+ - idrac_bios_out.clear_pending.skipped
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Assert update attributes with apply time as Immediate - check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Changes found to
+ be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Assert update attributes with apply time as Immediate
+ in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "No changes found
+ to be applied."
+ when: not ansible_check_mode and not idrac_bios_out.attributes.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/molecule.yml
new file mode 100644
index 000000000..df2ebca6b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate/molecule.yml
@@ -0,0 +1,6 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/converge.yml
new file mode 100644
index 000000000..1800ce04f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/converge.yml
@@ -0,0 +1,60 @@
+---
+- name: Converge file to update attributes with
+ apply time as Immediate with job wait
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_port: "{{ lookup('env', 'IDRAC_PORT') }}"
+ tasks:
+ - name: Read file content for set_boot_mode variable
+ ansible.builtin.command: cat /tmp/boot_mode.txt
+ register: file_content
+ check_mode: false
+ delegate_to: localhost
+ changed_when: true
+
+ - name: Set set_boot_mode variable
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ file_content.stdout }}"
+ delegate_to: localhost
+
+ - name: Update attributes with apply time as Immediate with job wait
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: Immediate
+ attributes:
+ BootMode: "{{ set_boot_mode }}"
+ job_wait: true
+
+ - name: Assert update attributes with apply time as Immediate - normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Successfully
+ applied the BIOS attributes update."
+ - idrac_bios_out.reset_bios.skipped
+ - idrac_bios_out.clear_pending.skipped
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Assert update attributes with apply time as Immediate
+ in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Changes found
+ to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Assert update attributes with apply time as Immediate in
+ idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "No changes found
+ to be applied."
+ when: not ansible_check_mode and not idrac_bios_out.attributes.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/molecule.yml
new file mode 100644
index 000000000..df2ebca6b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_immediate_with_jobwait/molecule.yml
@@ -0,0 +1,6 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/converge.yml
new file mode 100644
index 000000000..fb36db3b7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/converge.yml
@@ -0,0 +1,50 @@
+---
+- name: Converge file for update attributes with apply time as reset
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_port: "{{ lookup('env', 'IDRAC_PORT') }}"
+ tasks:
+ - name: Read file content for set_boot_mode variable
+ ansible.builtin.command: cat /tmp/boot_mode.txt
+ register: file_content
+ check_mode: false
+ delegate_to: localhost
+ changed_when: true
+
+ - name: Set set_boot_mode variable
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ file_content.stdout }}"
+ delegate_to: localhost
+
+ - name: Update attributes with apply time as reset
+ ansible.builtin.include_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: OnReset
+ attributes:
+ BootMode: "{{ set_boot_mode }}"
+ job_wait: false
+
+ - name: Assert update attributes with apply time as reset in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Successfully committed
+ changes. The job is in pending state. The changes will be
+ applied OnReset"
+ - idrac_bios_out.reset_bios.skipped
+ - idrac_bios_out.clear_pending.skipped
+
+ when: not ansible_check_mode
+
+ - name: Assert update attributes with apply time as reset in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Changes found
+ to be applied."
+ when: ansible_check_mode
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/molecule.yml
new file mode 100644
index 000000000..d3bacf777
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset/molecule.yml
@@ -0,0 +1,14 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
+scenario:
+ test_sequence:
+ - create
+ - prepare
+ - check
+ - converge
+ - cleanup
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/converge.yml
new file mode 100644
index 000000000..7ac8c1bad
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/converge.yml
@@ -0,0 +1,74 @@
+---
+- name: Converge file for update attributes with apply
+ time at reset of maintenance window
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_port: "{{ lookup('env', 'IDRAC_PORT') }}"
+ tasks:
+ - name: Read file content for set_boot_mode variable
+ ansible.builtin.command: cat /tmp/boot_mode.txt
+ register: file_content
+ check_mode: false
+ delegate_to: localhost
+ changed_when: true
+
+ - name: Set set_boot_mode variable
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ file_content.stdout }}"
+ delegate_to: localhost
+
+ - name: Get tomorrow's date
+ ansible.builtin.command: date -d "+1 day" +'%Y-%m-%dT%H:%M:%S'
+ register: tomorrow_date
+ changed_when: false
+
+ - name: Convert tomorrow's date to string
+ ansible.builtin.set_fact:
+ date_str: "{{ tomorrow_date.stdout }}"
+
+ - name: Fetch IDRAC time offset
+ ansible.builtin.include_tasks:
+ file: ../__get_data.yml
+ vars:
+ url: "https://{{ idrac_ip }}:{{ idrac_port }}/redfish/v1\
+ /Managers/iDRAC.Embedded.1"
+
+ - name: Set the local offset
+ when: idrac_bios_uri_data.json.DateTimeLocalOffset is defined
+ ansible.builtin.set_fact:
+ local_offset: "{{ idrac_bios_uri_data.json.DateTimeLocalOffset }}"
+
+ - name: Update attributes with apply time at reset of maintenance window
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: InMaintenanceWindowOnReset
+ maintenance_window:
+ start_time: "{{ date_str }}{{ local_offset }}"
+ duration: 600
+ attributes:
+ BootMode: "{{ set_boot_mode }}"
+
+ - name: Assert update attributes with apply time at reset
+ of maintenance window normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully committed changes. The job is in pending state'
+ in idrac_bios_out.attributes.status_msg"
+ - idrac_bios_out.reset_bios.skipped
+ - idrac_bios_out.clear_pending.skipped
+ when: not ansible_check_mode
+
+ - name: Assert Update attributes with apply time at reset of
+ maintenance window in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Changes found to
+ be applied."
+ when: ansible_check_mode
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/molecule.yml
new file mode 100644
index 000000000..d3bacf777
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_on_reset_with_maintenance_window/molecule.yml
@@ -0,0 +1,14 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
+scenario:
+ test_sequence:
+ - create
+ - prepare
+ - check
+ - converge
+ - cleanup
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/converge.yml
new file mode 100644
index 000000000..43004cea7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/converge.yml
@@ -0,0 +1,74 @@
+---
+- name: Converge file to update attributes with apply time
+ at start of maintenance window
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_port: "{{ lookup('env', 'IDRAC_PORT') }}"
+ tasks:
+ - name: Read file content for set_boot_mode variable
+ ansible.builtin.command: cat /tmp/boot_mode.txt
+ register: file_content
+ check_mode: false
+ delegate_to: localhost
+ changed_when: true
+
+ - name: Set set_boot_mode variable
+ ansible.builtin.set_fact:
+ set_boot_mode: "{{ file_content.stdout }}"
+ delegate_to: localhost
+
+ - name: Get tomorrow's date
+ ansible.builtin.command: date -d "+1 day" +'%Y-%m-%dT%H:%M:%S'
+ register: tomorrow_date
+ changed_when: false
+
+ - name: Convert tomorrow's date to string
+ ansible.builtin.set_fact:
+ date_str: "{{ tomorrow_date.stdout }}"
+
+ - name: Fetch IDRAC time offset
+ ansible.builtin.include_tasks:
+ file: ../__get_data.yml
+ vars:
+ url: "https://{{ idrac_ip }}:{{ idrac_port }}/redfish/v1\
+ /Managers/iDRAC.Embedded.1"
+
+ - name: Set the local offset
+ when: idrac_bios_uri_data.json.DateTimeLocalOffset is defined
+ ansible.builtin.set_fact:
+ local_offset: "{{ idrac_bios_uri_data.json.DateTimeLocalOffset }}"
+
+ - name: Update attributes with apply time at start of maintenance window
+ ansible.builtin.import_role:
+ name: idrac_bios
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ apply_time: AtMaintenanceWindowStart
+ maintenance_window:
+ start_time: "{{ date_str }}{{ local_offset }}"
+ duration: 600
+ attributes:
+ BootMode: "{{ set_boot_mode }}"
+
+ - name: Assert update attributes with apply time at start of
+ maintenance window in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully committed changes. The job is in pending state' in
+ idrac_bios_out.attributes.status_msg"
+ - idrac_bios_out.reset_bios.skipped
+ - idrac_bios_out.clear_pending.skipped
+ when: not ansible_check_mode
+
+ - name: Assert update attributes with apply time at start of
+ maintenance window in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_bios_out.attributes.status_msg == "Changes found to
+ be applied."
+ when: ansible_check_mode
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/molecule.yml
new file mode 100644
index 000000000..d3bacf777
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/molecule/update_attributes_with_maintenance_window/molecule.yml
@@ -0,0 +1,14 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
+scenario:
+ test_sequence:
+ - create
+ - prepare
+ - check
+ - converge
+ - cleanup
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/tasks/main.yml
new file mode 100644
index 000000000..18567035c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/tasks/main.yml
@@ -0,0 +1,67 @@
+---
+# tasks file for idrac_bios
+- name: Performing idrac_bios operation
+ block:
+ - name: Checking attributes/clear_pending and reset_bios is mutually exclusive
+ ansible.builtin.fail:
+ msg: "{{ idrac_bios_mutual_exclusive_msg }}"
+ when:
+ - (attributes is defined and (reset_bios is defined and reset_bios))
+ or
+ (clear_pending and (reset_bios is defined and reset_bios))
+
+ - name: Setting idrac_inputs
+ ansible.builtin.set_fact:
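+ # The "&idrac_inputs" anchor lets each dellemc.openmanage.idrac_bios call below reuse these shared connection arguments through the "<<: *idrac_inputs" merge key.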
+ idrac_inputs: &idrac_inputs
+ idrac_ip: "{{ hostname }}"
+ idrac_port: "{{ https_port }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ https_timeout }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ no_log: true
+
+ - name: Performing clear pending operation
+ dellemc.openmanage.idrac_bios:
+ <<: *idrac_inputs
+ clear_pending: "{{ clear_pending }}"
+ register: idrac_bios_clear_pending_out
+ delegate_to: "{{ idrac_bios_delegate }}"
+ when: clear_pending
+
+ - name: Configuring the bios attributes
+ dellemc.openmanage.idrac_bios:
+ <<: *idrac_inputs
+ attributes: "{{ attributes }}"
+ apply_time: "{{ apply_time }}"
+ maintenance_window: "{{ maintenance_window | default(omit) }}"
+ reset_type: "{{ reset_type }}"
+ register: idrac_bios_attributes_out
+ delegate_to: "{{ idrac_bios_delegate }}"
+ when: attributes is defined
+
+ - name: Performing the reset bios operation
+ dellemc.openmanage.idrac_bios:
+ <<: *idrac_inputs
+ reset_bios: "{{ reset_bios }}"
+ reset_type: "{{ reset_type }}"
+ register: idrac_bios_reset_bios_out
+ delegate_to: "{{ idrac_bios_delegate }}"
+ when: reset_bios is defined and reset_bios
+
+ always:
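+ # Aggregate the three operation results (clear_pending, attributes, reset_bios) into a single idrac_bios_out fact; operations that never ran fall back to the empty defaults from vars/main.yml.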
+ - name: Set fact for idrac_bios_out
+ ansible.builtin.set_fact:
+ idrac_bios_out: "{{ idrac_bios_out | default({}) | combine({item.key: item.value}) }}"
+ with_items:
+ - { "key": "clear_pending", "value": "{{ idrac_bios_clear_pending_out }}" }
+ - { "key": "attributes", "value": "{{ idrac_bios_attributes_out }}" }
+ - { "key": "reset_bios", "value": "{{ idrac_bios_reset_bios_out }}" }
+ no_log: true
+
+ - name: Printing idrac_bios_out
+ ansible.builtin.debug:
+ var: idrac_bios_out
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/inventory
index 878877b07..878877b07 100644
--- a/ansible_collections/dellemc/os6/roles/os6_qos/tests/inventory
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/inventory
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/test.yml
new file mode 100644
index 000000000..ec7d7f005
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Testing idrac_bios
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_bios
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_bios/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_bios/vars/main.yml
new file mode 100644
index 000000000..cdba5a342
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_bios/vars/main.yml
@@ -0,0 +1,7 @@
+---
+# vars file for idrac_bios
+idrac_bios_mutual_exclusive_msg: "attributes/clear_pending and reset_bios is mutually exclusive."
+idrac_bios_clear_pending_out: ""
+idrac_bios_attributes_out: ""
+idrac_bios_reset_bios_out: ""
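+# Target host that the idrac_bios module calls are delegated to; set the RUNON environment variable to override the default of localhost.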
+idrac_bios_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_boot/README.md
new file mode 100644
index 000000000..f7903b07e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/README.md
@@ -0,0 +1,373 @@
+# idrac_boot
+
+Role to configure the boot order settings
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role
+```
+ansible
+docker
+molecule
+python
+```
+
+### Production
+Requirements to use the role
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>boot_options</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>list</td>
+ <td>- Options to enable or disable the boot devices.<br>- This is mutually exclusive with I(boot_order), I(boot_source_override_mode), I(boot_source_override_enabled), I(boot_source_override_target), and I(uefi_target_boot_source_override).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;boot_option_reference</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- FQDD of the boot device.<br>- This is mutually exclusive with I(display_name).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;display_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Display name of the boot source device.<br>- This is mutually exclusive with I(boot_option_reference).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;enabled</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Enable or disable the boot device.</td>
+ </tr>
+ <tr>
+ <td>boot_order</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>list</td>
+ <td>- This option allows you to set the boot devices in the required boot order sequence.<br>- This is mutually exclusive with I(boot_options).</td>
+ </tr>
+ <tr>
+ <td>boot_source_override_mode</td>
+ <td>false</td>
+ <td></td>
+ <td>'legacy', 'uefi'</td>
+ <td>str</td>
+ <td>- The BIOS boot mode (either Legacy or UEFI) to be used when the I(boot_source_override_target) boot source is booted.<br>- C(legacy) The system boots in non-UEFI (Legacy) boot mode to the I(boot_source_override_target).<br>- C(uefi) The system boots in UEFI boot mode to the I(boot_source_override_target).<br>- This is mutually exclusive with I(boot_options).</td>
+ </tr>
+ <tr>
+ <td>boot_source_override_enabled</td>
+ <td>false</td>
+ <td></td>
+ <td>'continuous', 'disabled', 'once'</td>
+ <td>str</td>
+ <td>- The state of the Boot Source Override feature.<br>- C(disabled), the system boots normally.<br>- C(once), the system boots one time to the I(boot_source_override_target).<br>- C(continuous), the system boots to the target specified in the I(boot_source_override_target) until this property is set to Disabled.<br>- The state is set to C(once) for the one-time boot override and C(continuous) for the remain-active-until-cancelled override. If the state is set to C(once) or C(continuous), the value is reset to C(disabled) after the I(boot_source_override_target) actions have completed successfully.<br>- Changes to these options do not alter the BIOS persistent boot order configuration.<br>- This is mutually exclusive with I(boot_options).</td>
+ </tr>
+ <tr>
+ <td>boot_source_override_target</td>
+ <td>false</td>
+ <td></td>
+ <td>'uefi_http', 'sd_card', 'uefi_target', 'utilities', 'bios_setup', 'hdd', 'cd', 'floppy', 'pxe', 'none'</td>
+ <td>str</td>
+ <td>- The boot source override targets the device to use during the next boot instead of the normal boot device.<br>- C(pxe) performs PXE boot from the primary NIC.<br>- C(floppy), C(cd), C(hdd), and C(sd_card) performs boot from their devices respectively.<br>- C(bios_setup) performs boot into the native BIOS setup.<br>- C(uefi_http) performs boot from a URI over HTTP.<br>- C(utilities) performs boot from the local utilities.<br>- C(uefi_target) performs boot from the UEFI device path found in I(uefi_target_boot_source_override).<br>- C(none) if the I(boot_source_override_target) is set to a value other than C(none) then the I(boot_source_override_enabled) is automatically set to C(once).<br>- Changes to these options do not alter the BIOS persistent boot order configuration.<br>- This is mutually exclusive with I(boot_options).</td>
+ </tr>
+ <tr>
+ <td>uefi_target_boot_source_override</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The UEFI device path of the device from which to boot when I(boot_source_override_target) is C(uefi_target).<br>- If I(boot_source_override_target) is set to C(uefi_target), then I(boot_source_override_enabled) cannot be set to C(continuous) because this setting is defined in UEFI as a one-time-boot setting.<br>- Changes to these options do not alter the BIOS persistent boot order configuration.<br>- This is required if I(boot_source_override_target) is C(uefi_target).<br>- This is mutually exclusive with I(boot_options).</td>
+ </tr>
+ <tr>
+ <td>reset_type</td>
+ <td>false</td>
+ <td>graceful_restart</td>
+ <td>'graceful_restart', 'force_restart', 'none'</td>
+ <td>str</td>
+ <td>- C(none) Host system is not rebooted and I(job_wait) is not applicable.<br>- C(force_restart) Forcefully reboot the Host system.<br>- C(graceful_restart) Gracefully reboot the Host system.</td>
+ </tr>
+ <tr>
+ <td>job_wait</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Provides the option to wait for job completion.<br>- This is applicable when I(reset_type) is C(force_restart) or C(graceful_restart).</td>
+ </tr>
+ <tr>
+ <td>job_wait_timeout</td>
+ <td>false</td>
+ <td>900</td>
+ <td></td>
+ <td>int</td>
+ <td>- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.<br>- This option is applicable when I(job_wait) is C(true).</td>
+ </tr>
+ <tr>
+ <td>resource_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Redfish ID of the resource.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_boot_out</td>
+ <td>{
+ "boot": {
+ "BootOptions": {
+ "Description": "Collection of BootOptions",
+ "Members": [
+ {
+ "BootOptionEnabled": true,
+ "BootOptionReference": "Boot0005",
+ "Description": "Current settings of the UEFI Boot option",
+ "DisplayName": "Integrated RAID Controller 1: VMware ESXi",
+ "Id": "Boot0005",
+ "Name": "Uefi Boot Option",
+ "UefiDevicePath": "HD(1,GPT,740C46A9-4A43-47AA-9C09-65E821376E48,0x40,0x32000)/\\EFI\\VMware\\safeboot64.efi"
+ },
+ {
+ "BootOptionEnabled": false,
+ "BootOptionReference": "Boot0004",
+ "Description": "Current settings of the UEFI Boot option",
+ "DisplayName": "Unavailable: Windows Boot Manager",
+ "Id": "Boot0004",
+ "Name": "Uefi Boot Option",
+ "UefiDevicePath": "HD(1,GPT,AEB2A96B-5C31-4F8F-9927-B48B08D907BE,0x800,0xF9800)/\\EFI\\Microsoft\\Boot\\bootmgfw.efi"
+ },
+ {
+ "BootOptionEnabled": true,
+ "BootOptionReference": "Boot0006",
+ "Description": "Current settings of the UEFI Boot option",
+ "DisplayName": "Unavailable: Red Hat Enterprise Linux",
+ "Id": "Boot0006",
+ "Name": "Uefi Boot Option",
+ "UefiDevicePath": "HD(1,GPT,14759088-1AE7-4EA4-A60B-BE82546E21B6,0x800,0x12C000)/\\EFI\\redhat\\shimx64.efi"
+ },
+ {
+ "BootOptionEnabled": true,
+ "BootOptionReference": "Boot0003",
+ "Description": "Current settings of the UEFI Boot option",
+ "DisplayName": "Unavailable: Rocky Linux",
+ "Id": "Boot0003",
+ "Name": "Uefi Boot Option",
+ "UefiDevicePath": "HD(1,GPT,ADC59C44-A0D3-4917-9376-33EE44DE96F0,0x800,0x12C000)/\\EFI\\rocky\\shimx64.efi"
+ }
+ ],
+ "Name": "Boot Options Collection"
+ },
+ "BootOrder": [
+ "Boot0005",
+ "Boot0004",
+ "Boot0006",
+ "Boot0003"
+ ],
+ "BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "UEFI",
+ "BootSourceOverrideTarget": "None",
+ "UefiTargetBootSourceOverride": null
+ },
+ "job": {
+ "ActualRunningStartTime": "2023-06-19T09:48:41",
+ "ActualRunningStopTime": "2023-06-19T09:51:53",
+ "CompletionTime": "2023-06-19T09:51:53",
+ "Description": "Job Instance",
+ "EndTime": "TIME_NA",
+ "Id": "JID_871679370016",
+ "JobState": "Completed",
+ "JobType": "BIOSConfiguration",
+ "Message": "Job completed successfully.",
+ "MessageArgs": [],
+ "MessageId": "PR19",
+ "Name": "Configure: BIOS.Setup.1-1",
+ "PercentComplete": 100,
+ "StartTime": "2023-06-19T09:45:36",
+ "TargetSettingsURI": null
+ },
+ "msg": "Successfully updated the boot settings."
+}</td>
+ <td>Role output of the idrac_boot job.</td>
+ </tr>
+ </tbody>
+</table>
+
+## Example Playbook
+
+```
+- name: Configure the system boot options settings.
+ ansible.builtin.include_role:
+ name: dellemc.openmanage.idrac_boot
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ - boot_option_reference: NIC.PxeDevice.2-1
+ enabled: true
+```
+
+```
+- name: Configure the boot order settings.
+ ansible.builtin.include_role:
+ name: dellemc.openmanage.idrac_boot
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ boot_order:
+ - Boot0001
+ - Boot0002
+ - Boot0004
+ - Boot0003
+```
+
+```
+- name: Configure the boot source override mode.
+ ansible.builtin.include_role:
+ name: dellemc.openmanage.idrac_boot
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ boot_source_override_mode: legacy
+ boot_source_override_target: cd
+ boot_source_override_enabled: once
+```
+
+```
+- name: Configure the UEFI target settings.
+ ansible.builtin.include_role:
+ name: dellemc.openmanage.idrac_boot
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ boot_source_override_mode: uefi
+ boot_source_override_target: uefi_target
+ uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)"
+```
+
+```
+- name: Configure the boot source override mode as pxe.
+  ansible.builtin.include_role:
+    name: dellemc.openmanage.idrac_boot
+  vars:
+    hostname: "192.168.0.1"
+    username: "user_name"
+    password: "user_password"
+    boot_source_override_mode: legacy
+    boot_source_override_target: pxe
+    boot_source_override_enabled: continuous
+```
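+
+The role exposes its result in the `idrac_boot_out` variable shown in the table above. The snippet below is a minimal sketch (the hostname, credentials, and boot order values are placeholders) of reusing that output to report the result message and, when a change was applied, the BIOS configuration job details:
+
+```
+- name: Configure the boot order settings.
+  ansible.builtin.include_role:
+    name: dellemc.openmanage.idrac_boot
+  vars:
+    hostname: "192.168.0.1"
+    username: "user_name"
+    password: "user_password"
+    boot_order:
+      - Boot0001
+      - Boot0002
+
+- name: Report the role result message.
+  ansible.builtin.debug:
+    msg: "{{ idrac_boot_out.msg }}"
+
+- name: Show the BIOS configuration job details when a change was applied.
+  ansible.builtin.debug:
+    var: idrac_boot_out.job
+  when: idrac_boot_out.changed
+```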
+
+Author Information
+------------------
+
+Dell Technologies <br>
+Felix Stephen (felix_s@dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/defaults/main.yml
new file mode 100644
index 000000000..8a8919ff5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+reset_type: graceful_restart
+job_wait: true
+job_wait_timeout: 900
+https_port: 443
+https_timeout: 30
+validate_certs: true
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/handlers/main.yml
new file mode 100644
index 000000000..143bdf802
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_boot
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/argument_specs.yml
new file mode 100644
index 000000000..d90140e06
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/argument_specs.yml
@@ -0,0 +1,133 @@
+---
+argument_specs:
+ main:
+ version_added: "8.0.0"
+ short_description: Configure the boot order settings
+ description:
+ - This role allows the configuration of the boot order settings.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ boot_options:
+ description:
+ - Options to enable or disable the boot devices.
+ - This is mutually exclusive with I(boot_order), I(boot_source_override_mode),
+ I(boot_source_override_enabled), I(boot_source_override_target), and I(uefi_target_boot_source_override).
+ type: list
+ elements: dict
+ options:
+ boot_option_reference:
+ description:
+ - FQDD of the boot device.
+ - This is mutually exclusive with I(display_name).
+ type: str
+ display_name:
+ description:
+ - Display name of the boot source device.
+ - This is mutually exclusive with I(boot_option_reference).
+ type: str
+ enabled:
+ description: Enable or disable the boot device.
+ type: bool
+ required: true
+ boot_order:
+ description:
+ - This option allows setting the boot devices in the required boot order sequence.
+ - This is mutually exclusive with I(boot_options).
+ type: list
+ elements: str
+ boot_source_override_mode:
+ description:
+ - The BIOS boot mode (either Legacy or UEFI) to be used when the I(boot_source_override_target)
+ boot source is booted.
+ - C(legacy) boots the system in non-UEFI (Legacy) boot mode to the I(boot_source_override_target).
+ - C(uefi) boots the system in UEFI boot mode to the I(boot_source_override_target).
+ - This is mutually exclusive with I(boot_options).
+ type: str
+ choices: [legacy, uefi]
+ boot_source_override_enabled:
+ description:
+ - The state of the Boot Source Override feature.
+ - C(disabled), the system boots normally.
+ - C(once), the system boots one time to the I(boot_source_override_target).
+ - C(continuous), the system boots to the target specified in the I(boot_source_override_target)
+ until this property is set to Disabled.
+ - The state is set to C(once) for a one-time boot override and to C(continuous) for a
+ remain-active-until-cancelled override. If the state is set to C(once) or C(continuous), the value is reset
+ to C(disabled) after the I(boot_source_override_target) actions have completed successfully.
+ - Changes to these options do not alter the BIOS persistent boot order configuration.
+ - This is mutually exclusive with I(boot_options).
+ choices: [continuous, disabled, once]
+ boot_source_override_target:
+ description:
+ - The boot source override target device to use during the next boot instead of the normal boot device.
+ - C(pxe) performs PXE boot from the primary NIC.
+ - C(floppy), C(cd), C(hdd), and C(sd_card) perform boot from their respective devices.
+ - C(bios_setup) performs boot into the native BIOS setup.
+ - C(uefi_http) performs boot from a URI over HTTP.
+ - C(utilities) performs boot from the local utilities.
+ - C(uefi_target) performs boot from the UEFI device path found in I(uefi_target_boot_source_override).
+ - C(none) performs no boot source override. If I(boot_source_override_target) is set to a value other than
+ C(none), then I(boot_source_override_enabled) is automatically set to C(once).
+ - Changes to these options do not alter the BIOS persistent boot order configuration.
+ - This is mutually exclusive with I(boot_options).
+ type: str
+ choices: [uefi_http, sd_card, uefi_target, utilities, bios_setup, hdd, cd, floppy, pxe, none]
+ uefi_target_boot_source_override:
+ description:
+ - The UEFI device path of the device from which to boot when I(boot_source_override_target) is C(uefi_target).
+ - If I(boot_source_override_target) is set to C(uefi_target), then I(boot_source_override_enabled) cannot be
+ set to C(continuous) because this setting is defined in UEFI as a one-time-boot setting.
+ - Changes to these options do not alter the BIOS persistent boot order configuration.
+ - This is required if I(boot_source_override_target) is C(uefi_target).
+ - This is mutually exclusive with I(boot_options).
+ type: str
+ reset_type:
+ description:
+ - C(none) does not reboot the Host system, and I(job_wait) is not applicable.
+ - C(force_restart) forcefully reboots the Host system.
+ - C(graceful_restart) gracefully reboots the Host system.
+ type: str
+ choices: [graceful_restart, force_restart, none]
+ default: graceful_restart
+ job_wait:
+ description:
+ - Provides the option to wait for job completion.
+ - This is applicable when I(reset_type) is C(force_restart) or C(graceful_restart).
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 900
+ resource_id:
+ description: Redfish ID of the resource.
+ type: str
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/main.yml
new file mode 100644
index 000000000..116f8e05a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/meta/main.yml
@@ -0,0 +1,26 @@
+galaxy_info:
+ role_name: idrac_boot
+ author: "Felix Stephen"
+ description: Role to configure the boot order settings
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/converge.yml
new file mode 100644
index 000000000..7db461ce1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/converge.yml
@@ -0,0 +1,119 @@
+---
+- name: Testing boot_options_using_boot_option_reference_enabled_true
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_option_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1/BootOptions?$expand=*($levels=1)"
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Fetching boot_option_reference from iDRAC
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ boot_option_uri }}"
+ method: GET
+ register: result_data
+ check_mode: false
+ no_log: true
+
+ - name: Extracting BootOptionReference from output
+ ansible.builtin.set_fact:
+ data: "{{ result_data.json.Members[0].BootOptionReference }}"
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure enabled is false on first boot_option_reference
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_options:
+ - boot_option_reference: "{{ data | default('') }}"
+ enabled: false
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115424 - Validate boot_options using boot_option_reference and enabled true
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_options:
+ - boot_option_reference: "{{ data | default('') }}"
+ enabled: true
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115424 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115424 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Successfully updated the boot settings."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115424 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_boot_option_reference_enabled_true/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/converge.yml
new file mode 100644
index 000000000..9bf8ed1e4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/converge.yml
@@ -0,0 +1,119 @@
+---
+- name: Testing boot_options_using_display_name_enabled_false
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_option_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1/BootOptions?$expand=*($levels=1)"
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Fetching display_name from iDRAC
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ boot_option_uri }}"
+ method: GET
+ register: result_data
+ check_mode: false
+ no_log: true
+
+ - name: Extracting DisplayName from output
+ ansible.builtin.set_fact:
+ data: "{{ result_data.json.Members[0].DisplayName }}"
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure enabled is true for first display_name
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_options:
+ - display_name: "{{ data | default('') }}"
+ enabled: true
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115426 - Validate boot_options using display_name and enabled false
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_options:
+ - display_name: "{{ data | default('') }}"
+ enabled: false
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115426 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115426 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Successfully updated the boot settings."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115426 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_options_using_display_name_enabled_false/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/converge.yml
new file mode 100644
index 000000000..92d1958d7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/converge.yml
@@ -0,0 +1,120 @@
+---
+- name: Testing boot_order_using_legacy_mode_force_restart
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_order_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1/"
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Fetching boot order from iDRAC
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ boot_order_uri }}"
+ method: GET
+ register: result_data
+ check_mode: false
+ no_log: true
+
+ - name: Extracting BootOrder from output
+ ansible.builtin.set_fact:
+ data: "{{ result_data.json.Boot.BootOrder | default([]) }}"
+
+ - name: Reversing the boot order
+ ansible.builtin.set_fact:
+ reverse_boot_order: "{{ data | reverse | list }}"
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot mode is legacy
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_mode: legacy
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115427 - Validate boot_order with legacy mode with force_restart
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_order: "{{ reverse_boot_order }}"
+ reset_type: force_restart
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115427 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115427 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Successfully updated the boot settings."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115427 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_legacy_mode_force_restart/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/converge.yml
new file mode 100644
index 000000000..58cd441fe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/converge.yml
@@ -0,0 +1,120 @@
+---
+- name: Testing boot_order_using_uefi_mode_graceful_restart
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_order_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1/"
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Fetching boot order from iDRAC
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ boot_order_uri }}"
+ method: GET
+ register: result_data
+ check_mode: false
+ no_log: true
+
+ - name: Extracting BootOrder from output
+ ansible.builtin.set_fact:
+ data: "{{ result_data.json.Boot.BootOrder | default([]) }}"
+
+ - name: Reversing the boot order
+ ansible.builtin.set_fact:
+ reverse_boot_order: "{{ data | reverse | list }}"
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot mode is uefi
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_mode: uefi
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115428 - Validate boot_order with uefi mode with graceful_restart
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_order: "{{ reverse_boot_order }}"
+ reset_type: graceful_restart
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115428 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115428 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Successfully updated the boot settings."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115428 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_order_using_uefi_mode_graceful_restart/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/converge.yml
new file mode 100644
index 000000000..076cdd755
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/converge.yml
@@ -0,0 +1,102 @@
+---
+- name: Testing boot_source_override_enabled_as_continuous_reset_type_none
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot_source_override_enabled is disabled
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_enabled: disabled
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115433 - Validate boot_source_override_enabled as continuous
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_enabled: continuous
+ reset_type: none
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115433 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115433 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "The boot settings job is triggered successfully."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115433 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_continuous_reset_type_none/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/converge.yml
new file mode 100644
index 000000000..8c8d2d443
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/converge.yml
@@ -0,0 +1,102 @@
+---
+- name: Testing boot_source_override_enabled_as_disabled_reset_type_none
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot_source_override_enabled is continuous
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_enabled: continuous
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115431 - Validate boot_source_override_enabled as disabled
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_enabled: disabled
+ reset_type: none
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115431 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115431 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "The boot settings job is triggered successfully."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115431 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_disabled_reset_type_none/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/converge.yml
new file mode 100644
index 000000000..50bb281b2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/converge.yml
@@ -0,0 +1,102 @@
+---
+- name: Testing boot_source_override_enabled_as_once_reset_type_none
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot_source_override_enabled is disabled
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_enabled: disabled
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115432 - Validate boot_source_override_enabled as once
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_enabled: once
+ reset_type: none
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115432 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115432 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "The boot settings job is triggered successfully."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115432 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_enabled_as_once_reset_type_none/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/converge.yml
new file mode 100644
index 000000000..86b7b39ea
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/converge.yml
@@ -0,0 +1,116 @@
+---
+- name: Testing boot_source_override_mode_legacy_job_wait_false
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ job_status_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Managers/iDRAC.Embedded.1/Jobs"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot mode is uefi
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_mode: uefi
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115429 - Validate boot_source_override_mode as legacy with job_wait false
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_mode: legacy
+ job_wait: false
+
+ - name: Job tracking after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ job_status_uri }}/{{ idrac_boot_out.job.Id }}"
+ method: GET
+ register: job_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: job_status_result.json.JobState == "Completed"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115429 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115429 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "The boot settings job is triggered successfully."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115429 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_legacy_job_wait_false/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/converge.yml
new file mode 100644
index 000000000..a2b6ef922
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/converge.yml
@@ -0,0 +1,129 @@
+---
+- name: Testing boot_source_override_mode_uefi_with_resource_id
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ system_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems"
+ lc_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ job_status_uri: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Managers/iDRAC.Embedded.1/Jobs"
+ retry_count: 60
+ delay_count: 30
+ tasks:
+ - name: Preparing set_fact for uri
+ ansible.builtin.set_fact:
+ uri_input: &uri_input
+ user: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ return_content: true
+ force_basic_auth: true
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+ - name: Fetching resource_id from iDRAC
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ system_uri }}"
+ method: GET
+ register: result_data
+ check_mode: false
+ no_log: true
+
+ - name: Extracting resource_id from output
+ ansible.builtin.set_fact:
+ resource_id_data: "{{ result_data.json.Members[0]['@odata.id'] | split('/') | last }}"
+
+ - name: Checking for LCStatus before running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Pre-requisite - Making sure boot mode is legacy
+ check_mode: false
+ ansible.builtin.import_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_mode: legacy
+ tags: molecule-idempotence-notest
+
+ - name: Checking for LCStatus after running pre-requisite
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: TC-115430 - Validate boot_source_override_mode as uefi with resource_id
+ ansible.builtin.include_role:
+ name: "idrac_boot"
+ vars:
+ boot_source_override_mode: uefi
+ resource_id: "{{ resource_id_data }}"
+
+ - name: Job tracking after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ job_status_uri }}/{{ idrac_boot_out.job.Id }}"
+ method: GET
+ register: job_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: job_status_result.json.JobState == "Completed"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Checking for LCStatus after performing operation
+ ansible.builtin.uri:
+ <<: *uri_input
+ url: "{{ lc_uri }}"
+ method: POST
+ body: {}
+ register: lc_status_result
+ check_mode: false
+ when: idrac_boot_out.changed # noqa: no-handler
+ until: lc_status_result.json.LCStatus == "Ready"
+ retries: "{{ retry_count }}"
+ delay: "{{ delay_count }}"
+ no_log: true
+
+ - name: Asserting TC-115430 in check mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting TC-115430 in normal mode
+ ansible.builtin.assert:
+ that: idrac_boot_out.msg == "The boot settings job is triggered successfully."
+ when: not ansible_check_mode and idrac_boot_out.changed
+
+ - name: Asserting TC-115430 in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_boot_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/boot_source_override_mode_uefi_with_resource_id/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/converge.yml
new file mode 100644
index 000000000..08e7ab3be
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/converge.yml
@@ -0,0 +1,311 @@
+---
+- name: Play 1 - Negative Scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Play 1 - TC-115436 - Ansible - Role - idrac_boot - Providing wrong hostname
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "WrongHostname"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ ignore_unreachable: true
+
+ - name: Verify - TC-115436 - Ansible - Role - idrac_boot - Providing wrong hostname
+ ansible.builtin.assert:
+ that:
+ - "'Unable to communicate with iDRAC WrongHostname' in idrac_boot_out.msg"
+
+ - name: TC-115437 - Ansible - Role - idrac_boot - Providing wrong username
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "wrongUsername"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ register: idrac_boot_error_reg
+ ignore_errors: true
+
+ - name: Verify - TC-115437 - Ansible - Role - idrac_boot - Providing wrong username
+ ansible.builtin.assert:
+ that:
+ - "'Unable to communicate with iDRAC' in idrac_boot_out.msg"
+
+ - name: TC-115438 - Ansible - Role - idrac_boot - Providing wrong password
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "wrongPassword"
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_boot_error_reg
+
+ - name: Verify - TC-115438 - Ansible - Role - idrac_boot - Providing wrong password
+ ansible.builtin.assert:
+ that:
+ - "'Unable to communicate with iDRAC' in idrac_boot_out.msg"
+
+- name: Play 2 - TC-115439 - Ansible - Role - idrac_boot - Providing invalid https_port
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: TC-115439 - Ansible - Role - idrac_boot - Providing invalid https_port
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ https_port: 999999
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ ignore_unreachable: true
+
+ - name: Verify - TC-115439 - Ansible - Role - idrac_boot - Providing invalid https_port
+ ansible.builtin.assert:
+ that:
+ - "'Unable to communicate with iDRAC' in idrac_boot_out.msg"
+
+- name: Play 3 - TC-115440 - Ansible - Role - idrac_boot - Providing wrong resource_id
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: TC-115440 - Ansible - Role - idrac_boot - Providing wrong resource_id
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ resource_id: "Invalid.System.ID"
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ ignore_errors: true
+ register: idrac_boot_error_reg
+
+ - name: Verify - TC-115440 - Ansible - Role - idrac_boot - Providing wrong resource_id
+ ansible.builtin.assert:
+ that:
+ - "'HTTP Error 404' in idrac_boot_out.msg"
+
+- name: Play 4 - TC-115441 - Ansible - Role - idrac_boot - Providing wrong ca_path
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: TC-115441 - Ansible - Role - idrac_boot - Providing wrong ca_path
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ ca_path: "/root/filenotexists.pem"
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ ignore_errors: true
+ register: idrac_boot_error_reg
+
+ - name: Verify - TC-115441 - Ansible - Role - idrac_boot - Providing wrong ca_path
+ ansible.builtin.assert:
+ that:
+ - idrac_boot_out.failed # TBD
+
+- name: Play 5 - TC-115442 - Ansible - Role - idrac_boot - Providing invalid https_timeout
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: TC-115442 - Ansible - Role - idrac_boot - Providing invalid https_timeout
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ https_timeout: -1000
+ validate_certs: false
+ boot_source_override_mode: 'uefi'
+ ignore_errors: true
+ register: idrac_boot_error_reg
+
+ - name: Verify - TC-115442 - Ansible - Role - idrac_boot - Providing invalid https_timeout
+ ansible.builtin.assert:
+ that:
+ - "'Timeout value out of range' == idrac_boot_out.msg"
+
+- name: Play 6 - TC-115443 - Ansible - Role - idrac_boot - Providing invalid job_wait_timeout
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: TC-115443 - Ansible - Role - idrac_boot - Providing invalid job_wait_timeout
+ block:
+ - name: TC-115443 - Ansible - Role - idrac_boot - Providing invalid job_wait_timeout
+ ansible.builtin.import_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ job_wait_timeout: -1000
+ validate_certs: false
+ boot_source_override_mode: 'legacy'
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - TC-115443 - Ansible - Role - idrac_boot - Providing invalid job_wait_timeout
+ ansible.builtin.assert:
+ that:
+ - "'The parameter job_wait_timeout value cannot be negative or zero.' == ansible_failed_result.msg"
+
+- name: Play 7 - TC-115444 - Ansible - Role - idrac_boot - Validate all the mutually exclusive scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Check 1 - TC-115444 - Entering block
+ block:
+ - name: Check 1 - TC-115444 - mutual exclusivity between boot_option_reference and display_name
+ ansible.builtin.include_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_options:
+ - display_name: Hard drive C
+ boot_option_reference: NIC.PxeDevice.2-1
+ enabled: true
+ ignore_errors: true
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - Check 1 - TC-115444 - mutual exclusivity between boot_option_reference and display_name
+ ansible.builtin.assert:
+ that:
+ - "'parameters are mutually exclusive' in idrac_boot_out.msg"
+ - "'boot_option_reference' in idrac_boot_out.msg"
+ - "'display_name' in idrac_boot_out.msg"
+ - idrac_boot_out.failed
+
+ - name: Check 2 - TC-115444 - Entering block
+ block:
+ - name: Check 2 - TC-115444 - mutual exclusivity between boot_options and boot_order
+ ansible.builtin.include_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ boot_order:
+ - Boot0001
+ - Boot0002
+ ignore_errors: true
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - Check 2 - TC-115444 - mutual exclusivity between boot_options and boot_order
+ ansible.builtin.assert:
+ that: >
+ "'parameters are mutually exclusive: boot_options|boot_order found in
+ boot_options' == idrac_boot_out.msg"
+
+ - name: Check 3 - TC-115444 - Entering block
+ block:
+ - name: Check 3 - TC-115444 - mutual exclusivity between boot_options and boot_source_override_mode
+ ansible.builtin.include_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ boot_source_override_mode: legacy
+ ignore_errors: true
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - Check 3 - TC-115444 - mutual exclusivity between boot_options and boot_source_override_mode
+ ansible.builtin.assert:
+ that: >
+ "'parameters are mutually exclusive: boot_options|boot_source_override_mode found in
+ boot_options' == idrac_boot_out.msg"
+
+ - name: Check 4 - TC-115444 - Entering block
+ block:
+ - name: Check 4 - TC-115444 - mutual exclusivity between boot_options and boot_source_override_enabled
+ ansible.builtin.include_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ boot_source_override_enabled: once
+ ignore_errors: true
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - Check 4 - TC-115444 - mutual exclusivity between boot_options and boot_source_override_enabled
+ ansible.builtin.assert:
+ that: >
+ "'parameters are mutually exclusive: boot_options|boot_source_override_enabled found in
+ boot_options' == idrac_boot_out.msg"
+
+ - name: Check 5 - TC-115444 - Entering block
+ block:
+ - name: Check 5 - TC-115444 - mutual exclusivity between boot_options and boot_source_override_target
+ ansible.builtin.include_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ boot_source_override_target: cd
+ ignore_errors: true
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - Check 5 - TC-115444 - mutual exclusivity between boot_options and boot_source_override_target
+ ansible.builtin.assert:
+ that: >
+ "'parameters are mutually exclusive: boot_options|boot_source_override_target found in
+ boot_options' == idrac_boot_out.msg"
+
+ - name: Check 6 - TC-115444 - Entering block
+ block:
+ - name: Check 6 - TC-115444 - mutual exclusivity between boot_options and uefi_target_boot_source_override
+ ansible.builtin.include_role:
+ name: idrac_boot
+ vars:
+ hostname: "{{ lookup('ansible.builtin.env', 'IDRAC_IP') }}"
+ username: "{{ lookup('ansible.builtin.env', 'IDRAC_USER') }}"
+ password: "{{ lookup('ansible.builtin.env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ boot_options:
+ - display_name: Hard drive C
+ enabled: true
+ uefi_target_boot_source_override: "VenHw(3A191845-5F86-4E78-8FCE-C4CFF59F9DAA)"
+ ignore_errors: true
+ register: idrac_boot_error_reg
+ rescue:
+ - name: Verify - Check 6 - TC-115444 - mutual exclusivity between boot_options and uefi_target_boot_source_override
+ ansible.builtin.assert:
+ that: >
+ "'parameters are mutually exclusive: boot_options|uefi_target_boot_source_override found in
+ boot_options' == idrac_boot_out.msg"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/molecule.yml
new file mode 100644
index 000000000..fbbe91b9c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/molecule/default/molecule.yml
@@ -0,0 +1,9 @@
+---
+scenario:
+ test_sequence:
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/tasks/main.yml
new file mode 100644
index 000000000..78047aa51
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+- name: Fail when job_wait_timeout is negative.
+ ansible.builtin.fail:
+ msg: "{{ idrac_boot_job_wait_timeout_err }}"
+ when: job_wait_timeout < 1
+
+- name: Configure the system boot settings
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ https_timeout }}"
+ boot_options: "{{ boot_options | default(omit) }}"
+ boot_order: "{{ boot_order | default(omit) }}"
+ boot_source_override_mode: "{{ boot_source_override_mode | default(omit) }}"
+ boot_source_override_enabled: "{{ boot_source_override_enabled | default(omit) }}"
+ boot_source_override_target: "{{ boot_source_override_target | default(omit) }}"
+ uefi_target_boot_source_override: "{{ uefi_target_boot_source_override | default(omit) }}"
+ reset_type: "{{ reset_type }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ resource_id: "{{ resource_id | default(omit) }}"
+ register: idrac_boot_out
+ delegate_to: "{{ idrac_boot_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/test.yml
new file mode 100644
index 000000000..f043a741f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Testing for idrac boot
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_boot
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_boot/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_boot/vars/main.yml
new file mode 100644
index 000000000..55198828b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_boot/vars/main.yml
@@ -0,0 +1,3 @@
+---
+idrac_boot_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+idrac_boot_job_wait_timeout_err: "The parameter job_wait_timeout value cannot be negative or zero."
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/README.md
new file mode 100644
index 000000000..30f8f8008
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/README.md
@@ -0,0 +1,386 @@
+# idrac_certificate
+
+Role to manage the iDRAC certificates - Generate Certificate Signing Request, Import/Export certificates, and Reset configuration - for PowerEdge servers.
+
+## Requirements
+
+---
+
+Requirements to develop and contribute to the role.
+
+### Development
+
+```
+ansible
+docker
+molecule
+python
+```
+
+### Production
+
+Requirements to use the role.
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.openmanage
+```
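+
+If the collection is not already installed, one common approach is to declare it in a `requirements.yml` file and install it with `ansible-galaxy collection install -r requirements.yml`. A minimal sketch:
+
+```
+---
+collections:
+  - dellemc.openmanage
+```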
+
+## Role Variables
+
+---
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>command</td>
+ <td>false</td>
+ <td>generate_csr</td>
+ <td>'import', 'export', 'generate_csr', 'reset'</td>
+ <td>str</td>
+ <td>- C(generate_csr), generate CSR. This requires I(cert_params) and I(certificate_path).
+ <br>- C(import), import the certificate file. This requires I(certificate_path).
+ <br>- C(export), export the certificate. This requires I(certificate_path).
+ <br>- C(reset), reset the certificate to default settings. This is applicable only for C(HTTPS).
+ </td>
+ </tr>
+ <tr>
+ <td>certificate_type</td>
+ <td>false</td>
+ <td>HTTPS</td>
+ <td>'HTTPS', 'CA', 'CSC', 'CLIENT_TRUST_CERTIFICATE', 'CUSTOMCERTIFICATE'</td>
+ <td>str</td>
+ <td>- Type of the iDRAC certificate:
+ <br>- C(HTTPS) The Dell self-signed SSL certificate.
+ <br>- C(CA) Certificate Authority(CA) signed SSL certificate.
+ <br>- C(CSC) The custom signed SSL certificate.
+ <br>- C(CLIENT_TRUST_CERTIFICATE) Client trust certificate.
+ <br>- C(CUSTOMCERTIFICATE) The custom PKCS12 certificate and private key. Export of custom certificate is supported only on iDRAC firmware version 7.00.00.00 and above.</td>
+ </tr>
+ <tr>
+ <td>certificate_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- Absolute path of the certificate file if I(command) is C(import).
+ <br>- Directory path with write permissions if I(command) is C(generate_csr) or C(export).<br></td>
+ </tr>
+ <tr>
+ <td>passphrase</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The passphrase string if the certificate to be imported is passphrase protected.</td>
+ </tr>
+ <tr>
+ <td>ssl_key</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- Absolute path of the private or SSL key file.
+ <br>- This is applicable only when I(command) is C(import) and I(certificate_type) is C(HTTPS).
+ <br>- Uploading the SSL key on iDRAC is supported on version 6.00.02.00 and newer versions.<br></td>
+ </tr>
+ <tr>
+ <td>cert_params</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;common_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The common name of the certificate.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;organization_unit</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>str</td>
+ <td>- The name associated with an organizational unit. For example, department name.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;locality_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The city or other location where the entity applying for certification is located.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;state_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The state where the entity applying for certification is located.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;country_code</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td> - The country code of the country where the entity applying for certification is located.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;email_address</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The email associated with the CSR.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;organization_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The name associated with an organization.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;subject_alt_name</td>
+ <td>false</td>
+ <td>[]</td>
+ <td></td>
+ <td>list</td>
+ <td>- The alternative domain names associated with the request.</td>
+ </tr>
+ <tr>
+ <td>resource_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Redfish ID of the resource.</td>
+ </tr>
+ <tr>
+ <td>reset</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- To reset the iDRAC after the certificate operation.<br>- This is applicable when I(command) is C(import) or C(reset).<br></td>
+ </tr>
+ <tr>
+ <td>wait</td>
+ <td>false</td>
+ <td>300</td>
+ <td></td>
+ <td>int</td>
+ <td>- Maximum wait time for iDRAC to start after the reset, in seconds.<br>- This is applicable when I(command) is C(import) or C(reset) and I(reset) is C(True).<br></td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_certificate_out</td>
+ <td>{
+"certificate_path": "/root/Certs/192.168.0.1_202333_4130_HTTPS.pem",
+ "changed": false,
+ "msg": "Successfully performed the 'export' operation."
+}</td>
+ <td>Module output of the certificate export job.</td>
+ </tr>
+ </tbody>
+</table>
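+
+Because the role registers `idrac_certificate_out`, later tasks in the same play can read it, for example to pick up the path of an exported certificate. A minimal sketch (hostname, credentials, and paths below are placeholders):
+
+```
+- name: Export the HTTPS certificate
+  ansible.builtin.import_role:
+    name: idrac_certificate
+  vars:
+    hostname: "192.168.0.1"
+    username: "user_name"
+    password: "user_password"
+    ca_path: "/path/to/ca_cert.pem"
+    command: "export"
+    certificate_type: "HTTPS"
+    certificate_path: "/home/omam/mycert_dir"
+
+- name: Show where the certificate was exported
+  ansible.builtin.debug:
+    msg: "{{ idrac_certificate_out.certificate_path }}"
+```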
+
+## Examples
+
+---
+
+```
+- name: Generate HTTPS certificate signing request
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "generate_csr"
+ certificate_type: "HTTPS"
+ certificate_path: "/home/omam/mycerts"
+ cert_params:
+ common_name: "sample.domain.com"
+ organization_unit: "OrgUnit"
+ locality_name: "Bangalore"
+ state_name: "Karnataka"
+ country_code: "IN"
+ email_address: "admin@domain.com"
+ organization_name: "OrgName"
+ subject_alt_name:
+ - 192.198.2.1
+```
+
+```
+- name: Importing certificate.
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+```
+
+```
+- name: Exporting certificate.
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "export"
+ certificate_type: "HTTPS"
+ certificate_path: "/home/omam/mycert_dir"
+```
+
+```
+- name: Importing Custom Signing Certificate.
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "CSC"
+ certificate_path: "/path/to/cert.pem"
+```
+
+```
+- name: Import an HTTPS certificate with private key.
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "/path/to/cert.pem"
+ ssl_key: "/path/to/ssl_key"
+```
+
+```
+- name: Exporting certificate.
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "192.168.0.1"
+ username: "user_name"
+ password: "user_password"
+ ca_path: "/path/to/ca_cert.pem"
+ command: "export"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "/home/omam/mycert_dir"
+```
+
+## Author Information
+---
+Dell Technologies <br>
+Shivam Sharma (Shivam.Sharma3@Dell.com) 2023<br>
+Jagadeesh N V (Jagadeesh.N.V@Dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/defaults/main.yml
new file mode 100644
index 000000000..5c3acbfe1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for idrac_certificate
+
+https_port: 443
+validate_certs: true
+https_timeout: 30
+certificate_type: "HTTPS"
+command: generate_csr
+reset: true
+wait: 300
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/handlers/main.yml
new file mode 100644
index 000000000..edfc1a30b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_certificate
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/argument_specs.yml
new file mode 100644
index 000000000..9b1220ae5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/argument_specs.yml
@@ -0,0 +1,132 @@
+---
+argument_specs:
+ main:
+ version_added: "7.4.0"
+ short_description: This role allows generating a certificate signing
+ request, and importing and exporting certificates on iDRAC
+ description:
+ - Role to manage the iDRAC certificates - Generate CSR,
+ Import/Export certificates, and Reset configuration - for
+ PowerEdge servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where
+ self-signed certificates are used.
+ - Prior to collection version C(5.0.0), the I(validate_certs) is
+ C(false) by default.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a
+ CA certificate to be used for the validation.
+ type: str
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ command:
+ description: C(generate_csr), generate CSR. This requires
+ I(cert_params) and I(certificate_path).
+ choices: ["import", "export", "generate_csr", "reset"]
+ default: "generate_csr"
+ type: str
+ certificate_type:
+ description: Type of the iDRAC certificate
+ - C(HTTPS) The Dell self-signed SSL certificate.
+ - C(CA) Certificate Authority(CA) signed SSL certificate.
+ - C(CSC) The custom signed SSL certificate.
+ - C(CLIENT_TRUST_CERTIFICATE) Client trust certificate.
+ - C(CUSTOMCERTIFICATE) The custom PKCS12 certificate and private key.
+ Export of custom certificate is supported only on
+ iDRAC firmware version 7.00.00.00 and above.
+ type: str
+ choices: ["HTTPS", "CA", "CSC", "CLIENT_TRUST_CERTIFICATE",
+ "CUSTOMCERTIFICATE"]
+ default: "HTTPS"
+ certificate_path:
+ description:
+ - Absolute path of the certificate file if I(command) is C(import).
+ - Directory path with write permissions if I(command)
+ is C(generate_csr) or C(export).
+ type: path
+ passphrase:
+ description: The passphrase string if the certificate to be
+ imported is passphrase protected.
+ type: str
+ ssl_key:
+ description:
+ - Absolute path of the private or SSL key file.
+ - This is applicable only when I(command) is C(import)
+ and I(certificate_type) is C(HTTPS).
+ - Uploading the SSL key on iDRAC is supported on version
+ 6.00.02.00 and newer versions.
+ type: path
+ version_added: 8.6.0
+ cert_params:
+ description: Certificate parameters to generate signing request.
+ type: dict
+ options:
+ common_name:
+ description: The common name of the certificate.
+ type: str
+ organization_unit:
+ description: The name associated with an organizational unit.
+ For example, department name.
+ type: str
+ default: true
+ locality_name:
+ description: The city or other location where the entity
+ applying for certification is located.
+ type: str
+ state_name:
+ description: The state where the entity applying for
+ certification is located.
+ type: str
+ country_code:
+ description: The country code of the country where the entity
+ applying for certification is located.
+ type: str
+ email_address:
+ description: The email associated with the CSR.
+ type: str
+ organization_name:
+ description: The name associated with an organization.
+ type: str
+ subject_alt_name:
+ description: The alternative domain names associated with the request.
+ type: list
+ elements: str
+ default: []
+ resource_id:
+ description: Redfish ID of the resource.
+ type: str
+ reset:
+ description:
+ - To reset the iDRAC after the certificate operation.
+ - This is applicable when I(command) is C(import) or C(reset).
+ type: bool
+ default: true
+ wait:
+ description:
+ - Maximum wait time for iDRAC to start after the reset, in seconds.
+ - This is applicable when I(command) is C(import) or C(reset)
+ and I(reset) is C(True).
+ type: int
+ default: 300
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/main.yml
new file mode 100644
index 000000000..d6a65cf69
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/meta/main.yml
@@ -0,0 +1,22 @@
+galaxy_info:
+ author: |
+ "Shivam Sharma
+ Jagadeesh N V"
+ description: Role to manage the iDRAC certificates - Generate CSR, Import/Export certificates, and Reset configuration - for PowerEdge servers.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.13"
+ platforms:
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/converge.yml
new file mode 100644
index 000000000..64e2a242f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/converge.yml
@@ -0,0 +1,90 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ ca_cert_name: "{{ lookup('env', 'ca_cert_name') }}"
+ import_cert_path: "{{ lookup('env', 'path_for_import_cert') }}"
+ export_cert_path: "{{ lookup('env', 'path_for_export_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Fetching CA certificate from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ ca_cert_name }}"
+
+ - name: Import CA certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CA"
+ certificate_path: "{{ import_cert_path }}{{ ca_cert_name }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 30
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ - name: Export CA certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "CA"
+ certificate_path: "{{ export_cert_path }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ when: not ansible_check_mode
+
+ - name: Setting up CA certificate path for exported file
+ when: idrac_certificate_out is defined
+ and idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: ca_cert_file
+ delegate_to: "{{ idrac_delegate_to }}"
+ no_log: true
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that:
+ - ca_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the
+ 'export' certificate operation."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CA/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/converge.yml
new file mode 100644
index 000000000..2a8708f27
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/converge.yml
@@ -0,0 +1,168 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ csc_cert_name: "{{ lookup('env', 'csc_certificate') }}"
+ csc_cert_pass_name: "{{ lookup('env', 'csc_passphrase_certificate') }}"
+ import_cert_path: "{{ lookup('env', 'path_for_import_cert') }}"
+ export_cert_path: "{{ lookup('env', 'path_for_export_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Fetching CSC certificates from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ csc_cert_name }}"
+
+ - name: Import CSC certificate without passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CSC"
+ passphrase: ""
+ certificate_path: "{{ import_cert_path }}{{ csc_cert_name }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 30
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ - name: Export CSC certificate without passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "CSC"
+ certificate_path: "{{ export_cert_path }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ when: not ansible_check_mode
+
+ - name: Setting up CSC certificate path for exported file
+ when: idrac_certificate_out is defined
+ and idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: csc_cert_file
+ delegate_to: "{{ idrac_delegate_to }}"
+ no_log: true
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that:
+ - csc_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the
+ 'export' certificate operation."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ - name: Fetching CSC certificates from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ csc_cert_pass_name }}"
+
+ - name: Import CSC certificate with passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CSC"
+ passphrase: "{{ lookup('env', 'passphrase') }}"
+ certificate_path: "{{ import_cert_path }}{{ csc_cert_pass_name }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 30
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ - name: Export CSC certificate with passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "CSC"
+ certificate_path: "{{ export_cert_path }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ when: not ansible_check_mode
+
+ - name: Setting up CSC certificate path for exported file
+ when: idrac_certificate_out is defined
+ and idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: csc_cert_file
+ delegate_to: "{{ idrac_delegate_to }}"
+ no_log: true
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that:
+ - csc_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the
+ 'export' certificate operation."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CSC/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/converge.yml
new file mode 100644
index 000000000..cdf53ff08
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/converge.yml
@@ -0,0 +1,90 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ ctc_cert_name: "{{ lookup('env', 'ctc_cert_name') }}"
+ import_cert_path: "{{ lookup('env', 'path_for_import_cert') }}"
+ export_cert_path: "{{ lookup('env', 'path_for_export_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Fetching CTC certificate from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ ctc_cert_name }}"
+
+ - name: Import CTC certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "{{ import_cert_path }}{{ ctc_cert_name }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 30
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ - name: Export CTC certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "{{ export_cert_path }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ when: not ansible_check_mode
+
+ - name: Setting up CTC certificate path for exported file
+ when: idrac_certificate_out is defined
+ and idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: ctc_cert_file
+ delegate_to: "{{ idrac_delegate_to }}"
+ no_log: true
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that:
+ - ctc_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the
+ 'export' certificate operation."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CTC/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/converge.yml
new file mode 100644
index 000000000..0f07f68ca
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/converge.yml
@@ -0,0 +1,207 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ custom_certificate_failure: {}
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ custom_cert_name: "{{ lookup('env', 'custom_cert_name') }}"
+ cust_crt_name_pass: "{{ lookup('env', 'custom_cert_name_pass') }}"
+ import_cert_path: "{{ lookup('env', 'path_for_import_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Fetching firmware version for IDRAC
+ ansible.builtin.include_tasks:
+ file: ../__extract_firmware_version.yml
+ vars:
+ idrac_ip: "{{ lookup('env', 'hostname') }}"
+ idrac_user: "{{ lookup('env', 'username') }}"
+ idrac_password: "{{ lookup('env', 'password') }}"
+
+ - name: Set expected firmware version
+ ansible.builtin.set_fact:
+ firmware_version_expected: "6.10.80.00"
+ firmware_version_expected_export: "7.00.00.00"
+
+ - name: Import CUSTOMCERTIFICATE without passphrase
+ when: idrac_certificate_firmware_version is defined and
+ "idrac_certificate_firmware_version >= firmware_version_expected"
+ and custom_cert_name
+ block:
+ - name: Fetching Custom certificate from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ custom_cert_name }}"
+
+ - name: Import a custom certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "{{ import_cert_path }}{{ custom_cert_name }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ passphrase: ""
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 60
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ rescue:
+ - name: Set the failure messages for CUSTOMCERTIFICATE
+ ansible.builtin.set_fact:
+ custom_certificate_failure: "{{ custom_certificate_failure |
+ combine({'CUSTOMCERTIFICATE_WITHOUT_PASS_IMPORT':
+ {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+ always:
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
+
+ - name: Export CUSTOMCERTIFICATE
+ when:
+ - idrac_certificate_firmware_version is defined
+ - "idrac_certificate_firmware_version >=
+ firmware_version_expected_export"
+ block:
+ - name: Fetching Custom certificate from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+
+ - name: Export a custom certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "{{ import_cert_path }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ when: not ansible_check_mode
+
+ - name: Setting up CustomCertificate certificate path for exported file
+ when: idrac_certificate_out is defined
+ and idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: csc_cert_file
+ delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+ no_log: true
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that:
+ - csc_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the
+ 'export' certificate operation."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ rescue:
+ - name: Set the failure messages for CUSTOMCERTIFICATE
+ ansible.builtin.set_fact:
+ custom_certificate_failure: "{{ custom_certificate_failure |
+ combine({'CUSTOMCERTIFICATE_EXPORT':
+ {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
+
+ - name: Import CUSTOMCERTIFICATE with passphrase
+ when: idrac_certificate_firmware_version is defined and
+ "idrac_certificate_firmware_version >= firmware_version_expected"
+ and cust_crt_name_pass
+ block:
+ - name: Fetching Custom certificate from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ cust_crt_name_pass }}"
+
+ - name: Import a custom certificate with passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "{{ import_cert_path }}{{ cust_crt_name_pass }}"
+ passphrase: "{{ lookup('env', 'passphrase') }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 60
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ rescue:
+ - name: Set the failure messages for CUSTOMCERTIFICATE
+ ansible.builtin.set_fact:
+ custom_certificate_failure: "{{ custom_certificate_failure |
+ combine({'CUSTOMCERTIFICATE_WITH_PASS_IMPORT':
+ {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: custom_certificate_failure
+ when: custom_certificate_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/CustomCertificate/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/converge.yml
new file mode 100644
index 000000000..28cdf16b8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/converge.yml
@@ -0,0 +1,90 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ https_cert_name: "{{ lookup('env', 'https_cert_name') }}"
+ import_cert_path: "{{ lookup('env', 'path_for_import_cert') }}"
+ export_cert_path: "{{ lookup('env', 'path_for_export_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Fetching HTTPS certificate from share
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ https_cert_name }}"
+
+ - name: Import HTTPS certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "{{ import_cert_path }}{{ https_cert_name }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 30
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'import' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ - name: Export a custom certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "HTTPS"
+ certificate_path: "{{ export_cert_path }}"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+ when: not ansible_check_mode
+
+ - name: Setting up HTTPS certificate path for exported file
+ when: idrac_certificate_out is defined
+ and idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: https_cert_file
+ delegate_to: "{{ idrac_delegate_to }}"
+ no_log: true
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that:
+ - https_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the
+ 'export' certificate operation."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/HTTPS/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/converge.yml
new file mode 100644
index 000000000..c90e4e53e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/converge.yml
@@ -0,0 +1,94 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ https_cert_ssl_failure: {}
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ path_for_import_cert: "{{ lookup('env', 'path_for_import_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Fetching firmware version for IDRAC
+ ansible.builtin.include_tasks:
+ file: ../__extract_firmware_version.yml
+ vars:
+ idrac_ip: "{{ lookup('env', 'hostname') }}"
+ idrac_user: "{{ lookup('env', 'username') }}"
+ idrac_password: "{{ lookup('env', 'password') }}"
+
+ - name: Set expected firmware version
+ ansible.builtin.set_fact:
+ firmware_version_expected: "6.00.02.00"
+
+ - name: Import Https certificate using ssl_key
+ when: idrac_certificate_firmware_version is defined and
+ "idrac_certificate_firmware_version >= firmware_version_expected"
+ block:
+ - name: Create directory
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+
+ - name: Create SSL key and self-signed certificate
+ when: idrac_certificate_check_file_created.stat.exists
+ ansible.builtin.include_tasks:
+ file: ../__get_ssl_key.yml
+
+ - name: Importing HTTPS certificate using ssl_key
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "{{ path_for_import_cert }}cert.pem"
+ ssl_key: "{{ path_for_import_cert }}cert.key"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Waiting for idrac readiness
+ ansible.builtin.wait_for:
+ timeout: 60
+ when:
+ - not ansible_check_mode
+ - idrac_certificate_out is defined
+ - idrac_certificate_out.changed
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the SSL
+ key upload and 'import' certificate operation.
+ iDRAC has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not idrac_certificate_out.changed
+
+ rescue:
+ - name: Set the failure messages for SSLKEY
+ ansible.builtin.set_fact:
+ https_cert_ssl_failure: "{{ https_cert_ssl_failure |
+ combine({'HTTPS_SSL_KEY_CERT_IMPORT':
+ {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: https_cert_ssl_failure
+ when: https_cert_ssl_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/SSLKEY/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__delete_directory.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__delete_directory.yml
new file mode 100644
index 000000000..d301ea290
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__delete_directory.yml
@@ -0,0 +1,6 @@
+---
+- name: Delete the directory
+ ansible.builtin.file:
+ path: "{{ lookup('env', 'path_for_import_cert') }}"
+ state: absent
+ delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__extract_firmware_version.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__extract_firmware_version.yml
new file mode 100644
index 000000000..9ffc8b8df
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__extract_firmware_version.yml
@@ -0,0 +1,21 @@
+---
+- name: Fetch firmware version
+ ansible.builtin.uri:
+ url: "https://{{ idrac_ip }}/redfish/v1/Managers/iDRAC.Embedded.1"
+ user: "{{ idrac_user }}"
+ password: "{{ idrac_password }}"
+ method: GET
+ force_basic_auth: true
+ validate_certs: false
+ body_format: json
+ return_content: true
+ status_code: 200
+ register: idrac_certificate_uri_data
+ when: idrac_ip is defined and idrac_password is defined
+ and idrac_user is defined
+ check_mode: false
+
+- name: Set firmware version
+ ansible.builtin.set_fact:
+ idrac_certificate_firmware_version: "{{ idrac_certificate_uri_data.json.FirmwareVersion }}"
+ when: idrac_certificate_uri_data.json is defined
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_helper.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_helper.yml
new file mode 100644
index 000000000..3994eed1e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_helper.yml
@@ -0,0 +1,40 @@
+---
+- name: Set the share vars
+ ansible.builtin.set_fact:
+ https_share_ip: "{{ lookup('env', 'https_share_ip') }}"
+ https_certificate_path: "{{ lookup('env', 'https_certificate_path') }}"
+ https_share_username: "{{ lookup('env', 'https_share_username') }}"
+ https_share_password: "{{ lookup('env', 'https_share_password') }}"
+ path_for_import_cert: "{{ lookup('env', 'path_for_import_cert') }}"
+ idrac_cert_dlg_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+ no_log: true
+
+- name: Create Directory
+ ansible.builtin.file:
+ path: "{{ path_for_import_cert }}"
+ state: directory
+ mode: "0755"
+ register: idrac_certificate_created_directory
+ check_mode: false
+ delegate_to: "{{ idrac_cert_dlg_to }}"
+
+- name: Setting up certificate path
+ ansible.builtin.stat:
+ path: "{{ path_for_import_cert }}"
+ register: idrac_certificate_check_file_created
+ check_mode: false
+ delegate_to: "{{ idrac_cert_dlg_to }}"
+
+- name: Copy file from HTTPS share to local machine
+ when: idrac_cert_name is defined and (idrac_cert_name | length > 0)
+ and idrac_certificate_check_file_created.stat.exists
+ ansible.builtin.uri:
+ url: "https://{{ https_share_ip }}{{ https_certificate_path }}{{ item }}"
+ dest: "{{ path_for_import_cert }}"
+ force_basic_auth: true
+ validate_certs: false
+ url_username: "{{ https_share_username }}"
+ url_password: "{{ https_share_password }}"
+ check_mode: false
+ loop: "{{ idrac_cert_name }}"
+ delegate_to: "{{ idrac_cert_dlg_to }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_ssl_key.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_ssl_key.yml
new file mode 100644
index 000000000..ed3c34000
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/__get_ssl_key.yml
@@ -0,0 +1,18 @@
+---
+ - name: Ensure private key is present
+ community.crypto.openssl_privatekey:
+ path: "{{ lookup('env', 'path_for_import_cert') }}cert.key"
+ size: 2048
+ type: RSA
+ check_mode: false
+ no_log: true
+ delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+- name: Ensure self-signed cert is present
+ community.crypto.x509_certificate:
+ path: "{{ lookup('env', 'path_for_import_cert') }}cert.pem"
+ privatekey_path: "{{ lookup('env', 'path_for_import_cert') }}cert.key"
+ provider: selfsigned
+ check_mode: false
+ no_log: true
+ delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/converge.yml
new file mode 100644
index 000000000..56c26b4a6
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/converge.yml
@@ -0,0 +1,381 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ cert_export_path: "{{ lookup('env', 'certificate_path') }}"
+ import_cert_path: "{{ lookup('env', 'path_for_import_cert') }}"
+ custom_cert_name: "{{ lookup('env', 'custom_cert_name') }}"
+ cust_crt_name_pass: "{{ lookup('env', 'custom_cert_name_pass') }}"
+ csc_pass_cert: "{{ lookup('env', 'csc_passphrase_certificate') }}"
+ cert_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Invalid Scenarios
+ when: not ansible_check_mode
+ block:
+ - name: Set the failure messages
+ ansible.builtin.set_fact:
+ ssl_key_fail_msg: "Unable to locate the SSL key file"
+ ctc_invalid_path: "[Errno 2] No such file or directory"
+
+ - name: Create directory and fetch certificates
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ idrac_cert_name:
+ - "{{ custom_cert_name }}"
+ - "{{ cust_crt_name_pass }}"
+ - "{{ csc_pass_cert }}"
+
+ - name: Create SSL key and self-signed certificate
+ when: idrac_certificate_check_file_created is defined and
+ idrac_certificate_check_file_created.stat.exists
+ ansible.builtin.include_tasks:
+ file: ../__get_ssl_key.yml
+
+ - name: Export a Client Trust Certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "export"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "{{ cert_export_path }}"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ ignore_errors: true
+ register: idrac_certificate_res
+
+ - name: Setting up Client Trust certificate path for exported file
+ when: idrac_certificate_out.certificate_path is defined
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: ctc_cert_file
+ no_log: true
+
+ - name: Import a Client Trust Certificate invalid path
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "/path/invalid-path/to/certificate.pem"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ ignore_errors: true
+ register: idrac_certificate_res_err
+
+ - name: Verifying Import a Client Trust Certificate invalid path
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - "ctc_invalid_path in idrac_certificate_out.msg"
+
+ - name: Import a Client Trust Certificate invalid certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "{{ lookup('env', 'invalid_certificate') }}"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ ignore_errors: true
+ register: idrac_certificate_res_err1
+
+ - name: Verifying Import a Client Trust Certificate invalid certificate
+ ansible.builtin.assert:
+ that:
+ - ('"HTTP Error 400" in idrac_certificate_out.msg')
+ - idrac_certificate_out.failed
+
+ - name: Import a Client Trust Certificate invalid certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "{{ lookup('env', 'invalid_certificate') }}"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ ignore_errors: true
+ register: idrac_certificate_res_err2
+
+ - name: Verifying Import a Client Trust Certificate invalid certificate
+ ansible.builtin.assert:
+ that:
+ - ('"HTTP Error 400" in idrac_certificate_out.msg')
+ - idrac_certificate_out.failed
+
+ - name: Import a Client Trust Certificate with invalid credentials
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'invalid_password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "import"
+ certificate_type: "CLIENT_TRUST_CERTIFICATE"
+ certificate_path: "{{ ctc_cert_file.stat.path }}"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ ignore_errors: true
+ register: res_err3
+
+ - name: Verifying Import a Client Trust Certificate
+ with invalid credentials
+ ansible.builtin.assert:
+ that:
+ - ('"HTTP Error 401" in idrac_certificate_out.msg')
+ - idrac_certificate_out.failed
+ when: idrac_certificate_out is defined
+
+ - name: Negative - unreachable host
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "999.999.999.999"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "reset"
+ certificate_type: "HTTPS"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_unreachable
+ ignore_errors: true
+ ignore_unreachable: true
+
+ - name: Verify task status - Negative - invalid unreachable host
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.unreachable
+ - '"Unable to communicate with" in idrac_certificate_out.msg'
+
+ - name: Negative - invalid idrac user
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: invalid
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "reset"
+ certificate_type: "HTTPS"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_idrac_user
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid idrac user
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - '"HTTP Error 401: Unauthorized" in idrac_certificate_out.msg'
+
+ - name: Negative - invalid idrac password
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: invalid
+ validate_certs: false
+ command: "reset"
+ certificate_type: "HTTPS"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_idrac_pass
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid idrac password
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - '"HTTP Error 401: Unauthorized" in idrac_certificate_out.msg'
+
+ - name: Invalid command
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "invalid"
+ certificate_type: "HTTPS"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_command
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid command
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+
+ - name: Invalid certificate path
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "export"
+ certificate_type: "HTTPS"
+ certificate_path: "invalid_dir"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_cert_path
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid certificate path
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+ - "'Provided directory path \\'invalid_dir\\' is
+ not valid.' == idrac_certificate_out.msg"
+
+ - name: Invalid passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "import"
+ certificate_type: "CSC"
+ certificate_path: "{{ import_cert_path }}{{ csc_pass_cert }}"
+ passphrase: "invalid"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_passphrase
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid passphrase
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+ - "'HTTP Error 400: Bad Request' == idrac_certificate_out.msg"
+
+ - name: Invalid certificate parameters to generate signing request
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "generate_csr"
+ certificate_type: "HTTPS"
+ certificate_path: "/root/"
+ cert_params:
+ invalid_args: "invalid"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_cert_params
+ ignore_errors: true
+
+ - name: Verify task status - Negative -
+ Invalid certificate parameters to generate signing request
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+ - "'missing required arguments: common_name, country_code,
+ locality_name, organization_name, organization_unit,
+ state_name found in cert_params' == idrac_certificate_out.msg"
+
+ - name: Invalid passphrase for a valid custom
+ certificate without passphrase
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "{{ import_cert_path }}{{ custom_cert_name }}"
+ passphrase: "invalid"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_custom_cert_pass
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid custom certificate passphrase
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+ - "'HTTP Error 400: Bad Request' == idrac_certificate_out.msg"
+
+ - name: Invalid custom certificate
+ passphrase for a valid custom certificate
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "import"
+ certificate_type: "CUSTOMCERTIFICATE"
+ certificate_path: "{{ import_cert_path }}/{{ cust_crt_name_pass }}"
+ passphrase: "invalid"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_custom_cert_without_pass
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid custom certificate
+ passphrase for a valid custom certificate
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+ - "'HTTP Error 400: Bad Request' == idrac_certificate_out.msg"
+
+ - name: Invalid ssl key
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ command: "import"
+ certificate_type: "HTTPS"
+ certificate_path: "{{ import_cert_path }}cert.pem"
+ ssl_key: "invalid"
+ idrac_certificate_delegate: "{{ cert_delegate_to }}"
+ register: invalid_ssl_key
+ ignore_errors: true
+
+ - name: Verify task status - Negative - invalid ssl key
+ ansible.builtin.assert:
+ that:
+ - idrac_certificate_out.failed
+ - not idrac_certificate_out.changed
+ - "ssl_key_fail_msg in idrac_certificate_out.msg"
+
+ always:
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/default/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/converge.yml
new file mode 100644
index 000000000..9f57c7e84
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/converge.yml
@@ -0,0 +1,56 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+ cert_export_path: "{{ lookup('env', 'path_for_export_cert') }}"
+ idrac_delegate_to: "{{ lookup('env', 'idrac_certificate_delegate_to') }}"
+
+ tasks:
+ - name: Setting up directory
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+
+ - name: Generate HTTPS CSR signing request
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "generate_csr"
+ certificate_type: "HTTPS"
+ certificate_path: "{{ cert_export_path }}"
+ cert_params:
+ common_name: "sample.domain.com"
+ organization_unit: "OrgUnit"
+ locality_name: "Bangalore"
+ state_name: "Karnataka"
+ country_code: "IN"
+ email_address: "admin@domain.com"
+ organization_name: "OrgName"
+ subject_alt_name:
+ - "hostname1.chassis.com"
+ idrac_certificate_delegate: "{{ idrac_delegate_to }}"
+
+ - name: Setting up HTTPS CSR certificate path for exported file
+ ansible.builtin.stat:
+ path: "{{ idrac_certificate_out.certificate_path }}"
+ register: csr_cert_file
+ delegate_to: "{{ idrac_delegate_to }}"
+ no_log: true
+
+ - name: Verifying HTTPS generate CSR certificate
+ ansible.builtin.assert:
+ that:
+ - csr_cert_file.stat.exists
+ - not idrac_certificate_out.changed
+ - not idrac_certificate_out.failed
+ - idrac_certificate_out.msg == "Successfully performed the 'generate_csr' certificate operation."
+
+ - name: Deleting the directory
+ ansible.builtin.include_tasks:
+ file: ../__delete_directory.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/generateCSR/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/converge.yml
new file mode 100644
index 000000000..8a3e23ab5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/converge.yml
@@ -0,0 +1,31 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ ca_cert_path: "{{ lookup('env', 'ca_cert_path') }}"
+
+ tasks:
+ - name: Reset HTTPS certificate
+ ansible.builtin.import_role:
+ name: idrac_certificate
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ca_path: "{{ ca_cert_path }}"
+ command: "reset"
+ certificate_type: "HTTPS"
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with Normal/Idempotence mode.
+ ansible.builtin.assert:
+ that: idrac_certificate_out.msg == "Successfully performed the
+ 'reset' certificate operation.iDRAC
+ has been reset successfully."
+ when: not ansible_check_mode and idrac_certificate_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/molecule/reset/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/export.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/export.yml
new file mode 100644
index 000000000..3f4044e31
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/export.yml
@@ -0,0 +1,14 @@
+- name: Exporting certificate.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ timeout: "{{ https_timeout }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ command: "export"
+ certificate_type: "{{ certificate_type }}"
+ certificate_path: "{{ certificate_path }}"
+ register: idrac_certificate_out
+ delegate_to: "{{ idrac_certificate_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/generate_csr.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/generate_csr.yml
new file mode 100644
index 000000000..25a958fa2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/generate_csr.yml
@@ -0,0 +1,24 @@
+---
+- name: Generate HTTPS certificate signing request
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ timeout: "{{ https_timeout }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ command: "generate_csr"
+ certificate_type: "{{ certificate_type }}"
+ certificate_path: "{{ certificate_path }}"
+ cert_params:
+ common_name: "{{ cert_params.common_name | default(omit) }}"
+ organization_unit: "{{ cert_params.organization_unit | default(omit) }}"
+ locality_name: "{{ cert_params.locality_name | default(omit) }}"
+ state_name: "{{ cert_params.state_name | default(omit) }}"
+ country_code: "{{ cert_params.country_code | default(omit) }}"
+ email_address: "{{ cert_params.email_address | default(omit) }}"
+ organization_name: "{{ cert_params.organization_name | default(omit) }}"
+ subject_alt_name: "{{ cert_params.subject_alt_name | default(omit) }}"
+ register: idrac_certificate_out
+ delegate_to: "{{ idrac_certificate_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/import.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/import.yml
new file mode 100644
index 000000000..eab08d4b7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/import.yml
@@ -0,0 +1,18 @@
+- name: Importing certificate.
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ timeout: "{{ https_timeout }}"
+ validate_certs: "{{ validate_certs }}"
+ passphrase: "{{ passphrase | default(omit) }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ command: "import"
+ certificate_type: "{{ certificate_type }}"
+ certificate_path: "{{ certificate_path }}"
+ ssl_key: "{{ ssl_key | default(omit) }}"
+ reset: "{{ reset }}"
+ wait: "{{ wait }}"
+ register: idrac_certificate_out
+ delegate_to: "{{ idrac_certificate_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/main.yml
new file mode 100644
index 000000000..1c586570b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# tasks file for idrac_certificate
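+# The role entry point includes exactly one task file based on the value of
+# 'command': generate_csr, import, export, or reset.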
+
+- name: Generate CSR
+ ansible.builtin.include_tasks: generate_csr.yml
+ when: command == "generate_csr"
+
+- name: Import certificate
+ ansible.builtin.include_tasks: import.yml
+ when: command == "import"
+
+- name: Export certificate
+ ansible.builtin.include_tasks: export.yml
+ when: command == "export"
+
+- name: Reset certificate
+ ansible.builtin.include_tasks: reset.yml
+ when: command == "reset"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/reset.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/reset.yml
new file mode 100644
index 000000000..dd8f01d1c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tasks/reset.yml
@@ -0,0 +1,16 @@
+---
+- name: Reset Certificate
+ dellemc.openmanage.idrac_certificates:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ timeout: "{{ https_timeout }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ command: "reset"
+ reset: "{{ reset }}"
+ wait: "{{ wait }}"
+ certificate_type: "HTTPS"
+ register: idrac_certificate_out
+ delegate_to: "{{ idrac_certificate_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/inventory
new file mode 100644
index 000000000..2fbb50c4a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/inventory
@@ -0,0 +1 @@
+localhost
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/test.yml
new file mode 100644
index 000000000..bdd3628e3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Generate certificate signing request, import, and export certificates on iDRAC
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_certificate
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_certificate/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/vars/main.yml
new file mode 100644
index 000000000..ea6e3efad
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_certificate/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# vars file for idrac_certificate
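+# The host the dellemc.openmanage.idrac_certificates tasks are delegated to is
+# read from the RUNON environment variable and defaults to localhost.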
+idrac_certificate_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/README.md
new file mode 100644
index 000000000..69b76936b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/README.md
@@ -0,0 +1,361 @@
+# idrac_export_server_config_profile
+
+Role to export the Server Configuration Profile (SCP) from the iDRAC to a network share (CIFS, NFS, HTTP, HTTPS) or a local path.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>idrac_ip</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>idrac_user</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>idrac_password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>idrac_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>idrac_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>share_parameters</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>Network share parameters.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;share_name</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share or local path.<br>- CIFS, NFS, HTTP, and HTTPS network share types are supported.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;scp_file</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Name of the server configuration profile (SCP) file.</br>- The default format `idrac_ip_YYMMDD_HHMMSS_scp` is used if this option is not specified.</br>- I(export_format) is used if the valid extension file is not provided.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;share_user</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share user in the format 'user@domain' or 'domain\\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;share_password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share user password. This option is mandatory for CIFS Network Share.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_support</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Proxy to be enabled or disabled.</br>- I(proxy_support) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_type</td>
+ <td>false</td>
+ <td>http</td>
+ <td>http, socks4</td>
+ <td>str</td>
+ <td>- C(http) to select HTTP type proxy.</br>- C(socks4) to select SOCKS4 type proxy.</br>- I(proxy_type) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_server</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td> - I(proxy_server) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).</br>- I(proxy_server) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_port</td>
+ <td>false</td>
+ <td>80</td>
+ <td></td>
+ <td>str</td>
+ <td>- Proxy port to authenticate.</br> - I(proxy_port) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).</br>- I(proxy_port) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_username</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Proxy username to authenticate.</br>- I(proxy_username) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Proxy password to authenticate.</br>- I(proxy_password) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ignore_certificate_warning</td>
+ <td>false</td>
+ <td>ignore</td>
+ <td>ignore, showerror</td>
+ <td>str</td>
+ <td>- If C(ignore), it ignores the certificate warnings.</br>- If C(showerror), it shows the certificate warnings.</br>- I(ignore_certificate_warning) is considered only when I(share_name) is of type HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>include_in_export</td>
+ <td>false</td>
+ <td>default</td>
+ <td>default, readonly, passwordhashvalues, customtelemetry</td>
+ <td>str</td>
+ <td>- This option is applicable when I(command) is C(export).<br>- If C(default), it exports the default Server Configuration Profile.<br>- If C(readonly), it exports the SCP with readonly attributes.<br>- If C(passwordhashvalues), it exports the SCP with password hash values.<br>- If C(customtelemetry), it exports the SCP with custom telemetry attributes, which are supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>target</td>
+ <td>false</td>
+ <td>['ALL']</td>
+ <td>'ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'</td>
+ <td>str</td>
+ <td>- If C(ALL), this module exports or imports all components configurations from SCP file.<br>- If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.<br>- If C(BIOS), this module exports or imports BIOS configuration from SCP file.<br>- If C(NIC), this module exports or imports NIC configuration from SCP file.<br>- If C(RAID), this module exports or imports RAID configuration from SCP file.</br>- When I(command) is C(export) or C(import) I(target) with multiple components is supported only on iDRAC9 with firmware 6.10.00.00 and above.</td>
+ </tr>
+ <tr>
+ <td>export_format</td>
+ <td>false</td>
+ <td>'XML'</td>
+ <td>'JSON', 'XML'</td>
+ <td>str</td>
+ <td>- Specify the output file format. This option is applicable for C(export) command.</td>
+ </tr>
+ <tr>
+ <td>export_use</td>
+ <td>false</td>
+ <td>'Default'</td>
+ <td>'Default', 'Clone', 'Replace'</td>
+ <td>str</td>
+ <td>- Specify the type of Server Configuration Profile (SCP) to be exported.<br>- This option is applicable when I(command) is C(export).<br>- C(Default) Creates a non-destructive snapshot of the configuration.<br>- C(Replace) Replaces a server with another or restores the servers settings to a known baseline.<br>- C(Clone) Clones settings from one server to another server with the identical hardware setup.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>out_scp</td>
+ <td>{
+ "changed": false,
+ "failed": false,
+ "msg": "Successfully exported the Server Configuration Profile.",
+ "scp_status": {
+ "ActualRunningStartTime": "2023-02-21T10:59:50",
+ "ActualRunningStopTime": "2023-02-21T11:00:08",
+ "CompletionTime": "2023-02-21T11:00:08",
+ "Id": "JID_769771903262",
+ "JobState": "Completed",
+ "JobType": "ExportConfiguration",
+ "Message": "Successfully exported Server Configuration Profile",
+ "MessageArgs": [],
+ "MessageId": "SYS043",
+ "PercentComplete": 100,
+ "TargetSettingsURI": null,
+ "file": ".\\192.1.2.1_2023221_16301_scp.xml",
+ "retval": true
+ }
+ }</td>
+ <td>Module output of the Server Configuration Job</td>
+ </tr>
+ <tr>
+ <td>share_type</td>
+ <td>NFS</td>
+ <td>Stores the share type sent as part of the role variables</td>
+ </tr>
+ </tbody>
+</table>
+
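+A minimal sketch of consuming these facts in a follow-up task, assuming the role has already run and produced output similar to the sample above:
+```
+# out_scp and share_type are set by the role (see the fact variables above).
+- name: Show the SCP export summary
+  ansible.builtin.debug:
+    msg: "{{ out_scp.msg }} (exported over {{ share_type }})"
+```
+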
+## Examples
+-----
+
+```
+- name: Exporting SCP local path with all components
+ ansible.builtin.import_role:
+ name: idrac_export_server_config_profile
+ vars:
+ idrac_ip: "192.1.2.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_parameters:
+ share_name: "/root/tmp"
+ scp_file: "file.xml"
+```
+```
+- name: "Exporting SCP to NFS with iDRAC components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "192.1.2.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['IDRAC']
+ share_parameters:
+ share_name: "191.2.1.1:/nfs"
+ scp_file: "file.json"
+```
+```
+- name: "Exporting SCP to CIFS with BIOS components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "192.1.2.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['BIOS']
+ share_parameters:
+ share_name: "\\\\191.1.1.1\\cifs"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "file.xml"
+```
+```
+- name: "Exporting SCP to HTTPS with RAID components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "192.1.2.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['RAID']
+ share_parameters:
+ share_name: "https://192.1.1.1/share"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "filename.json"
+```
+```
+- name: "Exporting SCP to HTTP with NIC components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "192.1.2.1"
+ idrac_user: "username"
+ idrac_password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['NIC']
+ share_parameters:
+ share_name: "http://192.1.1.1/share"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "filename.xml"
+```
+```
+- name: Export SCP
+ hosts: idrac
+ roles:
+ - role: idrac_export_server_config_profile
+```
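+
+The following is a minimal sketch of exporting to an HTTP share through a proxy; the proxy server, port, and credentials are placeholder values.
+```
+- name: "Exporting SCP to HTTP through a proxy"
+  ansible.builtin.import_role:
+    name: "idrac_export_server_config_profile"
+  vars:
+    idrac_ip: "192.1.2.1"
+    idrac_user: "username"
+    idrac_password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+    target: ['NIC']
+    share_parameters:
+      share_name: "http://192.1.1.1/share"
+      share_user: "username"
+      share_password: "password"
+      scp_file: "filename.xml"
+      # Placeholder proxy endpoint and credentials; replace with real values.
+      proxy_support: true
+      proxy_type: http
+      proxy_server: "192.168.0.1"
+      proxy_port: "80"
+      proxy_username: "proxy_user"
+      proxy_password: "proxy_pass"
+```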
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Abhishek Sinha (Abhishek.Sinha10@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/defaults/main.yml
new file mode 100644
index 000000000..96b7d2127
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+# defaults file for idrac_export_server_config_profile
+
+idrac_port: 443
+validate_certs: true
+idrac_timeout: 30
+share_parameters:
+ proxy_support: false
+ proxy_type: http
+ proxy_port: "80"
+ ignore_certificate_warning: ignore
+target: ['ALL']
+export_format: XML
+export_use: Default
+include_in_export: default
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/handlers/main.yml
new file mode 100644
index 000000000..f1a862b9a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_export_server_config_profile
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/argument_specs.yml
new file mode 100644
index 000000000..d9e222844
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/argument_specs.yml
@@ -0,0 +1,143 @@
+---
+argument_specs:
+ main:
+ version_added: "7.3.0"
+ short_description: Role to export iDRAC Server Configuration Profile (SCP)
+ description:
+ - Role to export the Server Configuration Profile (SCP) from the iDRAC to a network share (CIFS, NFS, HTTP, HTTPS) or a local path.
+ options:
+ idrac_ip:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ idrac_user:
+ type: str
+ description: iDRAC username.
+ idrac_password:
+ type: str
+ description: iDRAC user password.
+ idrac_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ idrac_timeout:
+ description: The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ share_parameters:
+ description: Network share parameters.
+ type: dict
+ options:
+ share_name:
+ required: true
+ description:
+ - Network share or local path.
+ - CIFS, NFS, HTTP, and HTTPS network share types are supported.
+ - I(share_name) is mutually exclusive with I(import_buffer).
+ type: str
+ scp_file:
+ description:
+ - Name of the server configuration profile (SCP) file.
+ - The default format <idrac_ip>_YYMMDD_HHMMSS_scp is used if this option is not specified.
+ - I(export_format) is used if the valid extension file is not provided.
+ type: str
+ share_user:
+ description:
+ Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ proxy_support:
+ description:
+ - Proxy to be enabled or disabled.
+ - I(proxy_support) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: bool
+ default: false
+ proxy_type:
+ description:
+ - C(http) to select HTTP type proxy.
+ - C(socks4) to select SOCKS4 type proxy.
+ - I(proxy_type) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ choices: [http, socks4]
+ default: http
+ proxy_server:
+ description:
+ - I(proxy_server) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_server) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ proxy_port:
+ description:
+ - Proxy port to authenticate.
+ - I(proxy_port) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_port) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ default: 80
+ proxy_username:
+ description:
+ - Proxy username to authenticate.
+ - I(proxy_username) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ proxy_password:
+ description:
+ - Proxy password to authenticate.
+ - I(proxy_password) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ ignore_certificate_warning:
+ description:
+ - If C(ignore), it ignores the certificate warnings.
+ - If C(showerror), it shows the certificate warnings.
+ - I(ignore_certificate_warning) is considered only when I(share_name) is of type HTTPS and is
+ supported only on iDRAC9.
+ type: str
+ choices: [ignore, showerror]
+ default: ignore
+ include_in_export:
+ description:
+ - This option is applicable when I(command) is C(export).
+ - If C(default), it exports the default Server Configuration Profile.
+ - If C(readonly), it exports the SCP with readonly attributes.
+ - If C(passwordhashvalues), it exports the SCP with password hash values.
+ - If C(customtelemetry), it exports the SCP with custom telemetry attributes, which are supported only on iDRAC9.
+ type: str
+ choices: [default, readonly, passwordhashvalues, customtelemetry]
+ default: default
+ target:
+ description:
+ - If C(ALL), this module exports or imports all components configurations from SCP file.
+ - If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.
+ - If C(BIOS), this module exports or imports BIOS configuration from SCP file.
+ - If C(NIC), this module exports or imports NIC configuration from SCP file.
+ - If C(RAID), this module exports or imports RAID configuration from SCP file.
+ choices: ["ALL", "IDRAC", "BIOS", "NIC", "RAID"]
+ default: ["ALL"]
+ type: list
+ export_format:
+ description: Specify the output file format. This option is applicable for C(export) command.
+ type: str
+ choices: ["JSON", "XML"]
+ default: "XML"
+ export_use:
+ description:
+ - Specify the type of Server Configuration Profile (SCP) to be exported.
+ - This option is applicable when I(command) is C(export).
+ - C(Default) Creates a non-destructive snapshot of the configuration.
+ - C(Replace) Replaces a server with another or restores the server's settings to a known baseline.
+ - C(Clone) Clones settings from one server to another server with the identical hardware setup.
+ All settings except I/O identity are updated (e.g. will reset RAID). The settings in this export
+ will be destructive when uploaded to another system.
+ type: str
+ choices: ["Default", "Clone", "Replace"]
+ default: "Default"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/main.yml
new file mode 100644
index 000000000..6c63527fe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/meta/main.yml
@@ -0,0 +1,53 @@
+galaxy_info:
+ author: 'Abhishek-Dell'
+ description: The role performs the export operation of the Server Configuration Profile.
+ company: Dell Technologies
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/cleanup.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/cleanup.yml
new file mode 100644
index 000000000..9ade81e90
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/cleanup.yml
@@ -0,0 +1,96 @@
+---
+# This is an example playbook to execute Ansible cleanup.
+
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ vars:
+ local_path: "{{ lookup('env', 'local_path') }}"
+ local_filename: "{{ lookup('env', 'local_filename') }}"
+ nfs_filename: "{{ lookup('env', 'nfs_filename') }}"
+ cifs_filename: "{{ lookup('env', 'cifs_filename') }}"
+ https_filename: "{{ lookup('env', 'https_filename') }}"
+ http_filename: "{{ lookup('env', 'http_filename') }}"
+ nfs_mount_path: "{{ lookup('env', 'nfs_mount_path') }}"
+ cifs_mount_path: "{{ lookup('env', 'cifs_mount_path') }}"
+
+ nfs_url: "{{ lookup('env', 'NFS_URL') }}"
+ cifs_url: "{{ lookup('env', 'CIFS_URL') }}"
+ cifs_username: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ cifs_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+
+ https_url: "{{ lookup('env', 'HTTPS_URL') }}"
+ https_username: "{{ lookup('env', 'HTTPS_USERNAME') }}"
+ https_password: "{{ lookup('env', 'HTTPS_PASSWORD') }}"
+
+ http_url: "{{ lookup('env', 'HTTP_URL') }}"
+ http_username: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ http_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ tasks:
+ - name: Checking file exists in NFS mount localhost
+ ansible.builtin.stat:
+ path: "{{ nfs_mount_path }}/{{ nfs_filename }}"
+ delegate_to: localhost
+ register: nfs_file
+
+ - name: Checking file exists in CIFS mount localhost
+ ansible.builtin.stat:
+ path: "{{ cifs_mount_path }}/{{ cifs_filename }}"
+ delegate_to: localhost
+ register: cifs_file
+
+ - name: Checking HTTP file exists in current location
+ ansible.builtin.stat:
+ path: "{{ http_filename }}"
+ delegate_to: localhost
+ register: http_file
+
+ - name: Checking HTTPS file exists in current location
+ ansible.builtin.stat:
+ path: "{{ https_filename }}"
+ delegate_to: localhost
+ register: https_file
+
+ - name: Deleting the file if exists in NFS mounted localhost
+ ansible.builtin.file:
+ path: "{{ nfs_mount_path }}/{{ nfs_filename }}"
+ state: absent
+ delegate_to: localhost
+ when: nfs_file.stat.exists
+
+ - name: Deleting the file if exists in CIFS mounted localhost
+ ansible.builtin.file:
+ path: "{{ cifs_mount_path }}/{{ cifs_filename }}"
+ state: absent
+ delegate_to: localhost
+ when: cifs_file.stat.exists
+
+ - name: Deleting the file if exists in HTTP localhost
+ ansible.builtin.file:
+ path: "{{ http_filename }}"
+ state: absent
+ delegate_to: localhost
+ when: http_file.stat.exists
+
+ - name: Deleting the file if exists in HTTPS localhost
+ ansible.builtin.file:
+ path: "{{ https_filename }}"
+ state: absent
+ delegate_to: localhost
+ when: https_file.stat.exists
+
+ - name: Unmounting NFS volume from localhost
+ ansible.posix.mount:
+ src: "{{ nfs_url }}"
+ path: "{{ nfs_mount_path }}"
+ state: unmounted
+ fstype: nfs
+ delegate_to: localhost
+
+ - name: Unmounting CIFS volume from localhost
+ ansible.posix.mount:
+ src: "{{ cifs_url }}"
+ path: "{{ cifs_mount_path }}"
+ state: unmounted
+ fstype: cifs
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/converge.yml
new file mode 100644
index 000000000..8073a85bc
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/converge.yml
@@ -0,0 +1,100 @@
+- name: Converge
+ hosts: all
+ vars:
+ local_path: "{{ lookup('env', 'local_path') }}"
+ local_filename: "{{ lookup('env', 'local_filename') }}"
+ nfs_filename: "{{ lookup('env', 'nfs_filename') }}"
+ cifs_filename: "{{ lookup('env', 'cifs_filename') }}"
+ https_filename: "{{ lookup('env', 'https_filename') }}"
+ http_filename: "{{ lookup('env', 'http_filename') }}"
+ nfs_mount_path: "{{ lookup('env', 'nfs_mount_path') }}"
+ cifs_mount_path: "{{ lookup('env', 'cifs_mount_path') }}"
+
+ nfs_url: "{{ lookup('env', 'NFS_URL') }}"
+ cifs_url: "{{ lookup('env', 'CIFS_URL') }}"
+ cifs_username: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ cifs_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+
+ https_url: "{{ lookup('env', 'HTTPS_URL') }}"
+ https_username: "{{ lookup('env', 'HTTPS_USERNAME') }}"
+ https_password: "{{ lookup('env', 'HTTPS_PASSWORD') }}"
+
+ http_url: "{{ lookup('env', 'HTTP_URL') }}"
+ http_username: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ http_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ gather_facts: false
+ tasks:
+ - name: Exporting SCP local path with all components
+ ansible.builtin.import_role:
+ name: idrac_export_server_config_profile
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USER') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ job_wait: true
+ share_parameters:
+ share_name: "{{ local_path }}"
+ scp_file: "{{ local_filename }}"
+
+ - name: "Exporting SCP to NFS with iDRAC components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USER') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ target: ['IDRAC']
+ job_wait: true
+ share_parameters:
+ share_name: "{{ nfs_url }}"
+ scp_file: "{{ nfs_filename }}"
+
+ - name: "Exporting SCP to CIFS with BIOS components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USER') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ target: ['BIOS']
+ job_wait: true
+ share_parameters:
+ share_name: "{{ cifs_url }}"
+ share_user: "{{ cifs_username }}"
+ share_password: "{{ cifs_password }}"
+ scp_file: "{{ cifs_filename }}"
+
+ - name: "Exporting SCP to HTTPS with RAID components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USER') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ target: ['RAID']
+ job_wait: true
+ share_parameters:
+ share_name: "{{ https_url }}"
+ share_user: "{{ https_username }}"
+ share_password: "{{ https_password }}"
+ scp_file: "{{ https_filename }}"
+
+ - name: "Exporting SCP to HTTP with NIC components"
+ ansible.builtin.import_role:
+ name: "idrac_export_server_config_profile"
+ vars:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USER') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ target: ['NIC']
+ job_wait: true
+ share_parameters:
+ share_name: "{{ http_url }}"
+ share_user: "{{ http_username }}"
+ share_password: "{{ http_password }}"
+ scp_file: "{{ http_filename }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/molecule.yml
new file mode 100644
index 000000000..c2dae4dce
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/molecule.yml
@@ -0,0 +1,12 @@
+---
+provisioner:
+ name: ansible
+ env:
+ local_path: "/tmp"
+ local_filename: "exported_scp_local.xml"
+ nfs_filename: "exported_scp_nfs.json"
+ cifs_filename: 'exported_scp_cifs.xml'
+ https_filename: "exported_scp_https.json"
+ http_filename: "exported_scp_http.xml"
+ nfs_mount_path: "/tmp/nfs"
+ cifs_mount_path: "/tmp/cifs"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/verify.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/verify.yml
new file mode 100644
index 000000000..25206e2d3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/molecule/default/verify.yml
@@ -0,0 +1,115 @@
+---
+# This is an example playbook to execute Ansible tests.
+
+- name: Verify
+ hosts: all
+ gather_facts: false
+ vars:
+ local_path: "{{ lookup('env', 'local_path') }}"
+ local_filename: "{{ lookup('env', 'local_filename') }}"
+ nfs_filename: "{{ lookup('env', 'nfs_filename') }}"
+ cifs_filename: "{{ lookup('env', 'cifs_filename') }}"
+ https_filename: "{{ lookup('env', 'https_filename') }}"
+ http_filename: "{{ lookup('env', 'http_filename') }}"
+ nfs_mount_path: "{{ lookup('env', 'nfs_mount_path') }}"
+ cifs_mount_path: "{{ lookup('env', 'cifs_mount_path') }}"
+
+ nfs_url: "{{ lookup('env', 'NFS_URL') }}"
+ cifs_url: "{{ lookup('env', 'CIFS_URL') }}"
+ cifs_username: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ cifs_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+
+ https_url: "{{ lookup('env', 'HTTPS_URL') }}"
+ https_username: "{{ lookup('env', 'HTTPS_USERNAME') }}"
+ https_password: "{{ lookup('env', 'HTTPS_PASSWORD') }}"
+
+ http_url: "{{ lookup('env', 'HTTP_URL') }}"
+ http_username: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ http_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ tasks:
+ - name: Checking exported file exists in Local path
+ ansible.builtin.stat:
+ path: "{{ local_path }}/{{ local_filename }}"
+ delegate_to: localhost
+ register: local_file
+
+ - name: Mounting NFS volume to localhost
+ ansible.posix.mount:
+ src: "{{ nfs_url }}"
+ path: "{{ nfs_mount_path }}"
+ state: mounted
+ fstype: nfs
+ delegate_to: localhost
+ register: nfs_mount
+
+ - name: Checking file exists in NFS mount localhost
+ ansible.builtin.stat:
+ path: "{{ nfs_mount_path }}/{{ nfs_filename }}"
+ delegate_to: localhost
+ register: nfs_file
+
+ - name: Mounting CIFS volume to localhost
+ ansible.posix.mount:
+ src: "{{ cifs_url }}"
+ path: "{{ cifs_mount_path }}"
+ opts: "username={{ cifs_username }},password={{ cifs_password }}"
+ state: mounted
+ fstype: cifs
+ delegate_to: localhost
+ register: cifs_mount
+ no_log: true
+
+ - name: Checking file exists in CIFS mount localhost
+ ansible.builtin.stat:
+ path: "{{ cifs_mount_path }}/{{ cifs_filename }}"
+ delegate_to: localhost
+ register: cifs_file
+
+ - name: Downloading HTTP file to localhost
+ ansible.builtin.uri:
+ url: "{{ http_url }}/{{ http_filename }}"
+ dest: .
+ force_basic_auth: true
+ validate_certs: false
+ url_username: "{{ http_username }}"
+ url_password: "{{ http_password }}"
+ mode: '0755'
+ delegate_to: localhost
+ register: http_file_download
+ no_log: true
+ changed_when: false
+
+ - name: Checking HTTP file exists in current location
+ ansible.builtin.stat:
+ path: "{{ http_filename }}"
+ delegate_to: localhost
+ register: http_file
+
+ - name: Downloading HTTPS file to localhost
+ ansible.builtin.uri:
+ url: "{{ https_url }}/{{ https_filename }}"
+ dest: .
+ force_basic_auth: true
+ validate_certs: false
+ url_username: "{{ https_username }}"
+ url_password: "{{ https_password }}"
+ mode: '0755'
+ delegate_to: localhost
+ register: https_file_download
+ no_log: true
+ changed_when: false
+
+ - name: Checking HTTPS file exists in current location
+ ansible.builtin.stat:
+ path: "{{ https_filename }}"
+ delegate_to: localhost
+ register: https_file
+
+ - name: Verifying file exists
+ ansible.builtin.assert:
+ that:
+ - local_file.stat.exists
+ - nfs_file.stat.exists
+ - cifs_file.stat.exists
+ - http_file.stat.exists
+ - https_file.stat.exists
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/main.yml
new file mode 100644
index 000000000..2b19130c5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/main.yml
@@ -0,0 +1,24 @@
+---
+# tasks file for idrac_export_server_config_profile
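+# pre_req.yml derives the share_type fact from share_parameters.share_name;
+# the matching scp_export_<type>.yml task file is then included below.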
+- name: Pre-req
+ ansible.builtin.include_tasks: pre_req.yml
+
+- name: Local path
+ ansible.builtin.include_tasks: scp_export_local.yml
+ when: share_type == 'Local'
+
+- name: NFS
+ ansible.builtin.include_tasks: scp_export_nfs.yml
+ when: share_type == 'NFS'
+
+- name: CIFS
+ ansible.builtin.include_tasks: scp_export_cifs.yml
+ when: share_type == 'CIFS'
+
+- name: HTTP
+ ansible.builtin.include_tasks: scp_export_http.yml
+ when: share_type == 'HTTP'
+
+- name: HTTPS
+ ansible.builtin.include_tasks: scp_export_https.yml
+ when: share_type == 'HTTPS'
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/pre_req.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/pre_req.yml
new file mode 100644
index 000000000..9668f7ee1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/pre_req.yml
@@ -0,0 +1,13 @@
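+# Derive the share_type fact (Local, NFS, CIFS, HTTP, or HTTPS) from the prefix
+# of share_parameters.share_name; tasks/main.yml uses it to pick a task file.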
+- name: Initializing share_type
+ ansible.builtin.set_fact:
+ share_type: ''
+
+- name: Checking network share type is CIFS, HTTPS, HTTP
+ ansible.builtin.set_fact:
+ share_type: "{{ item.key if share_parameters.share_name.startswith(item.value) else share_type }}"
+ with_dict: { 'HTTPS': 'https://', 'HTTP': 'http://', 'CIFS': '\\'}
+
+- name: Checking network share type is NFS, Local
+ ansible.builtin.set_fact:
+ share_type: "{{ 'NFS' if ':' in share_parameters.share_name else 'Local' }}"
+ when: share_type == ''
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_cifs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_cifs.yml
new file mode 100644
index 000000000..5fd79dcbb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_cifs.yml
@@ -0,0 +1,21 @@
+---
+- name: Exporting the SCP components to CIFS
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_port: "{{ idrac_port }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ idrac_timeout }}"
+ export_format: "{{ export_format }}"
+ export_use: "{{ export_use }}"
+ include_in_export: "{{ include_in_export }}"
+ share_user: "{{ share_parameters.share_user | default(omit) }}"
+ share_password: "{{ share_parameters.share_password | default(omit) }}"
+ share_name: "{{ share_parameters.share_name }}"
+ scp_file: "{{ share_parameters.scp_file | default(omit) }}"
+ target: "{{ target }}"
+ job_wait: "{{ idrac_export_server_config_profile_job_wait }}"
+ register: out_scp
+ delegate_to: "{{ idrac_export_server_config_profile_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_http.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_http.yml
new file mode 100644
index 000000000..abb85c268
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_http.yml
@@ -0,0 +1,27 @@
+---
+- name: Exporting the SCP components to HTTP
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_port: "{{ idrac_port }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ idrac_timeout }}"
+ export_format: "{{ export_format }}"
+ export_use: "{{ export_use }}"
+ include_in_export: "{{ include_in_export }}"
+ share_user: "{{ share_parameters.share_user | default(omit) }}"
+ share_password: "{{ share_parameters.share_password | default(omit) }}"
+ share_name: "{{ share_parameters.share_name }}"
+ scp_file: "{{ share_parameters.scp_file | default(omit) }}"
+ target: "{{ target }}"
+ proxy_support: "{{ share_parameters.proxy_support | default(omit) }}"
+ proxy_type: "{{ share_parameters.proxy_type | default(omit) }}"
+ proxy_server: "{{ share_parameters.proxy_server | default(omit) }}"
+ proxy_port: "{{ share_parameters.proxy_port | default(omit) }}"
+ proxy_username: "{{ share_parameters.proxy_username | default(omit) }}"
+ proxy_password: "{{ share_parameters.proxy_password | default(omit) }}"
+ job_wait: "{{ idrac_export_server_config_profile_job_wait }}"
+ register: out_scp
+ delegate_to: "{{ idrac_export_server_config_profile_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_https.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_https.yml
new file mode 100644
index 000000000..233a5083d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_https.yml
@@ -0,0 +1,28 @@
+---
+- name: Exporting the SCP components to HTTPS
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_port: "{{ idrac_port }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ idrac_timeout }}"
+ export_format: "{{ export_format }}"
+ export_use: "{{ export_use }}"
+ include_in_export: "{{ include_in_export }}"
+ share_user: "{{ share_parameters.share_user | default(omit) }}"
+ share_password: "{{ share_parameters.share_password | default(omit) }}"
+ share_name: "{{ share_parameters.share_name }}"
+ scp_file: "{{ share_parameters.scp_file | default(omit) }}"
+ target: "{{ target }}"
+ proxy_support: "{{ share_parameters.proxy_support | default(omit) }}"
+ proxy_type: "{{ share_parameters.proxy_type | default(omit) }}"
+ proxy_server: "{{ share_parameters.proxy_server | default(omit) }}"
+ proxy_port: "{{ share_parameters.proxy_port | default(omit) }}"
+ proxy_username: "{{ share_parameters.proxy_username | default(omit) }}"
+ proxy_password: "{{ share_parameters.proxy_password | default(omit) }}"
+ ignore_certificate_warning: "{{ share_parameters.ignore_certificate_warning | default(omit) }}"
+ job_wait: "{{ idrac_export_server_config_profile_job_wait }}"
+ register: out_scp
+ delegate_to: "{{ idrac_export_server_config_profile_delegate }}"
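For the HTTPS path, the same share_parameters dictionary can also carry the proxy keys mapped above. A minimal sketch with hypothetical values; proxy_support, proxy_type, and ignore_certificate_warning are left out here because their accepted values are defined by the dellemc.openmanage.idrac_server_config_profile module itself:

```yml
share_parameters:
  share_name: "https://192.168.0.3/share"
  scp_file: "exported_scp.xml"
  proxy_server: "192.168.1.10"
  proxy_port: 3128
  proxy_username: "proxy_user"
  proxy_password: "proxy_pwd"
```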
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_local.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_local.yml
new file mode 100644
index 000000000..a6da9df20
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_local.yml
@@ -0,0 +1,19 @@
+---
+- name: Exporting the SCP components to local
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_port: "{{ idrac_port }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ idrac_timeout }}"
+ export_format: "{{ export_format }}"
+ export_use: "{{ export_use }}"
+ include_in_export: "{{ include_in_export }}"
+ share_name: "{{ share_parameters.share_name }}"
+ target: "{{ target }}"
+ scp_file: "{{ share_parameters.scp_file | default(omit) }}"
+ job_wait: "{{ idrac_export_server_config_profile_job_wait }}"
+ register: out_scp
+ delegate_to: "{{ idrac_export_server_config_profile_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_nfs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_nfs.yml
new file mode 100644
index 000000000..355da446f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tasks/scp_export_nfs.yml
@@ -0,0 +1,19 @@
+---
+- name: Exporting the SCP components to NFS
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ idrac_ip }}"
+ idrac_port: "{{ idrac_port }}"
+ idrac_user: "{{ idrac_user }}"
+ idrac_password: "{{ idrac_password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ idrac_timeout }}"
+ export_format: "{{ export_format }}"
+ export_use: "{{ export_use }}"
+ include_in_export: "{{ include_in_export }}"
+ share_name: "{{ share_parameters.share_name }}"
+ target: "{{ target }}"
+ scp_file: "{{ share_parameters.scp_file | default(omit) }}"
+ job_wait: "{{ idrac_export_server_config_profile_job_wait }}"
+ register: out_scp
+ delegate_to: "{{ idrac_export_server_config_profile_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/test.yml
new file mode 100644
index 000000000..313357676
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Exporting server config profile for iDRAC
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_export_server_config_profile
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/vars/main.yml
new file mode 100644
index 000000000..af054eb79
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_export_server_config_profile/vars/main.yml
@@ -0,0 +1,4 @@
+---
+# vars file for idrac_export_server_config_profile
+idrac_export_server_config_profile_job_wait: true
+idrac_export_server_config_profile_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
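Because the delegate host is resolved from the RUNON environment variable with a localhost fallback, the module call can be pushed to another host either by exporting RUNON before the playbook run or by overriding the variable directly. A minimal sketch using a hypothetical jump host:

```yml
- name: Export SCP, delegating the module call to a jump host
  ansible.builtin.include_role:
    name: idrac_export_server_config_profile
  vars:
    idrac_ip: "192.168.0.1"
    idrac_user: "username"
    idrac_password: "password"
    share_parameters:
      share_name: "192.168.0.2:/nfsshare"
    # overrides the RUNON/localhost default declared above
    idrac_export_server_config_profile_delegate: "jump-host-01"
```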
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/README.md
new file mode 100644
index 000000000..6b4dace7a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/README.md
@@ -0,0 +1,342 @@
+# idrac_firmware
+
+Update the Firmware by connecting to a network share (CIFS, NFS, HTTP, HTTPS, FTP) that contains a catalog of available updates.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+omsdk
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+omsdk
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>share_name</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported.</td>
+ </tr>
+ <tr>
+ <td>share_user</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share user in the format 'user@domain' or 'domain\\user' if user is part of a domain else 'user'. This option is mandatory for CIFS Network Share.</td>
+ </tr>
+ <tr>
+ <td>share_password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share user password. This option is mandatory for CIFS Network Share.</td>
+ </tr>
+ <tr>
+ <td>catalog_file_name</td>
+ <td>false</td>
+ <td>Catalog.xml</td>
+ <td></td>
+ <td>str</td>
+ <td>- Catalog file name relative to the I(share_name).</td>
+ </tr>
+ <tr>
+ <td>ignore_cert_warning</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Specifies if certificate warnings are ignored when HTTPS share is used.</br>- If C(true) option is set, then the certificate warnings are ignored.</td>
+ </tr>
+ <tr>
+ <td>apply_update</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td> - If I(apply_update) is set to C(true), then the packages are applied.</br>- If I(apply_update) is set to C(false), no updates are applied, and a catalog report of packages is generated and returned.</td>
+ </tr>
+ <tr>
+ <td>reboot</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Provides the option to apply the update packages immediately or in the next reboot.</br> - If I(reboot) is set to C(true), then the packages are applied immediately.</br>- If I(reboot) is set to C(false), then the packages are staged and applied in the next reboot.</br>- Packages that do not require a reboot are applied immediately irrespective of I(reboot).
+ </td>
+ </tr>
+ <tr>
+ <td>proxy_support</td>
+ <td>false</td>
+ <td>Off</td>
+ <td>"ParametersProxy", "DefaultProxy", "Off"</td>
+ <td>str</td>
+ <td>- Specifies if a proxy should be used.</br>- Proxy parameters are applicable on C(HTTP), C(HTTPS), and C(FTP) share type of repositories.</br> - C(ParametersProxy), sets the proxy parameters for the current firmware operation.</br>- C(DefaultProxy), iDRAC uses the proxy values set by default.</br>- Default Proxy can be set in the Lifecycle Controller attributes using M(dellemc.openmanage.idrac_attributes).</br>- C(Off), will not use the proxy.</br>- For iDRAC8 based servers, use proxy server with basic authentication.</br>- For iDRAC9 based servers, ensure that you use digest authentication for the proxy server, basic authentication is not supported.
+ </td>
+ </tr>
+ <tr>
+ <td>proxy_server</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The IP address of the proxy server.</br>- This IP will not be validated. The download job will be created even for invalid I(proxy_server).</br>- Please check the results of the job for error details.</br>- This is required when I(proxy_support) is C(ParametersProxy). </td>
+ </tr>
+ <tr>
+ <td>proxy_port</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>int</td>
+ <td>- The Port for the proxy server.</br>- This is required when I(proxy_support) is C(ParametersProxy).</td>
+ </tr>
+ <tr>
+ <td>proxy_type</td>
+ <td>false</td>
+ <td></td>
+ <td>HTTP, SOCKS</td>
+ <td>str</td>
+ <td>- The proxy type of the proxy server.</br>- This is required when I(proxy_support) is C(ParametersProxy).</td>
+ </tr>
+ <tr>
+ <td>proxy_uname</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The user name for the proxy server.</td>
+ </tr>
+ <tr>
+ <td>proxy_passwd</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The password for the proxy server.</td>
+ </tr>
+ <tr>
+ <td>job_wait</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Whether to wait for job completion or not.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_firmware_out</td>
+ <td>{
+msg: "Successfully updated the firmware."
+update_status: {
+ 'InstanceID': 'JID_XXXXXXXXXXXX',
+ 'JobState': 'Completed',
+ 'Message': 'Job completed successfully.',
+ 'MessageId': 'REDXXX',
+ 'Name': 'Repository Update',
+ 'JobStartTime': 'NA',
+ 'Status': 'Success',
+ }
+}</td>
+<td>Returns the output of the firmware update status</td>
+</tbody>
+</table>
+
+## Examples
+-----
+
+```yml
+- name: Update firmware from repository on an NFS Share
+ ansible.builtin.include_role:
+ name: idrac_firmware
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ share_name: "192.168.0.0:/share"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+
+```
+```yml
+- name: Update firmware from repository on a CIFS Share
+ ansible.builtin.include_role:
+ name: idrac_firmware
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ share_name: "full_cifs_path"
+ share_user: "share_user"
+ share_password: "share_password"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+```
+```yml
+- name: Update firmware from repository on an HTTP Share
+ ansible.builtin.include_role:
+ name: idrac_firmware
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ share_name: "http://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+```
+```yml
+- name: Update firmware from repository on an HTTPS Share
+ ansible.builtin.include_role:
+ name: idrac_firmware
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+```
+```yml
+- name: Update firmware from repository on an HTTPS Share via a proxy
+ ansible.builtin.include_role:
+ name: idrac_firmware
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ share_name: "https://downloads.dell.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: ParametersProxy
+ proxy_server: 192.168.1.10
+ proxy_type: HTTP
+ proxy_port: 80
+ proxy_uname: "proxy_user"
+ proxy_passwd: "proxy_pwd"
+```
+```yml
+- name: Update firmware from repository on an FTP Share
+ ansible.builtin.include_role:
+ name: idrac_firmware
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ share_name: "ftp://ftp.mydomain.com"
+ reboot: true
+ job_wait: true
+ apply_update: true
+```
+## Author Information
+------------------
+
+Dell Technologies <br>
+Sachin Apagundi (Sachin.Apagundi@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/defaults/main.yml
new file mode 100644
index 000000000..a684e7406
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/defaults/main.yml
@@ -0,0 +1,11 @@
+---
+# defaults file for idrac_firmware
+https_port: 443
+validate_certs: true
+https_timeout: 30
+apply_update: true
+reboot: true
+proxy_support: "Off"
+job_wait: true
+ignore_cert_warning: true
+catalog_file_name: "Catalog.xml"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/handlers/main.yml
new file mode 100644
index 000000000..af82235fe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_firmware
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/argument_specs.yml
new file mode 100644
index 000000000..b6ac77a86
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/argument_specs.yml
@@ -0,0 +1,114 @@
+---
+argument_specs:
+ main:
+ version_added: "7.5.0"
+ short_description: Firmware update from a repository on a network share (CIFS, NFS, HTTP, HTTPS, FTP)
+ description:
+ - Update the Firmware by connecting to a network share (CIFS, NFS, HTTP, HTTPS, FTP) that contains a catalog of available updates.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address or hostname.
+ username:
+ type: str
+ description: iDRAC username with admin privileges.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ share_name:
+ description: Network share path of update repository. CIFS, NFS, HTTP, HTTPS and FTP share types are supported.
+ type: str
+ required: true
+ share_user:
+ description:
+ Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ catalog_file_name:
+ description: Catalog file name relative to the I(share_name).
+ type: str
+ default: "Catalog.xml"
+ ignore_cert_warning:
+ description:
+ Specifies if certificate warnings are ignored when HTTPS share is used.
+ If C(true) option is set, then the certificate warnings are ignored.
+ type: bool
+ default: true
+ apply_update:
+ description:
+ - If I(apply_update) is set to C(true), then the packages are applied.
+ - If I(apply_update) is set to C(false), no updates are applied, and a catalog report
+ of packages is generated and returned.
+ type: bool
+ default: true
+ reboot:
+ description:
+ - Provides the option to apply the update packages immediately or in the next reboot.
+ - If I(reboot) is set to C(true), then the packages are applied immediately.
+ - If I(reboot) is set to C(false), then the packages are staged and applied in the next reboot.
+ - Packages that do not require a reboot are applied immediately irrespective of I(reboot).
+ type: bool
+ default: false
+ proxy_support:
+ description:
+ - Specifies if a proxy should be used.
+ - Proxy parameters are applicable on C(HTTP), C(HTTPS), and C(FTP) share type of repositories.
+ - C(ParametersProxy), sets the proxy parameters for the current firmware operation.
+ - C(DefaultProxy), iDRAC uses the proxy values set by default.
+ - Default Proxy can be set in the Lifecycle Controller attributes using M(dellemc.openmanage.idrac_attributes).
+ - C(Off), will not use the proxy.
+ - For iDRAC8 based servers, use proxy server with basic authentication.
+ - For iDRAC9 based servers, ensure that you use digest authentication for the proxy server, basic authentication is not supported.
+ choices: ["ParametersProxy", "DefaultProxy", "Off"]
+ type: str
+ default: "Off"
+ proxy_server:
+ description:
+ - The IP address of the proxy server.
+ - This IP will not be validated. The download job will be created even for invalid I(proxy_server).
+ Please check the results of the job for error details.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ type: str
+ proxy_port:
+ description:
+ - The Port for the proxy server.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ type: int
+ proxy_type:
+ description:
+ - The proxy type of the proxy server.
+ - This is required when I(proxy_support) is C(ParametersProxy).
+ choices: [HTTP, SOCKS]
+ type: str
+ proxy_uname:
+ description: The user name for the proxy server.
+ type: str
+ proxy_passwd:
+ description: The password for the proxy server.
+ type: str
+ job_wait:
+ description: Whether to wait for job completion or not.
+ type: bool
+ default: true
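With this spec in place, Ansible's role argument validation rejects out-of-range values before any task runs. A minimal sketch with a hypothetical host; the deliberately invalid proxy_support value fails validation because it is not one of the declared choices:

```yml
- name: Attempt a firmware update with an unsupported proxy_support value
  ansible.builtin.include_role:
    name: idrac_firmware
  vars:
    hostname: "192.168.0.1"
    username: "username"
    password: "password"
    share_name: "https://downloads.dell.com"
    proxy_support: "SystemProxy"  # not among the declared choices, so validation fails
```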
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/main.yml
new file mode 100644
index 000000000..77872e297
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/meta/main.yml
@@ -0,0 +1,20 @@
+galaxy_info:
+ author: "Sachin Apagundi"
+ description: Firmware update from a repository on a network share (CIFS, NFS, HTTP, HTTPS, FTP).
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.13"
+ platforms:
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/converge.yml
new file mode 100644
index 000000000..161a35cf4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge idrac_firmware for cifsshare
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on a CIFS Share
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'cifsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a CIFS Share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a CIFS Share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a CIFS Share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/cifs_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/converge.yml
new file mode 100644
index 000000000..bc30806f4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/converge.yml
@@ -0,0 +1,101 @@
+---
+- name: Converge idrac_firmware
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on HTTPS Share with apply_update as false
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: false
+ catalog_file_name: "Catalog.xml"
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS Share with apply_update as false"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode
+
+ - name: Update firmware from repository on HTTPS Share with ignore_cert_warning as false
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+ ignore_cert_warning: false
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS Share with ignore_cert_warning as false"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode
+
+ - name: Update firmware from repository on HTTPS Share with reboot as false
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: false
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS Share with reboot as false"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode
+
+ - name: Update firmware from repository on HTTPS Share with job_wait as false
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: false
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS Share with job_wait as false"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/default/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/converge.yml
new file mode 100644
index 000000000..a94da723a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge idrac_firmware for ftp share
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on a FTP Share
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'ftpshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ catalog_file_name: "Catalog.xml"
+ reboot: true
+ job_wait: true
+ apply_update: true
+
+ - name: "Verifying update firmware from repository on a FTP Share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a FTP Share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a FTP Share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/ftp_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/converge.yml
new file mode 100644
index 000000000..82df756b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge idrac_firmware for httpshare
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on HTTP Share
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTP Share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTP Share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a HTTP Share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/http_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/converge.yml
new file mode 100644
index 000000000..a94983cae
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge idrac_firmware for httpsshare
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on HTTPS Share
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS Share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS Share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a HTTPS Share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/https_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/converge.yml
new file mode 100644
index 000000000..b4bd4bdc1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/converge.yml
@@ -0,0 +1,117 @@
+---
+- name: Converge idrac_firmware for https share via proxy
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on a HTTPS via parameter proxy Share
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsproxy') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: "ParametersProxy"
+ proxy_server: "{{ lookup('env', 'proxyserver') }}"
+ proxy_type: "HTTP"
+ proxy_port: 3128
+ proxy_uname: "{{ lookup('env', 'proxyuname') }}"
+ proxy_passwd: "{{ lookup('env', 'proxypass') }}"
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
+
+ - name: Update firmware from repository on a HTTPS via default proxy Share
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: "DefaultProxy"
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS via default proxy share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS via default proxy share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a HTTPS via default proxy share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
+
+ - name: Update firmware from repository on a HTTPS via parameter proxy Share with proxy_type as SOCKS
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsproxy') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: "ParametersProxy"
+ proxy_server: "{{ lookup('env', 'proxyserversocks') }}"
+ proxy_type: "SOCKS"
+ proxy_port: 1080
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy with proxy_type as SOCKS share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy with proxy_type as SOCKS share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy share with proxy_type as SOCKS in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/httpsproxy_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/converge.yml
new file mode 100644
index 000000000..37b959272
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/converge.yml
@@ -0,0 +1,206 @@
+---
+- name: Converge idrac_firmware for negative scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Updating firmware with an invalid hostname
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "invalidHostname"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ share_name: "{{ lookup('env', 'httpshare') }}"
+ catalog_file_name: "Catalog.xml"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ ignore_errors: true
+ register: idrac_firmware_result
+
+ - name: "Verifying Updating firmware with an invalid hostname"
+ ansible.builtin.assert:
+ that:
+ - "'unreachable iDRAC IP' in idrac_firmware_out.msg"
+
+ - name: Updating firmware with an invalid username
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "invalidUsername"
+ password: "{{ lookup('env', 'password') }}"
+ share_name: "{{ lookup('env', 'httpshare') }}"
+ catalog_file_name: "Catalog.xml"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ ignore_errors: true
+ register: idrac_firmware_result
+
+ - name: "Verifying Updating firmware with an invalid username"
+ ansible.builtin.assert:
+ that:
+ - "'Incorrect username' in idrac_firmware_out.msg"
+
+ - name: Updating firmware with an invalid password
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "invalidPassword"
+ share_name: "{{ lookup('env', 'httpshare') }}"
+ catalog_file_name: "Catalog.xml"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ ignore_errors: true
+ register: idrac_firmware_result
+
+ - name: "Verifying Updating firmware with an invalid password"
+ ansible.builtin.assert:
+ that:
+ - "'password' in idrac_firmware_out.msg"
+
+ - name: Updating firmware with an invalid ca_path
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ share_name: "{{ lookup('env', 'httpshare') }}"
+ ca_path: "{{ lookup('env', 'capath') }}"
+ catalog_file_name: "Catalog.xml"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ ignore_errors: true
+ register: idrac_firmware_result
+
+ - name: "Verifying Updating firmware with an invalid ca_path"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Firmware update failed."
+
+ - name: Updating firmware with catalog file without extension
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ share_name: "{{ lookup('env', 'httpshare') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: Catalog
+ ignore_errors: true
+ register: idrac_firmware_result
+
+ - name: "Verifying Updating firmware with catalog file without extension"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "catalog_file_name should be an XML file."
+
+ - name: Update firmware from repository on HTTPS Share with invalid share_user
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "invalidUser"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS Share with invalid share_user"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+
+ - name: Update firmware from repository on HTTPS Share with invalid share_password
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsshare') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "invalidPassword"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+ register: idrac_firmware_result
+
+ - name: "Verifying update firmware from repository on a HTTPS Share with invalid share_password"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+
+ - name: Update firmware from repository on a HTTPS via parameter proxy Share with invalid proxy_uname
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsproxy') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: "ParametersProxy"
+ proxy_server: "{{ lookup('env', 'proxyserver') }}"
+ proxy_type: "HTTP"
+ proxy_port: 3128
+ proxy_uname: "invalidUname"
+ proxy_passwd: "{{ lookup('env', 'proxypass') }}"
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy share with invalid proxy_uname"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+
+ - name: Update firmware from repository on a HTTPS via parameter proxy Share with invalid proxy_passwd
+ ansible.builtin.import_role:
+ name: "idrac_firmware"
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ share_name: "{{ lookup('env', 'httpsproxy') }}"
+ share_user: "{{ lookup('env', 'shareuser') }}"
+ share_password: "{{ lookup('env', 'sharepassword') }}"
+ reboot: true
+ job_wait: true
+ apply_update: true
+ proxy_support: "ParametersProxy"
+ proxy_server: "{{ lookup('env', 'proxyserver') }}"
+ proxy_type: "HTTP"
+ proxy_port: 3128
+ proxy_uname: "{{ lookup('env', 'proxyuname') }}"
+ proxy_passwd: "invalidPasswd"
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a HTTPS via parameter proxy share with invalid proxy_passwd"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/negative_scenarios/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/converge.yml
new file mode 100644
index 000000000..89e55838c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/converge.yml
@@ -0,0 +1,37 @@
+---
+- name: Converge idrac_firmware for nfsshare
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Update firmware from repository on a NFS Share
+ ansible.builtin.import_role:
+ name: idrac_firmware
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ share_name: "{{ lookup('env', 'nfsshare') }}"
+ validate_certs: false
+ reboot: true
+ job_wait: true
+ apply_update: true
+ catalog_file_name: "Catalog.xml"
+
+ - name: "Verifying update firmware from repository on a NFS Share in check mode"
+ ansible.builtin.assert:
+ that: idrac_firmware_out.msg == "Changes found to commit!"
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Verifying update firmware from repository on a NFS Share in normal mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Successfully updated the firmware."
+ when: not ansible_check_mode and idrac_firmware_out.changed
+
+ - name: "Verifying update firmware from repository on a NFS Share in idempotence mode"
+ ansible.builtin.assert:
+ that:
+ - idrac_firmware_out.msg == "Unable to complete the operation because the catalog name entered has either unsupported firmware packages
+ or same version installed on the server."
+ when: not ansible_check_mode and not idrac_firmware_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/molecule/nfs_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tasks/main.yml
new file mode 100644
index 000000000..c994373ce
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tasks/main.yml
@@ -0,0 +1,27 @@
+---
+# tasks file for idrac_firmware
+- name: Call iDRAC Firmware upgrade
+ dellemc.openmanage.idrac_firmware:
+ idrac_ip: "{{ hostname }}"
+ idrac_port: "{{ https_port }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ https_timeout }}"
+ share_name: "{{ share_name | default(omit) }}"
+ share_user: "{{ share_user | default(omit) }}"
+ share_password: "{{ share_password | default(omit) }}"
+ job_wait: "{{ job_wait }}"
+ catalog_file_name: "{{ catalog_file_name }}"
+ ignore_cert_warning: "{{ ignore_cert_warning }}"
+ apply_update: "{{ apply_update }}"
+ reboot: "{{ reboot }}"
+ proxy_support: "{{ proxy_support }}"
+ proxy_server: "{{ proxy_server | default(omit) }}"
+ proxy_type: "{{ proxy_type | default(omit) }}"
+ proxy_port: "{{ proxy_port | default(omit) }}"
+ proxy_uname: "{{ proxy_uname | default(omit) }}"
+ proxy_passwd: "{{ proxy_passwd | default(omit) }}"
+ register: "idrac_firmware_out"
+ delegate_to: "{{ idrac_firmware_delegate }}"
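The task above registers its result as idrac_firmware_out, so a play that includes this role can inspect the outcome afterwards. A minimal sketch; the update_status field names follow the sample shown in the role README:

```yml
- name: Report the firmware update job state
  ansible.builtin.debug:
    msg: "{{ idrac_firmware_out.update_status.JobState | default('unknown') }}"
```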
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/test.yml
new file mode 100644
index 000000000..12bb9ffc6
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Firmware update using catalog for idrac
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_firmware
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_firmware/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/vars/main.yml
new file mode 100644
index 000000000..ff6d7325f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_firmware/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# vars file for idrac_firmware
+idrac_firmware_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/README.md
new file mode 100644
index 000000000..dfe49351f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/README.md
@@ -0,0 +1,767 @@
+# idrac_gather_facts
+
+Role to gather facts from iDRAC
+
+## Requirements
+------------
+
+### Development
+Requirements to develop and contribute to the role.
+```
+python
+ansible
+molecule
+docker
+```
+### Production
+Requirements to use the role.
+```
+python
+ansible
+```
+### Ansible collections
+Collections required to use the role.
+```
+dellemc.openmanage
+ansible.utils
+```
+
+## Role Variables
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(False), the SSL certificates will not be validated.<br>- Configure C(False) only on personally controlled sites where self-signed certificates are used.<br>- Prior to collection version 5.0.0, I(validate_certs) is C(False) by default.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>computer_system_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Computer system id.</td>
+ </tr>
+ <tr>
+ <td>manager_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Manager/BMC id.</td>
+ </tr>
+ <tr>
+ <td>target</td>
+ <td>false</td>
+ <td>- System <br></td>
+ <td>- System <br> - BIOS <br> - Controller <br> - CPU <br> - Enclosure <br> - EnclosureEMM <br> - Fan <br>
+ - Firmware <br> - HostNIC <br> - License <br> - Memory <br> - NIC <br> - PCIeSSDBackPlane <br>
+ - PowerSupply <br> - PresenceAndStatusSensor <br> - Sensors_Battery <br> - Sensors_Intrusion <br>
+ - Sensors_Voltage <br> - VirtualDisk <br> - PCIeDevice <br> - PhysicalDisk <br> - SystemMetrics<br> - SecureBoot</td>
+ <td>list</td>
+ <td>Target component for which information needs to be gathered.
+ <ul>
+ <li>C(BIOS) lists the BIOS information.</li>
+ <li>C(Chassis) lists the chassis.</li>
+ <li>C(Controller) lists the available controllers for iDRAC.</li>
+ <li>C(CPU) lists the system processors.</li>
+ <li>C(Enclosure) lists the enclosures.</li>
+ <li>C(EnclosureEMM) lists the enclosure management module specific data.</li>
+ <li>C(Fan) lists the fans.</li>
+ <li>C(Firmware) lists the firmware inventories.</li>
+ <li>C(HostNIC) lists the host NIC.</li>
+ <li>C(IDRAC) lists the attributes for iDRAC.</li>
+ <li>C(License) lists the license information.</li>
+ <li>C(Manager) lists the manager resources.</li>
+ <li>C(Memory) lists the memory device specific data.</li>
+ <li>C(NetworkAdapter) lists the network adapters.</li>
+ <li>C(NetworkPort) lists the network ports.</li>
+ <li>C(NetworkDeviceFunction) lists the network device functions.</li>
+ <li>C(NIC) lists NIC device specific data.</li>
+ <li>C(PCIeSSDBackPlane) lists PCIeSSD back plane specific data.</li>
+ <li>C(PowerSupply) lists data specific to the Power Supply devices in the managed system.</li>
+ <li>C(PresenceAndStatusSensor) lists the presence and status sensor specific data.</li>
+ <li>C(PCIeDevice) lists the PCIeDevices.</li>
+ <li>C(PhysicalDisk) lists the physical disks.</li>
+ <li>C(Sensors_Battery) lists the sensors battery information.</li>
+ <li>C(Sensors_Intrusion) lists the sensors intrusion information.</li>
+ <li>C(Sensors_Voltage) lists the sensors voltage information.</li>
+ <li>C(System) lists the ComputerSystem resources for iDRAC.</li>
+ <li>C(SystemMetrics) lists the system metrics.</li>
+ <li>C(ThermalSubSystem) lists the thermal sub system.</li>
+ <li>C(VirtualDisk) lists the virtual disks.</li>
+ <li>C(VirtualMedia) lists the virtual media.</li>
+ <li>C(SecureBoot) lists the secure boot specific data.</li>
+ </ul>
+ </td>
+ </tr>
+</tbody>
+</table>
+
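+With only the required connection variables set, the role falls back to the defaults above (HTTPS port 443, certificate validation enabled, and `target` defaulting to `System`); a minimal usage sketch:
+```
+- name: Gather the default System facts
+  ansible.builtin.import_role:
+    name: idrac_gather_facts
+  vars:
+    hostname: "192.1.2.1"
+    username: "username"
+    password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+```
+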
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>system</td>
+ <td>{"BIOSReleaseDate": "03/22/2022", "BaseBoardChassisSlot": "1", "BatteryRollupStatus": "OK", "BladeGeometry": "SingleWidth,FullHeight", "CMCIP": "1.2.3.4", "CPURollupStatus": "Unknown", "ChassisModel": "", "ChassisName": "", "ChassisServiceTag": "SVCTAG1", "ChassisSystemHeightUnit": 7, "CurrentRollupStatus": "OK", "EstimatedExhaustTemperatureCelsius": 255, "EstimatedSystemAirflowCFM": 255, "ExpressServiceCode": "SERVICECODE1", "FanRollupStatus": null, "IDSDMRollupStatus": null, "Id": "System.Embedded.1", "IntrusionRollupStatus": null, "IsOEMBranded": "False", "LastSystemInventoryTime": "2019-08-09T13:23:32+00:00", "LastUpdateTime": "2022-06-10T20:19:30+00:00", "LicensingRollupStatus": "OK", "ManagedSystemSize": "7 U", "MaxCPUSockets": 2, "MaxDIMMSlots": 24, "MaxPCIeSlots": 3, "MemoryOperationMode": "OptimizerMode", "Name": "DellSystem", "NodeID": "SVCTAG2", "PSRollupStatus": null, "PlatformGUID": "325a364f-c0b6-4b80-3010-00484c4c4544", "PopulatedDIMMSlots": 2, "PopulatedPCIeSlots": 3, "PowerCapEnabledState": "Disabled", "SDCardRollupStatus": "OK", "SELRollupStatus": "OK", "ServerAllocationWatts": 18, "ServerOS.1.HostName": "MINWINPC", "ServerOS.1.OEMOSVersion": "", "ServerOS.1.OSName": "", "ServerOS.1.OSVersion": "", "ServerOS.1.ProductKey": "", "ServerOS.1.ServerPoweredOnTime": 0, "StorageRollupStatus": "OK", "SysMemErrorMethodology": "Multi-bitECC", "SysMemFailOverState": "NotInUse", "SysMemLocation": "SystemBoardOrMotherboard", "SysMemPrimaryStatus": "OK", "SystemGeneration": "14G Modular", "SystemID": 1893, "SystemRevision": "I", "TempRollupStatus": "OK", "TempStatisticsRollupStatus": "OK", "UUID": "4c4c4544-0048-3010-804b-b6c04f365a32", "VoltRollupStatus": "OK", "smbiosGUID": "44454c4c-4800-1030-804b-12345678abcd"}</td>
+ <td>Response facts details for system and operating system.</td>
+ </tr>
+ <tr>
+ <td>bios</td>
+ <td>{"@Redfish.Settings": {"SupportedApplyTimes": ["OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}, "Attributes": {"AcPwrRcvry": "Last", "AdddcSetting": "Disabled", "AesNi": "Enabled", "AssetTag": "", "AuthorizeDeviceFirmware": "Disabled", "AvxIccpPregrant": "IccpHeavy128", "BootMode": "Bios", "BootSeqRetry": "Enabled", "CECriticalSEL": "Disabled", "ConTermType": "Vt100Vt220", "ControlledTurbo": "Disabled", "ControlledTurboMinusBin": 0, "CorrEccSmi": "Enabled", "CpuInterconnectBusLinkPower": "Enabled", "CpuInterconnectBusSpeed": "MaxDataRate", "CurrentEmbVideoState": "Enabled", "DcuIpPrefetcher": "Enabled", "DcuStreamerPrefetcher": "Enabled", "DeadLineLlcAlloc": "Enabled", "DellWyseP25BIOSAccess": "Enabled", "DirectoryAtoS": "Disabled", "DramRefreshDelay": "Performance", "DynamicCoreAllocation": "Disabled", "EmbSata": "AhciMode", "EmbVideo": "Enabled", "EnergyPerformanceBias": "BalancedPerformance", "ErrPrompt": "Enabled", "ExtSerialConnector": "Serial1", "FailSafeBaud": "115200", "ForceInt10": "Disabled", "GenericUsbBoot": "Disabled", "HddFailover": "Disabled", "HddPlaceholder": "Disabled", "InBandManageabilityInterface": "Enabled", "IntelTxt": "Off", "InternalUsb": "On", "IoatEngine": "Disabled", "LlcPrefetch": "Disabled", "MemFrequency": "MaxPerf", "MemOpMode": "OptimizerMode", "MemPatrolScrub": "Standard", "MemRefreshRate": "1x", "MemTest": "Disabled", "MemoryMappedIOH": "56TB", "MmioAbove4Gb": "Enabled", "MonitorMwait": "Enabled", "NativeTrfcTiming": "Enabled", "NodeInterleave": "Disabled", "NumLock": "On", "NvmeMode": "NonRaid", "OneTimeBootMode": "Disabled", "OneTimeBootSeqDev": "Floppy.iDRACVirtual.1-1", "OneTimeHddSeqDev": "", "OppSrefEn": "Disabled", "OsWatchdogTimer": "Disabled", "PCIRootDeviceUnhide": "Disabled", "PPROnUCE": "Enabled", "PasswordStatus": "Unlocked", "PcieAspmL1": "Enabled", "PowerCycleRequest": "None", "Proc1Brand": "Intel(R) Xeon(R) Bronze 3204 CPU @ 1.90GHz", "Proc1Id": "6-55-7", "Proc1L2Cache": "6x1 MB", "Proc1L3Cache": "8448 KB", "Proc1MaxMemoryCapacity": "1 TB", "Proc1Microcode": "0x5003302", "Proc1NumCores": 6, "Proc2Brand": "Intel(R) Xeon(R) Bronze 3204 CPU @ 1.90GHz", "Proc2Id": "6-55-7", "Proc2L2Cache": "6x1 MB", "Proc2L3Cache": "8448 KB", "Proc2MaxMemoryCapacity": "1 TB", "Proc2Microcode": "0x5003302", "Proc2NumCores": 6, "ProcAdjCacheLine": "Enabled", "ProcBusSpeed": "9.60 GT/s", "ProcC1E": "Enabled", "ProcCStates": "Enabled", "ProcConfigTdp": "Nominal", "ProcCoreSpeed": "1.90 GHz", "ProcCores": "All", "ProcHwPrefetcher": "Enabled", "ProcPwrPerf": "SysDbpm", "ProcVirtualization": "Enabled", "ProcX2Apic": "Enabled", "PwrButton": "Enabled", "RedirAfterBoot": "Enabled", "RedundantOsBoot": "Disabled", "RedundantOsLocation": "None", "RedundantOsState": "Visible", "SHA256SetupPassword": "", "SHA256SetupPasswordSalt": "", "SHA256SystemPassword": "", "SHA256SystemPasswordSalt": "", "SataPortA": "Auto", "SataPortACapacity": "N/A", "SataPortADriveType": "Unknown Device", "SataPortAModel": "Unknown", "SataPortB": "Auto", "SataPortBCapacity": "N/A", "SataPortBDriveType": "Unknown Device", "SataPortBModel": "Unknown", "SataPortC": "Auto", "SataPortCCapacity": "N/A", "SataPortCDriveType": "Unknown Device", "SataPortCModel": "Unknown", "SataPortD": "Auto", "SataPortDCapacity": "N/A", "SataPortDDriveType": "Unknown Device", "SataPortDModel": "Unknown", "SataPortE": "Auto", "SataPortECapacity": "N/A", "SataPortEDriveType": "Unknown Device", "SataPortEModel": "Unknown", "SataPortF": "Auto", "SataPortFCapacity": "N/A", "SataPortFDriveType": 
"Unknown Device", "SataPortFModel": "Unknown", "SecureBoot": "Disabled", "SecureBootMode": "DeployedMode", "SecureBootPolicy": "Standard", "SecurityFreezeLock": "Enabled", "SerialComm": "Off", "SerialPortAddress": "Com1", "SetBootOrderDis": "", "SetBootOrderEn": "Floppy.iDRACVirtual.1-1,Optical.iDRACVirtual.1-1", "SetBootOrderFqdd1": "", "SetBootOrderFqdd10": "", "SetBootOrderFqdd11": "", "SetBootOrderFqdd12": "", "SetBootOrderFqdd13": "", "SetBootOrderFqdd14": "", "SetBootOrderFqdd15": "", "SetBootOrderFqdd16": "", "SetBootOrderFqdd2": "", "SetBootOrderFqdd3": "", "SetBootOrderFqdd4": "", "SetBootOrderFqdd5": "", "SetBootOrderFqdd6": "", "SetBootOrderFqdd7": "", "SetBootOrderFqdd8": "", "SetBootOrderFqdd9": "", "SetLegacyHddOrderFqdd1": "", "SetLegacyHddOrderFqdd10": "", "SetLegacyHddOrderFqdd11": "", "SetLegacyHddOrderFqdd12": "", "SetLegacyHddOrderFqdd13": "", "SetLegacyHddOrderFqdd14": "", "SetLegacyHddOrderFqdd15": "", "SetLegacyHddOrderFqdd16": "", "SetLegacyHddOrderFqdd2": "", "SetLegacyHddOrderFqdd3": "", "SetLegacyHddOrderFqdd4": "", "SetLegacyHddOrderFqdd5": "", "SetLegacyHddOrderFqdd6": "", "SetLegacyHddOrderFqdd7": "", "SetLegacyHddOrderFqdd8": "", "SetLegacyHddOrderFqdd9": "", "SetupPassword": null, "Slot1": "Enabled", "Slot2": "Enabled", "Slot3": "Enabled", "SnoopHldOff": "Roll2KCycles", "SriovGlobalEnable": "Disabled", "SubNumaCluster": "Disabled", "SysMemSize": "32 GB", "SysMemSpeed": "2133 Mhz", "SysMemType": "ECC DDR4", "SysMemVolt": "1.20 V", "SysMfrContactInfo": "www.dell.com", "SysPassword": null, "SysProfile": "PerfPerWattOptimizedDapc", "SystemBiosVersion": "2.14.2", "SystemCpldVersion": "1.0.4", "SystemManufacturer": "Dell Inc.", "SystemMeVersion": "4.1.4.700", "SystemModelName": "PowerEdge MX740c", "SystemServiceTag": "SVCTAG3", "TpmCommand": "None", "TpmFirmware": "TpmFirmware", "TpmInfo": "Type: 1.2-NTC", "TpmPpiBypassClear": "Disabled", "TpmPpiBypassProvision": "Disabled", "TpmSecurity": "Off", "TpmStatus": "Unknown", "UefiComplianceVersion": "2.7", "UefiVariableAccess": "Standard", "UncoreFrequency": "DynamicUFS", "UpiPrefetch": "Enabled", "UsbManagedPort": "On", "UsbPorts": "AllOn", "VideoMem": "16 MB", "WorkloadProfile": "NotAvailable", "WriteCache": "Disabled", "WriteDataCrc": "Disabled"}}</td>
+ <td>Response facts details for bios.</td>
+ </tr>
+ <tr>
+ <td>controller</td>
+ <td>[{
+ "@Redfish.Settings": {
+ "SettingsObject": {},
+ "SupportedApplyTimes": [
+ "Immediate",
+ "OnReset",
+ "AtMaintenanceWindowStart",
+ "InMaintenanceWindowOnReset"
+ ]
+ },
+ "Assembly": {},
+ "CacheSummary": {
+ "TotalCacheSizeMiB": 0
+ },
+ "ControllerRates": {
+ "ConsistencyCheckRatePercent": null,
+ "RebuildRatePercent": null
+ },
+ "Description": "Integrated AHCI controller 1",
+ "FirmwareVersion": "2.6.13.3025",
+ "Id": "AHCI.Integrated.1-1",
+ "Identifiers": [
+ {
+ "DurableName": null,
+ "DurableNameFormat": null
+ }
+ ],
+ "Links": {
+ "PCIeFunctions": []
+ },
+ "Manufacturer": "DELL",
+ "Model": "BOSS-S1",
+ "Name": "BOSS-S1",
+ "Oem": {
+ "Dell": {
+ "DellStorageController": {
+ "AlarmState": "AlarmNotSupported",
+ "AutoConfigBehavior": "NotApplicable",
+ "BackgroundInitializationRatePercent": null,
+ "BatteryLearnMode": null,
+ "BootVirtualDiskFQDD": null,
+ "CacheSizeInMB": 0,
+ "CachecadeCapability": "NotSupported",
+ "CheckConsistencyMode": null,
+ "ConnectorCount": 0,
+ "ControllerBootMode": null,
+ "ControllerFirmwareVersion": "2.6.13.3025",
+ "ControllerMode": null,
+ "CopybackMode": null,
+ "CurrentControllerMode": "NotSupported",
+ "Device": "0",
+ "DeviceCardDataBusWidth": "4x or x4",
+ "DeviceCardSlotLength": "Other",
+ "DeviceCardSlotType": "M.2 Socket 3 (Mechanical Key M)",
+ "DriverVersion": null,
+ "EncryptionCapability": "None",
+ "EncryptionMode": "None",
+ "EnhancedAutoImportForeignConfigurationMode": null,
+ "KeyID": null,
+ "LastSystemInventoryTime": "2023-12-31T12:25:07+00:00",
+ "LastUpdateTime": "2023-12-31T18:50:12+00:00",
+ "LoadBalanceMode": null,
+ "MaxAvailablePCILinkSpeed": null,
+ "MaxDrivesInSpanCount": 2,
+ "MaxPossiblePCILinkSpeed": null,
+ "MaxSpansInVolumeCount": 1,
+ "MaxSupportedVolumesCount": 1,
+ "PCISlot": null,
+ "PatrolReadIterationsCount": 0,
+ "PatrolReadMode": null,
+ "PatrolReadRatePercent": null,
+ "PatrolReadState": "Unknown",
+ "PatrolReadUnconfiguredAreaMode": null,
+ "PersistentHotspare": "NotApplicable",
+ "RAIDMode": "None",
+ "RealtimeCapability": "Incapable",
+ "ReconstructRatePercent": null,
+ "RollupStatus": "OK",
+ "SASAddress": "0",
+ "SecurityStatus": "EncryptionNotCapable",
+ "SharedSlotAssignmentAllowed": "NotApplicable",
+ "SlicedVDCapability": "NotSupported",
+ "SpindownIdleTimeSeconds": 0,
+ "SupportControllerBootMode": "NotSupported",
+ "SupportEnhancedAutoForeignImport": "NotSupported",
+ "SupportRAID10UnevenSpans": "NotSupported",
+ "SupportedInitializationTypes": [
+ "Fast"
+ ],
+ "SupportsLKMtoSEKMTransition": "No",
+ "T10PICapability": "NotSupported"
+ }
+ }
+ },
+ "SpeedGbps": 6.0,
+ "Status": {
+ "Health": "OK",
+ "HealthRollup": "OK",
+ "State": "Enabled"
+ },
+ "SupportedControllerProtocols": [
+ "PCIe"
+ ],
+ "SupportedDeviceProtocols": [
+ "SATA"
+ ],
+ "SupportedRAIDTypes": [
+ "RAID1"
+ ]
+ }
+ ]</td>
+ <td>Response facts details for controller.</td>
+ </tr>
+ <tr>
+ <td>cpu</td>
+ <td>[{"Description": "Represents the properties of a Processor attached to this System", "Id": "CPU.Socket.1", "InstructionSet": "x86-64", "Manufacturer": "Intel", "MaxSpeedMHz": 4000, "Model": "Intel(R) Xeon(R) Bronze 3204 CPU @ 1.90GHz", "Name": "CPU 1", "Oem": {"Dell": {"DellAccelerators": null, "DellProcessor": {"CPUFamily": "Intel(R)Xeon(TM)", "CPUStatus": "CPUEnabled", "Cache1Associativity": "8-WaySet-Associative", "Cache1ErrorMethodology": "Parity", "Cache1InstalledSizeKB": 384, "Cache1Level": "L1", "Cache1Location": "Internal", "Cache1PrimaryStatus": "OK", "Cache1SRAMType": "Unknown", "Cache1SizeKB": 384, "Cache1Type": "Unified", "Cache1WritePolicy": "WriteBack", "Cache2Associativity": "16-WaySet-Associative", "Cache2ErrorMethodology": "Single-bitECC", "Cache2InstalledSizeKB": 6144, "Cache2Level": "L2", "Cache2Location": "Internal", "Cache2PrimaryStatus": "OK", "Cache2SRAMType": "Unknown", "Cache2SizeKB": 6144, "Cache2Type": "Unified", "Cache2WritePolicy": "WriteBack", "Cache3Associativity": "FullyAssociative", "Cache3ErrorMethodology": "Single-bitECC", "Cache3InstalledSizeKB": 8448, "Cache3Level": "L3", "Cache3Location": "Internal", "Cache3PrimaryStatus": "OK", "Cache3SRAMType": "Unknown", "Cache3SizeKB": 8448, "Cache3Type": "Unified", "Cache3WritePolicy": "WriteBack", "CurrentClockSpeedMhz": 1900, "ExternalBusClockSpeedMhz": 9600, "HyperThreadingCapable": "No", "HyperThreadingEnabled": "No", "Id": "CPU.Socket.1", "LastSystemInventoryTime": "2019-08-09T13:23:32+00:00", "LastUpdateTime": "2021-09-14T20:31:00+00:00", "Name": "DellProcessor", "TurboModeCapable": "No", "TurboModeEnabled": "No", "VirtualizationTechnologyCapable": "Yes", "VirtualizationTechnologyEnabled": "Yes", "Volts": "1.8"}, "PowerMetrics": null, "ThermalMetrics": null}}, "OperatingSpeedMHz": 1900, "ProcessorArchitecture": "x86", "ProcessorId": {"EffectiveFamily": "6", "EffectiveModel": "85", "IdentificationRegisters": "0x00050657", "MicrocodeInfo": "0x5003302", "Step": "7", "VendorId": "GenuineIntel"}, "ProcessorType": "CPU", "Socket": "CPU.Socket.1", "Status": {"Health": null, "State": "UnavailableOffline"}, "TotalCores": 6, "TotalEnabledCores": 6, "TotalThreads": 6, "TurboState": "Disabled", "Version": "Model 85 Stepping 7"}]</td>
+ <td>Response facts details for cpu.</td>
+ </tr>
+ <tr>
+ <td>enclosure</td>
+ <td>[{"AssetName": null, "Connector": 0, "Id": "Enclosure.Internal.0-0:RAID.Mezzanine.1C-1", "LastSystemInventoryTime": "2019-08-09T13:23:32+00:00", "LastUpdateTime": "2022-09-23T23:44:26+00:00", "Name": "DellEnclosure", "ServiceTag": null, "SlotCount": 6, "TempProbeCount": 0, "Version": "4.35", "WiredOrder": 0}]</td>
+ <td>Response facts details for enclosure.</td>
+ </tr>
+ <tr>
+ <td>enclosure_emm</td>
+ <td>[{"DeviceDescription": "EMM.Slot.0:Enclosure.Modular.4:NonRAID.Mezzanine.1C-1", "FQDD": "EMM.Slot.0:Enclosure.Modular.4:NonRAID.Mezzanine.1C-1", "Id": "EMM.Slot.0:Enclosure.Modular.4:NonRAID.Mezzanine.1C-1", "InstanceID": "EMM.Slot.0:Enclosure.Modular.4:NonRAID.Mezzanine.1C-1", "Name": "DellEnclosureEMM", "PartNumber": null, "PrimaryStatus": "OK", "Revision": "2.40", "State": "Ready"}]</td>
+ <td>Response facts details for enclosure_emm.</td>
+ </tr>
+ <tr>
+ <td>fan</td>
+ <td>[{"Description": "Represents fan properties of the chassis", "HotPluggable": true, "Id": "Fan.Embedded.6A", "Location": {"PartLocation": {"LocationType": "Bay", "ServiceLabel": "System Board Fan6A"}}, "Name": "Fan 6A", "PhysicalContext": "SystemBoard", "SpeedPercent": {"SpeedRPM": 11640}, "Status": {"Health": "OK", "State": "Enabled"}}]</td>
+ <td>Response facts details for fan.</td>
+ </tr>
+ <tr>
+ <td>firmware</td>
+ <td>[{"Description": "Represents Firmware Inventory", "Id": "Previous-108255-22.00.6__NIC.Embedded.2-1-1", "Name": "Broadcom Gigabit Ethernet BCM5720 - AB:CD:EF:GH:IJ:02", "Oem": {"Dell": {"DellSoftwareInventory": {"BuildNumber": 0, "Classifications": ["Firmware"], "ComponentID": "108255", "ComponentType": "FRMW", "Description": "The DellSoftwareInventory resource is a representation of an available device firmware in the managed system.", "DeviceID": "165F", "ElementName": "Broadcom Gigabit Ethernet BCM5720 - AB:CD:EF:GH:IJ:02", "HashValue": "56fa85676e6d570f714fb659f202371f1c570263b680e2d40d16059acfa9e3e6", "Id": "DCIM:PREVIOUS_0x23_701__NIC.Embedded.2-1-1", "IdentityInfoType": ["OrgID:ComponentType:VendorID:DeviceID:SubVendorID:SubDeviceID"], "IdentityInfoValue": ["DCIM:firmware:14E4:165F:1028:08FF"], "InstallationDate": "NA", "IsEntity": true, "MajorVersion": 22, "MinorVersion": 0, "Name": "DellSoftwareInventory", "PLDMCapabilitiesDuringUpdate": "0x00000000", "PLDMFDPCapabilitiesDuringUpdate": "0x00000000", "RevisionNumber": 6, "RevisionString": null, "SidebandUpdateCapable": false, "Status": "AvailableForInstallation", "SubDeviceID": "08FF", "SubVendorID": "1028", "VendorID": "14E4", "impactsTPMmeasurements": true}}}, "ReleaseDate": "2022-01-07T00:00:00Z", "SoftwareId": "108255", "Status": {"Health": "OK", "State": "Enabled"}, "Updateable": true, "Version": "22.00.6"}, {"Description": "Represents Firmware Inventory", "Id": "Previous-159-1.7.5__BIOS.Setup.1-1", "Name": "BIOS", "Oem": {"Dell": {"DellSoftwareInventory": {"BuildNumber": 0, "Classifications": ["BIOS/FCode"], "ComponentID": "159", "ComponentType": "BIOS", "Description": "The DellSoftwareInventory resource is a representation of an available device firmware in the managed system.", "DeviceID": null, "ElementName": "BIOS", "HashValue": "37e196d6b1c25ffc58f1c5c5a80a748932d22ddfbf72eedda05fbe788f57d641", "Id": "DCIM:PREVIOUS_0x23_741__BIOS.Setup.1-1", "IdentityInfoType": ["OrgID:ComponentType:ComponentID"], "IdentityInfoValue": ["DCIM:BIOS:159"], "InstallationDate": "NA", "IsEntity": true, "MajorVersion": 1, "MinorVersion": 7, "Name": "DellSoftwareInventory", "PLDMCapabilitiesDuringUpdate": "0x00000000", "PLDMFDPCapabilitiesDuringUpdate": "0x00000000", "RevisionNumber": 5, "RevisionString": null, "SidebandUpdateCapable": false, "Status": "AvailableForInstallation", "SubDeviceID": null, "SubVendorID": null, "VendorID": null, "impactsTPMmeasurements": true}}}, "ReleaseDate": "2022-09-16T00:00:00Z", "SoftwareId": "159", "Status": {"Health": "OK", "State": "Enabled"}, "Updateable": true, "Version": "1.7.5"}, {"Description": "Represents Firmware Inventory", "Id": "Previous-25227-6.00.02.00__iDRAC.Embedded.1-1", "Name": "Integrated Dell Remote Access Controller", "Oem": {"Dell": {"DellSoftwareInventory": {"BuildNumber": 7, "Classifications": ["Firmware"], "ComponentID": "25227", "ComponentType": "FRMW", "Description": "The DellSoftwareInventory resource is a representation of an available device firmware in the managed system.", "DeviceID": null, "ElementName": "Integrated Dell Remote Access Controller", "HashValue": null, "Id": "DCIM:PREVIOUS_0x23_iDRAC.Embedded.1-1_0x23_IDRACinfo", "IdentityInfoType": ["OrgID:ComponentType:ComponentID"], "IdentityInfoValue": ["DCIM:firmware:25227"], "InstallationDate": "NA", "IsEntity": true, "MajorVersion": 6, "MinorVersion": 0, "Name": "DellSoftwareInventory", "PLDMCapabilitiesDuringUpdate": "0x00000000", "PLDMFDPCapabilitiesDuringUpdate": "0x00000000", "RevisionNumber": 2, "RevisionString": 
null, "SidebandUpdateCapable": false, "Status": "AvailableForInstallation", "SubDeviceID": null, "SubVendorID": null, "VendorID": null, "impactsTPMmeasurements": false}}}, "ReleaseDate": "2022-08-11T00:00:00Z", "SoftwareId": "25227", "Status": {"Health": "OK", "State": "Enabled"}, "Updateable": true, "Version": "6.00.02.00"}]</td>
+ <td>Response facts details for firmware.</td>
+ </tr>
+ <tr>
+ <td>hostnic</td>
+ <td>[{"Description": "Management for Host Interface", "ExternallyAccessible": false, "HostInterfaceType": "NetworkHostInterface", "Id": "Host.1", "InterfaceEnabled": false, "Name": "Managed Host Interface 1"}]</td>
+ <td>Response facts details for hostnic.</td>
+ </tr>
+ <tr>
+ <td>license</td>
+ <td>[{"AuthorizationScope": "Service", "Description": "iDRAC9 x5 Enterprise Evaluation License", "DownloadURI": "/redfish/v1/LicenseService/Licenses/1188PA_girish_narasimhap/DownloadURI", "EntitlementId": "1188PA_girish_narasimhap", "ExpirationDate": "2023-02-23T00:00:00-06:00", "Id": "1188PA_girish_narasimhap", "InstallDate": null, "LicenseInfoURI": "", "LicenseOrigin": "Installed", "LicenseType": "Trial", "Links": {}, "Name": "1188PA_girish_narasimhap", "Removable": true, "Status": {"Health": "Warning", "State": "Enabled"}}]</td>
+ <td>Response facts details for license.</td>
+ </tr>
+ <tr>
+ <td>nic</td>
+ <td>[{"AutoNeg": true, "Description": "Embedded NIC 1 Port 1 Partition 1", "EthernetInterfaceType": "Physical", "FQDN": null, "FullDuplex": true, "HostName": null, "IPv4Addresses": [], "IPv6AddressPolicyTable": [], "IPv6Addresses": [], "IPv6DefaultGateway": null, "IPv6StaticAddresses": [], "Id": "NIC.Embedded.1-1-1", "InterfaceEnabled": true, "LinkStatus": "LinkUp", "Links": {"Chassis": {}}, "MACAddress": "AB:CD:EF:GH:IJ:02", "MTUSize": null, "MaxIPv6StaticAddresses": null, "Name": "System Ethernet Interface", "NameServers": [], "PermanentMACAddress": "AB:CD:EF:GH:IJ:02", "SpeedMbps": 1000, "Status": {"Health": "OK", "State": "Enabled"}, "UefiDevicePath": "PciRoot(0x0)/Pci(0x1C,0x5)/Pci(0x0,0x0)", "VLAN": {}}, {"AutoNeg": false, "Description": "Embedded NIC 1 Port 2 Partition 1", "EthernetInterfaceType": "Physical", "FQDN": null, "FullDuplex": false, "HostName": null, "IPv4Addresses": [], "IPv6AddressPolicyTable": [], "IPv6Addresses": [], "IPv6DefaultGateway": null, "IPv6StaticAddresses": [], "Id": "NIC.Embedded.2-1-1", "InterfaceEnabled": true, "LinkStatus": "LinkDown", "Links": {"Chassis": {}}, "MACAddress": "AB:CD:EF:GH:IJ:02", "MTUSize": null, "MaxIPv6StaticAddresses": null, "Name": "System Ethernet Interface", "NameServers": [], "PermanentMACAddress": "AB:CD:EF:GH:IJ:02", "SpeedMbps": 0, "Status": {"Health": "OK", "State": "Enabled"}, "UefiDevicePath": "PciRoot(0x0)/Pci(0x1C,0x5)/Pci(0x0,0x1)", "VLAN": {}}]</td>
+ <td>Response facts details for nic.</td>
+ </tr>
+ <tr>
+ <td>memory</td>
+ <td>[{"AllowedSpeedsMHz": [3200], "Assembly": {}, "BaseModuleType": "RDIMM", "BusWidthBits": 72, "CacheSizeMiB": 0, "CapacityMiB": 8192, "DataWidthBits": 64, "Description": "DIMM A1", "DeviceLocator": "DIMM A1", "Enabled": true, "ErrorCorrection": "MultiBitECC", "FirmwareRevision": null, "Id": "DIMM.Socket.A1", "Links": {"Chassis": {}, "Oem": {"Dell": {"CPUAffinity": []}}, "Processors": []}, "LogicalSizeMiB": 0, "Manufacturer": "Hynix Semiconductor", "MaxTDPMilliWatts": [], "MemoryDeviceType": "DDR4", "MemorySubsystemControllerManufacturerID": null, "MemorySubsystemControllerProductID": null, "MemoryType": "DRAM", "Metrics": {}, "ModuleManufacturerID": "0xad80", "ModuleProductID": null, "Name": "DIMM A1", "NonVolatileSizeMiB": 0, "Oem": {"Dell": {"DellMemory": {"BankLabel": "A", "Id": "DIMM.Socket.A1", "LastSystemInventoryTime": "2023-01-31T12:00:45+00:00", "LastUpdateTime": "2021-02-11T21:30:07+00:00", "ManufactureDate": "Mon May 04 07:00:00 2020 UTC", "MemoryTechnology": "DRAM", "Model": "DDR4 DIMM", "Name": "DellMemory", "RemainingRatedWriteEndurancePercent": null, "SystemEraseCapability": "NotSupported"}}}, "OperatingMemoryModes": ["Volatile"], "OperatingSpeedMhz": 2666, "PartNumber": "PARTNUM-XN", "RankCount": 1, "SerialNumber": "SERIAL1", "Status": {"Health": "OK", "State": "Enabled"}, "VolatileSizeMiB": 8192}]</td>
+ <td>Response facts details for memory.</td>
+ </tr>
+ <tr>
+ <td>backplane</td>
+ <td>[{"Description": "An instance of DellPCIeSSDBackPlane will have PCIeSSD back plane specific data.", "FirmwareVersion": "3.72", "Id": "Enclosure.Internal.0-2", "Name": "DellPCIeSSDBackPlane", "PCIExpressGeneration": "Gen 4", "SlotCount": 8, "WiredOrder": 2}]</td>
+ <td>Response facts details for backplane.</td>
+ </tr>
+ <tr>
+ <td>power_supply</td>
+ <td>[{"Assembly": {}, "Description": "An instance of PowerSupply", "FirmwareVersion": "00.17.28", "HotPluggable": true, "Id": "PSU.Slot.1", "InputNominalVoltageType": "AC240V", "InputRanges": [{"CapacityWatts": 1400.0, "NominalVoltageType": "AC240V"}], "LineInputStatus": "Normal", "Manufacturer": "DELL", "Metrics": {}, "Model": "PWR SPLY,1400W,RDNT,LTON", "Name": "PS1 Status", "Oem": {"Dell": {"DellPowerSupply": {"ActiveInputVoltage": "Unknown", "IsSwitchingSupply": true, "OperationalStatus": ["OK"], "RequestedState": "NotApplicable"}, "DellPowerSupplyView": {"DetailedState": "Presence Detected", "DeviceDescription": "Power Supply 1", "LastSystemInventoryTime": "2023-01-31T12:00:45+00:00", "LastUpdateTime": "2023-03-09T15:58:41+00:00", "PMBusMonitoring": "Capable", "Range1MaxInputPowerWatts": 1568, "RedMinNumberNeeded": 1, "RedTypeOfSet": ["N+1", "Sparing"], "RedundancyStatus": "Unknown"}}}, "PartNumber": "SPARE2", "PowerCapacityWatts": 1400.0, "PowerSupplyType": "AC", "SerialNumber": "ABCD1", "SparePartNumber": "SPARE1", "Status": {"Health": "OK", "State": "Enabled"}}]</td>
+ <td>Response facts details for power_supply.</td>
+ </tr>
+ <tr>
+ <td>presence_and_status_sensor</td>
+ <td>[{"CurrentState": "Present", "Description": "An instance of DellPresenceAndStatusSensor will have presence and status sensor specific data.", "DeviceID": "iDRAC.Embedded.1#VFLASHSD", "ElementName": "VFLASH SD", "Id": "iDRAC.Embedded.1_0x23_VFLASHSD", "Name": "DellPresenceAndStatusSensor", "SensorType": "Other"}]</td>
+ <td>Response facts details for presence_and_status_sensor.</td>
+ </tr>
+ <tr>
+ <td>sensor_battery</td>
+ <td>{"CurrentState": "Good", "Description": "An instance of DellSensor will represent a sensor, a hardware device that is capable of measuring the characteristics of a physical property.", "ElementName": "System Board CMOS Battery", "EnabledState": "Enabled", "HealthState": "OK", "Id": "iDRAC.Embedded.1_0x23_SystemBoardCMOSBattery", "Links": {"ComputerSystem": {}}, "Name": "DellSensor", "SensorType": "Other"}</td>
+ <td>Response facts details for sensor_battery.</td>
+ </tr>
+ <tr>
+ <td>intrusion_sensor</td>
+ <td>{"PhysicalSecurity": {"IntrusionSensor": "Normal"}}</td>
+ <td>Response facts details for intrusion_sensor.</td>
+ </tr>
+ <tr>
+ <td>virtual_disk</td>
+ <td>[{"@Redfish.Settings": {"SettingsObject": {}, "SupportedApplyTimes": ["Immediate", "OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}, "BlockSizeBytes": 512, "CapacityBytes": 240057409536, "Description": "Disk 0 on Integrated AHCI controller 1", "DisplayName": null, "Encrypted": null, "EncryptionTypes": [], "Id": "Disk.Direct.0-0:AHCI.Integrated.1-1", "Identifiers": [], "MediaSpanCount": null, "Name": "SSD 0", "Operations": [], "OptimumIOSizeBytes": null, "RAIDType": null, "ReadCachePolicy": null, "Status": {"Health": "OK", "HealthRollup": "OK", "State": "Enabled"}, "VolumeType": "RawDevice", "WriteCachePolicy": null}, {"@Redfish.Settings": {"SettingsObject": {}, "SupportedApplyTimes": ["Immediate", "OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]}, "BlockSizeBytes": 512, "CapacityBytes": 240057409536, "Description": "Disk 1 on Integrated AHCI controller 1", "DisplayName": null, "Encrypted": null, "EncryptionTypes": [], "Id": "Disk.Direct.1-1:AHCI.Integrated.1-1", "Identifiers": [], "MediaSpanCount": null, "Name": "SSD 1", "Operations": [], "OptimumIOSizeBytes": null, "RAIDType": null, "ReadCachePolicy": null, "Status": {"Health": "OK", "HealthRollup": "OK", "State": "Enabled"}, "VolumeType": "RawDevice", "WriteCachePolicy": null}]</td>
+ <td>Response facts details for virtual_disk.</td>
+ </tr>
+ <tr>
+ <td>pcie_device</td>
+ <td>
+ {"AssetTag": null, "Description": "Integrated Matrox G200eW3 Graphics Controller", "DeviceType": "SingleFunction", "FirmwareVersion": "", "Id": "3-0", "Manufacturer": "Matrox Electronics Systems Ltd.", "Model": null, "Name": "Integrated Matrox G200eW3 Graphics Controller", "PCIeFunctions": {}, "PartNumber": null, "SKU": null, "SerialNumber": null, "Status": {"Health": "OK", "HealthRollup": "OK", "State": "Enabled"}}, {"AssetTag": null, "Description": "PERC H730P MX", "DeviceType": "SingleFunction", "FirmwareVersion": "25.5.9.0001", "Id": "59-0", "Manufacturer": "Broadcom / LSI", "Model": null, "Name": "PERC H730P MX", "PCIeFunctions": {}, "PartNumber": "PART2", "SKU": null, "SerialNumber": "SERIALN1", "Status": {"Health": "OK", "HealthRollup": "OK", "State": "Enabled"}
+ }</td>
+ <td>Response facts details for pcie_device.</td>
+ </tr>
+ <tr>
+ <td>physical_disk</td>
+ <td>[{"BlockSizeBytes": 512, "CapableSpeedGbs": 6, "CapacityBytes": 240057409536, "Description": "Disk 1 on Integrated AHCI controller 1", "EncryptionAbility": "None", "EncryptionStatus": "Unencrypted", "FailurePredicted": false, "HotspareType": "None", "Id": "Disk.Direct.1-1:AHCI.Integrated.1-1", "Identifiers": [{"DurableName": null, "DurableNameFormat": null}], "Identifiers@odata.count": 1, "Location": [], "LocationIndicatorActive": null, "Manufacturer": "INTEL", "MediaType": "SSD", "Model": "SSDMODEL1", "Name": "SSD 1", "NegotiatedSpeedGbs": 6, "Oem": {"Dell": {"DellPhysicalDisk": {"AvailableSparePercent": null, "Certified": "NotApplicable", "Connector": 0, "CryptographicEraseCapable": "Capable", "Description": "An instance of DellPhysicalDisk will have Physical Disk specific data.", "DeviceProtocol": null, "DeviceSidebandProtocol": null, "DriveFormFactor": "M.2", "EncryptionProtocol": "None", "ErrorDescription": null, "ErrorRecoverable": "NotApplicable", "ForeignKeyIdentifier": null, "FreeSizeInBytes": 240057409536, "Id": "Disk.Direct.1-1:AHCI.Integrated.1-1", "LastSystemInventoryTime": "2023-03-04T05:50:09+00:00", "LastUpdateTime": "2023-02-15T16:32:30+00:00", "ManufacturingDay": 0, "ManufacturingWeek": 0, "ManufacturingYear": 0, "Name": "DellPhysicalDisk", "NonRAIDDiskCachePolicy": "Unknown", "OperationName": "None", "OperationPercentCompletePercent": 0, "PCIeCapableLinkWidth": "None", "PCIeNegotiatedLinkWidth": "None", "PPID": "TW-0919J9-PIHIT-8AB-02K7-A00", "PowerStatus": "On", "PredictiveFailureState": "SmartAlertAbsent", "ProductID": null, "RAIDType": "Unknown", "RaidStatus": "NonRAID", "SASAddress": "Not Applicable", "Slot": 1, "SystemEraseCapability": "CryptographicErasePD", "T10PICapability": "NotSupported", "UsedSizeInBytes": 0, "WWN": "Not Applicable"}}}, "Operations": [], "PartNumber": "TW-0919J9-PIHIT-8AB-02K7-A00", "PhysicalLocation": {"PartLocation": {"LocationOrdinalValue": 1, "LocationType": "Slot"}}, "PredictedMediaLifeLeftPercent": 100, "Protocol": "SATA", "Revision": "N201DL43", "RotationSpeedRPM": null, "SerialNumber": "SERIAL2", "Status": {"Health": "OK", "HealthRollup": "OK", "State": "Enabled"}, "WriteCacheEnabled": false}]</td>
+ <td>Response facts details for physical_disk.</td>
+ </tr>
+ <tr>
+ <td>secure_boot</td>
+ <td>{
+ "Actions": {
+ "#SecureBoot.ResetKeys": {
+ "ResetKeysType@Redfish.AllowableValues": [
+ "ResetAllKeysToDefault",
+ "DeleteAllKeys",
+ "DeletePK",
+ "ResetPK",
+ "ResetKEK",
+ "ResetDB",
+ "ResetDBX"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/SecureBoot/Actions/SecureBoot.ResetKeys"
+ },
+ "Oem": {}
+ },
+ "Description": "UEFI Secure Boot",
+ "Id": "SecureBoot",
+ "Name": "UEFI Secure Boot",
+ "Oem": {
+ "Dell": {
+ "Certificates": {},
+ "FirmwareImageHashes": {}
+ }
+ },
+ "SecureBootCurrentBoot": "Disabled",
+ "SecureBootDatabases": [
+ {
+ "Actions": {
+ "#SecureBootDatabase.ResetKeys": {
+ "ResetKeysType@Redfish.AllowableValues": [
+ "ResetAllKeysToDefault",
+ "DeleteAllKeys"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/SecureBoot/SecureBootDatabases/db/Actions/SecureBootDatabase.ResetKeys"
+ }
+ },
+ "Certificates": [
+ {
+ "CertificateString": null,
+ "CertificateType": "PEM",
+ "CertificateUsageTypes": [
+ "BIOS"
+ ],
+ "Description": "SecureBoot Certificate",
+ "Id": "StdSecbootpolicy.3",
+ "Issuer": {
+ "City": "Redmond",
+ "CommonName": "Microsoft Corporation Third Party Marketplace Root",
+ "Country": "US",
+ "Organization": "Microsoft Corporation",
+ "State": "Washington"
+ },
+ "Name": "SecureBoot Certificate",
+ "SerialNumber": "SERIAL00001",
+ "Subject": {
+ "City": "Redmond",
+ "CommonName": "Microsoft Corporation UEFI CA 2011",
+ "Country": "US",
+ "Organization": "Microsoft Corporation",
+ "State": "Washington"
+ },
+ "ValidNotAfter": "2026-6-27T21:32:45+00:00",
+ "ValidNotBefore": "2011-6-27T21:22:45+00:00"
+ },
+ {
+ "CertificateString": null,
+ "CertificateType": "PEM",
+ "CertificateUsageTypes": [
+ "BIOS"
+ ],
+ "Description": "SecureBoot Certificate",
+ "Id": "StdSecbootpolicy.4",
+ "Issuer": {
+ "City": "Redmond",
+ "CommonName": "Microsoft Root Certificate Authority 2010",
+ "Country": "US",
+ "Organization": "Microsoft Corporation",
+ "State": "Washington"
+ },
+ "Name": "SecureBoot Certificate",
+ "SerialNumber": "SERIAL000002",
+ "Subject": {
+ "City": "Redmond",
+ "CommonName": "Microsoft Windows Production PCA 2011",
+ "Country": "US",
+ "Organization": "Microsoft Corporation",
+ "State": "Washington"
+ },
+ "ValidNotAfter": "2026-10-19T18:51:42+00:00",
+ "ValidNotBefore": "2011-10-19T18:41:42+00:00"
+ },
+ {
+ "CertificateString": null,
+ "CertificateType": "PEM",
+ "CertificateUsageTypes": [
+ "BIOS"
+ ],
+ "Description": "SecureBoot Certificate",
+ "Id": "StdSecbootpolicy.5",
+ "Issuer": {
+ "City": "Palo Alto",
+ "Country": "US",
+ "Organization": "VMware, Inc.",
+ "State": "California"
+ },
+ "Name": "SecureBoot Certificate",
+ "SerialNumber": "SERIAL3",
+ "Subject": {
+ "City": "Palo Alto",
+ "Country": "US",
+ "Organization": "VMware, Inc.",
+ "State": "California"
+ },
+ "ValidNotAfter": "2019-12-31T17:16:05+00:00",
+ "ValidNotBefore": "2008-10-16T17:16:05+00:00"
+ },
+ {
+ "CertificateString": null,
+ "CertificateType": "PEM",
+ "CertificateUsageTypes": [
+ "BIOS"
+ ],
+ "Description": "SecureBoot Certificate",
+ "Id": "StdSecbootpolicy.6",
+ "Issuer": {
+ "City": "Palo Alto",
+ "CommonName": "VMware Secure Boot Signing",
+ "Country": "US",
+ "Organization": "VMware, Inc.",
+ "State": "California"
+ },
+ "Name": "SecureBoot Certificate",
+ "SerialNumber": "SERIAL00005",
+ "Subject": {
+ "City": "Palo Alto",
+ "CommonName": "VMware Secure Boot Signing",
+ "Country": "US",
+ "Organization": "VMware, Inc.",
+ "State": "California"
+ },
+ "ValidNotAfter": "2037-10-19T06:47:59+00:00",
+ "ValidNotBefore": "2017-10-24T06:47:59+00:00"
+ }
+ ],
+ "DatabaseId": "db",
+ "Description": "SecureBootDatabase",
+ "Id": "db",
+ "Name": "SecureBootDatabase",
+ "Signatures": {}
+ },
+ {
+ "Actions": {
+ "#SecureBootDatabase.ResetKeys": {
+ "ResetKeysType@Redfish.AllowableValues": [
+ "ResetAllKeysToDefault",
+ "DeleteAllKeys"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/SecureBoot/SecureBootDatabases/dbx/Actions/SecureBootDatabase.ResetKeys"
+ }
+ },
+ "Certificates": [],
+ "DatabaseId": "dbx",
+ "Description": "SecureBootDatabase",
+ "Id": "dbx",
+ "Name": "SecureBootDatabase",
+ "Signatures": {}
+ },
+ {
+ "Actions": {
+ "#SecureBootDatabase.ResetKeys": {
+ "ResetKeysType@Redfish.AllowableValues": [
+ "ResetAllKeysToDefault",
+ "DeleteAllKeys"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/SecureBoot/SecureBootDatabases/KEK/Actions/SecureBootDatabase.ResetKeys"
+ }
+ },
+ "Certificates": [
+ {
+ "CertificateString": null,
+ "CertificateType": "PEM",
+ "CertificateUsageTypes": [
+ "BIOS"
+ ],
+ "Description": "SecureBoot Certificate",
+ "Id": "StdSecbootpolicy.2",
+ "Issuer": {
+ "City": "Redmond",
+ "CommonName": "Microsoft Corporation Third Party Marketplace Root",
+ "Country": "US",
+ "Organization": "Microsoft Corporation",
+ "State": "Washington"
+ },
+ "Name": "SecureBoot Certificate",
+ "SerialNumber": "SERIAL000005",
+ "Subject": {
+ "City": "Redmond",
+ "CommonName": "Microsoft Corporation KEK CA 2011",
+ "Country": "US",
+ "Organization": "Microsoft Corporation",
+ "State": "Washington"
+ },
+ "ValidNotAfter": "2022-6-24T20:51:29+00:00",
+ "ValidNotBefore": "2011-6-24T20:41:29+00:00"
+ }
+ ],
+ "DatabaseId": "KEK",
+ "Description": "SecureBootDatabase",
+ "Id": "KEK",
+ "Name": "SecureBootDatabase",
+ "Signatures": {}
+ },
+ {
+ "Actions": {
+ "#SecureBootDatabase.ResetKeys": {
+ "ResetKeysType@Redfish.AllowableValues": [
+ "ResetAllKeysToDefault",
+ "DeleteAllKeys"
+ ],
+ "target": "/redfish/v1/Systems/System.Embedded.1/SecureBoot/SecureBootDatabases/PK/Actions/SecureBootDatabase.ResetKeys"
+ }
+ },
+ "Certificates": [
+ {
+ "CertificateString": null,
+ "CertificateType": "PEM",
+ "CertificateUsageTypes": [
+ "BIOS"
+ ],
+ "Description": "SecureBoot Certificate",
+ "Id": "StdSecbootpolicy.1",
+ "Issuer": {
+ "City": "Round Rock",
+ "CommonName": "Dell Inc. Platform Key",
+ "Country": "US",
+ "Organization": "Dell Inc.",
+ "State": "Texas"
+ },
+ "Name": "SecureBoot Certificate",
+ "SerialNumber": "12345ABCD",
+ "Subject": {
+ "City": "Round Rock",
+ "CommonName": "Dell Inc. Platform Key",
+ "Country": "US",
+ "Organization": "Dell Inc.",
+ "State": "Texas"
+ },
+ "ValidNotAfter": "2021-2-2T17:27:36+00:00",
+ "ValidNotBefore": "2016-2-2T17:17:37+00:00"
+ }
+ ],
+ "DatabaseId": "PK",
+ "Description": "SecureBootDatabase",
+ "Id": "PK",
+ "Name": "SecureBootDatabase",
+ "Signatures": {}
+ }
+ ],
+ "SecureBootEnable": false,
+ "SecureBootMode": "DeployedMode"
+ }</td>
+ <td>Response facts details for Secure Boot.</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+-----
+```
+- name: iDRAC gather facts for System, BIOS, Controller, CPU, Enclosure.
+  ansible.builtin.import_role:
+    name: idrac_gather_facts
+  vars:
+    hostname: "192.1.2.1"
+    username: "username"
+    password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+    target:
+      - System
+      - BIOS
+      - Controller
+      - CPU
+      - Enclosure
+
+- name: Print the System details
+  ansible.builtin.debug:
+    var: system
+
+```
+```
+# Get specific controllers
+- name: Get all controllers
+  ansible.builtin.import_role:
+    name: idrac_gather_facts
+  vars:
+    hostname: "192.1.2.1"
+    username: "username"
+    password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+    target:
+      - Controller
+
+- name: Fetch BOSS controllers
+  ansible.builtin.debug:
+    msg: "{{ controller | selectattr('Model', 'contains', 'BOSS') | list }}"
+
+- name: Fetch controller with specific id
+  ansible.builtin.debug:
+    msg: "{{ controller | selectattr('Id', 'equalto', 'AHCI.Integrated.1-1') | list }}"
+
+```
+```
+- name: iDRAC gather facts for EnclosureEMM, Fan, Firmware, HostNIC, License.
+  ansible.builtin.import_role:
+    name: idrac_gather_facts
+  vars:
+    hostname: "192.1.2.1"
+    username: "username"
+    password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+    target:
+      - EnclosureEMM
+      - Fan
+      - Firmware
+      - HostNIC
+      - License
+
+- name: Print the firmware details
+  ansible.builtin.debug:
+    var: firmware
+
+```
+```
+- name: iDRAC gather facts for Memory, NIC, PCIeSSDBackPlane, PowerSupply, PresenceAndStatusSensor, SecureBoot.
+  ansible.builtin.import_role:
+    name: idrac_gather_facts
+  vars:
+    hostname: "192.1.2.1"
+    username: "username"
+    password: "password"
+    ca_path: "/path/to/ca_cert.pem"
+    target:
+      - Memory
+      - NIC
+      - PCIeSSDBackPlane
+      - PowerSupply
+      - PresenceAndStatusSensor
+      - SecureBoot
+
+- name: Print the secure boot details
+  ansible.builtin.debug:
+    var: secure_boot
+```
+```
+- name: iDRAC gather facts for Sensors_Battery, Sensors_Intrusion, Sensors_Voltage, VirtualDisk, PCIeDevice, PhysicalDisk, SystemMetrics using environment variables IDRAC_USERNAME and IDRAC_PASSWORD.
+  ansible.builtin.import_role:
+    name: idrac_gather_facts
+  vars:
+    hostname: "192.1.2.1"
+    # IDRAC_USERNAME and IDRAC_PASSWORD set in env
+    ca_path: "/path/to/ca_cert.pem"
+    target:
+      - Sensors_Battery
+      - Sensors_Intrusion
+      - Sensors_Voltage
+      - VirtualDisk
+      - PCIeDevice
+      - PhysicalDisk
+      - SystemMetrics
+
+- name: Print the system metrics
+  ansible.builtin.debug:
+    var: system_metrics
+```
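+
+The registered fact variables are regular Ansible facts, so they can also be persisted for later inspection; a minimal sketch (the destination path is illustrative):
+```
+- name: Save the gathered system facts to a file
+  ansible.builtin.copy:
+    content: "{{ system | to_nice_json }}"
+    dest: /tmp/idrac_system_facts.json
+    mode: "0644"
+```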
+## Author Information
+------------------
+
+Dell Technologies <br>
+Felix Stephen A (felix.s@dell.com) <br>
+Jagadeesh N V (jagadeesh.n.v@dell.com)
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/defaults/main.yml
new file mode 100644
index 000000000..0ff68d419
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+https_port: 443
+validate_certs: true
+https_timeout: 30
+target:
+ - System
+computer_system_id: ""
+manager_id: ""
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/handlers/main.yml
new file mode 100644
index 000000000..033de9080
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_gather_facts
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/argument_specs.yml
new file mode 100644
index 000000000..de3515f26
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/argument_specs.yml
@@ -0,0 +1,110 @@
+---
+argument_specs:
+ main:
+ version_added: 7.4.0
+ short_description: Role to get the facts from the iDRAC Server
+ description:
+ - Role to fetch the server facts about the different components available in
+ PowerEdge servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where
+ self-signed certificates are used.
+ - Prior to collection version 5.0.0, I(validate_certs) is C(False) by
+ default.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate
+ to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ computer_system_id:
+ description: Computer system id
+ type: str
+ manager_id:
+ description: Manager/BMC id
+ type: str
+ target:
+ description:
+ - Target component for which information needs to be gathered.
+ - C(BIOS) lists the BIOS information.
+ - C(Chassis) lists the chassis.
+ - C(Controller) lists the available controllers for iDRAC.
+ - C(CPU) lists the system processors.
+ - C(Enclosure) lists the enclosures.
+ - C(EnclosureEMM) lists the enclosure management module specific data.
+ - C(Fan) lists the fans.
+ - C(Firmware) lists the firmware inventories.
+ - C(HostNIC) lists the host NIC.
+ - C(IDRAC) lists the attributes for iDRAC.
+ - C(License) lists the license information.
+ - C(Manager) lists the manager resources.
+ - C(Memory) lists the memory device specific data.
+ - C(NetworkAdapter) lists the network adapters.
+ - C(NetworkPort) lists the network ports.
+ - C(NetworkDeviceFunction) lists the network device functions.
+ - C(NIC) lists NIC device specific data.
+ - C(PCIeSSDBackPlane) lists PCIeSSD back plane specific data.
+ - C(PowerSupply) lists data specific to the Power Supply devices in
+ the managed system.
+ - C(PresenceAndStatusSensor) lists the presence and status sensor
+ specific data.
+ - C(PCIeDevice) lists the PCIeDevices.
+ - C(PhysicalDisk) lists the physical disks.
+ - C(Sensors_Battery) lists the sensors battery information.
+ - C(Sensors_Intrusion) lists the sensors intrusion information.
+ - C(Sensors_Voltage) lists the sensors voltage information.
+ - C(System) lists the ComputerSystem resources for iDRAC.
+ - C(SystemMetrics) lists the system metrics.
+ - C(ThermalSubSystem) lists the thermal sub system.
+ - C(VirtualDisk) lists the virtual disks.
+ - C(VirtualMedia) lists the virtual media.
+ - C(SecureBoot) lists the secure boot specific data.
+ type: list
+ choices:
+ - IDRAC
+ - System
+ - BIOS
+ - Controller
+ - CPU
+ - Enclosure
+ - EnclosureEMM
+ - Fan
+ - Firmware
+ - HostNIC
+ - License
+ - Memory
+ - NIC
+ - PCIeSSDBackPlane
+ - PowerSupply
+ - PresenceAndStatusSensor
+ - Sensors_Battery
+ - Sensors_Intrusion
+ - Sensors_Voltage
+ - VirtualDisk
+ - PCIeDevice
+ - PhysicalDisk
+ - SystemMetrics
+ - SecureBoot
+ default: System
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/main.yml
new file mode 100644
index 000000000..f1d10da13
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/meta/main.yml
@@ -0,0 +1,22 @@
+galaxy_info:
+ author: |
+ "Felix Stephen
+ Jagadeesh N V"
+ description: Role to gather facts
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.13"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/converge.yml
new file mode 100644
index 000000000..adb6fcf5f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/converge.yml
@@ -0,0 +1,44 @@
+---
+- name: Converge idrac_gather_facts for PCIeSSDBackPlane
+ hosts: all
+ connection: local
+ gather_facts: true
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - PCIeSSDBackPlane
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the PCIeSSDBackPlane component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert backplane dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ backplane | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/backplane_assert.yml
+ with_items: "{{ backplane }}"
+ loop_control:
+ loop_var: backplane_data
+ when: backplane | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/backplane/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/converge.yml
new file mode 100644
index 000000000..491d49d42
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/converge.yml
@@ -0,0 +1,81 @@
+---
+- name: Converge idrac gather facts for bios
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - BIOS
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the BIOS component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert bios dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ bios | length > 0 }}"
+
+ - name: Fetching BIOS info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Bios"
+ validate_certs: false
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: bios_result
+ no_log: true
+
+ - name: Response filter
+ ansible.builtin.set_fact:
+ api_response:
+ "{{ bios_result.json | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.type', '@odata.id', 'SettingsObject', 'Actions', 'AttributeRegistry', 'Description',
+ 'Id', 'Links', 'Name']) }}"
+ vars:
+ jquery: "Oem.Dell.DellSystem"
+
+ - name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ bios.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+ - name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: bios[item]}) }}"
+ loop: "{{ bios.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - bios[item] != api_response[item]
+ - item not in exclude_keys
+
+ - name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/bios/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/converge.yml
new file mode 100644
index 000000000..e7059f6a7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/converge.yml
@@ -0,0 +1,40 @@
+---
+- name: Converge idrac_gather_facts for controller
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Controller
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: ["Description"]
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Controller component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert controller dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ controller | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/controller_assert.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/controller/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/converge.yml
new file mode 100644
index 000000000..8f75cd73f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/converge.yml
@@ -0,0 +1,45 @@
+---
+- name: Converge idrac_gather_facts for CPU
+ hosts: all
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: "100.96.25.90"
+ username: "root"
+ password: "Dell_123$"
+ validate_certs: false
+ target:
+ - CPU
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the CPU component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert cpu dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ cpu | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/cpu_assert.yml
+ with_items: "{{ cpu }}"
+ loop_control:
+ loop_var: cpu_data
+ when: cpu | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/cpu/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/converge.yml
new file mode 100644
index 000000000..3d3f3ed1d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/converge.yml
@@ -0,0 +1,95 @@
+---
+- name: Converge idrac_gather_facts
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - System
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: ["ServerOS.1.ServerPoweredOnTime"]
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the System component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert system dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ system | length > 0 }}"
+
+ - name: Fetching System info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}"
+ validate_certs: false
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: system_result
+ no_log: true
+
+ - name: Fetching operating system info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/Managers/System.Embedded.1/Attributes?$select=ServerOS.*" # verification firmware version 5.00.00
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: os_result
+ no_log: true
+
+ - name: Response filter
+ ansible.builtin.set_fact:
+ api_response:
+ "{{ system_result.json | json_query(jquery) | combine(os_result.json.Attributes) |
+ ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
+ vars:
+ jquery: "Oem.Dell.DellSystem"
+
+ - name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ system.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+ - name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: system[item]}) }}"
+ loop: "{{ system.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - system[item] != api_response[item]
+ - item not in exclude_keys
+
+ - name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/default/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/converge.yml
new file mode 100644
index 000000000..f83d84ac7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/converge.yml
@@ -0,0 +1,44 @@
+---
+- name: Converge idrac_gather_facts for Enclosure
+ hosts: all
+ connection: local
+ gather_facts: true
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Enclosure
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the Enclosure component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert enclosure dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ enclosure | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/enclosure_assert.yml
+ with_items: "{{ enclosure }}"
+ loop_control:
+ loop_var: enclosure_data
+ when: enclosure | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosure/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/converge.yml
new file mode 100644
index 000000000..9bddda5a7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/converge.yml
@@ -0,0 +1,38 @@
+---
+- name: Converge idrac_gather_facts for EnclosureEMM
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - EnclosureEMM
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the EnclosureEMM component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/enclosureemm_assert.yml
+ with_items: "{{ enclosure_emm }}"
+ loop_control:
+ loop_var: enclosureemm_data
+ when: enclosure_emm | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/enclosureemm/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/converge.yml
new file mode 100644
index 000000000..bdd47a873
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/converge.yml
@@ -0,0 +1,38 @@
+---
+- name: Converge idrac_gather_facts for Fan
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Fan
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: ["SpeedPercent"]
+
+ tasks:
+ - name: Gather Facts for the Fan component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/fan_assert.yml
+ with_items: "{{ fan }}"
+ loop_control:
+ loop_var: fan_data
+ when: fan | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/fan/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/converge.yml
new file mode 100644
index 000000000..88047ce5c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/converge.yml
@@ -0,0 +1,44 @@
+---
+- name: Converge idrac_gather_facts for Firmware
+ hosts: all
+ connection: local
+ gather_facts: true
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Firmware
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the Firmware component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert firmware dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ firmware | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/firmware_assert.yml
+ with_items: "{{ firmware }}"
+ loop_control:
+ loop_var: firmware_data
+ when: firmware | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/firmware/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/converge.yml
new file mode 100644
index 000000000..1ab1f4911
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge idrac_gather_facts for HostNIC
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - HostNIC
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the HostNIC component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert hostnic dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ hostnic | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/hostnic_assert.yml
+ with_items: "{{ hostnic }}"
+ loop_control:
+ loop_var: hostnic_data
+ when: hostnic | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/hostnic/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/converge.yml
new file mode 100644
index 000000000..2b8788274
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/converge.yml
@@ -0,0 +1,93 @@
+---
+- name: Converge idrac_gather_facts for iDRAC
+ hosts: all
+ connection: local
+ gather_facts: true
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - IDRAC
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: ["ServerOS.1.ServerPoweredOnTime", "SystemInfo.1.SysTime"]
+
+ tasks:
+ - name: Gather Facts for the iDRAC component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Get System information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_manager }}/Oem/Dell/DellAttributes/System.Embedded.1"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_sys_attr
+ no_log: true
+
+ - name: Get Manager information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_manager }}/Oem/Dell/DellAttributes/iDRAC.Embedded.1"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_mgr_attr
+ no_log: true
+
+ - name: Get Lifecycle controller information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_manager }}/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_lc_attr
+ no_log: true
+
+ - name: Set System, Manager, Lifecycle controller facts
+ ansible.builtin.set_fact:
+ api_idrac:
+ api_system_attributes: "{{ response_sys_attr.json.Attributes }}"
+ api_manager_attributes: "{{ response_mgr_attr.json.Attributes }}"
+ api_lifecycle_controller_attributes: "{{ response_lc_attr.json.Attributes }}"
+
+ - name: Call assertion For System Attributes
+ ansible.builtin.include_tasks: ../../tests/asserts/system_assert.yml
+
+ - name: Call assertion For LC Attributes
+ ansible.builtin.include_tasks: ../../tests/asserts/lc_assert.yml
+
+ - name: Call assertion For Manager Attributes
+ ansible.builtin.include_tasks: ../../tests/asserts/manager_assert.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/idrac/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/converge.yml
new file mode 100644
index 000000000..b1fe0419b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge idrac_gather_facts for License
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - License
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the License component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert license dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ license | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/license_assert.yml
+ with_items: "{{ license }}"
+ loop_control:
+ loop_var: license_data
+ when: license | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/license/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/converge.yml
new file mode 100644
index 000000000..5a3909481
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/converge.yml
@@ -0,0 +1,44 @@
+---
+- name: Converge idrac_gather_facts for Memory
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Memory
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Memory component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert memory dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ memory | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/memory_assert.yml
+ with_items: "{{ memory }}"
+ loop_control:
+ loop_var: memory_data
+ when: memory | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/memory/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/converge.yml
new file mode 100644
index 000000000..b191098a8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/converge.yml
@@ -0,0 +1,92 @@
+---
+- name: Converge for negative scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: To check for wrong hostname
+ ansible.builtin.import_role:
+ name: idrac_gather_facts
+ vars:
+ hostname: "randomHostname"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_gather_facts_error
+
+ - name: Asserting after performing operation with invalid hostname
+ ansible.builtin.assert:
+ that:
+ - idrac_gather_facts_connection.status == -1
+
+ - name: To check for wrong username
+ ansible.builtin.import_role:
+ name: idrac_gather_facts
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "randomUsername"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target: ["Bios"]
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_gather_facts_error
+
+ - name: Asserting after performing operation with invalid username
+ ansible.builtin.assert:
+ that:
+ - idrac_gather_facts_connection.status == 401
+
+ - name: To check for wrong password
+ ansible.builtin.import_role:
+ name: idrac_gather_facts
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "randomPassword"
+ validate_certs: false
+ target: ["Bios"]
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_gather_facts_error
+
+ - name: Asserting after performing operation with invalid password
+ ansible.builtin.assert:
+ that:
+ - idrac_gather_facts_connection.status == -1
+
+ - name: To check for wrong system id
+ ansible.builtin.import_role:
+ name: idrac_gather_facts
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ computer_system_id: "randomSystemID"
+ ignore_errors: true
+ register: idrac_gather_facts_error
+
+ - name: Asserting after performing operation with invalid system id
+ ansible.builtin.assert:
+ that:
+ - "{{ computer_system_id not in system_ids }}"
+
+ - name: To check for wrong manager id
+ ansible.builtin.import_role:
+ name: idrac_gather_facts
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ manager_id: "randomManagerID"
+ target: ["Firmware"]
+ ignore_errors: true
+ register: idrac_gather_facts_error
+
+ - name: Asserting after performing operation with invalid manager id
+ ansible.builtin.assert:
+ that:
+ - "{{ manager_id not in manager_ids }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/negative/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/converge.yml
new file mode 100644
index 000000000..70d00f200
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/converge.yml
@@ -0,0 +1,44 @@
+---
+- name: Converge idrac_gather_facts for NIC
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - NIC
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the NIC component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert nic dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ nic | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/nic_assert.yml
+ with_items: "{{ nic }}"
+ loop_control:
+ loop_var: nic_data
+ when: nic | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/nic/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/converge.yml
new file mode 100644
index 000000000..93de081d3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge idrac_gather_facts for Presence and Status Sensor
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - PresenceAndStatusSensor
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Presence and Status Sensor
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/passensor_assert.yml
+ with_items: "{{ presence_and_status_sensor }}"
+ loop_control:
+ loop_var: passensor_data
+ when: presence_and_status_sensor | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/passensor/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/converge.yml
new file mode 100644
index 000000000..b87459d13
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge idrac_gather_facts for PCIeDevice component
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - PCIeDevice
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the PCIeDevice
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert pcie device dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ pcie_device | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/pciedevice_assert.yml
+ with_items: "{{ pcie_device }}"
+ loop_control:
+ loop_var: pci_data
+ when: pcie_device | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/pciedevice/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/converge.yml
new file mode 100644
index 000000000..a5b66a7f9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/converge.yml
@@ -0,0 +1,44 @@
+---
+- name: Converge idrac_gather_facts for Physical Disk
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - PhysicalDisk
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Physical Disk component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert physical disk dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ physical_disk | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/physicaldisk_assert.yml
+ with_items: "{{ physical_disk }}"
+ loop_control:
+ loop_var: pd_data
+ when: physical_disk | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/physicaldisk/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/converge.yml
new file mode 100644
index 000000000..1fdb5a278
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge idrac_gather_facts for Power Supply
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - PowerSupply
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the Power Supply component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert power supply dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ power_supply | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/powersupply_assert.yml
+ with_items: "{{ power_supply }}"
+ loop_control:
+ loop_var: powersupply_data
+ when: power_supply | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/powersupply/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/converge.yml
new file mode 100644
index 000000000..e4585165e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/converge.yml
@@ -0,0 +1,40 @@
+---
+- name: Converge idrac_gather_facts for SecureBoot
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - SecureBoot
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Secureboot component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert secure boot dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ secure_boot | length > 0 }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/secureboot_assert.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/secureboot/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/converge.yml
new file mode 100644
index 000000000..feee7473e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/converge.yml
@@ -0,0 +1,80 @@
+---
+- name: Converge idrac_gather_facts for Sensors Battery
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Sensors_Battery
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Sensors Battery component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert sensor battery dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ sensor_battery | length > 0 }}"
+
+ - name: Fetching Sensor Battery info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Oem/Dell/DellSensors/iDRAC.Embedded.1_0x23_SystemBoardCMOSBattery"
+ validate_certs: false
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ no_log: true
+ register: battery_result
+
+ - name: Response filter
+ ansible.builtin.set_fact:
+ api_response:
+ "{{ battery_result.json | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type']) }}"
+ vars:
+ jquery: "Oem.Dell.DellSystem"
+
+ - name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ sensor_battery.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+ - name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: sensor_battery[item]}) }}"
+ loop: "{{ sensor_battery.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - sensor_battery[item] != api_response[item]
+ - item not in exclude_keys
+
+ - name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsbattery/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/converge.yml
new file mode 100644
index 000000000..274319cff
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/converge.yml
@@ -0,0 +1,79 @@
+---
+- name: Converge idrac_gather_facts for Sensors Intrusion
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Sensors_Intrusion
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the Sensors Intrusion component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert intrusion sensor dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ intrusion_sensor | length > 0 }}"
+
+ - name: Fetching Intrusion Sensor info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}?$select=PhysicalSecurity/IntrusionSensor"
+ validate_certs: false
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ no_log: true
+ register: sensorintrusion_result
+
+ - name: Response filter
+ ansible.builtin.set_fact:
+ api_response:
+ "{{ sensorintrusion_result.json | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type']) }}"
+ vars:
+ jquery: "Oem.Dell.DellSystem"
+
+ - name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ intrusion_sensor.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+ - name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: intrusion_sensor[item]}) }}"
+ loop: "{{ intrusion_sensor.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - intrusion_sensor[item] != api_response[item]
+ - item not in exclude_keys
+
+ - name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsintrusion/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/converge.yml
new file mode 100644
index 000000000..16435ef11
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/converge.yml
@@ -0,0 +1,65 @@
+---
+- name: Converge idrac_gather_facts for Voltage Sensors
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - Sensors_Voltage
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the voltage sensor component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert voltages dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ voltages | length > 0 }}"
+
+ - name: Get Sensor Voltage information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}/Power#/Voltages"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: voltage_result
+ no_log: true
+
+ - name: Set Sensor Voltage facts
+ ansible.builtin.set_fact:
+ "api_response":
+ "{{ voltage_result.json.Voltages | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type']) }}"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/sensorsvoltage_assert.yml
+ with_items: "{{ voltages }}"
+ loop_control:
+ loop_var: sensorsvoltage_data
+ index_var: index
+ when: voltages | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/sensorsvoltage/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/converge.yml
new file mode 100644
index 000000000..acd31a108
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/converge.yml
@@ -0,0 +1,105 @@
+---
+- name: Converge idrac_gather_facts for System Metrics
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - SystemMetrics
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+
+ tasks:
+ - name: Gather Facts for the System Metrics component
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Assert power metrics dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ power_metrics | length > 0 }}"
+
+ - name: Assert thermal metrics dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ thermal_metrics | length > 0 }}"
+
+ - name: Assert memory metrics dict for length
+ ansible.builtin.assert:
+ that:
+ - "{{ memory_metrics | length > 0 }}"
+
+ - name: Get Thermal Metrics information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}/ThermalSubsystem/ThermalMetrics"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_thermal_metrics
+ no_log: true
+
+ - name: Set Thermal Metrics facts
+ ansible.builtin.set_fact:
+ api_thermal_metrics: "{{ response_thermal_metrics.json |
+ ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'DataSourceUri', 'TemperatureReadingsCelsius@odata.count']) }}"
+
+ - name: Call assertion for thermal metrics
+ ansible.builtin.include_tasks: ../../tests/asserts/tmetrics_assert.yml
+
+ - name: Call assertion for memory metrics
+ ansible.builtin.include_tasks: ../../tests/asserts/mmetrics_assert.yml
+ with_items: "{{ memory_metrics }}"
+ loop_control:
+ loop_var: memory_data
+ when: memory_metrics | length > 0
+
+ - name: Get Power Supply information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}/PowerSubsystem/PowerSupplies?$expand=*($levels=1)"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_power_supply
+ no_log: true
+
+ - name: Set query
+ ansible.builtin.set_fact:
+ jq: "[*].Id"
+
+ - name: Get Power Supply Metrics ids
+ ansible.builtin.set_fact:
+ psu_ids: "{{ power_result.json.Members | json_query(jq) }}"
+
+ - name: Call assertion for Power metrics
+ ansible.builtin.include_tasks: ../../tests/asserts/psmetrics_assert.yml
+ with_items: "{{ power_metrics }}"
+ loop_control:
+ loop_var: power_data
+ index_var: index
+ when: power_metrics | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/systemmetrics/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/converge.yml
new file mode 100644
index 000000000..27fd2b829
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge idrac_gather_facts for Virtual Disk
+ hosts: all
+ gather_facts: false
+ vars:
+ hostname: "{{ lookup('env', 'hostname') }}"
+ username: "{{ lookup('env', 'username') }}"
+ password: "{{ lookup('env', 'password') }}"
+ validate_certs: false
+ target:
+ - VirtualDisk
+ idrac_gather_facts_uri_method: "GET"
+ idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ idrac_gather_facts_uri_body_format: "json"
+ idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ idrac_gather_facts_uri_return_content: true
+ diff_data: {}
+ exclude_keys: []
+ api_system: "/redfish/v1/Systems/System.Embedded.1"
+
+ tasks:
+ - name: Gather Facts for the Virtual Disk
+ ansible.builtin.include_role:
+ name: "idrac_gather_facts"
+
+ - name: Call assertion
+ ansible.builtin.include_tasks: ../../tests/asserts/virtualdisk_assert.yml
+ with_items: "{{ virtual_disk }}"
+ loop_control:
+ loop_var: virtualdisk_data
+ when: virtual_disk | length > 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/molecule/virtualdisk/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_attributes_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_attributes_info.yml
new file mode 100644
index 000000000..498acd48a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_attributes_info.yml
@@ -0,0 +1,28 @@
+---
+- name: Get System information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_manager
+ }}/Oem/Dell/DellAttributes/{{ computer_system_id }}"
+ register: sys_attr
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Manager information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_manager
+ }}/Oem/Dell/DellAttributes/iDRAC.Embedded.1"
+ register: mgr_attr
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Lifecycle controller information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_manager
+ }}/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"
+ register: lc_attr
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set System, Manager, Lifecycle controller facts
+ ansible.builtin.set_fact:
+ idrac:
+ system_attributes: "{{ sys_attr.json.Attributes }}"
+ manager_attributes: "{{ mgr_attr.json.Attributes }}"
+ lifecycle_controller_attributes: "{{ lc_attr.json.Attributes }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_backplane_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_backplane_info.yml
new file mode 100644
index 000000000..165eeca81
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_backplane_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get PCIeSSDBackPlane information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Chassis/Oem/Dell/DellPCIeSSDBackPlanes"
+ register: pcie_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set PCIeSSDBackPlane facts
+ ansible.builtin.set_fact:
+ backplane: "{{ pcie_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_battery_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_battery_info.yml
new file mode 100644
index 000000000..2c023a077
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_battery_info.yml
@@ -0,0 +1,12 @@
+---
+- name: Get Sensor Battery information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Oem/Dell/DellSensors/iDRAC.Embedded.1_0x23_SystemBoardCMOSBattery"
+ register: battery_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Sensor Battery facts
+ ansible.builtin.set_fact:
+ "sensor_battery":
+ "{{ battery_result.json | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_bios_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_bios_info.yml
new file mode 100644
index 000000000..bca717beb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_bios_info.yml
@@ -0,0 +1,13 @@
+---
+- name: Get BIOS information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Bios"
+ register: bios_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set BIOS facts
+ ansible.builtin.set_fact:
+ bios:
+ "{{ bios_result.json | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.type', '@odata.id', 'SettingsObject', 'Actions', 'AttributeRegistry', 'Description',
+ 'Id', 'Links', 'Name']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_controller_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_controller_info.yml
new file mode 100644
index 000000000..8933343a3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_controller_info.yml
@@ -0,0 +1,38 @@
+---
+- name: Get Storage information.
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}{{ api_system
+ }}/Storage/?$expand=*($levels=1)
+ register: disk_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get all storage controllers.
+ ansible.builtin.set_fact:
+ controllers_list: "{{ disk_result.json.Members | selectattr('Controllers',
+ 'defined') | map(attribute='Controllers') }}"
+
+- name: Select list of values from dictionary
+ ansible.builtin.set_fact:
+ controller_list_temp: "{{ (controller_list_temp | default([])) +
+ [controller_item['@odata.id']] }}"
+ loop: "{{ controllers_list }}"
+ loop_control:
+ loop_var: controller_item
+
+- name: Get Controllers information.
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}{{ each_controller
+ }}?$expand=*($levels=1)
+ loop: "{{ controller_list_temp }}"
+ loop_control:
+ loop_var: each_controller
+ register: result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set All Controllers facts
+ ansible.builtin.set_fact:
+ controller: "{{ result.results | selectattr('json', 'defined') |
+ map(attribute='json') | selectattr('Members', 'defined') |
+ map(attribute='Members') | flatten |
+ ansible.utils.remove_keys(target=['^.*@odata.*$'],
+ matching_parameter='regex') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_cpu_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_cpu_info.yml
new file mode 100644
index 000000000..d6c78cf7d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_cpu_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get CPU information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Processors?$expand=*($levels=1)"
+ register: cpu_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set CPU facts
+ ansible.builtin.set_fact:
+ cpu: "{{ cpu_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Assembly', 'Links']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_emm_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_emm_info.yml
new file mode 100644
index 000000000..d085dce2b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_emm_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get enclosure EMM information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Chassis/Oem/Dell/DellEnclosureEMM"
+ register: emm_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set enclosure EMM facts
+ ansible.builtin.set_fact:
+ enclosure_emm: "{{ emm_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Description', 'Links']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_info.yml
new file mode 100644
index 000000000..57c05333b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_enclosure_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get enclosure information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Chassis/Oem/Dell/DellEnclosures"
+ register: enclosure_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set enclosure facts
+ ansible.builtin.set_fact:
+ enclosure: "{{ enclosure_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Links', 'Description']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_fan_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_fan_info.yml
new file mode 100644
index 000000000..772cfb1b1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_fan_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get Fan information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}/ThermalSubsystem/Fans?$expand=*($levels=1)"
+ register: fan_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Fan facts
+ ansible.builtin.set_fact:
+ fan: "{{ fan_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_firmware_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_firmware_info.yml
new file mode 100644
index 000000000..f979a07dd
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_firmware_info.yml
@@ -0,0 +1,12 @@
+---
+- name: Get Firmware information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/UpdateService/FirmwareInventory?$expand=*($levels=1)"
+ register: firmware_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Firmware facts
+ ansible.builtin.set_fact:
+ firmware:
+ "{{ firmware_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type',
+ 'Classifications@odata.count', 'IdentityInfoType@odata.count', 'IdentityInfoValue@odata.count']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_host_nic_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_host_nic_info.yml
new file mode 100644
index 000000000..3f17fde1a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_host_nic_info.yml
@@ -0,0 +1,12 @@
+---
+- name: Get HostNIC information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_manager }}/HostInterfaces?$expand=*($levels=1)"
+ register: nic_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set HostNIC facts
+ ansible.builtin.set_fact:
+ hostnic:
+ "{{ nic_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type',
+ 'HostEthernetInterfaces', 'ManagerEthernetInterface']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_intrusion_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_intrusion_info.yml
new file mode 100644
index 000000000..68cd81a59
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_intrusion_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get Intrusion Sensor information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}?$select=PhysicalSecurity/IntrusionSensor"
+ register: intrusion_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Intrusion Sensor facts
+ ansible.builtin.set_fact:
+ "intrusion_sensor": "{{ intrusion_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_license_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_license_info.yml
new file mode 100644
index 000000000..2a1044282
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_license_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get License information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/LicenseService/Licenses?$expand=*($levels=1)"
+ register: license_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set License facts
+ ansible.builtin.set_fact:
+ license: "{{ license_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_memory_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_memory_info.yml
new file mode 100644
index 000000000..4537b19d7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_memory_info.yml
@@ -0,0 +1,13 @@
+---
+- name: Get Memory information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Memory?$expand=*($levels=1)"
+ register: memory_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Memory facts
+ ansible.builtin.set_fact:
+ memory:
+ "{{ memory_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type',
+ 'AllowedSpeedsMHz@odata.count', 'CPUAffinity@odata.count', 'Processors@odata.count', 'MaxTDPMilliWatts@odata.count',
+ 'OperatingMemoryModes@odata.count']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_metrics_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_metrics_info.yml
new file mode 100644
index 000000000..2a7498098
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_metrics_info.yml
@@ -0,0 +1,93 @@
+---
+- name: Get Power Supply information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}/PowerSubsystem/PowerSupplies?$expand=*($levels=1)"
+ register: power_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Power Supply Metrics ids
+ ansible.builtin.set_fact:
+ power_metrics_ids: "{{ power_result.json.Members | selectattr('Metrics', 'defined') | map(attribute='Metrics') | flatten }}"
+
+- name: Get Power Supply Metrics information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ item['@odata.id'] }}"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ timeout: "{{ https_timeout }}"
+ force_basic_auth: true
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ loop: "{{ power_metrics_ids }}"
+ register: power_metrics_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Thermal Metrics information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}/ThermalSubsystem/ThermalMetrics"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ timeout: "{{ https_timeout }}"
+ force_basic_auth: true
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: thermal_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Memory information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Memory?$expand=*($levels=1)"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ timeout: "{{ https_timeout }}"
+ force_basic_auth: true
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: memory_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Memory Metrics ids
+ ansible.builtin.set_fact:
+ memory_metrics_ids: "{{ memory_result.json.Members | selectattr('Metrics', 'defined') | map(attribute='Metrics') | flatten }}"
+
+- name: Get Memory Metrics information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ item['@odata.id'] }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ timeout: "{{ https_timeout }}"
+ force_basic_auth: true
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ loop: "{{ memory_metrics_ids }}"
+ register: memory_metrics_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Power Supply/Thermal/Memory Metrics facts
+ ansible.builtin.set_fact:
+ power_metrics:
+ "{{ power_metrics_result.results | selectattr('json', 'defined') | map(attribute='json') | flatten |
+ ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'DataSourceUri']) }}"
+ thermal_metrics:
+ "{{ thermal_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'DataSourceUri',
+ 'TemperatureReadingsCelsius@odata.count']) }}"
+ memory_metrics:
+ "{{ memory_metrics_result.results | selectattr('json', 'defined') | map(attribute='json') | flatten |
+ ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'DataSourceUri']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_nic_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_nic_info.yml
new file mode 100644
index 000000000..6a022134f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_nic_info.yml
@@ -0,0 +1,12 @@
+---
+- name: Get NIC information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/EthernetInterfaces?$expand=*($levels=1)"
+ register: nic_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set NIC facts
+ ansible.builtin.set_fact:
+ nic:
+ "{{ nic_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'IPv4Addresses@odata.count',
+ 'IPv6AddressPolicyTable@odata.count', 'IPv6Addresses@odata.count', 'IPv6StaticAddresses@odata.count', 'NameServers@odata.count']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pas_sensor_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pas_sensor_info.yml
new file mode 100644
index 000000000..6e58aeca7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pas_sensor_info.yml
@@ -0,0 +1,12 @@
+---
+- name: Get PresenceAndStatusSensor information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Oem/Dell/DellPresenceAndStatusSensors"
+ register: pas_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set PresenceAndStatusSensor facts
+ ansible.builtin.set_fact:
+ "presence_and_status_sensor":
+ "{{ pas_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Assembly',
+ 'Links']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pcie_device_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pcie_device_info.yml
new file mode 100644
index 000000000..f76377b17
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_pcie_device_info.yml
@@ -0,0 +1,10 @@
+---
+- name: Get PCIeDevice information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}/PCIeDevices?$expand=*($levels=1)"
+ register: pcie_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set PCIeDevice facts
+ ansible.builtin.set_fact:
+ pcie_device: "{{ pcie_result.json.Members | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Links', '@odata.etag']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_physical_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_physical_info.yml
new file mode 100644
index 000000000..92ad3e454
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_physical_info.yml
@@ -0,0 +1,24 @@
+---
+- name: Get Storage information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Storage"
+ register: disk_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get all storage controller ids.
+ ansible.builtin.set_fact:
+ storage_ids_list: "{{ disk_result.json.Members | map('dict2items') | flatten | map(attribute='value') }}"
+
+- name: Get PhysicalDisk information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ item }}?$expand=*($levels=1)"
+ loop: "{{ storage_ids_list }}"
+ register: result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Physical Disk facts
+ ansible.builtin.set_fact:
+ physical_disk:
+ "{{ result.results | selectattr('json', 'defined') | map(attribute='json') | selectattr('Drives', 'defined') |
+ map(attribute='Drives') | flatten | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type',
+ 'Actions', 'Assembly', 'Links', 'DellDriveSMARTAttributes', 'DellNVMeSMARTAttributes', 'Operations@odata.count']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_power_supply_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_power_supply_info.yml
new file mode 100644
index 000000000..e67fcd882
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_power_supply_info.yml
@@ -0,0 +1,13 @@
+---
+- name: Get PowerSupply information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}/PowerSubsystem/PowerSupplies?$expand=*($levels=1)"
+ register: power_supply_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set PowerSupply facts
+ ansible.builtin.set_fact:
+ power_supply:
+ "{{ power_supply_result.json.Members | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type', 'ActiveInputVoltage@Redfish.Deprecated', 'OperationalStatus@odata.count',
+ 'RedTypeOfSet@odata.count']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_resource_id.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_resource_id.yml
new file mode 100644
index 000000000..5a1035533
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_resource_id.yml
@@ -0,0 +1,60 @@
+---
+- name: Get system resource api id
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems
+ register: system_api_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+- name: Get first System Id from the system response
+ ansible.builtin.set_fact:
+ api_system: "{{ system_api_result.json.Members[0]['@odata.id'] |
+ default('') }}"
+ computer_system_id: "{{ system_api_result.json.Members[0]['@odata.id'] |
+ split('/') | last | default('System.Embedded.1') }}"
+ when: computer_system_id == ''
+- name: Get all system Ids
+ ansible.builtin.set_fact:
+ system_ids_list: "{{ system_api_result.json.Members | map('dict2items') |
+ flatten | map(attribute='value') }}"
+ when: computer_system_id != ''
+- name: Split system ids from the string
+ ansible.builtin.set_fact:
+ system_ids: '{{ (system_ids | default([])) + ([item | split("/") | last]) }}'
+ with_list: "{{ system_ids_list }}"
+ when: computer_system_id != ''
+- name: Fail when system id is incorrect
+ ansible.builtin.fail:
+ msg: "{{ idrac_gather_facts_invalid_sys_id_message |
+ format(computer_system_id, (system_ids | join(','))) }}"
+ when: computer_system_id != "" and not computer_system_id in system_ids
+- name: Get manager resource api id
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}/redfish/v1/Managers
+ register: manager_api_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+- name: Get first manager resource id from manager response.
+ ansible.builtin.set_fact:
+ api_manager: "{{ manager_api_result.json.Members[0]['@odata.id'] | default('') }}"
+ when: manager_id == ''
+- name: Get all manager resource ids.
+ ansible.builtin.set_fact:
+ manager_ids_list: "{{ manager_api_result.json.Members | map('dict2items') |
+ flatten | map(attribute='value') }}"
+ when: manager_id != ''
+- name: Split manager ids from the string
+ ansible.builtin.set_fact:
+ manager_ids: '{{ (manager_ids | default([])) + ([item | split("/") | last]) }}'
+ with_list: "{{ manager_ids_list }}"
+ when: manager_id != ''
+- name: Fail when manager id is incorrect
+ ansible.builtin.fail:
+ msg: "{{ idrac_gather_facts_invalid_manager_id_message | format(manager_id,
+ (manager_ids | join(','))) }}"
+ when: manager_id != "" and not manager_id in manager_ids
+- name: Get chassis resource api id
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}/redfish/v1/Chassis
+ register: chassis_api_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+- name: Get first chassis resource id from chassis response.
+ ansible.builtin.set_fact:
+ api_chassis: "{{ chassis_api_result.json.Members[0]['@odata.id'] | default('') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_secure_boot_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_secure_boot_info.yml
new file mode 100644
index 000000000..20a72cfe2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_secure_boot_info.yml
@@ -0,0 +1,41 @@
+---
+- name: Get Secure Boot information.
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}{{ api_system
+ }}/SecureBoot/?$expand=*($levels=1)
+ register: secure_boot_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Secure Boot Databases.
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}{{ api_system
+ }}/SecureBoot/SecureBootDatabases?$expand=*($levels=1)
+ register: secure_boot_db_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get Secure Boot db certificates information.
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}{{
+ sec_boot_db_item['Certificates']['@odata.id'] }}?$expand=*($levels=1)
+ loop: "{{ secure_boot_db_result.json.Members }}"
+ loop_control:
+ loop_var: sec_boot_db_item
+ register: secure_boot_results
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Combine certificates with corresponding db information.
+ ansible.builtin.set_fact:
+ interim_secure_boot: "{{ interim_secure_boot | default([]) +
+ [sec_boot_cert['sec_boot_db_item'] | combine({'Certificates':
+ sec_boot_cert['json']['Members']})] }}"
+ loop: "{{ secure_boot_results.results }}"
+ loop_control:
+ loop_var: sec_boot_cert
+ no_log: true
+
+- name: Combine the secure boot database and certificates into secure boot.
+ ansible.builtin.set_fact:
+ secure_boot: '{{ secure_boot_result.json | combine({"SecureBootDatabases":
+ interim_secure_boot}) |
+ ansible.utils.remove_keys(target=["^.*@odata\..*$"],
+ matching_parameter="regex") }}'
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_system_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_system_info.yml
new file mode 100644
index 000000000..953fde59d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_system_info.yml
@@ -0,0 +1,19 @@
+---
+- name: Get system information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}"
+ register: system_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get operating system information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Managers/{{
+ computer_system_id }}/Attributes?$select=ServerOS.*"
+ register: os_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set system facts
+ ansible.builtin.set_fact:
+ system:
+ "{{ system_result.json.Oem.Dell.DellSystem | combine(os_result.json.Attributes) |
+ ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_virtual_disk_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_virtual_disk_info.yml
new file mode 100644
index 000000000..6d913a477
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_virtual_disk_info.yml
@@ -0,0 +1,25 @@
+---
+- name: Get Storage information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_system }}/Storage"
+ register: virtual_disk_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Get all storage controller ids.
+ ansible.builtin.set_fact:
+ storage_ids_list: "{{ virtual_disk_result.json.Members | map('dict2items') | flatten | map(attribute='value') | default('') }}"
+
+- name: Get Virtual Disk information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ item }}/Volumes?$expand=*($levels=1)"
+ loop: "{{ storage_ids_list }}"
+ register: result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Virtual Disk facts
+ ansible.builtin.set_fact:
+ virtual_disk:
+ "{{ result.results | selectattr('json', 'defined') | map(attribute='json') | selectattr('Members', 'defined') |
+ map(attribute='Members') | flatten | ansible.utils.remove_keys(target=['@odata.context', '@odata.type',
+ '@odata.id', 'Actions', 'EncryptionTypes@odata.count', 'Identifiers@odata.count', 'Links',
+ 'Operations@odata.count', 'DellVirtualDisk', 'DellVirtualDisk@Redfish.Deprecated']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_voltage_info.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_voltage_info.yml
new file mode 100644
index 000000000..a1b9d4e91
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/get_voltage_info.yml
@@ -0,0 +1,12 @@
+---
+- name: Get Sensor Voltage information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ api_chassis }}/Power#/Voltages"
+ register: voltage_result
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+- name: Set Sensor Voltage facts
+ ansible.builtin.set_fact:
+ "voltages":
+ "{{ voltage_result.json.Voltages | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type']) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/main.yml
new file mode 100644
index 000000000..b67ed56f0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tasks/main.yml
@@ -0,0 +1,81 @@
+---
+- name: iDRAC gather facts
+ module_defaults:
+ ansible.builtin.uri:
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ timeout: "{{ https_timeout }}"
+ force_basic_auth: true
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ block:
+ - name: Check whether at least one of 'IDRAC_USERNAME' or username is provided
+ ansible.builtin.fail:
+ msg: Ensure the value for environment variable 'IDRAC_USERNAME'
+ or the argument 'username' is set.
+ when: username is not defined and not lookup('env', 'IDRAC_USERNAME')
+
+ - name: Check whether at least one of 'IDRAC_PASSWORD' or password is provided
+ ansible.builtin.fail:
+ msg: Ensure the value for environment variable 'IDRAC_PASSWORD'
+ or the argument 'password' is set.
+ when: password is not defined and not lookup('env', 'IDRAC_PASSWORD')
+
+ - name: Set default facts
+ ansible.builtin.set_fact:
+ idrac: {}
+ system: {}
+ bios: {}
+ controller: []
+ cpu: []
+ enclosure: []
+ enclosure_emm: []
+ fan: []
+ firmware: []
+ hostnic: []
+ license: []
+ memory: []
+ nic: []
+ backplane: []
+ power_supply: []
+ presence_and_status_sensor: []
+ sensor_battery: {}
+ intrusion_sensor: {}
+ voltages: []
+ virtual_disk: []
+ pcie_device: {}
+ physical_disk: []
+ power_metrics: []
+ thermal_metrics: []
+ memory_metrics: []
+ secure_boot: {}
+
+ - name: Get connection
+ ansible.builtin.uri:
+ url: https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems
+ register: idrac_gather_facts_connection
+ delegate_to: "{{ idrac_gather_facts_delegate }}"
+
+ - name: Fail when hostname or certificate is incorrect or invalid.
+ ansible.builtin.fail:
+ msg: "{{ idrac_gather_facts_connection.msg }}"
+ when: idrac_gather_facts_connection.status == -1
+
+ - name: Fail when credentials are incorrect or invalid.
+ ansible.builtin.fail:
+ msg: The authentication credentials included with this request
+ are missing or invalid.
+ when: idrac_gather_facts_connection.status == 401
+
+ - name: Get System, Manager and Chassis resource id.
+ ansible.builtin.include_tasks: get_resource_id.yml
+
+ - name: Get target facts in loop
+ ansible.builtin.include_tasks: "{{
+ idrac_gather_facts_target_yml_map[item] }}"
+ loop: "{{ target }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/backplane_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/backplane_assert.yml
new file mode 100644
index 000000000..a4562f9e0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/backplane_assert.yml
@@ -0,0 +1,39 @@
+- name: Get PCIeSSDBackPlanes information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/Chassis/Oem/Dell/DellPCIeSSDBackPlanes/{{ backplane_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: backplane_result
+ no_log: true
+
+- name: Set backplane facts
+ ansible.builtin.set_fact:
+ api_response: "{{ backplane_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ backplane_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a Diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: backplane_data[item]}) }}"
+ loop: "{{ backplane_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - backplane_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/controller_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/controller_assert.yml
new file mode 100644
index 000000000..277e524a2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/controller_assert.yml
@@ -0,0 +1,70 @@
+---
+- name: Fetching Controller info
+ ansible.builtin.uri: &uri_params
+ url: "https://{{ hostname }}{{ api_system }}/Storage"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: idrac_gather_facts_storage_entity
+ no_log: true
+
+- name: Get storage entities
+ ansible.builtin.set_fact:
+ collected_storage_entity:
+ "{{ (collected_storage_entity | default([])) + [item['@odata.id']] }}"
+ loop: "{{ idrac_gather_facts_storage_entity.json.Members }}"
+
+- name: Get Controllers URL information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ item }}"
+ <<: *uri_params
+ loop: "{{ collected_storage_entity }}"
+ register: idrac_gather_facts_controllers_url
+
+- name: Collect Controllers key values into one list
+ ansible.builtin.set_fact:
+ controllers_list:
+ "{{ controllers_list | default([]) +
+ [item.json.Controllers | default(omit)] }}"
+ loop: "{{ idrac_gather_facts_controllers_url.results }}"
+
+- name: Select list of controller values from dictionary
+ ansible.builtin.set_fact:
+ collected_values:
+ "{{ (collected_values | default([])) +
+ [item['@odata.id'] | default(omit)] }}"
+ loop: "{{ controllers_list }}"
+
+- name: Get Controllers information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ item }}?$expand=*($levels=1)"
+ <<: *uri_params
+ loop: "{{ collected_values | reject('match', '__omit_place_holder__.*') }}"
+ register: idrac_gather_facts_result
+
+- name: Set All Controllers facts
+ ansible.builtin.set_fact:
+ api_response:
+ "{{ idrac_gather_facts_result.results | selectattr('json', 'defined') |
+ map(attribute='json') |
+ selectattr('Members', 'defined') |
+ map(attribute='Members') | flatten |
+ ansible.utils.remove_keys(target=['^.*@odata.*$'],
+ matching_parameter='regex') }}"
+
+- name: Check whether output differs
+ ansible.builtin.set_fact:
+ result_diff: "{{ controller | symmetric_difference(api_response) }}"
+
+- name: Assert the differences in List
+ ansible.builtin.assert:
+ that:
+ - "{{ (result_diff | length) == 0 }}"
+ fail_msg: "The response from the role does not match"
+ success_msg: "The response from the role matches"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/cpu_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/cpu_assert.yml
new file mode 100644
index 000000000..8ff78d62d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/cpu_assert.yml
@@ -0,0 +1,40 @@
+---
+- name: Fetching CPU information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Processors/{{ cpu_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: cpu_result
+ no_log: true
+
+- name: Set CPU facts
+ ansible.builtin.set_fact:
+ api_response: "{{ cpu_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Assembly', 'Links']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ cpu_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: cpu_data[item]}) }}"
+ loop: "{{ cpu_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - cpu_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosure_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosure_assert.yml
new file mode 100644
index 000000000..49f457708
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosure_assert.yml
@@ -0,0 +1,39 @@
+- name: Fetching Enclosure info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/Chassis/Oem/Dell/DellEnclosures/{{ enclosure_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: enclosure_result
+ no_log: true
+
+- name: Set enclosure facts
+ ansible.builtin.set_fact:
+ api_response: "{{ enclosure_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Links', 'Description']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ enclosure_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: enclosure_data[item]}) }}"
+ loop: "{{ enclosure_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - enclosure_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosureemm_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosureemm_assert.yml
new file mode 100644
index 000000000..7919c984b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/enclosureemm_assert.yml
@@ -0,0 +1,39 @@
+- name: Fetching EnclosureEMM info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/Chassis/Oem/Dell/DellEnclosureEMM/{{ enclosureemm_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: enclosureemm_result
+ no_log: true
+
+- name: Set enclosureemm facts
+ ansible.builtin.set_fact:
+ api_response: "{{ enclosureemm_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Description', 'Links']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ enclosureemm_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: enclosureemm_data[item]}) }}"
+ loop: "{{ enclosureemm_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - enclosureemm_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/fan_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/fan_assert.yml
new file mode 100644
index 000000000..b3f50e760
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/fan_assert.yml
@@ -0,0 +1,39 @@
+- name: Fetching Fan info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}/ThermalSubsystem/Fans/{{ fan_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: fan_result
+ no_log: true
+
+- name: Set fan facts
+ ansible.builtin.set_fact:
+ api_response: "{{ fan_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ fan_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: fan_data[item]}) }}"
+ loop: "{{ fan_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - fan_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/firmware_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/firmware_assert.yml
new file mode 100644
index 000000000..49168f07e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/firmware_assert.yml
@@ -0,0 +1,40 @@
+- name: Fetching Firmware info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/UpdateService/FirmwareInventory/{{ firmware_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: firmware_result
+ no_log: true
+
+- name: Set firmware facts
+ ansible.builtin.set_fact:
+ api_response: "{{ firmware_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Classifications@odata.count',
+ 'IdentityInfoType@odata.count', 'IdentityInfoValue@odata.count']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ firmware_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: firmware_data[item]}) }}"
+ loop: "{{ firmware_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - firmware_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/hostnic_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/hostnic_assert.yml
new file mode 100644
index 000000000..23f2648d5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/hostnic_assert.yml
@@ -0,0 +1,40 @@
+- name: Fetching HostNIC info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_manager }}/HostInterfaces/{{ hostnic_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: hostnic_result
+ no_log: true
+
+- name: Set hostnic facts
+ ansible.builtin.set_fact:
+ api_response: "{{ hostnic_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'HostEthernetInterfaces',
+ 'ManagerEthernetInterface']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ hostnic_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: hostnic_data[item]}) }}"
+ loop: "{{ hostnic_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - hostnic_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/lc_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/lc_assert.yml
new file mode 100644
index 000000000..df83810d6
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/lc_assert.yml
@@ -0,0 +1,28 @@
+- name: Set LC attributes from role output
+ ansible.builtin.set_fact:
+ idrac_lc_attributes: "{{ idrac.lifecycle_controller_attributes }}"
+
+- name: Set LC attributes from API response
+ ansible.builtin.set_fact:
+ api_idrac_lc_attributes: "{{ api_idrac.api_lifecycle_controller_attributes }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ idrac_lc_attributes.keys() | list | symmetric_difference((api_idrac_lc_attributes.keys() | list)) }}"
+
+- name: Set a Diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: idrac_lc_attributes[item]}) }}"
+ loop: "{{ idrac_lc_attributes.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - idrac_lc_attributes[item] != api_idrac_lc_attributes[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/license_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/license_assert.yml
new file mode 100644
index 000000000..93c76ac69
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/license_assert.yml
@@ -0,0 +1,39 @@
+- name: Get license information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/LicenseService/Licenses/{{ license_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: license_result
+ no_log: true
+
+- name: Set license facts
+ ansible.builtin.set_fact:
+ api_response: "{{ license_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ license_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: license_data[item]}) }}"
+ loop: "{{ license_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - license_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/manager_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/manager_assert.yml
new file mode 100644
index 000000000..6e62062a5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/manager_assert.yml
@@ -0,0 +1,28 @@
+- name: Set manager attributes from role output
+ ansible.builtin.set_fact:
+ idrac_manager_attributes: "{{ idrac.manager_attributes }}"
+
+- name: Set manager attributes from API response
+ ansible.builtin.set_fact:
+ api_idrac_manager_attributes: "{{ api_idrac.api_manager_attributes }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ idrac_manager_attributes.keys() | list | symmetric_difference((api_idrac_manager_attributes.keys() | list)) }}"
+
+- name: Set a Diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: idrac_manager_attributes[item]}) }}"
+ loop: "{{ idrac_manager_attributes.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - idrac_manager_attributes[item] != api_idrac_manager_attributes[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/memory_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/memory_assert.yml
new file mode 100644
index 000000000..08b56689b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/memory_assert.yml
@@ -0,0 +1,40 @@
+- name: Fetching memory info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Memory/{{ memory_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: memory_result
+ no_log: true
+
+- name: Set memory facts
+ ansible.builtin.set_fact:
+ api_response: "{{ memory_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'AllowedSpeedsMHz@odata.count',
+ 'CPUAffinity@odata.count', 'Processors@odata.count', 'MaxTDPMilliWatts@odata.count', 'OperatingMemoryModes@odata.count']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ memory_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: memory_data[item]}) }}"
+ loop: "{{ memory_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - memory_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/mmetrics_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/mmetrics_assert.yml
new file mode 100644
index 000000000..1cc040b73
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/mmetrics_assert.yml
@@ -0,0 +1,38 @@
+- name: Get Memory Metrics information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Memory/{{ memory_data.Id }}/MemoryMetrics"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_memory_metrics
+ no_log: true
+
+- name: Set Memory Metrics facts
+ ansible.builtin.set_fact:
+ api_memory_metrics: "{{ response_memory_metrics.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'DataSourceUri']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ memory_data.keys() | list | symmetric_difference((api_memory_metrics.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: memory_data[item]}) }}"
+ loop: "{{ memory_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - memory_data[item] != api_memory_metrics[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/nic_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/nic_assert.yml
new file mode 100644
index 000000000..3c334bd2a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/nic_assert.yml
@@ -0,0 +1,40 @@
+- name: Get NIC information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/EthernetInterfaces/{{ nic_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: nic_result
+ no_log: true
+
+- name: Set nic facts
+ ansible.builtin.set_fact:
+ api_response: "{{ nic_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'IPv4Addresses@odata.count',
+ 'IPv6AddressPolicyTable@odata.count', 'IPv6Addresses@odata.count', 'IPv6StaticAddresses@odata.count', 'NameServers@odata.count']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ nic_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: nic_data[item]}) }}"
+ loop: "{{ nic_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - nic_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/passensor_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/passensor_assert.yml
new file mode 100644
index 000000000..6931c9204
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/passensor_assert.yml
@@ -0,0 +1,39 @@
+- name: Fetching Presence and Status Sensor info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Oem/Dell/DellPresenceAndStatusSensors/{{ passensor_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ no_log: true
+ register: passensor_result
+
+- name: Set presence and status sensor facts
+ ansible.builtin.set_fact:
+ api_response: "{{ passensor_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Assembly', 'Links']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ passensor_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: passensor_data[item]}) }}"
+ loop: "{{ passensor_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - passensor_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/pciedevice_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/pciedevice_assert.yml
new file mode 100644
index 000000000..de762c302
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/pciedevice_assert.yml
@@ -0,0 +1,39 @@
+- name: Fetching PCIeDevice info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}/PCIeDevices/{{ pci_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ no_log: true
+ register: pci_result
+
+- name: Set pcie device facts
+ ansible.builtin.set_fact:
+ api_response: "{{ pci_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type', 'Links', '@odata.etag']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ pci_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: pci_data[item]}) }}"
+ loop: "{{ pci_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - pci_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/physicaldisk_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/physicaldisk_assert.yml
new file mode 100644
index 000000000..76ec6624f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/physicaldisk_assert.yml
@@ -0,0 +1,45 @@
+---
+- name: Get controller id
+ ansible.builtin.set_fact:
+ ctrl_id: "{{ pd_data.Id | split(':') | last }}"
+
+- name: Get Storage information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Storage/{{ ctrl_id }}/Drives/{{ pd_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ no_log: true
+ register: disk_result
+
+- name: Filter Physical Disk data
+ ansible.builtin.set_fact:
+ api_response: "{{ disk_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.id', '@odata.type',
+ 'Actions', 'Assembly', 'Links', 'DellDriveSMARTAttributes', 'DellNVMeSMARTAttributes', 'Operations@odata.count']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ pd_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a Diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: pd_data[item]}) }}"
+ loop: "{{ pd_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - pd_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/powersupply_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/powersupply_assert.yml
new file mode 100644
index 000000000..ddb77ce04
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/powersupply_assert.yml
@@ -0,0 +1,42 @@
+- name: Fetching Power Supply info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_chassis }}/PowerSubsystem/PowerSupplies/{{ powersupply_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: powersupply_result
+ no_log: true
+
+- name: Set powersupply facts
+ ansible.builtin.set_fact:
+ api_response:
+ "{{ powersupply_result.json | ansible.utils.remove_keys(target=['@odata.context',
+ '@odata.id', '@odata.type', 'ActiveInputVoltage@Redfish.Deprecated', 'OperationalStatus@odata.count',
+ 'RedTypeOfSet@odata.count']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ powersupply_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: powersupply_data[item]}) }}"
+ loop: "{{ powersupply_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - powersupply_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/psmetrics_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/psmetrics_assert.yml
new file mode 100644
index 000000000..ae53db89d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/psmetrics_assert.yml
@@ -0,0 +1,29 @@
+- name: Get Power information.
+ ansible.builtin.uri:
+    url: "https://{{ hostname }}{{ api_chassis }}/PowerSubsystem/PowerSupplies/{{ psu_ids[index] }}/Metrics"
+ validate_certs: "{{ validate_certs }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: response_power_metrics
+ no_log: true
+
+- name: Set Power Supply Metrics facts
+ ansible.builtin.set_fact:
+ api_power_metrics: "{{ response_power_metrics.json |
+ ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'DataSourceUri']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ power_data.keys() | list | symmetric_difference((api_power_metrics.keys() | list)) }}"
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/secureboot_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/secureboot_assert.yml
new file mode 100644
index 000000000..9df2660c1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/secureboot_assert.yml
@@ -0,0 +1,62 @@
+---
+- name: Fetching Secureboot info
+ ansible.builtin.uri: &uri_params
+ url: "https://{{ hostname }}{{ api_system
+ }}/SecureBoot/?$expand=*($levels=1)"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ register: idrac_gather_facts_secureboot_details
+
+- name: Fetching Secureboot database info
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system
+ }}/SecureBoot/SecureBootDatabases?$expand=*($levels=1)"
+ <<: *uri_params
+ register: idrac_gather_facts_secureboot_database_details
+
+- name: Fetching Secureboot database certificates info
+ ansible.builtin.uri:
+    url: "https://{{ hostname }}{{
+      sec_boot_db_item['Certificates']['@odata.id']
+      }}?$expand=*($levels=1)"
+ <<: *uri_params
+ loop: "{{ idrac_gather_facts_secureboot_database_details.json.Members }}"
+ loop_control:
+ loop_var: sec_boot_db_item
+ register: idrac_gather_facts_secure_boot_results
+
+- name: Combine certificates with corresponding database information.
+ ansible.builtin.set_fact:
+ interim_secure_boot: "{{ interim_secure_boot | default([]) +
+ [sec_boot_cert['sec_boot_db_item'] | combine({'Certificates':
+ sec_boot_cert['json']['Members']})] }}"
+  loop: "{{ idrac_gather_facts_secure_boot_results.results }}"
+ loop_control:
+ loop_var: sec_boot_cert
+ no_log: true
+
+- name: Combine the secure boot database and certificates into secure boot.
+ ansible.builtin.set_fact:
+ api_response: '{{ idrac_gather_facts_secureboot_details.json |
+ combine({"SecureBootDatabases":
+ interim_secure_boot}) |
+ ansible.utils.remove_keys(target=["^.*@odata\..*$"],
+ matching_parameter="regex") }}'
+
+- name: Check whether output differs
+ ansible.builtin.set_fact:
+ result_diff: "{{ secure_boot | symmetric_difference(api_response) }}"
+
+- name: Assert the differences in List
+ ansible.builtin.assert:
+ fail_msg: "The response from the role does not match"
+ success_msg: "The response from the role matches"
+ that:
+ - "{{ (result_diff | length) == 0 }}"
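Unlike the other assert files, secureboot_assert.yml defines the shared uri arguments once with a YAML anchor (&uri_params) and reuses them in later tasks with the merge key (<<: *uri_params); keys written out explicitly in a task, such as url, take precedence over the merged ones. Below is a reduced sketch of the same mechanism; the host and endpoints are placeholders, so the calls will only succeed against a real Redfish service.
```
- hosts: localhost
  gather_facts: false
  tasks:
    - name: First request defines the shared parameters
      ansible.builtin.uri: &common_uri
        url: "https://host.example.com/redfish/v1/Systems"   # placeholder endpoint
        method: GET
        validate_certs: false
        return_content: true
      register: systems_response

    - name: Second request reuses them, overriding only the URL
      ansible.builtin.uri:
        url: "https://host.example.com/redfish/v1/Chassis"   # explicit key wins over the merged one
        <<: *common_uri
      register: chassis_response
```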
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/sensorsvoltage_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/sensorsvoltage_assert.yml
new file mode 100644
index 000000000..0e6e2a0da
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/sensorsvoltage_assert.yml
@@ -0,0 +1,20 @@
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ sensorsvoltage_data.keys() | list | symmetric_difference((api_response[index].keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: sensorsvoltage_data[item]}) }}"
+ loop: "{{ sensorsvoltage_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - sensorsvoltage_data[item] != api_response[index][item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/system_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/system_assert.yml
new file mode 100644
index 000000000..5376d1d42
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/system_assert.yml
@@ -0,0 +1,28 @@
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ idrac_system_attributes: "{{ idrac.system_attributes }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ api_idrac_system_attributes: "{{ api_idrac.api_system_attributes }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ idrac_system_attributes.keys() | list | symmetric_difference((api_idrac_system_attributes.keys() | list)) }}"
+
+- name: Set a Diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: idrac_system_attributes[item]}) }}"
+ loop: "{{ idrac_system_attributes.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - idrac_system_attributes[item] != api_idrac_system_attributes[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/tmetrics_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/tmetrics_assert.yml
new file mode 100644
index 000000000..25a8229cf
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/tmetrics_assert.yml
@@ -0,0 +1,20 @@
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ thermal_metrics.keys() | list | symmetric_difference((api_thermal_metrics.keys() | list)) }}"
+
+- name: Set a diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: thermal_metrics[item]}) }}"
+ loop: "{{ thermal_metrics.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - thermal_metrics[item] != api_thermal_metrics[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/virtualdisk_assert.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/virtualdisk_assert.yml
new file mode 100644
index 000000000..6622031c7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/asserts/virtualdisk_assert.yml
@@ -0,0 +1,45 @@
+---
+- name: Get controller id
+ ansible.builtin.set_fact:
+ ctrl_id: "{{ virtualdisk_data.Id | split(':') | last }}"
+
+- name: Get Storage information.
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}{{ api_system }}/Storage/{{ ctrl_id }}/Volumes/{{ virtualdisk_data.Id }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: "{{ idrac_gather_facts_uri_method }}"
+ user: "{{ username }}"
+ password: "{{ password }}"
+ headers: "{{ idrac_gather_facts_uri_headers }}"
+ body_format: "{{ idrac_gather_facts_uri_body_format }}"
+ status_code: "{{ idrac_gather_facts_uri_status_code }}"
+ return_content: "{{ idrac_gather_facts_uri_return_content }}"
+ no_log: true
+ register: virtualdisk_result
+
+- name: Filter Virtual Disk data
+ ansible.builtin.set_fact:
+ api_response: "{{ virtualdisk_result.json | ansible.utils.remove_keys(target=['@odata.context', '@odata.type', '@odata.id', 'Actions',
+ 'EncryptionTypes@odata.count', 'Identifiers@odata.count', 'Links', 'Operations@odata.count', 'DellVirtualDisk', 'DellVirtualDisk@Redfish.Deprecated']) }}"
+
+- name: Set the keys diff
+ ansible.builtin.set_fact:
+ diff_keys: "{{ virtualdisk_data.keys() | list | symmetric_difference((api_response.keys() | list)) }}"
+
+- name: Set a Diff of dict
+ ansible.builtin.set_fact:
+ diff_data: "{{ diff_data | combine({item: virtualdisk_data[item]}) }}"
+ loop: "{{ virtualdisk_data.keys() }}"
+ when:
+ - diff_keys | length == 0
+ - virtualdisk_data[item] != api_response[item]
+ - item not in exclude_keys
+
+- name: Assert the difference in Keys
+ ansible.builtin.assert:
+ that:
+ - "{{ (diff_keys | length) == 0 }}"
+ - "{{ (diff_data | length) == 0 }}"
+ fail_msg: "The response from the role does not match | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
+ success_msg: "The response from the role matches | Diff Keys : {{ diff_keys }} Diff Data : {{ diff_data }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/test.yml
new file mode 100644
index 000000000..f9e315314
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Call Role Gather Facts
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_gather_facts
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/vars/main.yml
new file mode 100644
index 000000000..efe6c061f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_gather_facts/vars/main.yml
@@ -0,0 +1,46 @@
+---
+# vars file for idrac_gather_facts
+idrac_gather_facts_invalid_sys_id_message: "Invalid computer system id : %s, valid values are %s"
+idrac_gather_facts_invalid_manager_id_message: "Invalid computer manager id : %s, valid values are %s"
+
+idrac_gather_facts_uri_method: "GET"
+idrac_gather_facts_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+idrac_gather_facts_uri_body_format: "json"
+idrac_gather_facts_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+idrac_gather_facts_uri_return_content: true
+idrac_gather_facts_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+# Mapping of target facts to yml file
+idrac_gather_facts_target_yml_map:
+ IDRAC: get_attributes_info.yml
+ System: get_system_info.yml
+ BIOS: get_bios_info.yml
+ Controller: get_controller_info.yml
+ CPU: get_cpu_info.yml
+ Enclosure: get_enclosure_info.yml
+ EnclosureEMM: get_enclosure_emm_info.yml
+ Fan: get_fan_info.yml
+ Firmware: get_firmware_info.yml
+ HostNIC: get_host_nic_info.yml
+ License: get_license_info.yml
+ Memory: get_memory_info.yml
+ NIC: get_nic_info.yml
+ PCIeSSDBackPlane: get_backplane_info.yml
+ PowerSupply: get_power_supply_info.yml
+ PresenceAndStatusSensor: get_pas_sensor_info.yml
+ Sensors_Battery: get_battery_info.yml
+ Sensors_Intrusion: get_intrusion_info.yml
+ Sensors_Voltage: get_voltage_info.yml
+ VirtualDisk: get_virtual_disk_info.yml
+ PCIeDevice: get_pcie_device_info.yml
+ PhysicalDisk: get_physical_info.yml
+ SystemMetrics: get_metrics_info.yml
+ SecureBoot: get_secure_boot_info.yml
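The idrac_gather_facts_target_yml_map above translates each requested target name into the task file that collects those facts. The role's tasks/main.yml is not part of this hunk, so the dispatch below is only a hypothetical sketch of how such a map is typically consumed; the default list and the when guard are assumptions, not the role's actual logic.
```
# Hypothetical dispatch over the map above (illustrative only).
- name: Gather facts for each requested target
  ansible.builtin.include_tasks: "{{ idrac_gather_facts_target_yml_map[item] }}"
  loop: "{{ target | default(['System']) }}"
  when: item in idrac_gather_facts_target_yml_map
```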
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/README.md
new file mode 100644
index 000000000..70164206b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/README.md
@@ -0,0 +1,393 @@
+# idrac_import_server_config_profile
+
+Role to import the Server Configuration Profile (SCP) to the iDRAC from a network share (CIFS, NFS, HTTP, HTTPS) or a local path.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td> The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>share_parameters</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>Network share parameters.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;share_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Network share or local path.<br>- CIFS, NFS, HTTP, and HTTPS network share types are supported.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;scp_file</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+    <td>- Name of the server configuration profile (SCP) file.</br>- The default format `idrac_ip_YYMMDD_HHMMSS_scp` is used if this option is not specified.</br>- I(export_format) is used if a file with a valid extension is not provided.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;share_user</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+    <td>Network share user in the format 'user@domain' or 'domain\\user' if the user is part of a domain, else 'user'. This option is mandatory for CIFS Network Share.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;share_password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>Network share user password. This option is mandatory for CIFS Network Share.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_support</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Proxy to be enabled or disabled.</br>- I(proxy_support) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_type</td>
+ <td>false</td>
+ <td>http</td>
+ <td>http, socks4</td>
+ <td>str</td>
+ <td>- C(http) to select HTTP type proxy.</br>- C(socks4) to select SOCKS4 type proxy.</br>- I(proxy_type) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_server</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td> - I(proxy_server) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).</br>- I(proxy_server) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_port</td>
+ <td>false</td>
+ <td>80</td>
+ <td></td>
+    <td>int</td>
+ <td>- Proxy port to authenticate.</br> - I(proxy_port) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).</br>- I(proxy_port) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_username</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Proxy username to authenticate.</br>- I(proxy_username) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;proxy_password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Proxy password to authenticate.</br>- I(proxy_password) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ignore_certificate_warning</td>
+ <td>false</td>
+ <td>ignore</td>
+ <td>ignore, showerror</td>
+ <td>str</td>
+ <td>- If C(ignore), it ignores the certificate warnings.</br>- If C(showerror), it shows the certificate warnings.</br>
+ - I(ignore_certificate_warning) is considered only when I(share_name) is of type HTTPS and is supported only on iDRAC9.</td>
+ </tr>
+ <tr>
+ <td>target</td>
+ <td>false</td>
+ <td>['ALL']</td>
+ <td>'ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID'</td>
+    <td>list</td>
+ <td>- If C(ALL), this module exports or imports all components configurations from SCP file.<br>- If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.<br>- If C(BIOS), this module exports or imports BIOS configuration from SCP file.<br>- If C(NIC), this module exports or imports NIC configuration from SCP file.<br>- If C(RAID), this module exports or imports RAID configuration from SCP file.</br>- When I(command) is C(export) or C(import) I(target) with multiple components is supported only on iDRAC9 with firmware 6.10.00.00 and above.</td>
+ </tr>
+ <tr>
+ <td>import_buffer</td>
+ <td>false</td>
+ <td></td>
+    <td></td>
+ <td>str</td>
+ <td> - SCP content buffer.<br>
+ - This is mutually exclusive with share_parameters.scp_file.
+ </td>
+ </tr>
+ <tr>
+ <td>end_host_power_state</td>
+ <td>false</td>
+ <td>'On'</td>
+ <td>'On', 'Off'</td>
+ <td>str</td>
+ <td> Host power state after import of server configuration profile.</td>
+ </tr>
+ <tr>
+ <td>shutdown_type</td>
+ <td>false</td>
+ <td>'Graceful'</td>
+ <td>'Graceful', 'Forced', 'NoReboot'</td>
+ <td>str</td>
+ <td> Server shutdown type.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_import_server_config_profile_out</td>
+ <td>{
+ "changed": false,
+ "msg": "Successfully imported the Server Configuration Profile.",
+ "scp_status": {
+ "CompletionTime": "2023-02-21T04:12:37",
+ "Description": "Job Instance",
+ "EndTime": null,
+ "Id": "JID_774927528227",
+ "JobState": "Completed",
+ "JobType": "ImportConfiguration",
+ "Message": "No changes were applied since the current component configuration matched the requested configuration.",
+ "MessageArgs": [],
+ "MessageId": "IDRAC.2.8.SYS069",
+ "Name": "Configure: Import Server Configuration Profile",
+ "PercentComplete": 100,
+ "StartTime": "TIME_NOW",
+ "TargetSettingsURI": null,
+ "TaskStatus": "OK",
+ "file": ".\\192.1.2.1_2023221_5549_scp.xml",
+ "retval": true
+ }
+}</td>
+ <td>Module output of the Server Configuration Job</td>
+ </tr>
+ </tbody>
+</table>
+
+## Examples
+-----
+
+```
+- name: Importing SCP from local path with all components
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ share_parameters:
+ share_name: "/root/tmp"
+ scp_file: "file.xml"
+```
+```
+- name: Importing SCP from NFS with iDRAC components
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['IDRAC']
+ share_parameters:
+ share_name: "191.2.1.1:/nfs"
+ scp_file: "file.json"
+```
+```
+- name: Importing SCP from CIFS with BIOS components
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['BIOS']
+ share_parameters:
+ share_name: "\\\\191.1.1.1\\cifs"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "file.xml"
+```
+```
+- name: Importing SCP from HTTPS with RAID components
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['RAID']
+ share_parameters:
+ share_name: "https://192.1.1.1/share"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "filename.json"
+```
+```
+- name: "Importing SCP from HTTP with NIC components"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['NIC']
+ share_parameters:
+ share_name: "http://192.1.1.1/share"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "filename.xml"
+```
+```
+- name: "Importing SCP using import buffer with NIC components"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['NIC']
+ import_buffer: "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'> Disabled</Attribute></Component></SystemConfiguration>"
+```
+```
+- name: "Importing SCP from HTTP with NIC components using proxy"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ target: ['NIC']
+ share_parameters:
+ share_name: "http://192.1.1.1/share"
+ share_user: "username"
+ share_password: "password"
+ scp_file: "filename.xml"
+ proxy_support: true
+ proxy_server: 192.168.0.6
+ proxy_port: 8080
+ proxy_type: socks4
+```
+```
+- name: Import SCP
+ hosts: idrac
+ roles:
+ - role: idrac_import_server_config_profile
+```
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Abhishek Sinha (Abhishek.Sinha10@Dell.com) 2023
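The README above documents the idrac_import_server_config_profile_out fact that the role registers. A small follow-up task such as the one below (illustrative only; field names are taken from the sample output in the Fact variables table) can surface the job result after the role runs.
```
- name: Report the SCP import job result
  ansible.builtin.debug:
    msg: "Job {{ idrac_import_server_config_profile_out.scp_status.Id }} finished with state
      {{ idrac_import_server_config_profile_out.scp_status.JobState }}"
```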
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/defaults/main.yml
new file mode 100644
index 000000000..79e251f31
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/defaults/main.yml
@@ -0,0 +1,15 @@
+---
+# defaults file for idrac_import_server_config_profile
+
+https_port: 443
+validate_certs: true
+https_timeout: 30
+end_host_power_state: 'On'
+shutdown_type: Graceful
+share_parameters:
+ proxy_support: false
+ proxy_type: http
+ proxy_port: "80"
+ ignore_certificate_warning: ignore
+target:
+ - 'ALL'
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/handlers/main.yml
new file mode 100644
index 000000000..6a96ab10f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_import_server_config_profile
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/argument_specs.yml
new file mode 100644
index 000000000..3c61a094e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/argument_specs.yml
@@ -0,0 +1,138 @@
+---
+argument_specs:
+ main:
+ version_added: "7.4.0"
+ short_description: Import iDRAC Server Configuration Profile (SCP)
+ description:
+ - The role performs Import operation of Server Configuration Profile.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ share_parameters:
+ description: Network share parameters.
+ type: dict
+ options:
+ share_name:
+ description:
+ - Network share or local path.
+ - CIFS, NFS, HTTP, and HTTPS network share types are supported.
+ - I(share_name) is mutually exclusive with I(import_buffer).
+ type: str
+ scp_file:
+ description:
+ - Name of the server configuration profile (SCP) file.
+ - This option is mandatory if I(command) is C(import).
+ - The default format <idrac_ip>_YYMMDD_HHMMSS_scp is used if this option is not specified for C(import).
+              - I(export_format) is used if a file with a valid extension is not provided for C(export).
+ type: str
+ share_user:
+ description: Network share user in the format 'user@domain' or 'domain\\user' if user is
+ part of a domain else 'user'. This option is mandatory for CIFS Network Share.
+ type: str
+ share_password:
+ description: Network share user password. This option is mandatory for CIFS Network Share.
+ type: str
+ proxy_support:
+ description:
+ - Proxy to be enabled or disabled.
+ - I(proxy_support) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: bool
+ default: false
+ proxy_type:
+ description:
+ - C(http) to select HTTP type proxy.
+ - C(socks4) to select SOCKS4 type proxy.
+ - I(proxy_type) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ choices: [http, socks4]
+ default: http
+ proxy_server:
+ description:
+ - I(proxy_server) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_server) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ proxy_port:
+ description:
+ - Proxy port to authenticate.
+ - I(proxy_port) is required when I(share_name) is of type HTTPS or HTTP and I(proxy_support) is C(true).
+ - I(proxy_port) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: int
+ default: 80
+ proxy_username:
+ description:
+ - Proxy username to authenticate.
+ - I(proxy_username) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ proxy_password:
+ description:
+ - Proxy password to authenticate.
+ - I(proxy_password) is considered only when I(share_name) is of type HTTP or HTTPS and is supported only on iDRAC9.
+ type: str
+ ignore_certificate_warning:
+ description:
+ - If C(ignore), it ignores the certificate warnings.
+ - If C(showerror), it shows the certificate warnings.
+ - I(ignore_certificate_warning) is considered only when I(share_name) is of type HTTPS and is
+ supported only on iDRAC9.
+ type: str
+ choices: [ignore, showerror]
+ default: ignore
+ target:
+ description:
+ - If C(ALL), this module exports or imports all components configurations from SCP file.
+ - If C(IDRAC), this module exports or imports iDRAC configuration from SCP file.
+ - If C(BIOS), this module exports or imports BIOS configuration from SCP file.
+ - If C(NIC), this module exports or imports NIC configuration from SCP file.
+ - If C(RAID), this module exports or imports RAID configuration from SCP file.
+ choices: ['ALL', 'IDRAC', 'BIOS', 'NIC', 'RAID']
+ default: ['ALL']
+ type: list
+ import_buffer:
+ description:
+ - Used to import the buffer input of xml or json into the iDRAC.
+          - This option is applicable when I(command) is C(import) or C(preview).
+ - I(import_buffer) is mutually exclusive with I(share_name).
+ type: str
+ shutdown_type:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(Graceful), the job gracefully shuts down the operating system and turns off the server.
+ - If C(Forced), it forcefully shuts down the server.
+ - If C(NoReboot), the job that applies the SCP will pause until you manually reboot the server.
+ type: str
+ choices: ['Graceful', 'Forced', 'NoReboot']
+ default: 'Graceful'
+ end_host_power_state:
+ description:
+ - This option is applicable for C(import) command.
+ - If C(On), End host power state is on.
+ - If C(Off), End host power state is off.
+ type: str
+ choices: ['On', 'Off']
+ default: 'On'
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/main.yml
new file mode 100644
index 000000000..c4ba1bd9c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/meta/main.yml
@@ -0,0 +1,53 @@
+galaxy_info:
+ author: Abhishek Sinha ('Abhishek-Dell')
+ description: The role performs import operation of Server Configuration Profile.
+ company: Dell Technologies
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: GPL-3.0-only
+
+ min_ansible_version: '2.13'
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/converge.yml
new file mode 100644
index 000000000..29ff66275
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from CIFS with ALL components"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'CIFS_URL') }}"
+ share_user: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'cifs_filename') }}"
+
+ - name: Verifying Import SCP from CIFS with ALL components
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from CIFS with ALL components in check mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from CIFS with ALL components in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/cifs_share/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/converge.yml
new file mode 100644
index 000000000..c0ae89edf
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/converge.yml
@@ -0,0 +1,313 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP without share_name"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: ['IDRAC']
+ share_parameters:
+ share_user: "{{ lookup('env', 'USERNAME') }}"
+ share_password: "{{ lookup('env', 'PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP without share_name"
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "argument of type 'NoneType' is not iterable"
+
+ - name: "Importing SCP without scp_file"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: ['IDRAC']
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ share_user: "{{ lookup('env', 'USERNAME') }}"
+ share_password: "{{ lookup('env', 'PASSWORD') }}"
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP without scp_file"
+ ansible.builtin.assert:
+ that:
+ - "'Invalid file path provided.' in '{{ idrac_import_server_config_profile_out.msg }}' or
+ 'HTTP Error 400: Bad Request' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ - name: "Importing SCP from CIFS with ALL components with invalid file"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'CIFS_URL') }}"
+ share_user: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+ scp_file: "invalid_file.xml"
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP from CIFS with ALL components with invalid file"
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Invalid file path provided." or
+ idrac_import_server_config_profile_out.msg == "Failed to import scp."
+
+ - name: Wait for 15 seconds
+ ansible.builtin.pause:
+ seconds: 15
+
+ - name: "Importing SCP from CIFS with ALL components with invalid share"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "192.168.0.1:/cifsshare"
+ share_user: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'cifs_filename') }}"
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP from CIFS with ALL components with invalid share"
+ ansible.builtin.assert:
+ that:
+ - "'HTTP Error 500' in '{{ idrac_import_server_config_profile_out.msg }}' or
+ 'Failed to import scp.' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ - name: "Importing SCP with invalid hostname"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "randomHostname"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'CIFS_URL') }}"
+ share_user: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'cifs_filename') }}"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP with invalid hostname"
+ ansible.builtin.assert:
+ that:
+ - "'<urlopen error Unable to communicate with iDRAC randomHostname.' in '{{ idrac_import_server_config_profile_out.msg }}' or
+ '<urlopen error [Errno -2] Name or service not known>' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ - name: "Importing SCP with invalid username"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "WrongUsername123"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'CIFS_URL') }}"
+ share_user: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'cifs_filename') }}"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP with invalid username"
+ ansible.builtin.assert:
+ that:
+ - "'HTTP Error 401' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ - name: "Importing SCP with invalid password"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "WrongPassword@123"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'CIFS_URL') }}"
+ share_user: "{{ lookup('env', 'CIFS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'CIFS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'cifs_filename') }}"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP with invalid password"
+ ansible.builtin.assert:
+ that:
+ - "'HTTP Error 401' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ - name: "Importing SCP with invalid target"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: ['idrac']
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ share_user: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP with invalid target"
+ ansible.builtin.assert:
+ that:
+ - "'value of scp_components must be one or more of: ALL, IDRAC, BIOS, NIC, RAID' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ ############### Below snippet is commented because of Issue: JIT-284466 ###############
+ # - name: "Importing SCP with invalid username of share access"
+ # ansible.builtin.import_role:
+ # name: "idrac_import_server_config_profile"
+ # vars:
+ # hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ # username: "{{ lookup('env', 'USERNAME') }}"
+ # password: "{{ lookup('env', 'PASSWORD') }}"
+ # validate_certs: false
+ # target: ['IDRAC']
+ # share_parameters:
+ # share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ # share_user: "WrongUsername123"
+ # share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ # scp_file: "{{ lookup('env', 'http_filename') }}"
+ # ignore_errors: true
+ # register: idrac_import_server_config_profile_status
+
+ # - name: Wait for 15 seconds
+ # ansible.builtin.pause:
+ # seconds: 15
+
+ # - name: "Verifying Import SCP with invalid username of share access"
+ # ansible.builtin.assert:
+ # that:
+ # - idrac_import_server_config_profile_out.msg == "Invalid file path provided." or
+ # idrac_import_server_config_profile_out.msg == "Failed to import scp."
+ ###################################################################################################################
+
+ ############### Below snippet is commented because of Issue: JIT-284466 ###############
+ # - name: "Importing SCP with invalid password of share access"
+ # ansible.builtin.import_role:
+ # name: "idrac_import_server_config_profile"
+ # vars:
+ # hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ # username: "{{ lookup('env', 'USERNAME') }}"
+ # password: "{{ lookup('env', 'PASSWORD') }}"
+ # validate_certs: false
+ # target: ['IDRAC']
+ # share_parameters:
+ # share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ # share_user: "{{ lookup('env', 'USERNAME') }}"
+ # share_password: "WrongPassword@123"
+ # scp_file: "{{ lookup('env', 'http_filename') }}"
+ # ignore_errors: true
+ # register: idrac_import_server_config_profile_status
+
+ # - name: "Verifying Import SCP with invalid password of share access"
+ # ansible.builtin.assert:
+ # that:
+ # - idrac_import_server_config_profile_out.msg == "Invalid file path provided." or
+ # idrac_import_server_config_profile_out.msg == "Failed to import scp."
+ ###################################################################################################################
+
+ - name: "Importing SCP with share_name as None"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: ['IDRAC']
+ share_parameters:
+ share_name: None
+ share_user: "{{ lookup('env', 'USERNAME') }}"
+ share_password: "WrongPassword@123"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+ - name: "Verifying Import SCP with share_name as None"
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Invalid file path provided."
+
+    - name: "Importing SCP with proxy_support enabled but no other proxy parameters"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: ['ALL']
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ share_user: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ proxy_support: true
+ ignore_errors: true
+ register: idrac_import_server_config_profile_status
+
+    - name: "Verifying Import SCP with proxy_support enabled but no other proxy parameters"
+ ansible.builtin.assert:
+ that:
+ - "'proxy_support is True but all of the following are missing' in '{{ idrac_import_server_config_profile_out.msg }}'"
+
+ ############### Below snippet is commented because of Issue: JIT-284466 ###############
+ # - name: "Importing SCP with invalid proxy parameters"
+ # ansible.builtin.import_role:
+ # name: "idrac_import_server_config_profile"
+ # vars:
+ # hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ # username: "{{ lookup('env', 'USERNAME') }}"
+ # password: "{{ lookup('env', 'PASSWORD') }}"
+ # validate_certs: false
+ # target: ['ALL']
+ # share_parameters:
+ # share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ # share_user: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ # share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ # scp_file: "{{ lookup('env', 'http_filename') }}"
+ # proxy_support: true
+ # proxy_type: http
+ # proxy_server: "randomProxyServer"
+ # proxy_port: "{{ lookup('env', 'PROXY_PORT') }}"
+ # proxy_password: "{{ lookup('env', 'PROXY_PASSWORD') }}"
+ # ignore_errors: true
+ # register: idrac_import_server_config_profile_status
+
+ # - name: "Verifying Import SCP with invalid proxy parameter"
+ # ansible.builtin.assert:
+ # that:
+ # - idrac_import_server_config_profile_out.msg == "Invalid file path provided." or
+ # idrac_import_server_config_profile_out.msg == "Failed to import scp."
+ ###################################################################################################################
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/molecule.yml
new file mode 100644
index 000000000..ccf982411
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/default/molecule.yml
@@ -0,0 +1,9 @@
+scenario:
+ test_sequence:
+ - dependency
+ - destroy
+ - syntax
+ - create
+ - converge
+ - cleanup
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/converge.yml
new file mode 100644
index 000000000..f9761ebc1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/converge.yml
@@ -0,0 +1,39 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+
+ - name: "Importing SCP from HTTPS"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: "RAID"
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ share_user: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ when: not ansible_check_mode
+
+  - name: Verifying Import SCP from HTTP in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+  - name: Verifying Import SCP from HTTP in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/converge.yml
new file mode 100644
index 000000000..a0348544a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from HTTP with proxy parameters"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ share_user: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ proxy_support: true
+ proxy_type: http
+ proxy_server: "{{ lookup('env', 'PROXY_SERVER') }}"
+ proxy_port: "{{ lookup('env', 'PROXY_PORT') }}"
+ proxy_username: "{{ lookup('env', 'PROXY_USER') }}"
+ proxy_password: "{{ lookup('env', 'PROXY_PASSWORD') }}"
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from HTTP with proxy parameters in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from HTTP with proxy parameters in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_proxy_parameters/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/converge.yml
new file mode 100644
index 000000000..b96730d75
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from HTTPS with ignore_certificate_warning as showerror"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTP_URL') }}"
+ share_user: "{{ lookup('env', 'HTTP_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTP_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'http_filename') }}"
+ proxy_support: true
+ proxy_server: "{{ lookup('env', 'PROXY_SERVER') }}"
+ proxy_port: "{{ lookup('env', 'PROXY_PORT') }}"
+ proxy_username: "{{ lookup('env', 'PROXY_USER') }}"
+ proxy_password: "{{ lookup('env', 'PROXY_PASSWORD') }}"
+ ignore_certificate_warning: showerror
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from HTTP with ignore_certificate_warning as showerror in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+    - name: Verifying Import SCP from HTTP with ignore_certificate_warning as showerror in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/http_share_with_showerror_certificate_warning/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/converge.yml
new file mode 100644
index 000000000..7981a536c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/converge.yml
@@ -0,0 +1,37 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from HTTPS"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTPS_URL') }}"
+ share_user: "{{ lookup('env', 'HTTPS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTPS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'https_filename') }}"
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from HTTPS in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from HTTPS in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/converge.yml
new file mode 100644
index 000000000..013505814
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/converge.yml
@@ -0,0 +1,43 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from HTTPS with proxy parameters"
+ ansible.builtin.import_role:
+ name: "idrac_import_server_config_profile"
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'HTTPS_URL') }}"
+ share_user: "{{ lookup('env', 'HTTPS_USERNAME') }}"
+ share_password: "{{ lookup('env', 'HTTPS_PASSWORD') }}"
+ scp_file: "{{ lookup('env', 'https_filename') }}"
+ proxy_support: true
+ proxy_type: http
+ proxy_server: "{{ lookup('env', 'PROXY_SERVER') }}"
+ proxy_username: "{{ lookup('env', 'PROXY_USER') }}"
+ proxy_port: "{{ lookup('env', 'PROXY_PORT') }}"
+ proxy_password: "{{ lookup('env', 'PROXY_PASSWORD') }}"
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from HTTPS with proxy parameters in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from HTTPS with proxy parameters in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/https_share_with_proxy_parameters/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/converge.yml
new file mode 100644
index 000000000..3fbeec584
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/converge.yml
@@ -0,0 +1,36 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from import buffer with IDRAC components"
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target:
+ - IDRAC
+ import_buffer: "{\"SystemConfiguration\": {\"Components\": [{\"FQDD\": \"iDRAC.Embedded.1\",\"Attributes\":
+ [{\"Name\": \"Time.1#Timezone\",\"Value\": \"CST6CDT\"}]}]}}"
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from import buffer with IDRAC components in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from import buffer with IDRAC components in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_json/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/converge.yml
new file mode 100644
index 000000000..bd956dc0c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/converge.yml
@@ -0,0 +1,35 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: "Importing SCP from import buffer with IDRAC components"
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target: ['IDRAC']
+ import_buffer: '<SystemConfiguration><Component FQDD="iDRAC.Embedded.1"><Attribute Name="Time.1#Timezone">CST6CDT</Attribute>
+ </Component></SystemConfiguration>'
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from import buffer with IDRAC components in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from import buffer with IDRAC components in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_buffer_xml/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/converge.yml
new file mode 100644
index 000000000..860e63b52
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/converge.yml
@@ -0,0 +1,41 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+
+ - name: "Importing SCP from NFS with multiple components"
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ target:
+ - 'NIC'
+ - 'IDRAC'
+ share_parameters:
+ share_name: "{{ lookup('env', 'NFS_URL') }}"
+ scp_file: "{{ lookup('env', 'nfs_filename') }}"
+ shutdown_type: 'Forced'
+ end_host_power_state: 'On'
+ when: not ansible_check_mode
+
+ - name: Verifying Import SCP from NFS with multiple components in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from NFS with multiple components in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/import_multiple_target/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/converge.yml
new file mode 100644
index 000000000..bb839b38b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/converge.yml
@@ -0,0 +1,38 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+
+ - name: "Importing SCP from NFS"
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ share_parameters:
+ share_name: "{{ lookup('env', 'NFS_URL') }}"
+ scp_file: "{{ lookup('env', 'nfs_filename') }}"
+ shutdown_type: 'Forced'
+ end_host_power_state: 'On'
+ when: not ansible_check_mode
+
+  - name: Verifying Import SCP from NFS in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "Successfully imported and applied Server Configuration Profile."
+ when: not ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Verifying Import SCP from NFS in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_import_server_config_profile_out.msg == "Successfully imported the Server Configuration Profile."
+ - idrac_import_server_config_profile_out.scp_status.JobState == "Completed"
+ - idrac_import_server_config_profile_out.scp_status.Message == "No changes were applied since the
+ current component configuration matched the requested configuration."
+ when: not ansible_check_mode and not idrac_import_server_config_profile_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/prepare.yml
new file mode 100644
index 000000000..5fadc24b5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/nfs_share/prepare.yml
@@ -0,0 +1,7 @@
+---
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Cleanup config
+ ansible.builtin.include_tasks: ../resources/tests/prepare.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/resources/tests/prepare.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/resources/tests/prepare.yml
new file mode 100644
index 000000000..aa9fd74c5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/molecule/resources/tests/prepare.yml
@@ -0,0 +1,18 @@
+---
+- name: "Cleanup config"
+ ansible.builtin.import_role:
+ name: idrac_import_server_config_profile
+ vars:
+ hostname: "{{ lookup('env', 'HOSTNAME') }}"
+ username: "{{ lookup('env', 'USERNAME') }}"
+ password: "{{ lookup('env', 'PASSWORD') }}"
+ validate_certs: false
+ import_buffer: "{ \"SystemConfiguration\": {\"Components\": [
+ { \"FQDD\": \"iDRAC.Embedded.1\",\"Attributes\": [{ \"Name\": \"Time.1#Timezone\",
+ \"Value\": \"UTC\",
+ \"Set On Import\": \"True\",
+ \"Comment\": \"Read and Write\" }]},{ \"FQDD\":
+ \"RAID.Integrated.1-1\",\"Attributes\": [{ \"Name\": \"RAIDrebuildRate\",
+ \"Value\": \"31\",
+ \"Set On Import\": \"True\",
+ \"Comment\": \"Read and Write\" }]}]}}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tasks/main.yml
new file mode 100644
index 000000000..0205f5990
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+# tasks file for idrac_import_server_config_profile
+
+- name: Importing the SCP components
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ hostname }}"
+ idrac_port: "{{ https_port }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ validate_certs: "{{ validate_certs }}"
+ timeout: "{{ https_timeout }}"
+ share_name: "{{ share_parameters.share_name | default(omit) }}"
+ scp_file: "{{ share_parameters.scp_file | default(omit) }}"
+ share_user: "{{ share_parameters.share_user | default(omit) }}"
+ share_password: "{{ share_parameters.share_password | default(omit) }}"
+ proxy_support: "{{ share_parameters.proxy_support | default(omit) }}"
+ proxy_type: "{{ share_parameters.proxy_type | default(omit) }}"
+ proxy_server: "{{ share_parameters.proxy_server | default(omit) }}"
+ proxy_port: "{{ share_parameters.proxy_port | default(omit) }}"
+ proxy_username: "{{ share_parameters.proxy_username | default(omit) }}"
+ proxy_password: "{{ share_parameters.proxy_password | default(omit) }}"
+ ignore_certificate_warning: "{{ share_parameters.ignore_certificate_warning | default(omit) }}"
+ import_buffer: "{{ import_buffer | default(omit) | string }}"
+ target: "{{ target }}"
+ shutdown_type: "{{ shutdown_type }}"
+ end_host_power_state: "{{ end_host_power_state }}"
+ command: 'import'
+ job_wait: "{{ idrac_import_server_config_profile_job_wait }}"
+ register: idrac_import_server_config_profile_out
+ delegate_to: "{{ idrac_import_server_config_profile_delagate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/test.yml
new file mode 100644
index 000000000..7623255a8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- name: Importing server config profile for iDRAC
+ hosts: localhost
+ roles:
+ - idrac_import_server_config_profile
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/vars/main.yml
new file mode 100644
index 000000000..72987b246
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_import_server_config_profile/vars/main.yml
@@ -0,0 +1,4 @@
+---
+# vars file for idrac_import_server_config_profile
+idrac_import_server_config_profile_job_wait: true
+idrac_import_server_config_profile_delagate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/README.md
new file mode 100644
index 000000000..890207bb7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/README.md
@@ -0,0 +1,181 @@
+# idrac_job_queue
+
+ Role to manage the iDRAC (iDRAC8 and iDRAC9 only) lifecycle controller job queue.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role.
+```
+dellemc.openmanage
+```
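+
+As a minimal sketch (assuming the collection is installed from Ansible Galaxy), the dependency can be declared in a `requirements.yml` and pulled in with `ansible-galaxy collection install -r requirements.yml`:
+```
+---
+collections:
+  - name: dellemc.openmanage
+```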
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>clear_job_queue</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Clears the job queue of the iDRAC.</td>
+ </tr>
+ <tr>
+ <td>job_id</td>
+ <td>false</td>
+    <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Id of the job to be deleted.<br>- If I(clear_job_queue) is C(true) then the I(job_id) will be ignored.</td>
+ </tr>
+ <tr>
+ <td>force</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Clears the job queue of the iDRAC forcefully.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_job_queue_out</td>
+ <td>{msg: "The job queue can been cleared successfully"
+}</td>
+    <td>Module output of the idrac_job_queue role</td>
+  </tr>
+</tbody>
+</table>
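+
+For instance, a follow-up task can read this fact after the role has run (a minimal sketch; the exact message depends on the operation performed):
+```
+- name: Show the job queue result
+  ansible.builtin.debug:
+    msg: "{{ idrac_job_queue_out.msg }}"
+```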
+
+## Examples
+-----
+
+```
+- name: Delete a Job
+ ansible.builtin.include_role:
+    name: dellemc.openmanage.idrac_job_queue
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ job_id: JID_XXXXXXXXXXXX
+
+- name: Clear the job queue
+ ansible.builtin.include_role:
+    name: dellemc.openmanage.idrac_job_queue
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ clear_job_queue: true
+
+- name: Clear the job queue forcefully
+ ansible.builtin.include_role:
+    name: dellemc.openmanage.idrac_job_queue
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ clear_job_queue: true
+ force: true
+```
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Kritika Bhateja (Kritika.Bhateja@Dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/defaults/main.yml
new file mode 100644
index 000000000..afa700d6e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for idrac_job_queue
+validate_certs: true
+https_timeout: 30
+https_port: 443
+clear_job_queue: false
+force: false
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/handlers/main.yml
new file mode 100644
index 000000000..8b95e7024
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_job_queue
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/argument_specs.yml
new file mode 100644
index 000000000..426c7c08b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/argument_specs.yml
@@ -0,0 +1,53 @@
+---
+argument_specs:
+ main:
+ version_added: "8.0.0"
+ short_description: Role to manage the iDRAC lifecycle controller job queue.
+ description:
+      - Role to manage the iDRAC lifecycle controller job queue.
+      - Delete a job from the job queue.
+      - Clear the job queue.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled
+ sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains
+ a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ clear_job_queue:
+ description: Clear all the jobs from the iDRAC job queue.
+ type: bool
+ default: false
+ job_id:
+ description:
+ - Id of the job to be deleted.
+ - If I(clear_job_queue) is C(true) then the I(job_id) will be ignored.
+ type: str
+ force:
+ description: Clear all the jobs from the iDRAC job queue forcefully.
+ type: bool
+ default: false
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/main.yml
new file mode 100644
index 000000000..2563450dd
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/meta/main.yml
@@ -0,0 +1,28 @@
+---
+galaxy_info:
+ role_name: idrac_job_queue
+ author: "Kritika Bhateja"
+  description: Role to manage the iDRAC lifecycle controller job queue.
+    Delete a job from the job queue.
+    Clear the job queue.
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/converge.yml
new file mode 100644
index 000000000..6bf6af48b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/converge.yml
@@ -0,0 +1,112 @@
+---
+- name: Job Queue Clear Scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Setting input facts
+ ansible.builtin.set_fact:
+ input: &input
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ no_log: true
+
+    - name: Creating a job which configures iDRAC attributes
+ ansible.builtin.include_role:
+ name: idrac_import_server_config_profile
+ vars:
+ <<: *input
+ target: ["NIC"]
+ import_buffer:
+ "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+
+ - name: Clear the job queue
+ ansible.builtin.include_role:
+ name: "idrac_job_queue"
+ vars:
+ <<: *input
+ clear_job_queue: true
+
+ - name: "Verifying job queue clear"
+ ansible.builtin.assert:
+ that:
+ - idrac_job_queue_out.msg == "The job queue has been cleared successfully."
+
+ - name: Waiting for the data to be available on iDRAC
+ ansible.builtin.wait_for:
+ timeout: 180
+
+ - name: Creating a job which configures iDRAC attributes
+ ansible.builtin.include_role:
+ name: idrac_import_server_config_profile
+ vars:
+ <<: *input
+ target: ["NIC"]
+ import_buffer:
+ "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+
+ - name: Force clear the job queue
+ ansible.builtin.include_role:
+ name: "idrac_job_queue"
+ vars:
+ <<: *input
+ clear_job_queue: true
+ force: true
+
+ - name: "Verifying force job queue clear"
+ ansible.builtin.assert:
+ that:
+ - idrac_job_queue_out.msg == "The job queue has been cleared successfully."
+
+ - name: Waiting for the data to be available on iDRAC
+ ansible.builtin.wait_for:
+ timeout: 180
+
+ - name: Clear the job queue
+ block:
+ - name: Clear the job queue when there is no jobs
+ ansible.builtin.include_role:
+ name: "idrac_job_queue"
+ vars:
+ <<: *input
+ clear_job_queue: true
+
+ rescue:
+ - name: "Verifying job queue clear when there is no job"
+ ansible.builtin.assert:
+ that:
+ - idrac_job_queue_out.msg == "There are no jobs in the job queue."
+
+    - name: Creating a job which exports SCP to a local path with the IDRAC component
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ scp_components:
+ - IDRAC
+ share_name: "/root/"
+ scp_file: "file1.xml"
+ export_format: JSON
+ export_use: Clone
+ job_wait: false
+ async: 45
+ poll: 0
+
+ - name: Clear the job queue
+ block:
+ - name: Clear the job queue when any of the job is not in state to be deleted
+ ansible.builtin.include_role:
+ name: "idrac_job_queue"
+ vars:
+ <<: *input
+ clear_job_queue: true
+ rescue:
+ - name: "Verifying job queue clear"
+ ansible.builtin.assert:
+ that:
+ - idrac_job_queue_out.msg == "One or more jobs cannot be deleted, Retry the operation or
+ delete the jobs by checking the job status."
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/clear_job_queue/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/converge.yml
new file mode 100644
index 000000000..1d14502f8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/converge.yml
@@ -0,0 +1,90 @@
+---
+- name: Negative Scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+    - name: Creating a job to export SCP to a local path with the IDRAC component
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+ scp_components:
+ - IDRAC
+ share_name: "/root/"
+ scp_file: "file1.xml"
+ export_format: JSON
+ export_use: Clone
+ job_wait: false
+
+ - name: Deleting a job
+ block:
+ - name: Delete a job from the job queue with an invalid id
+ ansible.builtin.import_role:
+ name: "idrac_job_queue"
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+ job_id: JID_12345678
+ rescue:
+ - name: "Verifying invalid job deletion from the job queue"
+ ansible.builtin.assert:
+ that:
+ - idrac_job_queue_out.msg == "The job JID_12345678 is invalid."
+
+ - name: Clear job queue
+ block:
+ - name: Clearing job queue with an invalid hostname
+ ansible.builtin.import_role:
+ name: idrac_job_queue
+ vars:
+ hostname: "invalidHostname"
+ username: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ clear_job_queue: true
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+
+ rescue:
+ - name: "Verifying job queue clear with an invalid hostname"
+ ansible.builtin.assert:
+ that:
+ - "'<urlopen error [Errno -2] Name or service not known>' in idrac_job_queue_out.msg or
+ '<urlopen error [Errno -3] Temporary failure in name resolution>' in idrac_job_queue_out.msg"
+
+ - name: Clear job queue
+ block:
+ - name: Clearing job queue with an invalid username
+ ansible.builtin.import_role:
+ name: idrac_job_queue
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "invalidUsername"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ clear_job_queue: true
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+
+ rescue:
+ - name: "Verifying job queue clear with an invalid username"
+ ansible.builtin.assert:
+ that:
+ - "'The authentication credentials included with this request are missing or invalid.' in idrac_job_queue_out.msg"
+
+ - name: Clear job queue
+ block:
+ - name: Clearing job queue with an invalid password
+ ansible.builtin.import_role:
+ name: idrac_job_queue
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ password: "invalidPassword"
+ clear_job_queue: true
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+
+ rescue:
+ - name: "Verifying job queue clear with an invalid password"
+ ansible.builtin.assert:
+ that:
+ - "'The authentication credentials included with this request are missing or invalid.' in idrac_job_queue_out.msg"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/default/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/converge.yml
new file mode 100644
index 000000000..ecf859bf7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/converge.yml
@@ -0,0 +1,91 @@
+---
+- name: Job Deletion Scenarios
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Setting input facts
+ ansible.builtin.set_fact:
+ input: &input
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+ no_log: true
+
+    - name: Creating a job which configures iDRAC attributes
+ ansible.builtin.include_role:
+ name: idrac_import_server_config_profile
+ vars:
+ <<: *input
+ target: ["NIC"]
+ import_buffer:
+ "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'>
+ Disabled</Attribute></Component></SystemConfiguration>"
+
+ - name: Delete a job from the job queue
+ ansible.builtin.include_role:
+ name: "idrac_job_queue"
+ vars:
+ <<: *input
+ job_id: "{{ idrac_import_server_config_profile_out.scp_status.Id }}"
+
+ - name: "Verifying job deletion from the job queue"
+ ansible.builtin.assert:
+ that:
+ - '"deleted sucessfully" in idrac_job_queue_out.msg'
+
+    - name: Creating a job to export SCP to a local path with the IDRAC component
+ dellemc.openmanage.idrac_server_config_profile:
+ idrac_ip: "{{ lookup('env', 'IDRAC_IP') }}"
+ idrac_user: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ idrac_password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+ scp_components:
+ - IDRAC
+ share_name: "/root/"
+ scp_file: "file1.xml"
+ export_format: JSON
+ export_use: Clone
+ job_wait: false
+ async: 45
+ poll: 0
+
+ - name: Get Job ID.
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}/redfish/v1/Managers/iDRAC.Embedded.1/Jobs"
+ validate_certs: "{{ lookup('env', 'VALIDATE_CERT') }}"
+ method: "GET"
+ user: "{{ lookup('env', 'IDRAC_USERNAME') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+ return_content: true
+ register: job
+
+ - name: Set fact for Job ID
+ ansible.builtin.set_fact:
+ jobid: "{{ job.json.Members[-1]['@odata.id'] | split('/') }}"
+
+ - name: Delete a job
+ block:
+ - name: Delete a job from the job queue which cannot be deleted
+ ansible.builtin.include_role:
+ name: "idrac_job_queue"
+ vars:
+ <<: *input
+ job_id: "{{ jobid[-1] }}"
+
+ rescue:
+ - name: "Verifying job deletion from the job queue"
+ ansible.builtin.assert:
+ that:
+ - '"cannot be deleted" in idrac_job_queue_out.msg'
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/molecule.yml
new file mode 100644
index 000000000..de4ada585
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/molecule/delete_job/molecule.yml
@@ -0,0 +1,10 @@
+---
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_api.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_api.yml
new file mode 100644
index 000000000..7120e339a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_api.yml
@@ -0,0 +1,66 @@
+---
+- name: Set uri options
+ ansible.builtin.set_fact:
+ idrac_job_queue_idrac_opts: &idrac_job_queue_idrac_opts
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers: "{{ idrac_job_queue_uri_headers }}"
+ body_format: "{{ idrac_job_queue_uri_body_format }}"
+ return_content: "{{ idrac_job_queue_uri_return_content }}"
+ force_basic_auth: "{{ idrac_job_queue_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+- name: Perform clear job queue operation for iDRAC
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}
+ {{ idrac_job_queue_clear_job_queue_api }}"
+ <<: *idrac_job_queue_idrac_opts
+ method: "POST"
+ body: '{"JobID" : "JID_CLEARALL"}'
+ status_code: [200, 400]
+ register: idrac_job_queue_clear_job_queue_out
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+ changed_when: idrac_job_queue_clear_job_queue_out.status == 200
+ when: force is undefined or force is false
+
+- name: Perform clear job queue operation with force for iDRAC
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}
+ {{ idrac_job_queue_clear_job_queue_api }}"
+ <<: *idrac_job_queue_idrac_opts
+ method: "POST"
+ body: '{ "JobID" : "JID_CLEARALL_FORCE"}'
+ status_code: 200
+ register: idrac_job_queue_clear_job_queue_force_out
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+ changed_when: idrac_job_queue_clear_job_queue_force_out.status == 200
+ when: force is defined and force is true
+
+- name: Set output Message for clear job queue successfully for iDRAC
+ ansible.builtin.set_fact:
+ idrac_job_queue_out:
+ msg: "{{ idrac_job_queue_job_clear_queue_success_msg }}"
+ when:
+ - idrac_job_queue_clear_job_queue_out is defined
+ - idrac_job_queue_clear_job_queue_out.status is defined
+ - idrac_job_queue_clear_job_queue_out.status == 200
+
+- name: Set output Message for clear job queue with force successfully for iDRAC
+ ansible.builtin.set_fact:
+ idrac_job_queue_out:
+ msg: "{{ idrac_job_queue_job_clear_queue_success_msg }}"
+ when:
+ - idrac_job_queue_clear_job_queue_force_out is defined
+ - idrac_job_queue_clear_job_queue_force_out.status is defined
+ - idrac_job_queue_clear_job_queue_force_out.status == 200
+
+- name: Set output Message for clear job queue failure for iDRAC
+ ansible.builtin.fail:
+ msg: "{{ idrac_job_queue_job_clear_queue_failure_msg }}"
+ when:
+ - idrac_job_queue_clear_job_queue_out is defined
+ - idrac_job_queue_clear_job_queue_out.status is defined
+ - idrac_job_queue_clear_job_queue_out.status == 400
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_wsman.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_wsman.yml
new file mode 100644
index 000000000..6ba7d3c7d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/clear_jobs_with_wsman.yml
@@ -0,0 +1,70 @@
+---
+- name: Set uri options
+ ansible.builtin.set_fact:
+ idrac_job_queue_idrac_opts: &idrac_job_queue_idrac_opts
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ body_format: "{{ idrac_job_queue_uri_body_format }}"
+ return_content: "{{ idrac_job_queue_uri_return_content }}"
+ force_basic_auth: "{{ idrac_job_queue_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+- name: Perform clear job queue operation for iDRAC
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/wsman"
+ <<: *idrac_job_queue_idrac_opts
+ headers:
+ Content-Type: "application/xml"
+ body: "{{ lookup('template', 'idrac_delete_job_queue.j2') }}"
+ status_code: 200
+ register: idrac_job_queue_clear_job_queue_out
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+ changed_when: idrac_job_queue_clear_job_queue_out.content
+ is search(".*<n1:MessageID>SUP020</n1:MessageID>.*")
+ when: force is undefined or force is false
+
+- name: Perform clear job queue operation with force for iDRAC
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/wsman"
+ <<: *idrac_job_queue_idrac_opts
+ headers:
+ Content-Type: "application/xml"
+ body: "{{ lookup('template', 'idrac_delete_job_queue_force.j2') }}"
+ status_code: 200
+ register: idrac_job_queue_clear_job_queue_force_out
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+ changed_when: idrac_job_queue_clear_job_queue_force_out.content
+ is search(".*<n1:MessageID>SUP020</n1:MessageID>.*")
+ when: force is defined and force is true
+
+- name: Set output Message for clear job queue successfully for iDRAC
+ ansible.builtin.set_fact:
+ idrac_job_queue_out:
+ msg: "{{ idrac_job_queue_job_clear_queue_success_msg }}"
+ when:
+ - idrac_job_queue_clear_job_queue_out is defined
+ - idrac_job_queue_clear_job_queue_out.changed is true
+ - idrac_job_queue_clear_job_queue_out.status is defined
+ - idrac_job_queue_clear_job_queue_out.status == 200
+
+- name: Set output Message for clear job queue with force successfully for iDRAC
+ ansible.builtin.set_fact:
+ idrac_job_queue_out:
+ msg: "{{ idrac_job_queue_job_clear_queue_success_msg }}"
+ when:
+ - idrac_job_queue_clear_job_queue_force_out is defined
+ - idrac_job_queue_clear_job_queue_force_out.changed is true
+ - idrac_job_queue_clear_job_queue_force_out.status is defined
+ - idrac_job_queue_clear_job_queue_force_out.status == 200
+
+- name: Set output Message for clear job queue failure for iDRAC
+ ansible.builtin.fail:
+ msg: "{{ idrac_job_queue_job_clear_queue_failure_msg }}"
+ when:
+ - idrac_job_queue_clear_job_queue_out is defined
+ - idrac_job_queue_clear_job_queue_out.changed is false
+ - idrac_job_queue_clear_job_queue_out.status is defined
+ - idrac_job_queue_clear_job_queue_out.status != 200
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/delete_job_with_id.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/delete_job_with_id.yml
new file mode 100644
index 000000000..81796c83a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/delete_job_with_id.yml
@@ -0,0 +1,41 @@
+---
+- name: Set uri options
+ ansible.builtin.set_fact:
+ idrac_job_queue_idrac_opts: &idrac_job_queue_idrac_opts
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers: "{{ idrac_job_queue_uri_headers }}"
+ body_format: "{{ idrac_job_queue_uri_body_format }}"
+ return_content: "{{ idrac_job_queue_uri_return_content }}"
+ force_basic_auth: "{{ idrac_job_queue_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+- name: Perform delete job operation
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ idrac_job_queue_validate_job_api }}/{{ job_id }}"
+ <<: *idrac_job_queue_idrac_opts
+ method: "DELETE"
+ status_code: [200, 400]
+ register: idrac_job_queue_idrac_job_delete_out
+ changed_when: idrac_job_queue_idrac_job_delete_out.status == 200
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+
+- name: Set output Message for job deleted successfully
+ ansible.builtin.set_fact:
+ idrac_job_queue_out:
+ msg: "{{ idrac_job_queue_delete_job_success_msg }}"
+ when:
+ - idrac_job_queue_idrac_job_delete_out is defined
+ - idrac_job_queue_idrac_job_delete_out.status is defined
+ - idrac_job_queue_idrac_job_delete_out.status == 200
+
+- name: Set output Message for job delete unsuccessful
+ ansible.builtin.fail:
+ msg: "{{ idrac_job_queue_delete_job_failure_msg }}"
+ when:
+ - idrac_job_queue_idrac_job_delete_out is defined
+ - idrac_job_queue_idrac_job_delete_out.status is defined
+ - idrac_job_queue_idrac_job_delete_out.status != 200
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/get_idrac_firmware_version.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/get_idrac_firmware_version.yml
new file mode 100644
index 000000000..9a5690188
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/get_idrac_firmware_version.yml
@@ -0,0 +1,20 @@
+---
+- name: Get the manager firmware version
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/Managers/iDRAC.Embedded.1"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers: "{{ idrac_job_queue_uri_headers }}"
+ body_format: "{{ idrac_job_queue_uri_body_format }}"
+ return_content: "{{ idrac_job_queue_uri_return_content }}"
+ force_basic_auth: "{{ idrac_job_queue_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ method: GET
+ register: idrac_job_queue_firmware_version
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+
+- name: Set manager firmware version
+ ansible.builtin.set_fact:
+ idrac_job_queue_idrac_firmware_version: "{{ idrac_job_queue_firmware_version.json.FirmwareVersion }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/main.yml
new file mode 100644
index 000000000..ec2f93591
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/main.yml
@@ -0,0 +1,53 @@
+---
+# tasks file for idrac_job_queue
+- name: Check whether at least one of 'IDRAC_USERNAME' or username is provided
+ ansible.builtin.fail:
+ msg: "Ensure the value for environment variable 'IDRAC_USERNAME' or
+ the argument 'username' is set."
+ when: username is not defined and not lookup('env', 'IDRAC_USERNAME')
+
+- name: Check whether at least one of 'IDRAC_PASSWORD' or password is provided
+ ansible.builtin.fail:
+ msg: "Ensure the value for environment variable 'IDRAC_PASSWORD' or
+ the argument 'password' is set."
+ when: password is not defined and not lookup('env', 'IDRAC_PASSWORD')
+
+- name: Running idrac job queue role
+ when: (job_id is defined and job_id != "") or
+ (clear_job_queue is true)
+ block:
+ - name: Validate the inputs
+ ansible.builtin.include_tasks: validate_input_data.yml
+
+ - name: Delete job operation
+ ansible.builtin.include_tasks: delete_job_with_id.yml
+ when:
+ - job_id is defined and job_id != ""
+ - clear_job_queue is false
+
+ - name: Clear Job queue operation
+ when: clear_job_queue is true
+ block:
+ - name: Check firmware version
+ ansible.builtin.include_tasks: get_idrac_firmware_version.yml
+
+ - name: Clear job queue operation using rest
+ ansible.builtin.include_tasks: clear_jobs_with_api.yml
+ when: idrac_job_queue_idrac_firmware_version is version('3.0', '>=')
+
+ - name: Clear job queue operation for wsman
+ ansible.builtin.include_tasks: clear_jobs_with_wsman.yml
+ when: idrac_job_queue_idrac_firmware_version is version('3.0', '<')
+ rescue:
+ - name: Set the failure messages
+ ansible.builtin.set_fact:
+ idrac_job_queue_out: "{{ ansible_failed_result
+ | combine({'failed_task_name': ansible_failed_task.name}) }}"
+
+ always:
+ - name: Print the message
+ when: idrac_job_queue_out is defined
+ failed_when: idrac_job_queue_out.failed is defined
+ and idrac_job_queue_out.failed is true
+ ansible.builtin.debug:
+ var: idrac_job_queue_out
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/validate_input_data.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/validate_input_data.yml
new file mode 100644
index 000000000..cf88bf47c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tasks/validate_input_data.yml
@@ -0,0 +1,73 @@
+---
+- name: Set uri options
+ ansible.builtin.set_fact:
+ idrac_job_queue_idrac_opts: &idrac_job_queue_idrac_opts
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers: "{{ idrac_job_queue_uri_headers }}"
+ body_format: "{{ idrac_job_queue_uri_body_format }}"
+ return_content: "{{ idrac_job_queue_uri_return_content }}"
+ force_basic_auth: "{{ idrac_job_queue_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+- name: Get connection
+ ansible.builtin.uri:
+ <<: *idrac_job_queue_idrac_opts
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems"
+ method: "GET"
+ status_code: "{{ idrac_job_queue_uri_status_code }}"
+ register: idrac_job_queue_idrac_connection
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+
+- name: Validate hostname or certificate.
+ ansible.builtin.fail:
+ msg: "{{ idrac_job_queue_idrac_connection.msg }}"
+ when: idrac_job_queue_idrac_connection.status == -1
+
+- name: Validate credentials.
+ ansible.builtin.fail:
+ msg: "The authentication credentials included with
+ this request are missing or invalid."
+ when: idrac_job_queue_idrac_connection.status == 401
+
+- name: Check for a valid job id
+ when:
+ - job_id is defined and job_id != ""
+ - clear_job_queue is false
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+ block:
+ - name: Get the Job with the job id
+ register: idrac_job_queue_idrac_job_exists_out
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ idrac_job_queue_validate_job_api }}/{{ job_id }}"
+ <<: *idrac_job_queue_idrac_opts
+ method: "GET"
+ status_code: [200, 404]
+
+ - name: Set output Message for invalid job ID
+ ansible.builtin.fail:
+ msg: "{{ idrac_job_queue_invalid_job_msg }}"
+ when:
+ - idrac_job_queue_idrac_job_exists_out is defined
+ - idrac_job_queue_idrac_job_exists_out.status is defined
+ - idrac_job_queue_idrac_job_exists_out.status == 404
+
+- name: Check whether jobs exists to clear
+ when: clear_job_queue is true
+ delegate_to: "{{ idrac_job_queue_delegate }}"
+ block:
+ - name: Get all the jobs
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ idrac_job_queue_validate_job_api }}"
+ <<: *idrac_job_queue_idrac_opts
+ method: "GET"
+ status_code: [200, 404]
+ register: idrac_job_queue_jobs_exists_out
+
+ - name: Set output message if no jobs exist
+ ansible.builtin.fail:
+ msg: "{{ idrac_job_queue_no_jobs_in_queue_msg }}"
+ when: idrac_job_queue_jobs_exists_out.json['Members@odata.count'] == 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue.j2 b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue.j2
new file mode 100644
index 000000000..0b03144cc
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue.j2
@@ -0,0 +1,25 @@
+<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:n1="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_JobService">
+ <s:Header>
+ <wsa:To s:mustUnderstand="true">https://{{ hostname }}:{{ https_port }}/wsman</wsa:To>
+ <wsman:ResourceURI s:mustUnderstand="true">http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_JobService</wsman:ResourceURI>
+ <wsa:ReplyTo>
+ <wsa:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:Address>
+ </wsa:ReplyTo>
+ <wsa:Action s:mustUnderstand="true">http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_JobService/DeleteJobQueue</wsa:Action>
+ <wsman:MaxEnvelopeSize s:mustUnderstand="true">524288</wsman:MaxEnvelopeSize>
+ <wsa:MessageID s:mustUnderstand="true">urn:uuid:{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=32') | to_uuid }}</wsa:MessageID>
+ <wsman:OperationTimeout>PT12.0S</wsman:OperationTimeout>
+ <wsman:SelectorSet>
+ <wsman:Selector Name="__cimnamespace">root/dcim</wsman:Selector>
+ <wsman:Selector Name="SystemName">Idrac</wsman:Selector>
+ <wsman:Selector Name="SystemCreationClassName">DCIM_ComputerSystem</wsman:Selector>
+ <wsman:Selector Name="Name">JobService</wsman:Selector>
+ <wsman:Selector Name="CreationClassName">DCIM_JobService</wsman:Selector>
+ </wsman:SelectorSet>
+ </s:Header>
+ <s:Body>
+ <n1:DeleteJobQueue_INPUT>
+ <n1:JobID>JID_CLEARALL</n1:JobID>
+ </n1:DeleteJobQueue_INPUT>
+ </s:Body>
+</s:Envelope>
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue_force.j2 b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue_force.j2
new file mode 100644
index 000000000..efa38f0d2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/templates/idrac_delete_job_queue_force.j2
@@ -0,0 +1,25 @@
+<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:n1="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_JobService">
+ <s:Header>
+ <wsa:To s:mustUnderstand="true">https://{{ hostname }}:{{ https_port }}/wsman</wsa:To>
+ <wsman:ResourceURI s:mustUnderstand="true">http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_JobService</wsman:ResourceURI>
+ <wsa:ReplyTo>
+ <wsa:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:Address>
+ </wsa:ReplyTo>
+ <wsa:Action s:mustUnderstand="true">http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_JobService/DeleteJobQueue</wsa:Action>
+ <wsman:MaxEnvelopeSize s:mustUnderstand="true">524288</wsman:MaxEnvelopeSize>
+ <wsa:MessageID s:mustUnderstand="true">urn:uuid:{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=32') | to_uuid }}</wsa:MessageID>
+ <wsman:OperationTimeout>PT12.0S</wsman:OperationTimeout>
+ <wsman:SelectorSet>
+ <wsman:Selector Name="__cimnamespace">root/dcim</wsman:Selector>
+ <wsman:Selector Name="SystemName">Idrac</wsman:Selector>
+ <wsman:Selector Name="SystemCreationClassName">DCIM_ComputerSystem</wsman:Selector>
+ <wsman:Selector Name="Name">JobService</wsman:Selector>
+ <wsman:Selector Name="CreationClassName">DCIM_JobService</wsman:Selector>
+ </wsman:SelectorSet>
+ </s:Header>
+ <s:Body>
+ <n1:DeleteJobQueue_INPUT>
+ <n1:JobID>JID_CLEARALL_FORCE</n1:JobID>
+ </n1:DeleteJobQueue_INPUT>
+ </s:Body>
+</s:Envelope>
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/test.yml
new file mode 100644
index 000000000..c430ba1ce
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Testing for idrac job queue
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_job_queue
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/vars/main.yml
new file mode 100644
index 000000000..4d8a2a593
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_job_queue/vars/main.yml
@@ -0,0 +1,30 @@
+---
+# vars file for idrac_job_queue
+idrac_job_queue_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+idrac_job_queue_uri_body_format: "json"
+idrac_job_queue_force_basic_auth: true
+idrac_job_queue_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+idrac_job_queue_uri_return_content: true
+idrac_job_queue_validate_job_api: /redfish/v1/Managers/iDRAC.Embedded.1/Jobs
+idrac_job_queue_clear_job_queue_api:
+ /redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellJobService/Actions/DellJobService.DeleteJobQueue
+idrac_job_queue_delete_job_success_msg: The job {{ job_id }} has been deleted
+ successfully.
+idrac_job_queue_delete_job_failure_msg: "The job {{ job_id }} cannot be
+ deleted. Retry the operation by checking the job status."
+idrac_job_queue_invalid_job_msg: "The job {{ job_id }} is invalid."
+idrac_job_queue_job_clear_queue_success_msg: "The job queue
+ has been cleared successfully."
+idrac_job_queue_no_jobs_in_queue_msg: "There are no jobs in the job queue."
+idrac_job_queue_job_clear_queue_failure_msg: "One or more jobs cannot be
+ deleted. Retry the operation or delete the jobs by checking the job status."
+idrac_job_queue_delegate: "{{ lookup('ansible.builtin.env', 'RUNON',
+ default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/README.md
new file mode 100644
index 000000000..a86526f03
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/README.md
@@ -0,0 +1,509 @@
+idrac_os_deployment
+=========
+
+Role to deploy an operating system of a specified version on the servers.<br>
+
+The role performs the following operations:
+1. Downloads or copies the source ISO to a local copy in the Ansible controller's tmp folder.
+1. Creates a kickstart file from a Jinja template based on the OS name and version.
+1. Extracts the ISO using the `xorriso` utility (a sketch of this step follows the list).
+1. Modifies the boot configuration (BIOS and UEFI) of the extracted ISO so that it uses the kickstart file.
+1. Rebuilds the ISO with the kickstart file embedded, generating a custom ISO using the `mkisofs`, `isohybrid` and `implantisomd5` commands.
+1. Copies the generated custom ISO to the destination share location specified in the role input. Based on the input, one of the following methods is used to copy the ISO to the shared repository.
+ - CIFS/NFS uses a local file mount to copy the ISO to the share location.
+ - HTTP/HTTPS uses SSH to copy the ISO to the location from which the web server serves content.
+1. Mounts the custom ISO as virtual media (virtual CD) on the iDRAC using the `idrac_virtual_media` module.
+1. Sets the boot target to CD and enables a one-time boot to CD using the `idrac_boot` module.
+1. Tracks the OS deployment for the specified amount of time provided as user input.
+1. Ejects the virtual media after the specified time has elapsed.
+
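+As a hedged sketch only (the paths, the working directory and the exact flags shown here are placeholders; the role assembles the real commands internally from its own variables), the extraction step above roughly corresponds to a task such as:
+```
+- name: Extract the source ISO into a working directory (illustrative sketch)
+  ansible.builtin.command:
+    cmd: "xorriso -osirrox on -indev /tmp/omam_osd/rhel.iso -extract / /tmp/omam_osd/extract"
+  changed_when: true
+```
+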
+Requirements
+------------
+
+### Prerequisite
+* To support an HTTP/HTTPS repository as the destination, SSH access to the target machine must be enabled so that the custom ISO can be copied into the http/https share location.
+* To support a CIFS/NFS repository as the destination, the repository must be mounted manually on the local (Ansible controller) machine so that the custom ISO can be copied into the locally mounted CIFS/NFS share location (see the illustrative mount sketch below).
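+
+For example, an NFS destination could be mounted on the controller ahead of time with a task like the one below. This is only a hedged sketch: the export address, the local mount point and the use of the `ansible.posix.mount` module are assumptions for illustration and are not part of this role.
+```
+- name: Mount the NFS share locally on the Ansible controller (illustrative sketch)
+  become: true
+  ansible.posix.mount:
+    src: "198.192.0.1:/exports/iso"   # placeholder NFS export
+    path: /mnt/iso_repo               # placeholder local mount point
+    fstype: nfs
+    state: mounted
+```
+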
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+python
+xorriso
+syslinux
+isomd5sum
+wget
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+xorriso
+syslinux
+isomd5sum
+wget
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+ansible.utils
+ansible.windows
+```
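+
+One convenient (but optional) way to install these collections is through a requirements file; the file path below is only an example:
+```
+# collections/requirements.yml -- illustrative only
+collections:
+  - name: dellemc.openmanage
+  - name: ansible.utils
+  - name: ansible.windows
+```
+They can then be installed with `ansible-galaxy collection install -r collections/requirements.yml`.
+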
+Role Variables
+--------------
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC IP Address or hostname</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC username with admin privileges</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td> The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>os_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The operating system name to match the Jinja template of the kickstart file.<br>- Supported OS names are RHEL and ESXI.<br>- The Jinja template file should exist in the format `os_name_upper_os_version_major.j2`.</td>
+ </tr>
+ <tr>
+ <td>os_version</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The operating system version to match the Jinja template of the kickstart file.<br>- Supported versions are 9.x and 8.x for RHEL, and 8.x for ESXi.<br>- The Jinja template file should exist in the format `os_name_upper_os_version_major.j2`.</td>
+ </tr>
+ <tr>
+ <td>source</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>HTTP/HTTPS share or local path of the ISO.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;protocol</td>
+ <td>true</td>
+ <td></td>
+ <td>["https", "http", "local", "cifs", "nfs"]</td>
+ <td>str</td>
+ <td>- Type of the transfer protocol used to download the iso.<br/>- C(https) uses the https protocol to download the iso.<br/>- C(http) uses the http protocol to download the iso.<br/>- C(nfs) uses the locally mounted nfs folder path to download the iso.<br/>- C(cifs) uses the locally mounted cifs folder path to download the iso.<br/>- C(local) uses the local folder path to download the iso.<br/>- If I(is_custom_iso) is C(true), this will be used to mount the custom iso to virtual media.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- HTTP/HTTPS address to download the ISO.<br/>- Hostname of the http/https/cifs and nfs to mount the custom iso to virtual media.<br/>- I(hostname) is applicable to download iso only when I(protocol) is C(http) or C(https) and I(is_custom_iso) is C(false).<br/>- I(hostname) is ignored to download the iso when I(protocol) is C(local), C(nfs) or C(cifs) and I(is_custom_iso) is C(false).<br/>- I(hostname) will be used to attach the virtual media when I(is_custom_iso) is C(true).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;iso_path</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- Absolute local path or http/https share path of the iso.<br/>- When I(is_custom_iso) is C(true), I(iso_path) should be an http, https, nfs or cifs path.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;iso_name</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>Name of the iso file.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;ks_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- Absolute local path or http/https share path of the kickstart file.<br/>- When I(ks_path) is provided, the role skips the generation of the kickstart file and uses the one provided in the input.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;is_custom_iso</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Specifies whether the source iso is a custom iso.<br/>- C(true) uses the custom iso and skips the kickstart file generation and custom iso compilation.<br/>- When C(true), I(destination) is ignored and the I(iso_path) is used to mount the virtual media on idrac.<br/>- C(false) runs the kickstart file generation and custom iso compilation.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;username</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Username of the http, https and cifs share.<br/>- I(username) is applicable only when I(protocol) is C(http) , C(https) to download the iso file.<br/>- I(username) is used to mount the virtual media on idrac and applicable when I(protocol) is C(http), C(https) or C(cifs) and I(is_custom_iso) is C(true).<br/>- I(username) is ignored when I(protocol) is C(local).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Password of the http, https and cifs share.<br/>- I(password) is applicable only when I(protocol) is C(http) , C(https) to download the iso file.<br/>- I(password) is applicable to mount the custom iso as a virtual media in idrac when I(protocol) is C(http) , C(https), c(cifs) and I(is_custom_iso) is C(true).<br/>- I(password) is ignored when I(protocol) is C(local).</td>
+ </tr>
+ <tr>
+ <td>destination</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>- Share path to mount the ISO to iDRAC.<br/>- Share needs to have a write permission to copy the generated ISO.<br/>- CIFS, NFS, HTTP and HTTPS shares are supported.<br/>- I(destination) is ignored when I(is_custom_iso) is C(true).<br>- When the protocol is C(http) or C(https), the custom iso is copied into a destination location/folder where the web server content is served.<br/>- When the protocol is C(cifs) or C(nfs), the custom iso is copied into the locally mounted nfs or cifs location.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;protocol</td>
+ <td>true</td>
+ <td></td>
+ <td>["https", "http", "nfs", "cifs"]</td>
+ <td>str</td>
+ <td>- Type of the transfer protocol used to mount the virtual media onto idrac.<br/>- C(https) uses the ssh protocol to copy the custom iso to the I(mountpoint) and the https protocol to mount the virtual media.<br/>- C(http) uses the ssh protocol to copy the custom iso to the I(mountpoint) and the https protocol to mount the virtual media.<br/>- C(nfs) copies the custom iso to the locally mounted I(mountpoint) and uses the nfs protocol to mount the virtual media.<br/>- C(cifs) copies the custom iso to the locally mounted I(mountpoint) and uses the cifs protocol to mount the virtual media.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Target machine address/hostname where the custom iso will be copied.<br/>- Address/hostname used to mount the iso as a virtual media.<br/>- I(hostname) is applicable to copy iso using ssh when I(protocol) is C(http) or C(https).<br/>- I(hostname) will be defaulted to localhost to copy iso when I(protocol) is C(nfs), C(cifs).<br/> - I(hostname) will be used to mount the virtual media in idrac when I(protocol) is C(http), C(https), C(nfs) or C(cifs).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;iso_path</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>Custom iso absolute path to be used to mount as a virtual media in idrac.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;iso_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>Custom iso file name. If not specified, it defaults to C(hostname-source.iso_name).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;mountpoint</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- Target machine absolute path where the custom iso will be copied.<br/>- I(mountpoint) will be path where http/https is served from when I(protocol) is C(http), C(https).<br/>- I(mountpoint) will be local folder mounted with nfs/cifs share when I(protocol) is C(nfs) C(cifs).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;os_type</td>
+ <td>false</td>
+ <td>linux</td>
+ <td>["linux", "windows"]</td>
+ <td>str</td>
+ <td>HTTP/HTTPS share based on linux/Windows.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;username</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>Username of the http/https/cifs share where customized ISO is used to mount as a virtual media.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;password</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>Password of the http/https/cifs share where customized ISO is used to mount as a virtual media.</td>
+ </tr>
+ <tr>
+ <td>wait_for_os_deployment</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>Wait for the OS deployment to finish.</td>
+ </tr>
+ <tr>
+ <td>os_deployment_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>Time in minutes to wait for the OS deployment to finish.</td>
+ </tr>
+ <tr>
+ <td>eject_iso</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Eject the virtual media (ISO) after the tracking of OS deployment is finished.<br/>- ISO will be ejected if I(eject_iso) is C(true) and I(wait_for_os_deployment) is C(true).</td>
+ </tr>
+ <tr>
+ <td>delete_custom_iso</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Deletes the custom ISO after the OS deployment is finished.<br/>- The ISO will be deleted if I(delete_custom_iso) is C(true) and I(wait_for_os_deployment) is C(true).</td>
+ </tr>
+</tbody>
+</table>
+
+## SSH Ansible Variables
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>ansible_ssh_user</td>
+ <td>user</td>
+ <td>Username of the target ssh machine where the custom iso is copied.<br>This is used to copy the custom ISO over SSH to the destination folder from which the http/https web server serves content (see the sketch after this table).</td>
+ </tr>
+ <tr>
+ <td>ansible_ssh_password</td>
+ <td>password</td>
+ <td>Password of the target ssh machine where the custom iso is copied.<br>This is used to copy the custom ISO over SSH to the destination folder from which the http/https web server serves content.</td>
+ </tr>
+ <tr>
+ <td>ansible_remote_tmp</td>
+ <td>C://User//tmp</td>
+ <td>Temp directory of the target ssh machine where the custom iso is copied.<br>This is used to copy the custom ISO over SSH to the destination folder from which the http/https web server serves content.</td>
+ </tr>
+ <tr>
+ <td>become_method</td>
+ <td>runas</td>
+ <td>Overrides the default method of ssh.<br>This is used to copy the custom ISO over SSH to the destination folder from which the http/https web server serves content.</td>
+ </tr>
+ <tr>
+ <td>shell_type</td>
+ <td>cmd</td>
+ <td>Defines the shell type to be used on the target ssh machine where the custom iso is copied.<br>This is used to copy the custom ISO over SSH to the destination folder from which the http/https web server serves content.</td>
+ </tr>
+ </tbody>
+</table>
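+
+A hypothetical `host_vars` sketch for the HTTP/HTTPS destination host follows; the host name, user and vaulted password are placeholders, and only the variable names come from the table above:
+```
+# host_vars/198.192.0.1.yml -- illustrative only
+ansible_ssh_user: webadmin
+ansible_ssh_password: "{{ vault_web_password }}"
+ansible_remote_tmp: /tmp
+```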
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_os_deployment_out</td>
+ <td>Successfully deployed the Operating System</td>
+ <td>Output of the OS deployment role.</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_failure</td>
+ <td>The combination of OS name %s and version %s is not supported.</td>
+ <td>Error result of the task</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_kickstart_file</td>
+ <td>/tmp/omam_osd_kufwni/kickstart.cfg</td>
+ <td>Path of the kickstart file generated or downloaded</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_iso_file</td>
+ <td>/tmp/omam_osd_kufwni/rhel.iso</td>
+ <td>Path of the iso file downloaded</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_iso_extract_dir</td>
+ <td>/tmp/omam_osd_kufwni/extract</td>
+ <td>Path of the extract folder created within the tmp directory</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_custom_iso_filename</td>
+ <td>198.192.0.1_rhel.iso</td>
+ <td>Filename of the custom iso file generated</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_custom_iso_file</td>
+ <td>/tmp/omam_osd_kufwni/198.192.0.1_rhel.iso</td>
+ <td>Path of the custom iso file generated</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_hybrid_cmd</td>
+ <td>isohybrid --uefi /tmp/omam_osd_kufwni/198.192.0.1_rhel.iso</td>
+ <td>Command isohybrid applied to the custom iso file</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_checksum_cmd</td>
+ <td>implantisomd5 --uefi /tmp/omam_osd_kufwni/198.192.0.1_rhel.iso</td>
+ <td>Command to implant md5 checksum on the custom iso file</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_xorriso_cmd</td>
+ <td>xorriso -osirrox -indev /tmp/omam_osd_kufwni/rhel.iso -extract / /tmp/omam_osd_kufwni/extract</td>
+ <td>Command xorriso used to extract the downloaded iso</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_delegate</td>
+ <td>localhost</td>
+ <td>Enables the delegated task to run on localhost or in a container in the case of Molecule</td>
+ </tr>
+ <tr>
+ <td>idrac_os_deployment_supported_os</td>
+ <td>{ RHEL: ["8", "9"], ESXI: ["8"] }</td>
+ <td>Holds the map of supported OS names and versions</td>
+ </tr>
+ </tbody>
+</table>
+
+## Env Variables
+
+When the role has to SSH into a machine, the host key fingerprint must be added on the Ansible controller machine for it to connect successfully. If you trust the machine you are copying to, you can use the environment variable below to disable the host key check.
+
+```export ANSIBLE_HOST_KEY_CHECKING=False```
+
+Example Playbook
+----------------
+
+```
+- name: Generate Kickstart file, custom iso and install RHEL OS
+ ansible.builtin.import_role:
+ name: idrac_os_deployment
+ vars:
+ hostname: 192.168.0.1
+ username: root
+ password: password
+ os_name: RHEL
+ os_version: 9
+ source:
+ protocol: https
+ hostname: 198.192.0.1
+ iso_path: /to/iso
+ iso_name: rhel9.iso
+ destination:
+ protocol: https
+ hostname: 198.192.0.1
+ mountpoint: /user/www/myrepo
+ os_type: linux
+ iso_path: /to/iso
+```
+```
+- name: Generate custom iso using a kickstart file and install RHEL OS
+ ansible.builtin.import_role:
+ name: idrac_os_deployment
+ vars:
+ hostname: 192.168.0.1
+ username: root
+ password: password
+ ca_path: path/to/ca
+ os_name: RHEL
+ os_version: 9
+ source:
+ protocol: https
+ hostname: 198.192.0.1
+ ks_path: /to/iso/rhel-9.cfg
+ iso_path: /to/iso
+ iso_name: rhel9.iso
+ destination:
+ protocol: https
+ hostname: 198.192.0.1
+ mountpoint: /user/www/myrepo
+ os_type: linux
+ iso_path: /to/iso
+```
+```
+- name: Install RHEL OS using a custom iso
+ ansible.builtin.import_role:
+ name: idrac_os_deployment
+ vars:
+ hostname: 192.168.0.1
+ username: root
+ password: password
+ os_name: RHEL
+ os_version: 9
+ source:
+ protocol: https
+ hostname: 198.192.0.1
+ iso_path: /to/iso
+ iso_name: custom-rhel.iso
+ is_custom_iso: true
+```
+Author Information
+------------------
+Dell Technologies <br>
+Sachin Apagundi (Sachin_Apagundi@Dell.com) 2023 <br>
+Abhishek Sinha (Abhishek.Sinha10@Dell.com) 2023 <br>
+Jagadeesh N V (Jagadeesh.N.V@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/esxi.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/esxi.yml
new file mode 100644
index 000000000..9f3c4f570
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/esxi.yml
@@ -0,0 +1,20 @@
+esxi_keyboard: null
+esxi_rootpw: ""
+esxi_iscrypted: false
+esxi_reboot: true
+esxi_install_type: install
+esxi_install_options: ["--firstdisk", "--overwritevmfs"]
+esxi_clearpart: ["--alldrives", "--overwritevmfs"]
+esxi_network: false
+esxi_partition: false
+esxi_serial_num: null
+esxi_firstboot:
+ interpreter: busybox
+ args:
+ - vim-cmd hostsvc/enable_ssh
+ - vim-cmd hostsvc/start_ssh
+ - vim-cmd hostsvc/enable_esx_shell
+ - vim-cmd hostsvc/start_esx_shell
+ - esxcli system settings advanced set -o /UserVars/SuppressShellWarning -i 1
+esxi_prescript: null
+esxi_postscript: null
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/main.yml
new file mode 100644
index 000000000..0b3c26f4f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/main.yml
@@ -0,0 +1,9 @@
+---
+https_port: 443
+https_timeout: 30
+validate_certs: true
+os_deployment_timeout: 30
+eject_iso: true
+wait_for_os_deployment: true
+delete_custom_iso: true
+set_no_log: true
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/rhel.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/rhel.yml
new file mode 100644
index 000000000..8fadadde4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/defaults/main/rhel.yml
@@ -0,0 +1,16 @@
+rhel_keyboard: us
+rhel_lang: en_US
+rhel_timezone: ["America/New_York", "--utc"]
+rhel_rootpw: ""
+rhel_iscrypted: false
+rhel_reboot: true
+rhel_install_source: cdrom # nfs --server=nfs://10.1.2.3 --dir=/ins/tree
+rhel_bootloader: [] # RHEL 8 and 9 have different defaults
+rhel_zerombr: true
+rhel_clearpart: ["--all", "--initlabel"]
+rhel_autopart: []
+rhel_firstboot: ["--disable"]
+rhel_firewall: ["--enabled"]
+rhel_selinux: ["--enforcing"]
+rhel_packages: ["@^minimal-environment"]
+rhel_network: false
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/handlers/main.yml
new file mode 100644
index 000000000..05452dfd5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_os_deployment
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/argument_specs.yml
new file mode 100644
index 000000000..e1b4935aa
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/argument_specs.yml
@@ -0,0 +1,191 @@
+---
+argument_specs:
+ main:
+ version_added: "7.5.0"
+ short_description: Role to deploy operating system on the iDRAC servers
+ description:
+ - Role to generate the custom iso using the kickstart configuration file and deploy operating system on the idrac servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address or hostname.
+ username:
+ type: str
+ description: iDRAC username with admin privileges.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description: The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ os_name:
+ type: str
+ description:
+ - The operating system name to match the jinja template of the kickstart file.
+ - Supported OS names are RHEL and ESXI.
+ - Jinja template file should exist in the format <os_name_upper>_<os_version_major>.j2
+ - This is required when I(is_custom_iso) is C(false).
+ os_version:
+ type: str
+ description:
+ - The operating system version to match the jinja template of the kickstart file.
+ - Supported versions for RHEL are 9.x and 8.x and for ESXi is 8.x.
+ - Jinja template file should exist in the format <os_name_upper>_<os_version_major>.j2
+ - This is required when I(is_custom_iso) is C(false)
+ source:
+ type: dict
+ description: HTTP/HTTPS share or local path of the ISO.
+ required: true
+ options:
+ protocol:
+ type: str
+ description:
+ - Type of the transfer protocol used to download the iso.
+ - C(https) uses the https protocol to download the iso.
+ - C(http) uses the http protocol to download the iso.
+ - C(nfs) uses the locally mounted nfs folder path to download the iso.
+ - C(cifs) uses the locally mounted cifs folder path to download the iso.
+ - C(local) uses the local folder path to download the iso.
+ - If I(is_custom_iso) is C(true), this will be used to mount the custom iso to virtual media.
+ choices: ["https", "http", "local", "cifs", "nfs"]
+ required: true
+ hostname:
+ type: str
+ description:
+ - HTTP/HTTPS address to download the ISO.
+ - Hostname of the http/https/cifs and nfs to mount the custom iso to virtual media.
+ - I(hostname) is applicable to download iso only when I(protocol) is C(http) or C(https) and I(is_custom_iso) is C(false).
+ - I(hostname) is ignored to download the iso when I(protocol) is C(local), C(nfs) or C(cifs) and I(is_custom_iso) is C(false).
+ - I(hostname) will be used to attach the virtual media when I(is_custom_iso) is C(true).
+ iso_path:
+ type: path
+ description:
+ - Absolute local path or http/https share path of the iso.
+ - When I(is_custom_iso) is C(true), I(iso_path) should be an http, https, nfs or cifs path.
+ required: true
+ iso_name:
+ type: str
+ description: Name of the iso file.
+ required: true
+ ks_path:
+ type: path
+ description:
+ - Absolute local path or http/https share path of the kickstart file.
+ - When I(ks_path) is provided, the role skips the generation of the kickstart file and uses the one provided in the input.
+ is_custom_iso:
+ type: bool
+ description:
+ - Specifies the source iso is a custom iso.
+ - C(true) uses the custom iso and skips the kickstart file generation and custom iso compilation.
+ - when C(true), I(destination) is ignored and uses the I(iso_path) to mount the virtual media on idrac.
+ - C(false) runs the kickstart file generation and custom iso compilation
+ default: false
+ username:
+ type: str
+ description:
+ - Username of the http, https and cifs share.
+ - I(username) is applicable only when I(protocol) is C(http) , C(https) to download the iso file.
+ - I(username) is applicable to mount the custom iso as a virtual media in idrac when I(protocol) is
+ C(http) , C(https), c(cifs) and I(is_custom_iso) is C(true).
+ - I(username) is ignored when I(protocol) is C(local).
+ password:
+ type: str
+ description:
+ - Password of the http, https and cifs share.
+ - I(password) is applicable only when I(protocol) is C(http) , C(https) to download the iso file.
+ - I(password) is applicable to mount the custom iso as a virtual media in idrac when
+ I(protocol) is C(http) , C(https), c(cifs) and I(is_custom_iso) is C(true).
+ - I(password) is ignored when I(protocol) is C(local).
+ destination:
+ type: dict
+ description:
+ - Share path to mount the ISO to iDRAC.
+ - Share needs to have a write permission to copy the generated ISO.
+ - CIFS, NFS, HTTP and HTTPS shares are supported.
+ - I(destination) is ignored when I(is_custom_iso) is C(true)
+ - When the protocol is C(http) or C(https), the custom iso is copied into a destination location/folder where the web server content is served.
+ - When the protocol is C(cifs) or C(nfs), the custom iso is copied into the locally mounted nfs or cifs location.
+ options:
+ protocol:
+ type: str
+ description:
+ - Type of the transfer protocol used to mount the virtual media onto idrac.
+ - C(https) uses the ssh protocol to copy the custom iso to the I(mountpoint) and the https protocol to mount the virtual media.
+ - C(http) uses the ssh protocol to copy the custom iso to the I(mountpoint) and the https protocol to mount the virtual media.
+ - C(nfs) copies the custom iso to the locally mounted I(mountpoint) and uses the nfs protocol to mount the virtual media.
+ - C(cifs) copies the custom iso to the locally mounted I(mountpoint) and uses the cifs protocol to mount the virtual media.
+ choices: ["https", "http", "nfs", "cifs"]
+ required: true
+ hostname:
+ type: str
+ description:
+ - Target machine address/hostname where the custom iso will be copied.
+ - Address/hostname used to mount the iso as a virtual media.
+ - I(hostname) is applicable to copy iso using ssh when I(protocol) is C(http) or C(https).
+ - I(hostname) will be defaulted to localhost to copy iso when I(protocol) is C(nfs), C(cifs).
+ - I(hostname) will be used to mount the virtual media in idrac when I(protocol) is C(http), C(https), C(nfs) or C(cifs).
+ required: true
+ iso_path:
+ type: path
+ description: Custom iso absolute path to be used to mount as a virtual media in idrac.
+ required: true
+ iso_name:
+ type: str
+ description: Custom iso file name. If not specified, it defaults to C(hostname-source.iso_name).
+ mountpoint:
+ type: path
+ description:
+ - Target machine absolute path where the custom iso will be copied.
+ - I(mountpoint) will be path where http/https is served from when I(protocol) is C(http), C(https).
+ - I(mountpoint) will be local folder mounted with nfs/cifs share when I(protocol) is C(nfs) C(cifs).
+ required: true
+ os_type:
+ description: HTTP/HTTPS share based on linux/Windows.
+ type: str
+ choices: ["linux", "windows"]
+ default: "linux"
+ username:
+ type: str
+ description:
+ - Username of the http/https/cifs share where customized ISO is used to mount as a virtual media.
+ password:
+ type: str
+ description:
+ - Password of the http/https/cifs share where customized ISO is used to mount as a virtual media.
+ wait_for_os_deployment:
+ default: true
+ type: bool
+ description:
+ - Wait for the OS deployment to finish.
+ os_deployment_timeout:
+ description:
+ - Time in minutes to wait for the OS deployment to finish.
+ default: 30
+ type: int
+ eject_iso:
+ description:
+ - Eject the virtual media (ISO) after the tracking of OS deployment is finished.
+ - ISO will be ejected if I(eject_iso) is C(true) and I(wait_for_os_deployment) is C(true).
+ default: true
+ type: bool
+ delete_custom_iso:
+ description:
+ - Deletes the custom iso after the OS deployment is finished.
+ - ISO will be deleted if I(delete_custom_iso) is C(true) and I(wait_for_os_deployment) is C(true).
+ default: true
+ type: bool
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/main.yml
new file mode 100644
index 000000000..dda8bed03
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/meta/main.yml
@@ -0,0 +1,19 @@
+galaxy_info:
+ author: |
+ "Sachin Apagundi
+ Abhishek Sinha
+ Jagadeesh N V"
+ description: Role to deploy the operating system on idrac servers.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.13"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_destinations.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_destinations.yml
new file mode 100644
index 000000000..a07ce47fc
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_destinations.yml
@@ -0,0 +1,25 @@
+---
+- name: Remove custom iso from mounted path
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ when: destination.protocol in ['nfs', 'cifs']
+ ansible.builtin.file:
+ state: absent
+ path: "{{ destination.mountpoint }}/{{ idrac_os_deployment_custom_iso_filename }}"
+
+- name: Remove custom iso from HTTP or HTTPS share on Linux
+ delegate_to: "{{ destination.hostname }}"
+ when:
+ - destination.protocol in ['http', 'https']
+ - destination.os_type is undefined or destination.os_type == 'linux'
+ ansible.builtin.file:
+ path: "{{ destination.mountpoint }}/{{ idrac_os_deployment_custom_iso_filename }}"
+ state: absent
+
+- name: Remove custom iso from HTTP or HTTPS share on Windows
+ delegate_to: "{{ destination.hostname }}"
+ when:
+ - destination.protocol in ['http', 'https']
+ - destination.os_type is defined and destination.os_type == 'windows'
+ ansible.windows.win_file:
+ path: "{{ destination.mountpoint }}/{{ idrac_os_deployment_custom_iso_filename }}"
+ state: absent
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_working_directory.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_working_directory.yml
new file mode 100644
index 000000000..f3edec438
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/clean_up/clean_up_working_directory.yml
@@ -0,0 +1,7 @@
+- name: Clean up the working directory
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ when: idrac_os_deployment_wd is defined
+ ansible.builtin.file:
+ path: "{{ idrac_os_deployment_wd.path }}"
+ state: absent
+ failed_when: false
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/copy_iso_to_destination.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/copy_iso_to_destination.yml
new file mode 100644
index 000000000..b5c6f52f5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/copy_iso_to_destination.yml
@@ -0,0 +1,60 @@
+---
+- name: Copy the custom iso local destination
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ when: destination.protocol in ['nfs', 'cifs']
+ block:
+ - name: Ensure the file already exists at the destination to work around the 'invalid selinux context' issue
+ ansible.builtin.file:
+ path: "{{ destination.mountpoint }}/{{ idrac_os_deployment_custom_iso_filename }}"
+ state: touch
+ mode: "{{ idrac_os_deployment_dest_mode }}"
+
+ - name: Copy custom iso to mounted path
+ register: idrac_os_deployment_copy_to_destination
+ ansible.builtin.copy:
+ src: "{{ idrac_os_deployment_custom_iso_file }}"
+ dest: "{{ destination.mountpoint }}"
+ mode: "{{ idrac_os_deployment_dest_mode }}"
+
+- name: Copy custom iso to HTTP or HTTPS share on linux
+ when:
+ - destination.protocol in ['http', 'https']
+ - destination.os_type is undefined or destination.os_type == 'linux'
+ delegate_to: "{{ destination.hostname }}"
+ block:
+ - name: Copy the iso to the linux destination
+ ignore_unreachable: true
+ register: idrac_os_deployment_copy_to_destination
+ ansible.builtin.copy:
+ src: "{{ idrac_os_deployment_custom_iso_file }}"
+ dest: "{{ destination.mountpoint }}"
+ mode: "preserve"
+
+ - name: Fail if destination is unreachable
+ when: idrac_os_deployment_copy_to_destination is unreachable
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_copy_to_destination }}"
+
+- name: Copy custom iso to HTTP or HTTPS share on windows
+ delegate_to: "{{ destination.hostname }}"
+ when:
+ - destination.protocol in ['http', 'https']
+ - destination.os_type is defined and destination.os_type == 'windows'
+ block:
+ - name: Copy the iso to windows destination
+ ignore_unreachable: true
+ register: idrac_os_deployment_copy_to_destination
+ ansible.windows.win_copy:
+ src: "{{ idrac_os_deployment_custom_iso_file }}"
+ dest: "{{ destination.mountpoint }}"
+ mode: "preserve"
+
+ - name: Fail if destination is unreachable
+ when: idrac_os_deployment_copy_to_destination is unreachable
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_copy_to_destination }}"
+
+- name: Set Copy to destination flag
+ when: idrac_os_deployment_copy_to_destination is succeeded
+ ansible.builtin.set_fact:
+ idrac_os_deployment_copied_to_destination: true
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/create_working_directory_path.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/create_working_directory_path.yml
new file mode 100644
index 000000000..10560dba7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/create_working_directory_path.yml
@@ -0,0 +1,28 @@
+---
+- name: Create working directory
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.tempfile:
+ state: directory
+ prefix: "{{ idrac_os_deployment_temp_file_prefix }}"
+ register: idrac_os_deployment_wd
+
+- name: Set custom iso filename
+ when: destination is defined and destination.iso_name is defined
+ ansible.builtin.set_fact:
+ idrac_os_deployment_custom_iso_filename: "{{ destination.iso_name }}"
+
+- name: Set custom iso path
+ ansible.builtin.set_fact:
+ idrac_os_deployment_custom_iso_file: "{{ idrac_os_deployment_wd.path }}/{{ idrac_os_deployment_custom_iso_filename }}"
+
+- name: Create an extract directory in the working directory
+ register: idrac_os_deployment_create_extract_dir
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.file:
+ path: "{{ idrac_os_deployment_wd.path }}/{{ idrac_os_deployment_extract_dir }}"
+ state: directory
+ mode: "{{ idrac_os_deployment_iso_extract_dir_mode }}"
+
+- name: Set extracted directory path
+ ansible.builtin.set_fact:
+ idrac_os_deployment_iso_extract_dir: "{{ idrac_os_deployment_create_extract_dir.path }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/download_or_copy_source_files.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/download_or_copy_source_files.yml
new file mode 100644
index 000000000..0a0c1a919
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/download_or_copy_source_files.yml
@@ -0,0 +1,112 @@
+---
+- name: Download or copy the source iso
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ block:
+ - name: Download via http, https protocol
+ when: source.protocol in ['http', 'https']
+ block:
+ - name: Set the Download path for http https
+ when: source.protocol in ['http', 'https']
+ ansible.builtin.set_fact:
+ idrac_os_deployment_iso_path: "{{ source.protocol }}://{{ source.hostname }}{{ source.iso_path }}/{{ source.iso_name }}"
+
+ - name: Download iso from source using wget for http and https
+ register: idrac_os_deployment_wget_cmd_out
+ changed_when: idrac_os_deployment_wget_cmd_out.rc == 0
+ failed_when: idrac_os_deployment_wget_cmd_out.rc != 0
+ no_log: "{{ set_no_log }}" # to avoid printing username and password
+ ansible.builtin.command:
+ "{{ (idrac_os_deployment_wget_cmd | format(idrac_os_deployment_iso_path, idrac_os_deployment_wd.path))
+ + ('' if validate_certs else idrac_os_deployment_wget_cmd_vc)
+ + ((idrac_os_deployment_wget_cmd_creds | format(source.username, source.password)) if (source.username is defined) else '') }}"
+
+ - name: Set the iso file path
+ ansible.builtin.set_fact:
+ idrac_os_deployment_iso_file: "{{ idrac_os_deployment_wd.path }}/{{ source.iso_name }}"
+ idrac_os_deployment_success_message_os_deployment: "{{ idrac_os_deployment_success_message_os_deployment_iso }}"
+
+ - name: Copy the iso from local for protocol local, cifs, nfs
+ when: source.protocol in ['local', 'cifs', 'nfs']
+ block:
+ - name: Set the Download path for local, cifs, nfs
+ when: source.protocol in ['local', 'cifs', 'nfs']
+ ansible.builtin.set_fact:
+ idrac_os_deployment_iso_path: "{{ source.iso_path }}/{{ source.iso_name }}"
+
+ - name: Copy the iso from the local source
+ when: source.protocol in ['local', 'cifs', 'nfs']
+ ansible.builtin.copy:
+ src: "{{ idrac_os_deployment_iso_path }}"
+ dest: "{{ idrac_os_deployment_wd.path }}"
+ mode: "{{ idrac_os_deployment_src_copy_mode }}"
+ no_log: "{{ idrac_os_deployment_set_no_log }}"
+ register: idrac_os_deployment_iso_copy
+
+ - name: Set the iso file path
+ ansible.builtin.set_fact:
+ idrac_os_deployment_iso_file: "{{ idrac_os_deployment_iso_copy.dest }}"
+ idrac_os_deployment_success_message_os_deployment: "{{ idrac_os_deployment_success_message_os_deployment_iso }}"
+
+ rescue:
+ - name: Log error for source iso download
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_iso_file | format(idrac_os_deployment_iso_path) }}"
+
+- name: Download or copy the kickstart file
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ when: source.ks_path is defined and (source.is_custom_iso is undefined or source.is_custom_iso is false)
+ block:
+ - name: Download via http, https protocol
+ when: source.protocol in ['http', 'https']
+ block:
+ - name: Set the Download path for http, https
+ ansible.builtin.set_fact:
+ idrac_os_deployment_ks_path: "{{ source.protocol }}://{{ source.hostname }}{{ source.ks_path }}"
+
+ - name: Download ks from source using wget for http and https
+ register: idrac_os_deployment_wget_ks_cmd_out
+ changed_when: idrac_os_deployment_wget_ks_cmd_out.rc == 0
+ failed_when: idrac_os_deployment_wget_ks_cmd_out.rc != 0
+ no_log: "{{ set_no_log }}" # to avoid printing username and password
+ ansible.builtin.command:
+ "{{ (idrac_os_deployment_wget_cmd | format(idrac_os_deployment_ks_path, idrac_os_deployment_wd.path))
+ + ('' if validate_certs else idrac_os_deployment_wget_cmd_vc)
+ + ((idrac_os_deployment_wget_cmd_creds | format(source.username, source.password)) if (source.username is defined) else '') }}"
+
+ - name: Set the kickstart file path
+ ansible.builtin.set_fact:
+ idrac_os_deployment_kickstart_file: "{{ idrac_os_deployment_wd.path }}/{{ source.ks_path | basename }}"
+ idrac_os_deployment_success_message_os_deployment: "{{ idrac_os_deployment_success_message_os_deployment_ks }}"
+
+ - name: Copy from local for protocol local, cifs, nfs
+ when: source.protocol in ['local', 'cifs', 'nfs']
+ block:
+ - name: Set the Download path for local, cifs, nfs
+ ansible.builtin.set_fact:
+ idrac_os_deployment_ks_path: "{{ source.ks_path }}"
+
+ - name: Copy the kickstart from the local source
+ ansible.builtin.copy:
+ src: "{{ idrac_os_deployment_ks_path }}"
+ dest: "{{ idrac_os_deployment_wd.path }}"
+ mode: "{{ idrac_os_deployment_src_copy_mode }}"
+ no_log: "{{ idrac_os_deployment_set_no_log }}"
+ register: idrac_os_deployment_ks_copy
+
+ - name: Set the kickstart file path
+ ansible.builtin.set_fact:
+ idrac_os_deployment_kickstart_file: "{{ idrac_os_deployment_ks_copy.dest }}"
+ idrac_os_deployment_success_message_os_deployment: "{{ idrac_os_deployment_success_message_os_deployment_ks }}"
+
+ rescue:
+ - name: Log error for source kickstart download
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_ks_file | format(idrac_os_deployment_ks_path) }}"
+
+- name: Validate kickstart file path extension
+ when:
+ - source.ks_path is defined and (source.is_custom_iso is undefined or source.is_custom_iso is false)
+ - (idrac_os_deployment_kickstart_file | splitext | last) != ".cfg"
+ - idrac_os_deployment_validate_kickstart_file_ext is true
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_ks_file_ext | format(idrac_os_deployment_kickstart_file | splitext | last) }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/validate_inputs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/validate_inputs.yml
new file mode 100644
index 000000000..2e5ca7214
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/common/validate_inputs.yml
@@ -0,0 +1,74 @@
+---
+- name: Validate when custom iso is false
+ when: source.is_custom_iso is undefined or source.is_custom_iso is false
+ block:
+ - name: Validate that the OS name and OS version are provided
+ when: os_name is undefined or os_version is undefined
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_os_required }}"
+
+ - name: Validate the OS Name and OS version
+ when: (os_name | upper not in idrac_os_deployment_supported_os.keys()) or
+ ((os_version | string | split('.') | first) not in idrac_os_deployment_supported_os[os_name | upper])
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_unsupported_os | format(os_name, os_version) }}"
+
+ - name: Check if root password exists
+ when:
+ - source.ks_path is undefined
+ - ((os_name | upper == 'RHEL') and (rhel_rootpw | length == 0)) or
+ ((os_name | upper == 'ESXI') and (esxi_rootpw | length == 0))
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_root_password | format(os_name) }}"
+
+ - name: Check if the template file Exists
+ when: source.ks_path is undefined
+ block:
+ - name: Check if the template file exists
+ register: idrac_os_deployment_template_file
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.stat:
+ path: "{{ role_path }}/templates/{{ os_name | upper }}_{{ os_version | string | split('.') | first }}.j2"
+
+ - name: Fail if the template file doesn't exist
+ when: not idrac_os_deployment_template_file.stat.exists
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_template | format(os_name | upper, os_version) }}"
+
+ - name: Validate destination required when custom iso is an input
+ when: destination is not defined
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_destination_required }}"
+
+ - name: Validate destination iso_name is not empty if provided
+ when:
+ - (destination is defined and destination.iso_name is defined and destination.iso_name == "")
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_destination_iso_name }}"
+
+ - name: Validate destination mountpath folder exists
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ when: destination is defined and destination.protocol in ['cifs','nfs']
+ block:
+ - name: Validate destination mountpath
+ register: idrac_os_deployment_mountpoint_folder
+ ansible.builtin.stat:
+ path: "{{ destination.mountpoint }}"
+
+ - name: Fail if the mountpoint destination doesn't exist
+ when: not idrac_os_deployment_mountpoint_folder.stat.exists
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_mountpoint_folder | format(destination.mountpoint) }}"
+
+- name: Validate that custom iso is not used with the local protocol
+ when:
+ - (source.is_custom_iso is defined and source.is_custom_iso is true) and (source.protocol == "local")
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_custom_iso_local }}"
+
+- name: Validate hostname requirement for source
+ when:
+ - (source.hostname is undefined or source.hostname == "")
+ - (source.is_custom_iso is defined and source.is_custom_iso is true) or (source.protocol in ['http', 'https'])
+ ansible.builtin.fail:
+ msg: "{{ idrac_os_deployment_err_msg_hostname_required }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/esxi/compile_iso.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/esxi/compile_iso.yml
new file mode 100644
index 000000000..8555f1f8c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/esxi/compile_iso.yml
@@ -0,0 +1,37 @@
+---
+- name: Copy KS file to extracted directory
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.copy:
+ src: "{{ idrac_os_deployment_kickstart_file }}"
+ dest: "{{ idrac_os_deployment_iso_extract_dir }}/{{ idrac_os_deployment_esxi_ks_filename }}"
+ mode: "{{ idrac_os_deployment_copy_mode }}"
+
+- name: Append ks path to the linux boot menu
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.lineinfile:
+ path: "{{ item }}"
+ regexp: "^kernelopt="
+ line: "kernelopt=runweasel ks={{ idrac_os_deployment_esxi_ks_location }}"
+ with_items:
+ - "{{ idrac_os_deployment_iso_extract_dir }}/EFI/BOOT/BOOT.CFG"
+ - "{{ idrac_os_deployment_iso_extract_dir }}/BOOT.CFG"
+
+- name: Compile custom ISO
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command:
+ cmd: "{{ idrac_os_deployment_esxi_mkiso_cmd | format(idrac_os_deployment_custom_iso_file, idrac_os_deployment_iso_extract_dir) }}"
+ register: idrac_os_deployment_mkisofs_output
+ changed_when: idrac_os_deployment_mkisofs_output.rc == 0
+ failed_when: idrac_os_deployment_mkisofs_output.rc != 0
+
+- name: Post-process ISO image with isohybrid
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command: "{{ idrac_os_deployment_hybrid_cmd | format(idrac_os_deployment_custom_iso_file) }}"
+ register: idrac_os_deployment_isohybrid_output
+ changed_when: idrac_os_deployment_isohybrid_output.rc == 0
+
+- name: Add correct checksum to iso
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command: "{{ idrac_os_deployment_checksum_cmd | format(idrac_os_deployment_custom_iso_file) }}"
+ register: idrac_os_deployment_checksum_output
+ changed_when: idrac_os_deployment_checksum_output.rc == 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/attach_iso_to_virtual_media.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/attach_iso_to_virtual_media.yml
new file mode 100644
index 000000000..8796cca17
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/attach_iso_to_virtual_media.yml
@@ -0,0 +1,43 @@
+---
+- name: Attach the iso image to idrac virtual media
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ block:
+ - name: Create virtual media mount facts
+ when: source.is_custom_iso is undefined or source.is_custom_iso is false
+ ansible.builtin.set_fact:
+ idrac_os_deployment_vm_protocol: "{{ destination.protocol }}"
+ idrac_os_deployment_vm_hostname: "{{ destination.hostname }}"
+ idrac_os_deployment_vm_iso_path: "{{ destination.iso_path }}"
+ idrac_os_deployment_vm_iso_name: "{{ idrac_os_deployment_custom_iso_filename }}"
+ idrac_os_deployment_vm_username: "{{ destination.username | default(omit) }}"
+ idrac_os_deployment_vm_password: "{{ destination.password | default(omit) }}"
+ no_log: "{{ set_no_log }}"
+
+ - name: Create virtual media mount url
+ when: idrac_os_deployment_vm_protocol
+ ansible.builtin.set_fact:
+ idrac_os_deployment_vm_url:
+ "{{ idrac_os_deployment_vm_proto_map[idrac_os_deployment_vm_protocol]
+ | format(idrac_os_deployment_vm_hostname, idrac_os_deployment_vm_iso_path, idrac_os_deployment_vm_iso_name) }}"
+
+ - name: Attach the iso to Virtual Media slot 1
+ register: idrac_os_deployment_vm_insert
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ timeout: "{{ https_timeout }}"
+ force: true
+ virtual_media:
+ - insert: true
+ image: "{{ idrac_os_deployment_vm_url }}"
+ username: "{{ idrac_os_deployment_vm_username | default(omit) }}"
+ password: "{{ idrac_os_deployment_vm_password | default(omit) }}"
+
+ - name: Set virtual media attached to success
+ when: idrac_os_deployment_vm_insert is succeeded
+ ansible.builtin.set_fact:
+ idrac_os_deployment_virtual_media_attached: true
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/clean_up_virtual_media_slot.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/clean_up_virtual_media_slot.yml
new file mode 100644
index 000000000..027fe5955
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/clean_up_virtual_media_slot.yml
@@ -0,0 +1,13 @@
+---
+- name: Eject the custom iso
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ dellemc.openmanage.idrac_virtual_media:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ timeout: "{{ https_timeout }}"
+ virtual_media:
+ - insert: false
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/set_boot_mode_and_restart.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/set_boot_mode_and_restart.yml
new file mode 100644
index 000000000..a14fecb7d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/idrac/set_boot_mode_and_restart.yml
@@ -0,0 +1,15 @@
+---
+- name: Configure the boot source override mode.
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ dellemc.openmanage.idrac_boot:
+ idrac_ip: "{{ hostname }}"
+ idrac_user: "{{ username }}"
+ idrac_password: "{{ password }}"
+ idrac_port: "{{ https_port }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ timeout: "{{ https_timeout }}"
+ boot_source_override_target: cd
+ boot_source_override_enabled: once
+ reset_type: force_restart
+ register: idrac_os_deployment_idrac_boot
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/extract_iso.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/extract_iso.yml
new file mode 100644
index 000000000..54e66a46e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/extract_iso.yml
@@ -0,0 +1,15 @@
+---
+- name: Extract the iso to the extract folder
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ register: idrac_os_deployment_extract_cmd_out
+ changed_when: idrac_os_deployment_extract_cmd_out.rc == 0
+ failed_when: idrac_os_deployment_extract_cmd_out.rc != 0
+ ansible.builtin.command: "{{ idrac_os_deployment_xorriso_cmd | format(idrac_os_deployment_iso_file, idrac_os_deployment_iso_extract_dir) }}"
+
+- name: Update file permissions
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.file:
+ path: "{{ idrac_os_deployment_iso_extract_dir }}"
+ state: directory
+ recurse: true
+ mode: "{{ idrac_os_deployment_iso_extract_dir_mode }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/generate_kickstart_file.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/generate_kickstart_file.yml
new file mode 100644
index 000000000..d3951ffc1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/iso/generate_kickstart_file.yml
@@ -0,0 +1,15 @@
+---
+- name: Generate the Kickstart File
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ when: source.ks_path is undefined and (source.is_custom_iso is undefined or source.is_custom_iso is false)
+ block:
+ - name: Generate kickstart file
+ ansible.builtin.template:
+ src: "{{ os_name | upper }}_{{ os_version | string | split('.') | first }}.j2"
+ dest: "{{ idrac_os_deployment_wd.path }}/kickstart_{{ os_name }}_{{ hostname }}.cfg"
+ mode: "{{ idrac_os_deployment_ks_gen_mode }}"
+ register: idrac_os_deployment_ks_generate
+
+ - name: Set ks file for specific idrac
+ ansible.builtin.set_fact:
+ idrac_os_deployment_kickstart_file: "{{ idrac_os_deployment_ks_generate.dest }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/main.yml
new file mode 100644
index 000000000..9d636d562
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/main.yml
@@ -0,0 +1,74 @@
+---
+# tasks file for idrac_os_deployment
+- name: Operating system deployment on iDRAC
+ block:
+ - name: Validate the Inputs
+ ansible.builtin.include_tasks: common/validate_inputs.yml
+
+ - name: Create working directory, kickstart file, and compile iso
+ when: source.is_custom_iso is undefined or source.is_custom_iso is false
+ block:
+ - name: Create working directory
+ ansible.builtin.include_tasks: common/create_working_directory_path.yml
+
+ - name: Download the Source iso
+ ansible.builtin.include_tasks: common/download_or_copy_source_files.yml
+
+ - name: Generate Kickstart file
+ when: source.ks_path is undefined
+ ansible.builtin.include_tasks: iso/generate_kickstart_file.yml
+
+ - name: Extract ISO
+ ansible.builtin.include_tasks: iso/extract_iso.yml
+
+ - name: Compile iso for the OS
+ ansible.builtin.include_tasks: "{{ os_name | lower }}/compile_iso.yml"
+
+ - name: Copy the iso to the destination
+ ansible.builtin.include_tasks: common/copy_iso_to_destination.yml
+
+ - name: Attach the Virtual Media to idrac
+ ansible.builtin.include_tasks: idrac/attach_iso_to_virtual_media.yml
+
+ - name: Set Boot Mode to once and restart the idrac
+ ansible.builtin.include_tasks: idrac/set_boot_mode_and_restart.yml
+
+ - name: Track for OS deployment
+ when: wait_for_os_deployment is true
+ ansible.builtin.include_tasks: tracking/track_for_os_deployment.yml
+
+ rescue:
+ - name: Set the failure messages
+ no_log: "{{ idrac_os_deployment_set_no_log }}"
+ ansible.builtin.set_fact:
+ idrac_os_deployment_failure: "{{ ansible_failed_result | combine({'failed_task_name': ansible_failed_task.name}) }}"
+
+ always:
+ - name: Clean up the Source and destination
+ when: source.is_custom_iso is undefined or source.is_custom_iso is false
+ block:
+ - name: Clean up the local directories
+ ansible.builtin.include_tasks: clean_up/clean_up_working_directory.yml
+
+ - name: Clean up the destination
+ when:
+ - wait_for_os_deployment is true and delete_custom_iso is true
+ - idrac_os_deployment_copied_to_destination is true
+ ansible.builtin.include_tasks: clean_up/clean_up_destinations.yml
+
+ - name: Clean up the virtual Media
+ when:
+ - wait_for_os_deployment is true and eject_iso is true
+ - idrac_os_deployment_virtual_media_attached is true
+ ansible.builtin.include_tasks: idrac/clean_up_virtual_media_slot.yml
+
+ - name: Report task failure
+ when: idrac_os_deployment_failure is defined
+ ansible.builtin.debug:
+ var: idrac_os_deployment_failure
+ failed_when: true
+
+ - name: Report operating system deployment success
+ when: idrac_os_deployment_failure is undefined and idrac_os_deployment_out != ""
+ ansible.builtin.debug:
+ var: idrac_os_deployment_out
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/rhel/compile_iso.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/rhel/compile_iso.yml
new file mode 100644
index 000000000..8091bba10
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/rhel/compile_iso.yml
@@ -0,0 +1,73 @@
+---
+- name: Copy KS file to extracted directory
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.copy:
+ src: "{{ idrac_os_deployment_kickstart_file }}"
+ dest: "{{ idrac_os_deployment_iso_extract_dir }}/{{ idrac_os_deployment_rhel_ks_filename }}"
+ mode: "{{ idrac_os_deployment_copy_mode }}"
+
+- name: Append ks path to the linux boot menu
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.lineinfile:
+ path: "{{ item }}"
+ regexp: "^(.*inst.stage2=hd:LABEL.*?)( inst.ks={{ idrac_os_deployment_rhel_ks_location }})?$"
+ backrefs: true
+ firstmatch: true
+ line: '\1 inst.ks={{ idrac_os_deployment_rhel_ks_location }}'
+ with_items:
+ - "{{ idrac_os_deployment_iso_extract_dir }}/isolinux/isolinux.cfg"
+ - "{{ idrac_os_deployment_iso_extract_dir }}/EFI/BOOT/grub.cfg"
+
+- name: Remove all occurrences of menu default
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.lineinfile:
+ path: "{{ idrac_os_deployment_iso_extract_dir }}/isolinux/isolinux.cfg"
+ search_string: "menu default"
+ state: absent
+
+- name: Insert menu default before kernel vmlinuz in isolinux
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.lineinfile:
+ path: "{{ idrac_os_deployment_iso_extract_dir }}/isolinux/isolinux.cfg"
+ line: " menu default"
+ firstmatch: true
+ insertbefore: ".*kernel vmlinuz.*"
+
+- name: Set grub menu default to 0
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.lineinfile:
+ path: "{{ idrac_os_deployment_iso_extract_dir }}/EFI/BOOT/grub.cfg"
+ line: 'set default="0"'
+ firstmatch: true
+ regexp: "^set default="
+
+- name: Get iso LABEL
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command: "blkid -s LABEL -o value {{ idrac_os_deployment_iso_file }}"
+ register: idrac_os_deployment_blkid_output
+ changed_when: false # blkid only reads the label and never changes anything
+
+- name: Set iso LABEL
+ ansible.builtin.set_fact:
+ idrac_os_deployment_iso_label: "{{ idrac_os_deployment_blkid_output.stdout | trim }}"
+
+- name: Compile custom ISO
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command:
+ chdir: "{{ idrac_os_deployment_iso_extract_dir }}"
+ cmd: "{{ idrac_os_deployment_rhel_mkiso_cmd | format(idrac_os_deployment_custom_iso_file, idrac_os_deployment_iso_label) }}"
+ register: idrac_os_deployment_mkisofs_output
+ changed_when: idrac_os_deployment_mkisofs_output.rc == 0
+ failed_when: idrac_os_deployment_mkisofs_output.rc != 0
+
+- name: Post-process ISO image with isohybrid
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command: "{{ idrac_os_deployment_hybrid_cmd | format(idrac_os_deployment_custom_iso_file) }}"
+ register: idrac_os_deployment_isohybrid_output
+ changed_when: idrac_os_deployment_isohybrid_output.rc == 0
+
+- name: Add correct checksum to iso
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.command: "{{ idrac_os_deployment_checksum_cmd | format(idrac_os_deployment_custom_iso_file) }}"
+ register: idrac_os_deployment_checksum_output
+ changed_when: idrac_os_deployment_checksum_output.rc == 0
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/tracking/track_for_os_deployment.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/tracking/track_for_os_deployment.yml
new file mode 100644
index 000000000..358990874
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tasks/tracking/track_for_os_deployment.yml
@@ -0,0 +1,11 @@
+---
+- name: Wait for operating system installation based on a wait time
+ delegate_to: "{{ idrac_os_deployment_delegate }}"
+ ansible.builtin.wait_for:
+ timeout: "{{ (os_deployment_timeout * 60) }}"
+
+- name: Set the Operating system success message
+ ansible.builtin.set_fact:
+ idrac_os_deployment_out: "{{ idrac_os_deployment_success_message_os_deployment }}"
+ changed_when: true
+ no_log: "{{ idrac_os_deployment_set_no_log }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/ESXI_8.j2 b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/ESXI_8.j2
new file mode 100644
index 000000000..67c338c58
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/ESXI_8.j2
@@ -0,0 +1,39 @@
+accepteula
+{% if esxi_keyboard is defined and esxi_keyboard != None %}
+keyboard "{{ esxi_keyboard }}"
+{% endif %}
+{% if esxi_iscrypted is defined and esxi_iscrypted == true %}
+rootpw {{ esxi_rootpw }} --iscrypted
+{% else %}
+rootpw {{ esxi_rootpw }}
+{% endif %}
+{{ ([esxi_install_type] + esxi_install_options)|join(' ') }}
+{% set esxicmds = {'clearpart': esxi_clearpart, 'network': esxi_network, 'partition': esxi_partition} %}
+{% for key,value in esxicmds.items() %}
+{% if value is defined and value != false %}
+{{ ([key] + value)|join(' ') }}
+{% endif %}
+{% endfor %}
+{% if esxi_serial_num is defined and esxi_serial_num != None %}
+serialnum --esx={{ esxi_serial_num }}
+{% endif %}
+{# boolean cmds #}
+{% set boolcmds = {'reboot': esxi_reboot, 'paranoid': esxi_paranoid, 'dryrun': esxi_dryrun} %}
+{% for key,value in boolcmds.items() %}
+{% if value is defined and value == true %}
+{{ key }}
+{% endif %}
+{% endfor %}
+{# scripts logic #}
+{% set scripts = {'pre': esxi_prescript, 'post': esxi_postscript, 'firstboot': esxi_firstboot} %}
+{% for key,value in scripts.items() %}
+{% if value is defined and value != None %}
+{% set xkey = value %}
+{% endif %}
+{% if xkey is defined and xkey.args != None %}
+%{{ key }} --interpreter={{ xkey.interpreter | default('busybox') }}
+{% for cmd in xkey.args %}
+{{ cmd }}
+{% endfor %}
+{% endif %}
+{% endfor %}
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_8.j2 b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_8.j2
new file mode 100644
index 000000000..de4903a08
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_8.j2
@@ -0,0 +1,31 @@
+eula --agreed
+lang {{ rhel_lang }}
+keyboard {{ rhel_keyboard }}
+timezone {{ rhel_timezone|join(' ') }}
+{% if rhel_iscrypted is defined and rhel_iscrypted == true %}
+rootpw {{ rhel_rootpw }} --iscrypted
+{% else %}
+rootpw {{ rhel_rootpw }} --plaintext
+{% endif %}
+{{ rhel_install_source }}
+bootloader --append="rhgb quiet crashkernel=auto" {{ rhel_bootloader|join(' ') }}
+{% if rhel_zerombr is defined and rhel_zerombr == true %}
+zerombr
+{% endif %}
+{% set my_dict = {'clearpart': rhel_clearpart, 'autopart': rhel_autopart, 'firstboot': rhel_firstboot,
+'network': rhel_network, 'firewall': rhel_firewall, 'selinux': rhel_selinux} %}
+{% for key,value in my_dict.items() %}
+{% if value is defined and value != false %}
+{{ ([key] + value)|join(' ') }}
+{% endif %}
+{% endfor %}
+{% if rhel_reboot is defined and rhel_reboot == true %}
+reboot
+{% endif %}
+{% if rhel_packages is defined and rhel_packages != false and rhel_packages != [] %}
+%packages
+{% for pkg in rhel_packages %}
+{{ pkg }}
+{% endfor %}
+%end
+{% endif %}
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_9.j2 b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_9.j2
new file mode 100644
index 000000000..0721986e0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/templates/RHEL_9.j2
@@ -0,0 +1,31 @@
+eula --agreed
+lang {{ rhel_lang }}
+keyboard {{ rhel_keyboard }}
+timezone {{ rhel_timezone|join(' ') }}
+{% if rhel_iscrypted is defined and rhel_iscrypted == true %}
+rootpw {{ rhel_rootpw }} --iscrypted
+{% else %}
+rootpw {{ rhel_rootpw }} --plaintext
+{% endif %}
+{{ rhel_install_source }}
+bootloader --append="rhgb quiet crashkernel=1G-4G:192M,4G-64G:256M,64G-:512M" {{ rhel_bootloader|join(' ') }}
+{% if rhel_zerombr is defined and rhel_zerombr == true %}
+zerombr
+{% endif %}
+{% set my_dict = {'clearpart': rhel_clearpart, 'autopart': rhel_autopart, 'firstboot': rhel_firstboot,
+'network': rhel_network, 'firewall': rhel_firewall, 'selinux': rhel_selinux} %}
+{% for key,value in my_dict.items() %}
+{% if value is defined and value != false %}
+{{ ([key] + value)|join(' ') }}
+{% endif %}
+{% endfor %}
+{% if rhel_reboot is defined and rhel_reboot == true %}
+reboot
+{% endif %}
+{% if rhel_packages is defined and rhel_packages != false and rhel_packages != [] %}
+%packages
+{% for pkg in rhel_packages %}
+{{ pkg }}
+{% endfor %}
+%end
+{% endif %}
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/inventory
new file mode 100644
index 000000000..7cfd90d39
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/inventory
@@ -0,0 +1,8 @@
+# For SSH into Linux
+192.168.0.2 ansible_ssh_user=user ansible_ssh_pass=password
+
+# For SSH into windows
+192.168.0.3 ansible_ssh_user=user ansible_ssh_pass=password ansible_remote_tmp="C:\\Users\\user\\tmp" become_method=runas ansible_shell_type=cmd shell_type=cmd
+
+[idrac]
+192.168.0.1
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/test.yml
new file mode 100644
index 000000000..568c6e4ce
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- name: Operating System Deployment on iDRAC
+ hosts: idrac
+ roles:
+ - idrac_os_deployment
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/vars/main.yml
new file mode 100644
index 000000000..46220659d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_os_deployment/vars/main.yml
@@ -0,0 +1,93 @@
+---
+# vars file for idrac_os_deployment
+idrac_os_deployment_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+# OS Supported values Validation
+idrac_os_deployment_supported_os:
+ RHEL: ["8", "9"]
+ ESXI: ["8"]
+
+# Validation Error messages
+idrac_os_deployment_err_msg_os_required: "The parameters `os_name` and `os_version` are required."
+idrac_os_deployment_err_msg_unsupported_os: "The combination of OS name %s and version %s is not supported."
+idrac_os_deployment_err_msg_destination_required: "Input parameter `destination` is required."
+idrac_os_deployment_err_msg_root_password: "Please provide the root password for %s."
+idrac_os_deployment_err_msg_custom_iso_local: "Custom iso as a source with local protocol is not supported."
+idrac_os_deployment_err_msg_hostname_required: "Source hostname is required when custom iso is true or when protocol is http/https."
+idrac_os_deployment_err_msg_template: "Template with the name %s_%s.j2 doesn't exist in templates."
+idrac_os_deployment_err_msg_kickstat_file: "Kickstart file does not exist or is not readable: %s."
+idrac_os_deployment_err_msg_iso_file: "Download or copy of the ISO failed from the location %s.
+ Check that the iso file exists and the credentials are correct, then retry."
+idrac_os_deployment_err_msg_ks_file: "Download or copy of the kickstart file failed from the location %s.
+ Check that the kickstart file exists and the credentials are correct, then retry."
+idrac_os_deployment_err_msg_destination_iso_name: "The destination `iso_name` cannot be empty.
+ Provide a value or remove the variable to auto-name the custom iso file."
+idrac_os_deployment_err_msg_ks_file_ext: "The kickstart file extension should be `.cfg` and cannot be `%s`."
+idrac_os_deployment_err_msg_mountpoint_folder: "The folder specified in `destination.mountpoint`: %s doesn't exist."
+
+# Success messages
+idrac_os_deployment_out: ""
+idrac_os_deployment_success_message_os_deployment: "Successfully deployed the Operating System with the given custom iso."
+idrac_os_deployment_success_message_os_deployment_ks: "Successfully deployed the Operating System with the given kickstart file."
+idrac_os_deployment_success_message_os_deployment_iso: "Successfully deployed the Operating System."
+
+# download wget command
+idrac_os_deployment_wget_cmd: "wget %s --directory-prefix=%s"
+idrac_os_deployment_wget_cmd_vc: " --no-check-certificate"
+idrac_os_deployment_wget_cmd_creds: " --user=%s --password=%s"
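+# Example of the assembled download command (hypothetical values; the pieces above are
+# joined together in common/download_or_copy_source_files.yml):
+#   wget https://192.168.0.5/iso/os.iso --directory-prefix=/tmp/omam_osdxyz --no-check-certificate --user=admin --password=secret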
+
+# Mode Settings
+idrac_os_deployment_src_copy_mode: "0744"
+idrac_os_deployment_copy_mode: "0744"
+idrac_os_deployment_iso_extract_dir_mode: "0755"
+idrac_os_deployment_dest_mode: "0755"
+idrac_os_deployment_ks_gen_mode: "0744"
+
+
+# temp directory settings
+idrac_os_deployment_extract_dir: extract
+idrac_os_deployment_temp_file_prefix: "omam_osd"
+
+# Attributes required to compile iso
+idrac_os_deployment_kickstart_file: ""
+idrac_os_deployment_iso_file: ""
+idrac_os_deployment_iso_extract_dir: ""
+idrac_os_deployment_custom_iso_filename: "{{ hostname }}_{{ source.iso_name }}"
+idrac_os_deployment_custom_iso_file: ""
+idrac_os_deployment_hybrid_cmd: isohybrid --uefi %s
+idrac_os_deployment_checksum_cmd: implantisomd5 %s
+idrac_os_deployment_xorriso_cmd: "xorriso -osirrox on -indev %s -extract / %s"
+
+# Attributes required to compile esxi iso
+idrac_os_deployment_esxi_ks_filename: "KS.CFG"
+idrac_os_deployment_esxi_ks_dest_prefix: "cdrom:/"
+idrac_os_deployment_esxi_ks_location: "{{ idrac_os_deployment_esxi_ks_dest_prefix }}{{ idrac_os_deployment_esxi_ks_filename }}"
+idrac_os_deployment_esxi_mkiso_cmd:
+ "mkisofs -relaxed-filenames -J -R -o %s -b ISOLINUX.BIN -c BOOT.CAT
+ -no-emul-boot -boot-load-size 4 -boot-info-table -eltorito-alt-boot
+ -eltorito-platform efi -b EFIBOOT.IMG -no-emul-boot %s"
+
+# Attributes required to compile rhel iso
+idrac_os_deployment_rhel_ks_filename: "ks.cfg"
+idrac_os_deployment_rhel_ks_dest_prefix: "cdrom:/"
+idrac_os_deployment_rhel_ks_location: "{{ idrac_os_deployment_rhel_ks_dest_prefix }}{{ idrac_os_deployment_rhel_ks_filename }}"
+idrac_os_deployment_rhel_mkiso_cmd:
+ "mkisofs -o %s -b isolinux/isolinux.bin -J -R -l -c isolinux/boot.cat
+ -no-emul-boot -boot-load-size 4 -boot-info-table -eltorito-alt-boot -e images/efiboot.img
+ -no-emul-boot -graft-points -joliet-long -V %s ."
+
+# Extra params
+idrac_os_deployment_set_no_log: false
+idrac_os_deployment_validate_kickstart_file_ext: true
+idrac_os_deployment_copied_to_destination: false
+idrac_os_deployment_virtual_media_attached: false
+idrac_os_deployment_vm_protocol: "{{ source.protocol }}"
+idrac_os_deployment_vm_hostname: "{{ source.hostname }}"
+idrac_os_deployment_vm_iso_path: "{{ source.iso_path }}"
+idrac_os_deployment_vm_iso_name: "{{ source.iso_name }}"
+idrac_os_deployment_vm_username: "{{ source.username }}"
+idrac_os_deployment_vm_password: "{{ source.password }}"
+idrac_os_deployment_vm_proto_map:
+ http: "http://%s%s/%s"
+ https: "https://%s%s/%s"
+ cifs: "//%s%s/%s"
+ nfs: "%s:%s/%s"
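+# Example (hypothetical values): the nfs entry formatted in idrac/attach_iso_to_virtual_media.yml
+# with hostname "192.168.0.4", iso_path "/nfsshare" and iso_name "os_custom.iso" yields the
+# virtual media URL "192.168.0.4:/nfsshare/os_custom.iso".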
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_reset/README.md
new file mode 100644
index 000000000..f45c6154c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/README.md
@@ -0,0 +1,180 @@
+# idrac_reset
+
+Role to reset and restart iDRAC (iDRAC8 and iDRAC9 only) for Dell PowerEdge servers.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
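+
+For example, the collection can be installed from Ansible Galaxy:
+```
+ansible-galaxy collection install dellemc.openmanage
+```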
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>wait_for_idrac</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Wait for the iDRAC to restart and LC status to be ready.<br>- When I(reset_to_default) is C(All), the IP address of iDRAC might not be accessible because of the change in network settings.<br>- When I(reset_to_default) is C(ResetAllWithRootDefaults), the IP address of iDRAC might not be accessible because of the change in network settings.</td>
+ </tr>
+ <tr>
+ <td>force_reset</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Force restart the idrac without checking the idrac lifecycle controller status.</td>
+ </tr>
+ <tr>
+ <td>reset_to_default</td>
+ <td>false</td>
+ <td></td>
+ <td>["All", "ResetAllWithRootDefaults", "Default"]</td>
+ <td>str</td>
+ <td>- Reset the iDRAC to factory default settings.<br>- If this value is not set, then the default behaviour is to restart the iDRAC.<br>- C(All)This action will reset your iDRAC to the factory defaults. SupportAssist settings including registration information will be permanently removed. Username and password will reset to default credentials.<br>- C(ResetAllWithRootDefaults)This action will reset your iDRAC to the factory defaults. SupportAssist settings including registration information will be permanently removed. Default username will reset to root and password to the shipping value (root/shipping value).<br>- C(Default)This action will reset your iDRAC to the factory defaults. SupportAssist settings including registration information will be permanently removed. User and network settings will be preserved.<br>- "Note: Supported only for iDRAC9."</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_reset_out</td>
+ <td>{"msg": "iDRAC reset operation completed successfully"
+}</td>
+<td>Module output of idrac reset</td>
+</tbody>
+</table>
+
+## Examples
+-----
+
+```
+- name: Restart the idrac and wait for the idrac to be ready
+ ansible.builtin.include_role:
+ name: idrac_reset
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+
+- name: Restart the idrac and do not wait for the idrac to be ready
+ ansible.builtin.include_role:
+ name: idrac_reset
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ wait_for_idrac: false
+
+- name: Reset the idrac and wait for the idrac to be ready
+ ansible.builtin.include_role:
+ name: idrac_reset
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ reset_to_default: "All"
+```
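+
+After the role finishes, the `idrac_reset_out` fact listed under Fact variables can be checked from the same play; a minimal sketch:
+```
+- name: Show the idrac_reset role output
+  ansible.builtin.debug:
+    var: idrac_reset_out
+```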
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Kritika Bhateja (Kritika.Bhateja@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/defaults/main.yml
new file mode 100644
index 000000000..2f93da03a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+# defaults file for idrac_reset
+validate_certs: true
+https_timeout: 30
+https_port: 443
+wait_for_idrac: true
+force_reset: false
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/handlers/main.yml
new file mode 100644
index 000000000..22f42c01c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_reset
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/argument_specs.yml
new file mode 100644
index 000000000..f9f62c933
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/argument_specs.yml
@@ -0,0 +1,70 @@
+---
+argument_specs:
+ main:
+ version_added: "7.6.0"
+ short_description: Role to reset and restart iDRAC
+ description:
+ - Role to reset and restart iDRAC (iDRAC8 and iDRAC9 only) for Dell PowerEdge servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address or hostname.
+ username:
+ type: str
+ description: iDRAC username with admin privileges.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ wait_for_idrac:
+ description:
+ - Wait for the iDRAC to restart and LC status to be ready.
+ - When I(reset_to_default) is C(All), the IP address of iDRAC might not be accessible because of the change in network settings.
+ - When I(reset_to_default) is C(ResetAllWithRootDefaults), the IP address of iDRAC might not be accessible because of the change in network settings.
+ type: bool
+ default: true
+ force_reset:
+ description:
+ - Force restart the idrac without checking the idrac lifecycle controller status.
+ type: bool
+ default: false
+ reset_to_default:
+ description:
+ - Reset the iDRAC to factory default settings.
+ - If this value is not set, then the default behaviour is to restart the iDRAC.
+ - C(All)This action will reset your iDRAC to the factory defaults. SupportAssist settings including registration
+ information will be permanently removed.
+ Username and password will reset to default credentials.
+ - C(ResetAllWithRootDefaults)This action will reset your iDRAC to the factory defaults. SupportAssist settings including
+ registration information will be permanently removed.
+ Default username will reset to root and password to the shipping
+ value (root/shipping value).
+ - C(Default)This action will reset your iDRAC to the factory defaults. SupportAssist settings including registration
+ information will be permanently removed.
+ User and network settings will be preserved.
+ - "Note: Supported only for iDRAC9."
+ type: str
+ choices:
+ [
+ "All",
+ "ResetAllWithRootDefaults",
+ "Default",
+ ]
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/main.yml
new file mode 100644
index 000000000..4cd791f50
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/meta/main.yml
@@ -0,0 +1,25 @@
+galaxy_info:
+ role_name: idrac_reset
+ author: "Kritika Bhateja"
+ description: The role helps to reset and restart iDRAC.
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/lcstatus_check.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/lcstatus_check.yml
new file mode 100644
index 000000000..cd73a8d26
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/lcstatus_check.yml
@@ -0,0 +1,95 @@
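+# This task file re-includes itself (see the final task) until the lifecycle controller
+# reports LCStatus "Ready" or idrac_reset_retries is exhausted.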
+- name: Setting idrac_lc_status to default
+ ansible.builtin.set_fact:
+ idrac_lc_status:
+ LCStatus: ""
+
+- name: Get lifecycle controller status
+ when: idrac_lc_status.LCStatus != 'Ready'
+ block:
+ - name: Increment the retry
+ ansible.builtin.set_fact:
+ retry_count: "{{ 1 if retry_count is undefined else retry_count | int + 1 }}"
+
+ - name: Add a delay
+ ansible.builtin.pause:
+ seconds: "{{ idrac_reset_delay }}"
+
+ - name: Check whether lifecycle controller status is ready or not for iDRAC9
+ when:
+ - idrac_reset_fwm_ver is version('3.0', '>=')
+ block:
+ - name: Get lifecycle controller status for iDRAC9
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ idrac_reset_lifecycle_status_api }}"
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password |
+ default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers: "{{ idrac_reset_uri_headers }}"
+ body_format: "{{ idrac_reset_uri_body_format }}"
+ return_content: "{{ idrac_reset_uri_return_content }}"
+ force_basic_auth: "{{ idrac_reset_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ method: "POST"
+ body: "{}"
+ status_code: 200
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+ register: result
+ ignore_errors: true
+
+ - name: Parse lifecycle controller status response
+ ansible.builtin.set_fact:
+ idrac_lc_status:
+ LCStatus: "{{ result.json.LCStatus }}"
+ RTStatus: "{{ result.json.RTStatus }}"
+ ServerStatus: "{{ result.json.ServerStatus }}"
+ Status: "{{ result.json.Status }}"
+
+ - name: Check whether lifecycle controller status is ready or not for iDRAC8
+ when:
+ - idrac_reset_fwm_ver is version('3.0', '<')
+ block:
+ - name: Check whether lifecycle controller status is ready or not
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}/wsman"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ method: POST
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password |
+ default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ headers: "{{ idrac_reset_uri_headers_xml }}"
+ body: "{{ lookup('ansible.builtin.template', 'idrac_lifecycle_controller_status.j2') }}"
+ status_code: 200
+ return_content: "{{ idrac_reset_uri_return_content }}"
+ force_basic_auth: "{{ idrac_reset_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ register: wsman_envelope_for_idrac_lc_status
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+
+ - name: Parse lifecycle controller status response
+ ansible.builtin.set_fact:
+ idrac_lc_status: "{{ idrac_lc_status | default({}) | combine({item.key: idrac_reset_get_remote_services_api_status_code[item.key][item.value]}) }}"
+ with_dict:
+ LCStatus: "{{ wsman_envelope_for_idrac_lc_status.content | trim | regex_findall('(?<=<n1:LCStatus>).*(?=</n1:LCStatus>)') | first }}"
+ when: wsman_envelope_for_idrac_lc_status.content is search(".*<n1:ReturnValue>0</n1:ReturnValue>.*")
+
+ - name: Checking lifecycle controller status
+ ansible.builtin.fail:
+ msg: "Failed to get lifecycle controller status"
+ when: wsman_envelope_for_idrac_lc_status.content is search(".*<n1:ReturnValue>2</n1:ReturnValue>.*")
+ rescue:
+ - name: Maximum retries reached
+ ansible.builtin.fail:
+ msg: "LC status check is {{ idrac_lc_status.LCStatus }} after {{ retry_count }} number of retries, Exiting.."
+ when: (retry_count | int == idrac_reset_retries) and (idrac_lc_status.LCStatus != "Ready")
+
+ - name: Log the LC status
+ ansible.builtin.debug:
+ msg: "LC Status is Ready"
+ when: (retry_count | int <= idrac_reset_retries) and (idrac_lc_status.LCStatus == "Ready")
+
+ - name: Call LC Status Check
+ ansible.builtin.include_tasks: lcstatus_check.yml
+ when: (retry_count | int < idrac_reset_retries) or (idrac_lc_status.LCStatus != "Ready")
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/main.yml
new file mode 100644
index 000000000..72deb9d61
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tasks/main.yml
@@ -0,0 +1,146 @@
+---
+- name: Check whether at least one of 'IDRAC_USERNAME' or username is provided
+ ansible.builtin.fail:
+ msg: "Ensure the value for environment variable 'IDRAC_USERNAME' or
+ the argument 'username' is set."
+ when: username is not defined and not lookup('env', 'IDRAC_USERNAME')
+
+- name: Check whether at least one of 'IDRAC_PASSWORD' or password is provided
+ ansible.builtin.fail:
+ msg: "Ensure the value for environment variable 'IDRAC_PASSWORD' or
+ the argument 'password' is set."
+ when: password is not defined and not lookup('env', 'IDRAC_PASSWORD')
+
+- name: Setting uri options
+ ansible.builtin.set_fact:
+ idrac_opts: &idrac_opts
+ user: "{{ username | default(lookup('env', 'IDRAC_USERNAME')) }}"
+ password: "{{ password | default(lookup('env', 'IDRAC_PASSWORD')) }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ headers: "{{ idrac_reset_uri_headers }}"
+ body_format: "{{ idrac_reset_uri_body_format }}"
+ return_content: "{{ idrac_reset_uri_return_content }}"
+ force_basic_auth: "{{ idrac_reset_force_basic_auth }}"
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+- name: Get connection
+ ansible.builtin.uri:
+ <<: *idrac_opts
+ url: "https://{{ hostname }}:{{ https_port }}/redfish/v1/Systems"
+ method: "GET"
+ status_code: "{{ idrac_reset_uri_status_code }}"
+ register: idrac_reset_connection
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+
+- name: Validate hostname or certificate.
+ ansible.builtin.fail:
+ msg: "{{ idrac_reset_connection.msg }}"
+ when: idrac_reset_connection.status == -1
+
+- name: Validate credentials.
+ ansible.builtin.fail:
+ msg: "{{ idrac_reset_invalid_creds }}"
+ when: idrac_reset_connection.status == 401
+
+- name: Get the manager firmware version
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}/redfish/v1/Managers/iDRAC.Embedded.1"
+ <<: *idrac_opts
+ method: GET
+ register: idrac_reset_firmware_version
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+
+- name: Set manager firmware version
+ ansible.builtin.set_fact:
+ idrac_reset_fwm_ver: "{{ idrac_reset_firmware_version.json.FirmwareVersion }}"
+
+- name: Check LC status
+ ansible.builtin.include_tasks: lcstatus_check.yml
+
+- name: Perform graceful restart operation
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ idrac_reset_graceful_restart_api }}"
+ <<: *idrac_opts
+ method: "POST"
+ body: '{"ResetType": "GracefulRestart"}'
+ status_code: 204
+ register: idrac_reset_restart_result
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+ when: reset_to_default is not defined
+
+- name: Perform reset operation
+ ansible.builtin.uri:
+ url: "https://{{ hostname }}:{{ https_port }}{{ idrac_reset_reset_api }}"
+ <<: *idrac_opts
+ method: "POST"
+ body: '{"ResetType": "{{ reset_to_default }}" }'
+ status_code: [200, 405]
+ register: idrac_reset_result
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+ when: reset_to_default is defined
+ ignore_errors: true
+
+- name: Report that reset to default is not supported on iDRAC8
+ ansible.builtin.debug:
+ msg: "iDRAC reset operations are not supported"
+ when:
+ - idrac_reset_result is not skipped and idrac_reset_result is defined
+ - idrac_reset_result.status is defined
+ - idrac_reset_result.status == 405
+
+- name: Check for iDRAC connection
+ when: (idrac_reset_result is not skipped and idrac_reset_result.status == 200)
+ or (idrac_reset_restart_result is not skipped and idrac_reset_restart_result.status == 204)
+ block:
+ - name: Wait for the iDRAC HTTPS port to become open on the host
+ ansible.builtin.wait_for:
+ host: "{{ hostname }}"
+ port: "{{ https_port }}"
+ delay: "{{ idrac_reset_delay_for_idrac_connection }}"
+ timeout: "{{ idrac_reset_wait_for_idrac_reachable }}"
+ connect_timeout: "{{ idrac_reset_connect_timeout }}"
+ sleep: "{{ idrac_reset_sleep_interval }}"
+ active_connection_states:
+ - "ESTABLISHED"
+ when:
+ - wait_for_idrac is true
+ delegate_to: "{{ idrac_reset_task_delegate }}"
+ register: idrac_reset_connection_status
+ ignore_errors: true
+
+ - name: Check LC status after restart/reset operation
+ ansible.builtin.include_tasks: lcstatus_check.yml
+ when:
+ - wait_for_idrac is true
+
+ - name: Set fact when restart is triggered successfully
+ ansible.builtin.set_fact:
+ idrac_reset_out: {msg: "{{ idrac_reset_restart_trigger }}"}
+ when:
+ - wait_for_idrac is false
+ - idrac_reset_restart_result is not skipped and idrac_reset_restart_result.status == 204
+
+ - name: Set fact when reset is triggered successfully
+ ansible.builtin.set_fact:
+ idrac_reset_out: {msg: "{{ idrac_reset_reset_trigger }}"}
+ when:
+ - wait_for_idrac is false
+ - idrac_reset_result is not skipped and idrac_reset_result.status == 200
+
+ - name: Set fact when restart is done successfully
+ ansible.builtin.set_fact:
+ idrac_reset_out: {msg: "{{ idrac_reset_idrac_restarted_success }}"}
+ when:
+ - wait_for_idrac is true
+ - idrac_reset_connection_status is not failed
+ - idrac_reset_restart_result is not skipped and idrac_reset_restart_result.status == 204
+
+ - name: Set fact when reset is done successfully
+ ansible.builtin.set_fact:
+ idrac_reset_out: {msg: "{{ idrac_reset_success }}"}
+ when:
+ - wait_for_idrac is true
+ - idrac_reset_connection_status is not failed
+ - idrac_reset_result is not skipped and idrac_reset_result.status == 200
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/templates/idrac_lifecycle_controller_status.j2 b/ansible_collections/dellemc/openmanage/roles/idrac_reset/templates/idrac_lifecycle_controller_status.j2
new file mode 100644
index 000000000..f79aacb6a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/templates/idrac_lifecycle_controller_status.j2
@@ -0,0 +1,23 @@
+<s:Envelope xmlns:s="http://www.w3.org/2003/05/soap-envelope" xmlns:wsa="http://schemas.xmlsoap.org/ws/2004/08/addressing" xmlns:wsman="http://schemas.dmtf.org/wbem/wsman/1/wsman.xsd" xmlns:n1="http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_LCService">
+ <s:Header>
+ <wsa:To s:mustUnderstand="true">https://{{ hostname }}:{{ https_port }}/wsman</wsa:To>
+ <wsman:ResourceURI s:mustUnderstand="true">http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_LCService</wsman:ResourceURI>
+ <wsa:ReplyTo>
+ <wsa:Address>http://schemas.xmlsoap.org/ws/2004/08/addressing/role/anonymous</wsa:Address>
+ </wsa:ReplyTo>
+ <wsa:Action s:mustUnderstand="true">http://schemas.dmtf.org/wbem/wscim/1/cim-schema/2/DCIM_LCService/GetRemoteServicesAPIStatus</wsa:Action>
+ <wsman:MaxEnvelopeSize s:mustUnderstand="true">524288</wsman:MaxEnvelopeSize>
+ <wsa:MessageID s:mustUnderstand="true">urn:uuid:{{ lookup('password', '/dev/null chars=ascii_lowercase,digits length=32') | to_uuid }}</wsa:MessageID>
+ <wsman:OperationTimeout>PT12.0S</wsman:OperationTimeout>
+ <wsman:SelectorSet>
+ <wsman:Selector Name="__cimnamespace">root/dcim</wsman:Selector>
+ <wsman:Selector Name="SystemName">DCIM:ComputerSystem</wsman:Selector>
+ <wsman:Selector Name="SystemCreationClassName">DCIM_ComputerSystem</wsman:Selector>
+ <wsman:Selector Name="Name">DCIM:LCService</wsman:Selector>
+ <wsman:Selector Name="CreationClassName">DCIM_LCService</wsman:Selector>
+ </wsman:SelectorSet>
+ </s:Header>
+ <s:Body>
+ <n1:GetRemoteServicesAPIStatus_INPUT />
+ </s:Body>
+</s:Envelope> \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/test.yml
new file mode 100644
index 000000000..f1b5e6dff
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Testing for idrac reset
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_reset
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_reset/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_reset/vars/main.yml
new file mode 100644
index 000000000..88179c6d0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_reset/vars/main.yml
@@ -0,0 +1,41 @@
+---
+# vars file for idrac_reset
+idrac_reset_uri_headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+idrac_reset_uri_headers_xml:
+ Content-Type: "application/xml"
+idrac_reset_uri_body_format: "json"
+idrac_reset_force_basic_auth: true
+idrac_reset_uri_status_code:
+ - 200
+ - 400
+ - 401
+ - 404
+ - -1
+idrac_reset_delay: 30
+idrac_reset_retries: 10
+idrac_reset_delay_for_idrac_connection: 60
+idrac_reset_wait_for_idrac_reachable: 360
+idrac_reset_connect_timeout: 10
+idrac_reset_sleep_interval: 5
+idrac_reset_uri_return_content: true
+idrac_reset_idrac_restarted_success: "iDRAC restart operation completed successfully"
+idrac_reset_success: "iDRAC reset operation completed successfully"
+idrac_reset_reset_trigger: "iDRAC reset operation triggered successfully"
+idrac_reset_restart_trigger: "iDRAC restart operation triggered successfully"
+idrac_reset_lifecycle_status_api: "/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+idrac_reset_graceful_restart_api: "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Manager.Reset"
+idrac_reset_reset_api: "/redfish/v1/Managers/iDRAC.Embedded.1/Actions/Oem/DellManager.ResetToDefaults"
+idrac_reset_get_remote_services_api_status_code:
+ LCStatus:
+ "0": "Ready"
+ "1": "Not Initialized"
+ "2": "Reloading Data"
+ "3": "Disabled"
+ "4": "In Recovery"
+ "5": "In Use"
+ "U": "Unknown"
+idrac_reset_task_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+idrac_reset_invalid_creds: "The authentication credentials included with this request are missing or invalid."
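To show how these defaults tie together, here is a minimal sketch that polls the lifecycle status endpoint defined above until the controller is ready; it is not taken from the role's task files, and the assumption that the action response carries an LCStatus code that maps through the table above is made only for illustration.

```yaml
# Illustrative sketch using the role defaults above; not taken from the role's tasks.
# Assumes the action response exposes an LCStatus code that maps through
# idrac_reset_get_remote_services_api_status_code to a "Ready" state.
- name: Wait until the Lifecycle Controller reports Ready (illustrative)
  ansible.builtin.uri:
    url: "https://{{ hostname }}:{{ https_port }}{{ idrac_reset_lifecycle_status_api }}"
    method: POST
    user: "{{ username }}"
    password: "{{ password }}"
    force_basic_auth: "{{ idrac_reset_force_basic_auth }}"
    validate_certs: false
    headers: "{{ idrac_reset_uri_headers }}"
    body_format: "{{ idrac_reset_uri_body_format }}"
    body: {}
    status_code: "{{ idrac_reset_uri_status_code }}"
    return_content: "{{ idrac_reset_uri_return_content }}"
  register: lc_status
  until: idrac_reset_get_remote_services_api_status_code.LCStatus[lc_status.json.LCStatus | default('U') | string] | default('Unknown') == 'Ready'
  retries: "{{ idrac_reset_retries }}"
  delay: "{{ idrac_reset_delay }}"
```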
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/README.md
new file mode 100644
index 000000000..ff4152bf5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/README.md
@@ -0,0 +1,217 @@
+# idrac_server_powerstate
+
+Role to manage the different power states of the specified device using iDRACs (iDRAC8 and iDRAC9 only) for Dell PowerEdge servers.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>resource_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The unique identifier of the device being managed. For example, U(https://<I(baseuri)>/redfish/v1/Systems/<I(resource_id)>).<br>- This option is mandatory for I(base_uri) with multiple devices.<br>- To get the device details, use the API U(https://<I(baseuri)>/redfish/v1/Systems).</td>
+ </tr>
+ <tr>
+ <td>reset_type</td>
+ <td>false</td>
+ <td>'On'</td>
+ <td>["ForceOff", "ForceOn", "ForceRestart", "GracefulRestart", "GracefulShutdown", "Nmi", "On", "PowerCycle", "PushPowerButton"]</td>
+ <td>str</td>
+ <td>- This option resets the device.<br>- If C(ForceOff), Turns off the device immediately.<br>- If C(ForceOn), Turns on the device immediately.<br>- If C(ForceRestart), Turns off the device immediately, and then restarts the device.<br>- If C(GracefulRestart), Performs graceful shutdown of the device, and then restarts the device.<br>- If C(GracefulShutdown), Performs a graceful shutdown of the device, and then turns off the device.<br>- If C(Nmi), Sends a diagnostic interrupt to the device. This is usually a non-maskable interrupt (NMI) on x86 devices.<br>- If C(On), Turns on the device.<br>- If C(PowerCycle), Performs power cycle on the device.<br>- If C(PushPowerButton), Simulates the pressing of a physical power button on the device.<br>- When a power control operation that is not supported on the device is performed, an error message is displayed with the list of operations that can be performed.</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>idrac_server_powerstate_out</td>
+ <td>{"changed": true,
+ "failed": false,
+ "msg": "Successfully performed the reset type operation 'GracefulRestart'."
+}</td>
+<td>Module output of the power state control</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+-----
+
+```
+- name: "Performing force off operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "ForceOff"
+
+- name: "Performing power on operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "On"
+
+- name: "Performing graceful restart operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "GracefulRestart"
+
+- name: "Performing graceful shutdown operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "GracefulShutdown"
+
+- name: "Performing powercycle operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "PowerCycle"
+
+- name: "Performing push power button operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "PushPowerButton"
+
+- name: "Performing force restart operation"
+ ansible.builtin.include_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "192.1.2.1"
+ username: "username"
+ password: "password"
+ ca_path: "/path/to/ca_cert.pem"
+ reset_type: "ForceRestart"
+```
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Kritika Bhateja (Kritika.Bhateja@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/defaults/main.yml
new file mode 100644
index 000000000..9538ec0a9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for idrac_server_powerstate
+validate_certs: true
+https_timeout: 30
+reset_type: "On"
+https_port: 443
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/handlers/main.yml
new file mode 100644
index 000000000..032a5a52e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_server_powerstate
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/argument_specs.yml
new file mode 100644
index 000000000..642da3104
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/argument_specs.yml
@@ -0,0 +1,69 @@
+---
+argument_specs:
+ main:
+ version_added: "7.4.0"
+ short_description: Role to manage the different power states of the specified device
+ description:
+ - Role to manage the different power states of the specified device using iDRACs (iDRAC8 and iDRAC9 only) for Dell PowerEdge servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ resource_id:
+ description:
+ - The unique identifier of the device being managed.
+ - This option is mandatory for I(hostname) with multiple devices.
+ type: str
+ reset_type:
+ description:
+ - This option resets the device.
+ - If C(ForceOff), Turns off the device immediately.
+ - If C(ForceOn), Turns on the device immediately.
+ - If C(ForceRestart), Turns off the device immediately, and then restarts the device.
+ - If C(GracefulRestart), Performs graceful shutdown of the device, and then restarts the device.
+ - If C(GracefulShutdown), Performs a graceful shutdown of the device, and then turns off the device.
+ - If C(Nmi), Sends a diagnostic interrupt to the device. This is usually a non-maskable interrupt (NMI) on x86 devices.
+ - If C(On), Turns on the device.
+ - If C(PowerCycle), Performs power cycle on the device.
+ - If C(PushPowerButton), Simulates the pressing of a physical power button on the device.
+ - When a power control operation that is not supported on the device is performed,
+ an error message is displayed with the list of operations that can be performed.
+ type: str
+ default: "On"
+ choices:
+ [
+ "ForceOff",
+ "ForceOn",
+ "ForceRestart",
+ "GracefulRestart",
+ "GracefulShutdown",
+ "Nmi",
+ "On",
+ "PowerCycle",
+ "PushPowerButton"
+ ]
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/main.yml
new file mode 100644
index 000000000..e206309b7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/meta/main.yml
@@ -0,0 +1,54 @@
+galaxy_info:
+ author: Kritika-Bhateja
+ description: The role helps to manage the different power states of the specified device.
+ company: Dell Technologies
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies: []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/converge.yml
new file mode 100644
index 000000000..27ec2fdff
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/converge.yml
@@ -0,0 +1,188 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Performing operation on the iDRAC device using default reset_type
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ resource_id: "System.Embedded.1"
+
+ - name: Asserting after performing operation using default reset_type
+ ansible.builtin.assert:
+ that: |-
+ ( idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'On'." )
+ or
+ ( idrac_server_powerstate_out.msg == "The device is already powered on." )
+
+ - name: Performing operation 'On' on the iDRAC with invalid resource_id
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ resource_id: "System.Embedded.0"
+ ignore_errors: true
+ register: idrac_server_powerstate_error_msg
+
+ - name: Asserting after performing operation with invalid resource_id
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Invalid device Id 'System.Embedded.0' is provided"
+ - not idrac_server_powerstate_out.changed
+
+ - name: Performing operation 'On' with invalid hostname
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "randomHostname"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ resource_id: "System.Embedded.1"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_server_powerstate_error_msg
+
+ - name: Asserting after performing operation with invalid hostname
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "<urlopen error [Errno -2] Name or service not known>" or
+ idrac_server_powerstate_out.msg == "<urlopen error Unable to resolve hostname or IP randomHostname:443.>"
+
+ - name: Performing operation 'On' with invalid username
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "WrongUsername123"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ resource_id: "System.Embedded.1"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_server_powerstate_error_msg
+
+ - name: Asserting after performing operation with invalid username
+ ansible.builtin.assert:
+ that:
+ - '"HTTP Error 401" in idrac_server_powerstate_out.msg'
+
+ - name: Performing operation 'On' with invalid password
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "WrongPassword@123"
+ validate_certs: false
+ resource_id: "System.Embedded.1"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_server_powerstate_error_msg
+
+ - name: Asserting after performing operation with invalid password
+ ansible.builtin.assert:
+ that: |-
+ ('"HTTP Error 401" in idrac_server_powerstate_out.msg')
+ or
+ ('"urlopen error timed out" in idrac_server_powerstate_out.msg')
+
+ - name: Performing operation 'On' with invalid validate_certs
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: 'someStringValue'
+ resource_id: "System.Embedded.1"
+ ignore_errors: true
+ ignore_unreachable: true
+ register: idrac_server_powerstate_error_msg
+
+ - name: Asserting after performing operation with invalid validate_certs
+ ansible.builtin.assert:
+ that:
+ - '"Valid booleans include" in idrac_server_powerstate_out.msg'
+
+ - name: Performing operation 'ForceOn' on iDRAC server
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: 'false'
+ reset_type: 'ForceOn'
+ resource_id: "System.Embedded.1"
+ ignore_errors: true
+ register: idrac_server_powerstate_error_msg
+
+ - name: Asserting after performing operation "ForceOn"
+ ansible.builtin.assert:
+ that:
+ - '"The target device does not support a force on operation." in idrac_server_powerstate_out.msg'
+
+ - name: Fetching all the resource_id using URI
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: uri_out
+ no_log: true
+
+ - name: Splitting the resource_id and picking last
+ ansible.builtin.set_fact:
+ idrac_server_powerstate_resource_id_list: "{{ [item['@odata.id'] | split('/') | last] + idrac_server_powerstate_resource_id_list | default([]) }}"
+ loop: "{{ uri_out.json.Members }}"
+
+ - name: Count resource_id
+ ansible.builtin.set_fact:
+ number_of_resource_id: "{{ idrac_server_powerstate_resource_id_list | length }}"
+
+ - name: Performing Operation 'On' without resource_id when iDRAC has secondary resource_id
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ resource_id: "{{ null | default(omit) }}"
+ when: number_of_resource_id | int > 1
+
+ - name: Asserting after performing operation without resource_id when iDRAC has secondary resource_id
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Multiple devices exists in the system, but option 'resource_id' is not specified."
+ when: number_of_resource_id | int > 1
+
+ - name: Performing Operation 'On' with valid secondary resource_id
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: 'On'
+ resource_id: "{{ idrac_server_powerstate_resource_id_list[1] }}"
+ when: number_of_resource_id | int > 1
+
+ - name: Asserting after performing operation with valid secondary resource_id
+ ansible.builtin.assert:
+ that: |-
+ ( idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'On'." )
+ or
+ ( idrac_server_powerstate_out.msg == "The device is already powered on." )
+ when: number_of_resource_id | int > 1
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/molecule.yml
new file mode 100644
index 000000000..ccf982411
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/default/molecule.yml
@@ -0,0 +1,9 @@
+scenario:
+ test_sequence:
+ - dependency
+ - destroy
+ - syntax
+ - create
+ - converge
+ - cleanup
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/converge.yml
new file mode 100644
index 000000000..6900d9f3f
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/converge.yml
@@ -0,0 +1,69 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered on
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Power On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: idrac_server_powerstate_out.changed # noqa: no-handler
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: Performing ForceOff on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "ForceOff"
+ resource_id: "System.Embedded.1"
+
+ - name: Asserting ForceOff in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing ForceOff operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "Off"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ no_log: true
+
+ - name: Asserting ForceOff
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'ForceOff'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+
+ - name: Asserting 'ForceOff' in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "The device is already powered off."
+ when: not ansible_check_mode and not idrac_server_powerstate_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forceoff/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/converge.yml
new file mode 100644
index 000000000..10c8fe48b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/converge.yml
@@ -0,0 +1,72 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered on
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Power On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: idrac_server_powerstate_out.changed # noqa: no-handler
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: Performing ForceRestart on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "ForceRestart"
+ resource_id: "System.Embedded.1"
+ tags: molecule-idempotence-notest
+
+ - name: Asserting ForceRestart in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing ForceRestart operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing ForceRestart operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: "Accept=application/json"
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "On"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ no_log: true
+ tags: molecule-idempotence-notest
+
+ - name: Asserting ForceRestart
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'ForceRestart'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/forcerestart/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/converge.yml
new file mode 100644
index 000000000..df4635004
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/converge.yml
@@ -0,0 +1,72 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered on
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Power On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: idrac_server_powerstate_out.changed # noqa: no-handler
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: Performing GracefulRestart on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "GracefulRestart"
+ resource_id: "System.Embedded.1"
+ tags: molecule-idempotence-notest
+
+ - name: Asserting GracefulRestart in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing GracefulRestart operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing GracefulRestart operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "On"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ no_log: true
+ tags: molecule-idempotence-notest
+
+ - name: Asserting GracefulRestart
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'GracefulRestart'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulrestart/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/converge.yml
new file mode 100644
index 000000000..ff929b07e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/converge.yml
@@ -0,0 +1,63 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered on
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Power On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: idrac_server_powerstate_out.changed # noqa: no-handler
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: Performing GracefulShutdown on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "GracefulShutdown"
+ resource_id: "System.Embedded.1"
+
+ - name: Asserting GracefulShutdown in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing GracefulShutdown operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "Off"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ no_log: true
+
+ - name: Asserting GracefulShutdown
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'GracefulShutdown'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/gracefulshutdown/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/converge.yml
new file mode 100644
index 000000000..254c0bbd4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/converge.yml
@@ -0,0 +1,72 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered on
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Power On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: idrac_server_powerstate_out.changed # noqa: no-handler
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: Performing Nmi on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "Nmi"
+ resource_id: "System.Embedded.1"
+ tags: molecule-idempotence-notest
+
+ - name: Asserting Nmi in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Nmi operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing Nmi operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "On"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ no_log: true
+ tags: molecule-idempotence-notest
+
+ - name: Asserting Nmi
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'Nmi'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/nmi/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/converge.yml
new file mode 100644
index 000000000..329fb5b73
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/converge.yml
@@ -0,0 +1,73 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered off
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "ForceOff"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing ForceOff operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
+
+ - name: Performing Power On operation on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+
+ - name: Asserting On in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+
+ - name: Fetching iDRAC server powerstate after performing On operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "On"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode
+ no_log: true
+
+ - name: Asserting 'On' in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'On'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+
+ - name: Asserting 'On' in idempotence mode
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "The device is already powered on."
+ when: not ansible_check_mode and not idrac_server_powerstate_out.changed
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/on/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/converge.yml
new file mode 100644
index 000000000..074df5c99
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/converge.yml
@@ -0,0 +1,72 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Prerequisite - Server is powered on
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "On"
+ resource_id: "System.Embedded.1"
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing Power On operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: idrac_server_powerstate_out.changed # noqa: no-handler
+ check_mode: false
+ tags: molecule-idempotence-notest
+
+ - name: Performing PowerCycle on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "PowerCycle"
+ resource_id: "System.Embedded.1"
+ tags: molecule-idempotence-notest
+
+ - name: Asserting PowerCycle in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing PowerCycle operation"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing PowerCycle operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ until: idrac_server_powerstate_current_powerstate.json.PowerState == "On"
+ retries: 10
+ delay: 30
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ no_log: true
+ tags: molecule-idempotence-notest
+
+ - name: Asserting PowerCycle
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'PowerCycle'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/powercycle/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/converge.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/converge.yml
new file mode 100644
index 000000000..a4b430570
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/converge.yml
@@ -0,0 +1,48 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ idrac_server_powerstate_wait_seconds: "{{ lookup('env', 'idrac_powercycle_wait_seconds') }}"
+ tasks:
+ - name: Performing PushPowerButton on the iDRAC device
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "PushPowerButton"
+ resource_id: "System.Embedded.1"
+ tags: molecule-idempotence-notest
+
+ - name: Fetching iDRAC server powerstate after performing PushPowerButton operation
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}:{{ https_port }}/redfish/v1/Systems/System.Embedded.1"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ headers: 'Accept=application/json'
+ register: idrac_server_powerstate_current_powerstate
+ no_log: true
+ tags: molecule-idempotence-notest
+
+ - name: "Waiting after performing PushPowerButton operation, Server is Powering On"
+ ansible.builtin.pause:
+ seconds: "{{ idrac_server_powerstate_wait_seconds }}"
+ when: not ansible_check_mode and idrac_server_powerstate_current_powerstate.json.PowerState == "On"
+ tags: molecule-idempotence-notest
+
+ - name: Asserting PushPowerButton in check mode
+ ansible.builtin.assert:
+ that: idrac_server_powerstate_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+ tags: molecule-idempotence-notest
+
+ - name: Asserting PushPowerButton in normal mode
+ ansible.builtin.assert:
+ that:
+ - idrac_server_powerstate_out.msg == "Successfully performed the reset type operation 'PushPowerButton'."
+ when: not ansible_check_mode and idrac_server_powerstate_out.changed
+ tags: molecule-idempotence-notest
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/molecule.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/molecule/pushpowerbutton/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/init.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/init.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/init.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/main.yml
new file mode 100644
index 000000000..1d7b315eb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# tasks file for idrac_server_powerstate
+- name: Setting up parameters
+ ansible.builtin.set_fact:
+ baseuri: "{{ hostname }}:{{ https_port }}"
+
+- name: "Performing the operation on iDRAC server: {{ reset_type }}"
+ dellemc.openmanage.redfish_powerstate:
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: "{{ https_timeout }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ reset_type: "{{ reset_type }}"
+ validate_certs: "{{ validate_certs }}"
+ resource_id: "{{ resource_id | default(omit) }}"
+ register: idrac_server_powerstate_out
+ delegate_to: "{{ idrac_server_powerstate_task_delegate }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/test.yml
new file mode 100644
index 000000000..89d986b52
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Executing iDRAC server powerstate role
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_server_powerstate
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/vars/main.yml
new file mode 100644
index 000000000..d571323bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_server_powerstate/vars/main.yml
@@ -0,0 +1,3 @@
+---
+# vars file for idrac_server_powerstate
+idrac_server_powerstate_task_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/README.md b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/README.md
new file mode 100644
index 000000000..2a9913839
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/README.md
@@ -0,0 +1,682 @@
+# idrac_storage_controller
+
+Role to configure the physical disk, virtual disk, and storage controller settings on iDRAC9 based PowerEdge servers.
+
+## Requirements
+
+### Development
+Requirements to develop and contribute to the role.
+```
+ansible
+docker
+molecule
+python
+```
+
+### Production
+Requirements to use the role.
+```
+ansible
+python
+```
+
+### Ansible collections
+Collections required to use the role
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>controller_id</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The ID of the controller on which the operations need to be performed.</td>
+ </tr>
+ <tr>
+ <td>volumes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>- List of volumes that belong to I(controller_id).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;id</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Fully Qualified Device Descriptor (FQDD) of the volume.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;dedicated_hot_spare</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Fully Qualified Device Descriptor (FQDD) of the physical disk to assign the volume as a dedicated hot spare to a disk.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;encrypted</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- To encrypt the virtual disk.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;expand_capacity_disk</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Fully Qualified Device Descriptor (FQDD) of the disk for expanding the capacity with the existing disk.<br>- I(expand_capacity_size) is mutually exclusive with I(expand_capacity_disk).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;expand_capacity_size</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Capacity of the virtual disk to be expanded in MB.<br>- Check mode and Idempotency is not supported for I(expand_capacity_size).<br>- Minimum Online Capacity Expansion size must be greater than 100 MB of the current size.<br>- I(expand_capacity_disk) is mutually exclusive with I(expand_capacity_size).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;blink</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Blinks the target virtual disk, and it always reports as changes found when check mode is enabled.</td>
+ </tr>
+ <tr>
+ <td>disks</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>- List of physical disks that belong to I(controller_id).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;id</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Fully Qualified Device Descriptor (FQDD) of the physical disk.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;blink</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Blinks the target physical disk, and it always reports as changes found when check mode is enabled.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;raid_state</td>
+ <td>false</td>
+ <td></td>
+ <td>'raid', 'nonraid'</td>
+ <td>str</td>
+ <td>- Converts the disk from Non-RAID to RAID and vice versa.<br>- C(raid) converts the physical disk to RAID.<br>- C(nonraid) converts the physical disk to Non-RAID.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;status</td>
+ <td>false</td>
+ <td></td>
+ <td>'online', 'offline'</td>
+ <td>str</td>
+ <td>- Converts the disk from online to offline and vice versa.<br>- C(online) converts the physical disk status to online.<br>- C(offline) converts the physical disk status to offline.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;global_hot_spare</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Assigns a global hot spare or unassigns a hot spare.<br>- C(true) assigns the disk as a global hot spare.<br>- C(false) unassigns the disk as a hot spare.</td>
+ </tr>
+ <tr>
+ <td>reset_config</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- To reset the controller.</td>
+ </tr>
+ <tr>
+ <td>set_controller_key</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Set the security key or enable controller encryption.<br>- If I(mode) is provided, the controller encryption operation is performed; otherwise, the controller security key is set.<br>- I(key) and I(key_id) are required for this operation.</td>
+ </tr>
+ <tr>
+ <td>rekey</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Resets the key on the controller, and it always reports as changes found when check mode is enabled.</td>
+ </tr>
+ <tr>
+ <td>remove_key</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Remove the key on controllers.</td>
+ </tr>
+ <tr>
+ <td>key</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- A new security key passphrase that the encryption-capable controller uses to create the encryption key. The controller uses the encryption key to lock or unlock access to the Self-Encrypting Drive (SED). Only one encryption key can be created for each controller.<br>- This is mandatory when I(set_controller_key) is C(true) or I(rekey) is C(true).<br>- The key can be a maximum of 32 characters in length, where the expanded form of the special character is counted as a single character.<br>- The key must contain at least one character from each of the following character classes: uppercase, lowercase, number, and special character.</td>
+ </tr>
+ <tr>
+ <td>key_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- This is a user-supplied text label associated with the passphrase.<br>- This is mandatory when I(set_controller_key) is C(true) or I(rekey) is C(true).<br>- I(key_id) can be a maximum of 32 characters in length and must not contain spaces.</td>
+ </tr>
+ <tr>
+ <td>old_key</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Security key passphrase used by the encryption-capable controller.<br>- This option is mandatory when I(rekey) is C(true).</td>
+ </tr>
+ <tr>
+ <td>mode</td>
+ <td>false</td>
+ <td></td>
+ <td>'LKM', 'SEKM'</td>
+ <td>str</td>
+ <td>- Encryption mode of the encryption capable controller.<br>- This option is mandatory when I(rekey) is C(true) and for enabling controller encryption.<br>- C(SEKM) to choose mode as secure enterprise key manager.<br>- C(LKM) to choose mode as local key management.</td>
+ </tr>
+ <tr>
+ <td>attributes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>- Dictionary of controller attributes and value pairs.<br>- This feature is only supported for iDRAC9 with firmware version 6.00.00.00 and above.<br>- I(controller_id) is required for this operation.<br>- I(apply_time) and I(maintenance_window) are applicable for I(attributes).<br>- Use U(https://I(idrac_ip)/redfish/v1/Schemas/DellOemStorageController.json) to view the attributes.</td>
+ </tr>
+ <tr>
+ <td>apply_time</td>
+ <td>false</td>
+ <td>Immediate</td>
+ <td>'Immediate', 'OnReset', 'AtMaintenanceWindowStart', 'InMaintenanceWindowOnReset'</td>
+ <td>str</td>
+ <td>- Apply time of the I(attributes).<br>- This is applicable only to I(attributes).<br>- C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait) is applicable.<br>- C(OnReset) Allows the user to apply the changes on the next reboot of the host server.<br>- C(AtMaintenanceWindowStart) Allows the user to apply the changes at the start of a maintenance window as specified in I(maintenance_window).<br>- C(InMaintenanceWindowOnReset) Allows the users to apply after a manual reset but within the maintenance window as specified in I(maintenance_window).</td>
+ </tr>
+ <tr>
+ <td>maintenance_window</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>- Option to schedule the maintenance window.<br>- This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;start_time</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- The start time for the maintenance window to be scheduled.<br>- The format is YYYY-MM-DDThh:mm:ss<offset>.<br>- <offset> is the time offset from UTC of the current time zone set in iDRAC; for example, +05:30 for IST.</td>
+ </tr>
+ <tr>
+ <td>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;duration</td>
+ <td>false</td>
+ <td>900</td>
+ <td></td>
+ <td>int</td>
+ <td>- The duration in seconds for the maintenance window.</td>
+ </tr>
+</tbody>
+</table>
+
+## Example Playbook
+
+```
+- name: Reset controller configuration
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.0
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    reset_config: true
+```
+
+```
+- name: Set controller attributes
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    attributes:
+      ControllerMode: HBA
+    apply_time: Immediate
+```
+
+```
+- name: Set controller attributes at maintenance window
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    attributes:
+      CheckConsistencyMode: Normal
+      LoadBalanceMode: Disabled
+    apply_time: AtMaintenanceWindowStart
+    maintenance_window:
+      start_time: "2022-09-30T05:15:40-05:00"
+      duration: 1200
+```
+
+```
+- name: Set controller encryption key
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    key: PassPhrase@123
+    key_id: MyKeyId123
+    set_controller_key: true
+```
+
+```
+- name: Rekey in LKM mode
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    rekey: true
+    key: PassPhrase@123
+    key_id: mykeyid123
+    old_key: OldPassPhrase@123
+    mode: LKM
+```
+
+```
+- name: Rekey in SEKM mode
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    rekey: true
+    key: PassPhrase@123
+    key_id: mykeyid123
+    old_key: OldPassPhrase@123
+    mode: SEKM
+```
+
+```
+- name: Remove controller encryption key
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    remove_key: true
+```
+
+```
+- name: Enable controller encryption
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    set_controller_key: true
+    key: your_key@123
+    key_id: your_keyid@123
+    mode: LKM
+```
+
+```
+- name: Change physical disk state to online
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      status: online
+```
+
+```
+- name: Change physical disk state to offline
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      status: offline
+```
+
+```
+- name: Convert physical disk to RAID mode
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      raid_state: raid
+```
+
+```
+- name: Convert physical disk to Non-RAID mode
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      raid_state: nonraid
+```
+
+```
+- name: Assign dedicated hot spare.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    volumes:
+      id: Disk.Virtual.0:RAID.Slot.1-1
+      dedicated_hot_spare: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+```
+
+```
+- name: Assign global hot spare.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      global_hot_spare: true
+```
+
+```
+- name: Unassign hot spare.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      global_hot_spare: false
+```
+
+```
+- name: Lock virtual drive.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    volumes:
+      id: Disk.Virtual.0:RAID.Slot.1-1
+      encrypted: true
+```
+
+```
+- name: Online capacity expansion of volume using size.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    volumes:
+      id: Disk.Virtual.0:RAID.Slot.1-1
+      expand_capacity_size: 362785
+```
+
+```
+- name: Online capacity expansion of volume using target.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    volumes:
+      id: Disk.Virtual.0:RAID.Slot.1-1
+      expand_capacity_disk: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+```
+
+```
+- name: Blink virtual drive.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    volumes:
+      id: Disk.Virtual.0:RAID.Slot.1-1
+      blink: true
+```
+
+```
+- name: Unblink virtual drive.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    volumes:
+      id: Disk.Virtual.0:RAID.Slot.1-1
+      blink: false
+```
+
+```
+- name: Blink physical disk.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      blink: true
+```
+
+```
+- name: Unblink physical disk.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    ca_path: "/path/to/ca_cert.pem"
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+      blink: false
+```
+
+```
+- name: Multiple operations on controller.
+  ansible.builtin.include_role:
+    name: idrac_storage_controller
+  vars:
+    hostname: 192.168.0.1
+    username: username
+    password: password
+    validate_certs: false
+    controller_id: RAID.Slot.1-1
+    disks:
+      id: Disk.Bay.3:Enclosure.Internal.0-1:RAID.Slot.1-1
+      global_hot_spare: false
+    set_controller_key: true
+    key: PassPhrase@12341
+    key_id: mykeyid123
+    mode: LKM
+    attributes:
+      CheckConsistencyMode: StopOnError
+      CopybackMode: OnWithSMART
+    apply_time: Immediate
+```
+
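+Each operation above registers its module result on the target host (for example, reset_config_out for the controller reset or attributes_config_out for the attribute update, as registered in the role tasks). As a minimal sketch, such a registered result can be inspected after the role has run:
+
+```
+- name: Show the result of the controller reset
+  ansible.builtin.debug:
+    var: reset_config_out
+```
+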
+## Author Information
+
+---
+
+Dell Technologies <br>
+Felix Stephen Anthuvan (felix_s@dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/defaults/main.yml
new file mode 100644
index 000000000..dd4c8e271
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/defaults/main.yml
@@ -0,0 +1,7 @@
+---
+apply_time: Immediate
+https_port: 443
+https_timeout: 30
+validate_certs: true
+job_wait: true
+job_wait_timeout: 300
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/handlers/main.yml
new file mode 100644
index 000000000..8cec1a4ee
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for idrac_storage_controller
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/argument_specs.yml
new file mode 100644
index 000000000..e26b57242
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/argument_specs.yml
@@ -0,0 +1,200 @@
+---
+argument_specs:
+ main:
+ version_added: "7.6.0"
+ short_description: Configures the physical disk, virtual disk, and storage controller settings
+ description:
+ - This role allows the users to configure the settings of the physical disk, virtual disk,
+ and storage controller on iDRAC9 based PowerEdge servers.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address.
+ username:
+ type: str
+ description: iDRAC username.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ controller_id:
+ required: true
+ description: The ID of the controller on which the operations need to be performed.
+ type: str
+ volumes:
+ type: dict
+ description: List of volumes that belong to I(controller_id).
+ options:
+ id:
+ required: true
+ type: str
+ description: Fully Qualified Device Descriptor (FQDD) of the volume.
+ dedicated_hot_spare:
+ type: str
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the physical disk to be assigned to the volume
+ as a dedicated hot spare.
+ encrypted:
+ type: bool
+ description: To encrypt the virtual disk.
+ expand_capacity_disk:
+ type: str
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the disk for expanding the capacity with
+ the existing disk.
+ - I(expand_capacity_size) is mutually exclusive with I(expand_capacity_disk).
+ expand_capacity_size:
+ type: str
+ description:
+ - Capacity of the virtual disk to be expanded in MB.
+ - Check mode and idempotency are not supported for I(expand_capacity_size).
+ - Minimum Online Capacity Expansion size must be greater than 100 MB of the current size.
+ - I(expand_capacity_disk) is mutually exclusive with I(expand_capacity_size).
+ blink:
+ type: bool
+ description:
+ - Blinks the target virtual disk and it always reports as changes found when
+ check mode is enabled.
+ disks:
+ type: dict
+ description: List of physical disks that belong to I(controller_id).
+ options:
+ id:
+ required: true
+ type: str
+ description: Fully Qualified Device Descriptor (FQDD) of the physical disk.
+ blink:
+ type: bool
+ description:
+ - Blinks the target physical disk and it always reports as changes found when
+ check mode is enabled.
+ raid_state:
+ type: str
+ description:
+ - Converts the disk from Non-RAID to RAID and vice versa.
+ - C(raid) converts the physical disk to RAID.
+ - C(nonraid) converts the physical disk to Non-RAID.
+ choices: [raid, nonraid]
+ status:
+ type: str
+ description:
+ - Converts the disk from online to offline and vice versa.
+ - C(online) converts the physical disk status to online.
+ - C(offline) converts the physical disk status to offline.
+ choices: [online, offline]
+ global_hot_spare:
+ type: bool
+ description:
+ - Assigns a global hot spare or unassigns a hot spare.
+ - C(true) assigns the disk as a global hot spare.
+ - C(false) unassigns the disk as a hot spare.
+ reset_config:
+ type: bool
+ description: To reset the controller.
+ set_controller_key:
+ type: bool
+ description:
+ - Set the security key or enable controller encryption.
+ - If I(mode) is provided, the controller encryption operation is performed; otherwise, the
+ controller security key is set.
+ - I(key) and I(key_id) are required for this operation.
+ rekey:
+ type: bool
+ description: Resets the key on the controller and it always reports as changes found when check mode is enabled.
+ remove_key:
+ type: bool
+ description: Remove the key on controllers.
+ key:
+ type: str
+ description:
+ - A new security key passphrase that the encryption-capable controller uses to create the
+ encryption key. The controller uses the encryption key to lock or unlock access to the Self-Encrypting
+ Drive (SED). Only one encryption key can be created for each controller.
+ - This is mandatory when I(set_controller_key) is C(true) or I(rekey) is C(true).
+ - The key can be a maximum of 32 characters long, where the expanded form of a special character
+ is counted as a single character.
+ - The key must contain at least one character from each of the following character classes:
+ uppercase, lowercase, number, and special character.
+ key_id:
+ type: str
+ description:
+ - This is a user supplied text label associated with the passphrase.
+ - This is mandatory when I(set_controller_key) is C(true) or I(rekey) is C(true).
+ - I(key_id) can be a maximum of 32 characters long and must not contain any spaces.
+ old_key:
+ type: str
+ description:
+ - Security key passphrase used by the encryption-capable controller.
+ - This option is mandatory when I(rekey) is C(true).
+ mode:
+ type: str
+ description:
+ - Encryption mode of the encryption capable controller.
+ - This option is mandatory when I(rekey) is C(true) and for enabling controller encryption.
+ - C(SEKM) to choose mode as secure enterprise key manager.
+ - C(LKM) to choose mode as local key management.
+ choices: [LKM, SEKM]
+ attributes:
+ type: dict
+ description:
+ - Dictionary of controller attributes and value pairs.
+ - This feature is only supported for iDRAC9 with firmware version 6.00.00.00 and above.
+ - I(controller_id) is required for this operation.
+ - I(apply_time) and I(maintenance_window) are applicable for I(attributes).
+ - Use U(https://I(idrac_ip)/redfish/v1/Schemas/DellOemStorageController.json) to view the attributes.
+ apply_time:
+ type: str
+ description:
+ - Apply time of the I(attributes).
+ - This is applicable only to I(attributes).
+ - C(Immediate) Allows the user to immediately reboot the host and apply the changes. I(job_wait)
+ is applicable.
+ - C(OnReset) Allows the user to apply the changes on the next reboot of the host server.
+ - C(AtMaintenanceWindowStart) Allows the user to apply the changes at the start of a maintenance window
+ as specified in I(maintenance_window).
+ - C(InMaintenanceWindowOnReset) Allows the users to apply after a manual reset but within the maintenance
+ window as specified in I(maintenance_window).
+ choices:
+ [
+ Immediate,
+ OnReset,
+ AtMaintenanceWindowStart,
+ InMaintenanceWindowOnReset,
+ ]
+ default: Immediate
+ maintenance_window:
+ type: dict
+ description:
+ - Option to schedule the maintenance window.
+ - This is required when I(apply_time) is C(AtMaintenanceWindowStart) or C(InMaintenanceWindowOnReset).
+ options:
+ start_time:
+ required: true
+ type: str
+ description:
+ - The start time for the maintenance window to be scheduled.
+ - The format is YYYY-MM-DDThh:mm:ss<offset>.
+ - <offset> is the time offset from UTC of the current time zone set in iDRAC; for example,
+ +05:30 for IST.
+ duration:
+ type: int
+ description: The duration in seconds for the maintenance window.
+ default: 900
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/main.yml
new file mode 100644
index 000000000..40024380a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/meta/main.yml
@@ -0,0 +1,26 @@
+galaxy_info:
+ role_name: idrac_storage_controller
+ author: Felix Stephen
+ description: The role performs the iDRAC storage controller operations.
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tasks/main.yml
new file mode 100644
index 000000000..0c23a324c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tasks/main.yml
@@ -0,0 +1,200 @@
+---
+- name: Setting up hostname with port
+ ansible.builtin.set_fact:
+ baseuri: "{{ hostname }}:{{ https_port }}"
+
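+# The next task stores the shared connection options once as a YAML anchor
+# (&idrac_inputs); every module call further down merges them back in with
+# "<<: *idrac_inputs" so the iDRAC credentials are defined in a single place.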
+- name: Setting common options
+ ansible.builtin.set_fact:
+ idrac_inputs: &idrac_inputs
+ baseuri: "{{ baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ timeout: "{{ https_timeout }}"
+ no_log: true
+
+- name: Storage controller reset configuration.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "ResetConfig"
+ controller_id: "{{ controller_id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: reset_config_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: reset_config is true
+
+- name: Storage controller attributes configuration.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ controller_id: "{{ controller_id }}"
+ attributes: "{{ attributes }}"
+ apply_time: "{{ apply_time }}"
+ maintenance_window: "{{ maintenance_window | default(omit) }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: attributes_config_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: attributes is defined
+
+- name: Storage set controller key configuration.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "SetControllerKey"
+ controller_id: "{{ controller_id }}"
+ key: "{{ key | default(omit) }}"
+ key_id: "{{ key_id | default(omit) }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: set_controller_key_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: set_controller_key is true and key is defined and key_id is defined and mode is undefined
+
+- name: Storage remove controller key configuration.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "RemoveControllerKey"
+ controller_id: "{{ controller_id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: remove_controller_key_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: remove_key is true
+
+- name: Storage controller rekey configuration.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "ReKey"
+ controller_id: "{{ controller_id }}"
+ key: "{{ key | default(omit) }}"
+ key_id: "{{ key_id | default(omit) }}"
+ old_key: "{{ old_key | default(omit) }}"
+ mode: "{{ mode | default(omit) }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: storage_controller_rekey
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: rekey is true
+
+- name: Storage enable controller encryption.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "EnableControllerEncryption"
+ controller_id: "{{ controller_id }}"
+ key: "{{ key | default(omit) }}"
+ key_id: "{{ key_id | default(omit) }}"
+ mode: "{{ mode | default(omit) }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: enable_encryption_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when:
+ set_controller_key is true and key is defined and key_id is defined and mode is defined
+
+- name: Change physical disk state.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "{{ 'ChangePDStateToOnline' if disks.status == 'online' else 'ChangePDStateToOffline' }}"
+ controller_id: "{{ controller_id }}"
+ target: "{{ disks.id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: pd_state_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: disks.status is defined
+
+- name: Change physical disk raid state.
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "{{ 'ConvertToRAID' if disks.raid_state == 'raid' else 'ConvertToNonRAID' }}"
+ controller_id: "{{ controller_id }}"
+ target: "{{ disks.id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: raid_state_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: disks.raid_state is defined
+
+- name: Assign dedicated hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "AssignSpare"
+ controller_id: "{{ controller_id }}"
+ volume_id: "{{ volumes.id }}"
+ target: "{{ volumes.dedicated_hot_spare }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: assign_dedicated_spare_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: volumes.id is defined and volumes.dedicated_hot_spare is defined
+
+- name: Assign global hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "AssignSpare"
+ controller_id: "{{ controller_id }}"
+ target: "{{ disks.id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: assign_global_spare_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: disks.id is defined and disks.global_hot_spare is true
+
+- name: Unassign hot spare
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "UnassignSpare"
+ controller_id: "{{ controller_id }}"
+ target: "{{ disks.id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: unassign_hotspare_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: disks.global_hot_spare is false and disks.id is defined
+
+- name: Lock virtual disk
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "LockVirtualDisk"
+ controller_id: "{{ controller_id }}"
+ volume_id: "{{ volumes.id }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: lock_vd_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: volumes.encrypted is true
+
+- name: Online capacity expansion of a volume using target
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "OnlineCapacityExpansion"
+ controller_id: "{{ controller_id }}"
+ volume_id: "{{ volumes.id }}"
+ target: "{{ volumes.expand_capacity_disk | default(omit) }}"
+ size: "{{ volumes.expand_capacity_size | default(omit) }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: oce_vd_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: volumes.expand_capacity_disk is defined or volumes.expand_capacity_size is defined
+
+- name: Blink and Un-blink virtual disk
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "{{ 'BlinkTarget' if volumes.blink is true else 'UnBlinkTarget' }}"
+ controller_id: "{{ controller_id }}"
+ volume_id: "{{ volumes.id }}"
+ register: blink_vd_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: volumes.blink is defined
+
+- name: Blink and Un-blink physical disk
+ dellemc.openmanage.idrac_redfish_storage_controller:
+ <<: *idrac_inputs
+ command: "{{ 'BlinkTarget' if disks.blink is true else 'UnBlinkTarget' }}"
+ controller_id: "{{ controller_id }}"
+ target: "{{ disks.id }}"
+ register: blink_pd_out
+ delegate_to: "{{ idrac_storage_controller_task_delegate }}"
+ when: disks.blink is defined
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/inventory b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/test.yml
new file mode 100644
index 000000000..7e20bbe3c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Executing iDRAC storage controller role
+ hosts: localhost
+ remote_user: root
+ roles:
+ - idrac_storage_controller
diff --git a/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/vars/main.yml
new file mode 100644
index 000000000..7bb065621
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/idrac_storage_controller/vars/main.yml
@@ -0,0 +1,2 @@
+---
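+# Delegation target for the module calls in this role; set the RUNON environment
+# variable to run them on another host, otherwise they run on localhost.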
+idrac_storage_controller_task_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/molecule.yml b/ansible_collections/dellemc/openmanage/roles/molecule.yml
new file mode 100644
index 000000000..231e04e0b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/molecule.yml
@@ -0,0 +1,37 @@
+---
+dependency:
+ name: galaxy
+ enabled: false
+driver:
+ name: podman
+platforms:
+ - name: centos
+ image: quay.io/centos/centos:stream8
+ pre_build_image: true
+ volumes:
+ - /tmp:/tmp
+provisioner:
+ name: ansible
+ env:
+ idrac_powercycle_wait_seconds: 180
+ idrac_certificate_delegate_to: localhost
+ redfish_firmware_image_url_need_reboot: "https://dl.dell.com/FOLDER07217671M/1/SAS-RAID_Firmware_700GG_WN64_25.5.9.0001_A17.EXE"
+ redfish_firmware_image_url_without_reboot: "https://dl.dell.com/FOLDER10335817M/1/Diagnostics_Application_31T12_WN64_4303A02_4303.2.EXE"
+ redfish_firmware_image_local_path: "/tmp/redfish_firmware"
+verifier:
+ name: ansible
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - prepare
+ - check
+ - converge
+ - idempotence
+ - side_effect
+ - verify
+ - cleanup
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/README.md b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/README.md
new file mode 100644
index 000000000..69f9989cf
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/README.md
@@ -0,0 +1,219 @@
+# redfish_firmware
+
+To perform a component firmware update using the image file available on the local or remote system.
+
+## Requirements
+
+---
+
+
+### Development
+Requirements to develop and contribute to the role.
+
+```
+ansible
+docker
+molecule
+python
+```
+
+### Production
+
+Requirements to use the role.
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+---
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>image_uri</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Firmware Image location URI or local path.
+ <br>- For example- U(http://<web_address>/components.exe) or /home/firmware_repo/component.exe.
+ </td>
+ </tr>
+ <tr>
+ <td>transfer_protocol</td>
+ <td>false</td>
+ <td>HTTP</td>
+ <td>"CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"</td>
+ <td>str</td>
+ <td>- Protocol used to transfer the firmware image file. Applicable for URI based update.</td>
+ </tr>
+ <tr>
+ <td>job_wait</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Provides the option to wait for job completion.</td>
+ </tr>
+ <tr>
+ <td>job_wait_timeout</td>
+ <td>false</td>
+ <td>3600</td>
+ <td></td>
+ <td>int</td>
+ <td>- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ <br>- This option is applicable when I(job_wait) is C(True).
+ </td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>redfish_firmware_out</td>
+ <td>{
+ msg: Successfully submitted the firmware update task.
+ task: {
+ "id": "JID_XXXXXXXXXXXX",
+ "uri": "/redfish/v1/TaskService/Tasks/JID_XXXXXXXXXXXX"
+ }
+ }</td>
+ <td>Returns the output of the firmware update status.</td>
+ </tr>
+ </tbody>
+</table>
+
+## Examples
+
+---
+
+```
+- name: Update the firmware from a single executable file available in HTTP protocol
+  ansible.builtin.include_role:
+    name: redfish_firmware
+  vars:
+    hostname: "192.168.0.1"
+    username: "username"
+    password: "password"
+    image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+    transfer_protocol: "HTTP"
+```
+
+```
+- name: Update the firmware from a single executable file available in a local path
+  ansible.builtin.include_role:
+    name: redfish_firmware
+  vars:
+    hostname: "192.168.0.1"
+    username: "username"
+    password: "password"
+    image_uri: "/home/firmware_repo/component.exe"
+```
+
+```
+- name: Update the firmware from a single executable file available in an HTTP protocol with job_wait_timeout
+  ansible.builtin.include_role:
+    name: redfish_firmware
+  vars:
+    hostname: "192.168.0.1"
+    username: "user_name"
+    password: "user_password"
+    ca_path: "/path/to/ca_cert.pem"
+    image_uri: "http://192.168.0.2/firmware_repo/component.exe"
+    transfer_protocol: "HTTP"
+    job_wait_timeout: 600
+```
+
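+The role registers the module output in the redfish_firmware_out fact described above. As a minimal sketch, the returned task can be inspected once the role has run:
+
+```
+- name: Show the submitted firmware update task
+  ansible.builtin.debug:
+    msg: "Task {{ redfish_firmware_out.task.id }} is tracked at {{ redfish_firmware_out.task.uri }}"
+```
+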
+## Author Information
+
+---
+
+Dell Technologies <br>
+Shivam Sharma (Shivam.Sharma3@Dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/defaults/main.yml
new file mode 100644
index 000000000..6a1d8fe6e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# defaults file for redfish_firmware
+
+https_port: 443
+validate_certs: true
+https_timeout: 30
+job_wait_timeout: 3600
+transfer_protocol: HTTP
+job_wait: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/handlers/main.yml
new file mode 100644
index 000000000..6c0214362
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for redfish_firmware
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/argument_specs.yml
new file mode 100644
index 000000000..1396b9bd8
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/argument_specs.yml
@@ -0,0 +1,60 @@
+---
+argument_specs:
+ main:
+ version_added: "7.5.0"
+ short_description: Update a component firmware using the image file available on the local or remote system
+ description:
+ - This role allows the firmware update of only one component at a time. If the role is run for more than one component, an error message is returned.
+ - Depending on the component, the firmware update is applied after an automatic or manual reboot.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address or hostname.
+ username:
+ type: str
+ description: iDRAC username with admin privileges.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(False), the SSL certificates will not be validated.
+ - Configure C(False) only on personally controlled sites where self-signed certificates are used.
+ - Prior to collection version 5.0.0, I(validate_certs) is C(False) by default.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The socket level timeout in seconds.
+ type: int
+ default: 30
+ image_uri:
+ description:
+ - Firmware Image location URI or local path.
+ - For example- U(http://<web_address>/components.exe) or /home/firmware_repo/component.exe.
+ type: str
+ required: true
+ transfer_protocol:
+ description: Protocol used to transfer the firmware image file. Applicable for URI based update.
+ type: str
+ default: HTTP
+ choices:
+ ["CIFS", "FTP", "HTTP", "HTTPS", "NSF", "OEM", "SCP", "SFTP", "TFTP"]
+ job_wait:
+ description: Provides the option to wait for job completion.
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 3600
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/main.yml
new file mode 100644
index 000000000..f9b757aff
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/meta/main.yml
@@ -0,0 +1,55 @@
+galaxy_info:
+ author: "Shivam Sharma"
+ description: To perform a component firmware update using the image file available on the local or remote system.
+ company: Dell Technologies
+
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.13"
+
+ # If this a Container Enabled role, provide the minimum Ansible Container version.
+ # min_ansible_container_version:
+
+ #
+ # Provide a list of supported platforms, and for each platform a list of versions.
+ # If you don't wish to enumerate all versions for a particular platform, use 'all'.
+ # To view available platforms and versions (or releases), visit:
+ # https://galaxy.ansible.com/api/v1/platforms/
+ #
+ platforms:
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+
+ galaxy_tags:
+ []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
+
+dependencies:
+ []
+ # List your role dependencies here, one per line. Be sure to remove the '[]' above,
+ # if you add dependencies to this list.
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/converge.yml
new file mode 100644
index 000000000..26d2c476c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/converge.yml
@@ -0,0 +1,74 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ redfish_firmware_image_url_without_reboot: "{{ lookup('env', 'redfish_firmware_image_url_without_reboot') }}"
+ redfish_firmware_image_url_need_reboot: "{{ lookup('env', 'redfish_firmware_image_url_need_reboot') }}"
+ redfish_firmware_image_local_path: "{{ lookup('env', 'redfish_firmware_image_local_path') }}"
+ gather_facts: false
+ tasks:
+ - name: Checking if HTTP url is empty
+ ansible.builtin.fail:
+ msg: Please provide HTTP url for redfish_firmware in molecule.yml
+ when: redfish_firmware_image_url_without_reboot == "" and redfish_firmware_image_url_need_reboot == ""
+
+ - name: Downloading firmware image for local manual reboot
+ ansible.builtin.get_url:
+ url: "{{ redfish_firmware_image_url_need_reboot }}"
+ dest: "{{ redfish_firmware_image_local_path }}"
+ mode: "0755"
+ delegate_to: localhost
+
+ - name: Initializing idrac common inputs to use as aliases
+ ansible.builtin.set_fact:
+ common_input: &idrac_input
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ no_log: true
+
+ - name: TC - 001 - Provide valid HTTP image_uri with default transfer_protocol with auto reboot
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ <<: *idrac_input
+ image_uri: "{{ redfish_firmware_image_url_without_reboot }}"
+ job_wait_timeout: 3600
+
+ - name: TC - 001 - Asserting after performing update with valid image_uri and default transfer_protocol with auto reboot
+ ansible.builtin.assert:
+ that:
+ - redfish_firmware_out.msg == 'Successfully updated the firmware.'
+ - redfish_firmware_out.changed
+
+ - name: TC - 002 - Provide valid local image_uri firmware to schedule job with job_wait_timeout 60 secs
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ <<: *idrac_input
+ image_uri: "{{ redfish_firmware_image_url_need_reboot }}"
+ job_wait_timeout: 60
+
+ - name: TC - 002 - Asserting after performing update with local image_uri firmware to schedule job
+ ansible.builtin.assert:
+ that:
+ - redfish_firmware_out.msg == "Successfully scheduled the firmware job."
+ - redfish_firmware_out.changed
+
+ - name: TC - 002 - Deleting the schedule job in iDRAC
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}/redfish/v1/JobService/Jobs/{{ redfish_firmware_out.task.id }}"
+ validate_certs: false
+ method: "DELETE"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ status_code:
+ - 200
+ return_content: true
+ no_log: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/molecule.yml
new file mode 100644
index 000000000..484f43c65
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/default/molecule.yml
@@ -0,0 +1,15 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/converge.yml
new file mode 100644
index 000000000..86196dc82
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/converge.yml
@@ -0,0 +1,174 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+ vars:
+ redfish_firmware_image_url_without_reboot: "{{ lookup('env', 'redfish_firmware_image_url_without_reboot') }}"
+ redfish_firmware_image_url_need_reboot: "{{ lookup('env', 'redfish_firmware_image_url_need_reboot') }}"
+ redfish_firmware_image_local_path: "{{ lookup('env', 'redfish_firmware_image_local_path') }}"
+ tasks:
+ - name: Checking if HTTP url is empty
+ ansible.builtin.fail:
+ msg: Please provide HTTP url for redfish_firmware in molecule.yml
+ when: redfish_firmware_image_url_without_reboot == "" and redfish_firmware_image_url_need_reboot == ""
+
+ - name: Downloading firmware image for local manual reboot
+ ansible.builtin.get_url:
+ url: "{{ redfish_firmware_image_url_need_reboot }}"
+ dest: "{{ redfish_firmware_image_local_path }}"
+ mode: "0755"
+ delegate_to: localhost
+
+ - name: TC - 003 - Provide wrong hostname
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "randomHostname"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ image_uri: "{{ redfish_firmware_image_url_need_reboot }}"
+ job_wait_timeout: 60
+ ignore_errors: true
+ ignore_unreachable: true
+ register: redfish_firmware_error_msg
+
+ - name: TC - 003 - Asserting wrong hostname
+ ansible.builtin.assert:
+ that:
+ - redfish_firmware_out.msg == "<urlopen error [Errno -2] Name or service not known>" or
+ redfish_firmware_out.msg == "<urlopen error Unable to resolve hostname or IP randomHostname:443.>"
+
+ - name: TC - 004 - Provide wrong username
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "WrongUsername123"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ image_uri: "{{ redfish_firmware_image_url_need_reboot }}"
+ job_wait_timeout: 60
+ ignore_errors: true
+ ignore_unreachable: true
+ register: redfish_firmware_error_msg
+
+ - name: TC - 004 - Asserting wrong username
+ ansible.builtin.assert:
+ that:
+ - '"HTTP Error 401" in redfish_firmware_out.msg'
+
+ - name: TC - 005 - Provide wrong password
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "WrongPassword@123"
+ validate_certs: false
+ image_uri: "{{ redfish_firmware_image_url_need_reboot }}"
+ job_wait_timeout: 60
+ ignore_errors: true
+ ignore_unreachable: true
+ register: redfish_firmware_error_msg
+
+ - name: TC - 005 - Asserting wrong password
+ ansible.builtin.assert:
+ that: |-
+ ('"HTTP Error 401" in redfish_firmware_out.msg')
+ or
+ ('"urlopen error timed out" in redfish_firmware_out.msg')
+
+ - name: TC - 006 - Providing invalid validate_certs
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: 'someStringValue'
+ image_uri: "{{ redfish_firmware_image_url_need_reboot }}"
+ job_wait_timeout: 60
+ ignore_errors: true
+ ignore_unreachable: true
+ register: redfish_firmware_error_msg
+
+ - name: TC - 006 - Asserting invalid validate_certs
+ ansible.builtin.assert:
+ that:
+ - '"Valid booleans include" in redfish_firmware_out.msg'
+
+ - name: TC - 007 - Providing invalid local path in image_uri
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ image_uri: "/tmp/file_not_exists.iso"
+ job_wait_timeout: 60
+ ignore_errors: true
+ register: redfish_firmware_error_msg
+
+ - name: TC - 007 - Asserting invalid local path in image_uri
+ ansible.builtin.assert:
+ that:
+ - '"No such file or directory" in redfish_firmware_out.msg'
+
+ - name: TC - 008 - Providing invalid HTTP in image_uri
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ image_uri: "http://www.example.com"
+ job_wait_timeout: 60
+ ignore_errors: true
+ register: redfish_firmware_error_msg
+
+ - name: TC - 008 - Asserting invalid HTTP in image_uri
+ ansible.builtin.assert:
+ that:
+ - redfish_firmware_out.msg == "Firmware update failed."
+ - not redfish_firmware_out.changed
+
+ - name: TC - 009 - Providing job_wait_timeout as -100 with auto reboot firmware component
+ ansible.builtin.import_role:
+ name: redfish_firmware
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ image_uri: "{{ redfish_firmware_image_url_without_reboot }}"
+ job_wait_timeout: -100
+
+ - name: TC - 009 - Asserting job_wait_timeout as -100 with auto reboot firmware component
+ ansible.builtin.assert:
+ that:
+ - redfish_firmware_out.msg == "Successfully submitted the firmware update task."
+ - redfish_firmware_out.changed
+
+ - name: TC - 009 - Waiting for job completion
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}/redfish/v1/JobService/Jobs/{{ redfish_firmware_out.task.id }}"
+ validate_certs: false
+ method: "GET"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ headers:
+ Accept: "application/json"
+ Content-Type: "application/json"
+ OData-Version: "4.0"
+ body_format: "json"
+ status_code:
+ - 200
+ return_content: true
+ register: job_result
+ until: job_result.json.JobState == 'Completed' or job_result.json.JobState == 'Failed'
+ retries: 30
+ delay: 10
+ when: redfish_firmware_out.changed # noqa: no-handler
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/molecule.yml
new file mode 100644
index 000000000..484f43c65
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/negative/molecule.yml
@@ -0,0 +1,15 @@
+---
+provisioner:
+ name: ansible
+ playbooks:
+ prepare: ../resources/prepare.yml
+ cleanup: ../resources/cleanup.yml
+scenario:
+ test_sequence:
+ - dependency
+ - cleanup
+ - destroy
+ - syntax
+ - create
+ - converge
+ - destroy
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/cleanup.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/cleanup.yml
new file mode 100644
index 000000000..bfffc48fd
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/cleanup.yml
@@ -0,0 +1,16 @@
+- name: Cleanup
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Check if directory exists
+ ansible.builtin.stat:
+ path: "{{ lookup('env', 'redfish_firmware_image_local_path') }}"
+ register: directory_check
+ delegate_to: localhost
+
+ - name: Delete directory if it exists
+ ansible.builtin.file:
+ path: "{{ lookup('env', 'redfish_firmware_image_local_path') }}"
+ state: absent
+ when: directory_check.stat.exists
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/prepare.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/prepare.yml
new file mode 100644
index 000000000..17c1a617a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/molecule/resources/prepare.yml
@@ -0,0 +1,17 @@
+- name: Prepare
+ hosts: all
+ gather_facts: false
+ tasks:
+ - name: Checking if path exists
+ ansible.builtin.stat:
+ path: "{{ lookup('env', 'redfish_firmware_image_local_path') }}"
+ register: directory_check
+ delegate_to: localhost
+
+ - name: Create directory if it doesn't exist
+ ansible.builtin.file:
+ path: "{{ lookup('env', 'redfish_firmware_image_local_path') }}"
+ state: directory
+ mode: "0755"
+ when: not directory_check.stat.exists
+ delegate_to: localhost
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tasks/main.yml
new file mode 100644
index 000000000..418e8a7ac
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# tasks file for redfish_firmware
+- name: Update the firmware from a single executable file.
+ dellemc.openmanage.redfish_firmware:
+ baseuri: "{{ hostname }}:{{ https_port }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ timeout: "{{ https_timeout }}"
+ image_uri: "{{ image_uri }}"
+ transfer_protocol: "{{ transfer_protocol }}"
+ job_wait: "{{ job_wait }}"
+ job_wait_timeout: "{{ job_wait_timeout }}"
+ register: redfish_firmware_out
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/inventory b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/test.yml
new file mode 100644
index 000000000..8a43f29b7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/tests/test.yml
@@ -0,0 +1,5 @@
+---
+- name: Upgrading Firmware
+ hosts: all
+ roles:
+ - redfish_firmware
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_firmware/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/vars/main.yml
new file mode 100644
index 000000000..849f79286
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_firmware/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for redfish_firmware
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/README.md b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/README.md
new file mode 100644
index 000000000..fa023fb22
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/README.md
@@ -0,0 +1,417 @@
+# redfish_storage_volume
+
+Role to create, modify, initialize, or delete a single storage volume.
+
+## Requirements
+
+### Development
+
+Requirements to develop and contribute to the role.
+
+```
+ansible
+docker
+molecule
+python
+```
+
+### Production
+
+Requirements to use the role.
+
+```
+ansible
+python
+```
+
+### Ansible collections
+
+Collections required to use the role
+
+```
+dellemc.openmanage
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Default Value</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC IP Address</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC username</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- iDRAC user password.</td>
+ </tr>
+ <tr>
+ <td>https_port</td>
+ <td>false</td>
+ <td>443</td>
+ <td></td>
+ <td>int</td>
+ <td>- iDRAC port.</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- If C(false), the SSL certificates will not be validated.<br>- Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ </tr>
+ <tr>
+ <td>ca_path</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>path</td>
+ <td>- The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.</td>
+ </tr>
+ <tr>
+ <td>https_timeout</td>
+ <td>false</td>
+ <td>30</td>
+ <td></td>
+ <td>int</td>
+ <td>- The HTTPS socket level timeout in seconds.</td>
+ </tr>
+ <tr>
+ <td>controller_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Fully Qualified Device Descriptor (FQDD) of the storage controller. For example, RAID.Slot.1-1.</br>- This option is mandatory when I(state) is C(present) while creating a volume.</td>
+ </tr>
+ <tr>
+ <td>volume_id</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- FQDD of an existing volume. For example, Disk.Virtual.4:RAID.Slot.1-1.</br>- This option is mandatory in the following scenarios: I(state) is C(present), when updating a volume; I(state) is C(absent), when deleting a volume; I(command) is C(initialize), when initializing a volume.</td>
+ </tr>
+ <tr>
+ <td>state</td>
+ <td>false</td>
+ <td>present</td>
+ <td>[present, absent]</td>
+ <td>str</td>
+ <td>- C(present) creates a storage volume for the specified I(controller_id), or modifies the storage volume for the specified I(volume_id). Note: Modification of an existing volume's properties depends on drive and controller capabilities.</br>- C(absent) deletes the volume for the specified I(volume_id).</td>
+ </tr>
+ <tr>
+ <td>command</td>
+ <td>false</td>
+ <td></td>
+ <td>[initialize]</td>
+ <td>str</td>
+ <td>- C(initialize) initializes an existing storage volume for a specified I(volume_id).</td>
+ </tr>
+ <tr>
+ <td>raid_type</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- One of the following raid types must be selected to create a volume.</br>
+ - C(RAID0) to create a RAID0 type volume.</br>
+ - C(RAID1) to create a RAID1 type volume.</br>
+ - C(RAID5) to create a RAID5 type volume.</br>
+ - C(RAID6) to create a RAID6 type volume.</br>
+ - C(RAID10) to create a RAID10 type volume.</br>
+ - C(RAID50) to create a RAID50 type volume.</br>
+ - C(RAID60) to create a RAID60 type volume.</td>
+ </tr>
+ <tr>
+ <td>name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Name of the volume to be created.</br>
+- Only applicable when I(state) is C(present).</br>
+- This will be deprecated. Please use I(volume_name) for specifying the volume name.</td>
+ </tr>
+ <tr>
+ <td>volume_name</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Name of the volume to be created.</br>
+- Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>drives</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>list</td>
+ <td>- FQDD of the Physical disks. For example- Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1. </br>- Only applicable when I(state) is C(present) when creating a new volume.</td>
+ </tr>
+ <tr>
+ <td>block_size_bytes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>int</td>
+ <td>- Block size in bytes. Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>capacity_bytes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>str</td>
+ <td>- Volume size in bytes.</br>- Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>optimum_io_size_bytes</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>int</td>
+ <td>- Stripe size value must be in multiples of 64 * 1024.</br>- Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>encryption_types</td>
+ <td>false</td>
+ <td></td>
+ <td>[NativeDriveEncryption, ControllerAssisted, SoftwareAssisted]</td>
+ <td>str</td>
+ <td>- The following encryption types can be selected.</br>
+C(ControllerAssisted) The volume is encrypted by the storage controller entity.</br>
+C(NativeDriveEncryption) The volume utilizes the native drive encryption capabilities of the drive hardware.</br>
+C(SoftwareAssisted) The volume is encrypted by the software running on the system or the operating system.</br>
+Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>encrypted</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>bool</td>
+ <td>- Indicates whether volume is currently utilizing encryption or not.</br>- Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>oem</td>
+ <td>false</td>
+ <td></td>
+ <td></td>
+ <td>dict</td>
+ <td>- Includes OEM extended payloads.</br>- Only applicable when I(state) is C(present).</td>
+ </tr>
+ <tr>
+ <td>initialize_type</td>
+ <td>false</td>
+ <td>Fast</td>
+ <td>[Fast, Slow]</td>
+ <td>str</td>
+ <td>- Initialization type of existing volume.</br> Only applicable when I(command) is C(initialize).</td>
+ </tr>
+ <tr>
+ <td>job_wait</td>
+ <td>false</td>
+ <td>true</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Determines whether to wait for the job completion or not.</td>
+ </tr>
+ <tr>
+ <td>job_wait_timeout</td>
+ <td>false</td>
+ <td>1200</td>
+ <td></td>
+ <td>int</td>
+ <td>- The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.</br>- This option is applicable when I(job_wait) is C(True).</td>
+ </tr>
+ <tr>
+ <td>apply_time</td>
+ <td>false</td>
+ <td></td>
+ <td>[Immediate, OnReset]</td>
+ <td>str</td>
+ <td>- Apply time of the Volume configuration.</br>
+- C(Immediate) allows you to apply the volume configuration on the host server immediately. This is applicable for I(job_wait).</br>
+- C(OnReset) allows you to apply the changes on the next reboot of the host server.</br>
+- I(apply_time) has a default value based on the different types of the controller.</br>
+- For example, BOSS-S1 and BOSS-N1 controllers have a default value of I(apply_time) as C(OnReset).</br>
+- PERC controllers have a default value of I(apply_time) as C(Immediate).</td>
+ </tr>
+ <tr>
+ <td>reboot_server</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Reboot the server to apply the changes.</br>
+- I(reboot_server) is applicable only when I(apply_time) is C(OnReset) or when the default value for the apply time of the controller is C(OnReset).</td>
+ </tr>
+ <tr>
+ <td>force_reboot</td>
+ <td>false</td>
+ <td>false</td>
+ <td></td>
+ <td>bool</td>
+ <td>- Reboot the server forcefully to apply the changes when the normal reboot fails.</br>
+- I(force_reboot) is applicable only when I(reboot_server) is C(true).</td>
+ </tr>
+</tbody>
+</table>
+
+## Fact variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Sample</th>
+ <th>Description</th>
+ </tr>
+</thead>
+ <tbody>
+ <tr>
+ <td>redfish_storage_volume_out</td>
+ <td>{"changed": true,
+ "failed": false,
+ "msg": "Successfully submitted create volume task."
+}</td>
+<td>Module output of the redfish_storage_volume role.</td>
+</tr>
+</tbody>
+</table>
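+
+The role registers the module result in the redfish_storage_volume_out fact, so tasks that run after the role in the same play can inspect it. The sketch below is illustrative only; the task names are assumptions, and the keys referenced (msg, failed) are taken from the sample above.
+
+```yml
+- name: Show the message returned by the role
+  ansible.builtin.debug:
+    var: redfish_storage_volume_out.msg
+
+- name: Stop the play if the role reported a failure
+  ansible.builtin.assert:
+    that:
+      - redfish_storage_volume_out is defined
+      - not redfish_storage_volume_out.failed
+```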
+
+## Examples
+
+---
+
+```yml
+- name: Create a volume with supported options
+ ansible.builtin.include_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ raid_type: "RAID1"
+ volume_name: "VD0"
+ controller_id: "RAID.Slot.1-1"
+ drives:
+ - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1
+ block_size_bytes: 512
+ capacity_bytes: 299439751168
+ optimum_io_size_bytes: 65536
+ encryption_types: NativeDriveEncryption
+ encrypted: true
+```
+
+```yml
+- name: Create a volume with apply time
+ ansible.builtin.include_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ raid_type: "RAID6"
+ volume_name: "Raid6_VD"
+ controller_id: "RAID.Slot.1-1"
+ drives:
+ - Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.2:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.5:Enclosure.Internal.0-1:RAID.Slot.1-1
+ - Disk.Bay.6:Enclosure.Internal.0-1:RAID.Slot.1-1
+ apply_time: OnReset
+ reboot_server: true
+ force_reboot: true
+```
+
+```yml
+- name: Create a volume with minimum options
+ ansible.builtin.include_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ controller_id: "RAID.Slot.1-1"
+ raid_type: "RAID0"
+ drives:
+ - Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1
+```
+
+```yml
+- name: Modify a volume's encryption type settings
+ ansible.builtin.include_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "present"
+ volume_id: "Disk.Virtual.5:RAID.Slot.1-1"
+ encryption_types: "ControllerAssisted"
+ encrypted: true
+```
+
+```yml
+- name: Delete an existing volume
+ ansible.builtin.include_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ state: "absent"
+ volume_id: "Disk.Virtual.5:RAID.Slot.1-1"
+```
+
+```yml
+- name: Initialize an existing volume
+ ansible.builtin.include_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "192.168.0.1"
+ username: "username"
+ password: "password"
+ command: "initialize"
+ volume_id: "Disk.Virtual.6:RAID.Slot.1-1"
+ initialize_type: "Slow"
+```
+
+## Author Information
+
+---
+
+Dell Technologies <br>
+Kritika Bhateja (Kritika.Bhateja@Dell.com) 2023
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/defaults/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/defaults/main.yml
new file mode 100644
index 000000000..3c7eeccfe
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# defaults file for redfish_storage_volume
+validate_certs: true
+https_timeout: 30
+https_port: 443
+initialize_type: "Fast"
+job_wait: true
+job_wait_timeout: 1200
+reboot_server: false
+force_reboot: false
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/handlers/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/handlers/main.yml
new file mode 100644
index 000000000..adce9c861
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/handlers/main.yml
@@ -0,0 +1,2 @@
+---
+# handlers file for redfish_storage_volume
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/argument_specs.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/argument_specs.yml
new file mode 100644
index 000000000..fdaf8ea52
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/argument_specs.yml
@@ -0,0 +1,176 @@
+---
+argument_specs:
+ main:
+ version_added: "7.5.0"
+ short_description: Role to manage the storage volume configuration
+ description:
+ - Role to create, modify, initialize, or delete a single storage volume.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: iDRAC IP Address or hostname.
+ username:
+ type: str
+ description: iDRAC username with admin privileges.
+ password:
+ type: str
+ description: iDRAC user password.
+ https_port:
+ type: int
+ description: iDRAC port.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: true
+ ca_path:
+ description:
+ - The Privacy Enhanced Mail (PEM) file that contains a CA certificate to be used for the validation.
+ type: path
+ https_timeout:
+ description: The HTTPS socket level timeout in seconds.
+ type: int
+ default: 30
+ controller_id:
+ description:
+ - Fully Qualified Device Descriptor (FQDD) of the storage controller.
+ - For example- RAID.Slot.1-1.
+ - This option is mandatory when I(state) is C(present) while creating a volume.
+ type: str
+ volume_id:
+ description:
+ - FQDD of existing volume.
+ - For example- Disk.Virtual.4:RAID.Slot.1-1.
+ - This option is mandatory in the following scenarios,
+ - >-
+ I(state) is C(present), when updating a volume.
+ - >-
+ I(state) is C(absent), when deleting a volume.
+ - >-
+ I(command) is C(initialize), when initializing a volume.
+ type: str
+ state:
+ description:
+ - >-
+ C(present) creates a storage volume for the specified I(controller_id), or modifies the storage volume for the
+ specified I(volume_id).
+ Note: Modification of an existing volume's properties depends on drive and controller capabilities.
+ - C(absent) deletes the volume for the specified I(volume_id).
+ type: str
+ choices: [present, absent]
+ command:
+ description:
+ - C(initialize) initializes an existing storage volume for a specified I(volume_id).
+ type: str
+ choices: [initialize]
+ raid_type:
+ description:
+ - One of the following raid types must be selected to create a volume for firmware version 4.40 and above.
+ - C(RAID0) to create a RAID0 type volume.
+ - C(RAID1) to create a RAID1 type volume.
+ - C(RAID5) to create a RAID5 type volume.
+ - C(RAID6) to create a RAID6 type volume.
+ - C(RAID10) to create a RAID10 type volume.
+ - C(RAID50) to create a RAID50 type volume.
+ - C(RAID60) to create a RAID60 type volume.
+ type: str
+ choices: ["RAID0", "RAID1", "RAID5", "RAID6", "RAID10", "RAID50", "RAID60"]
+ name:
+ description:
+ - Name of the volume to be created.
+ - Only applicable when I(state) is C(present).
+ - This will be deprecated. Please use I(volume_name) for specifying the volume name.
+ type: str
+ volume_name:
+ description:
+ - Name of the volume to be created.
+ - Only applicable when I(state) is C(present).
+ type: str
+ drives:
+ description:
+ - FQDD of the Physical disks.
+ - For example- Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1.
+ - Only applicable when I(state) is C(present) when creating a new volume.
+ type: list
+ elements: str
+ block_size_bytes:
+ description:
+ - Block size in bytes. Only applicable when I(state) is C(present).
+ type: int
+ capacity_bytes:
+ description:
+ - Volume size in bytes.
+ - Only applicable when I(state) is C(present).
+ type: str
+ optimum_io_size_bytes:
+ description:
+ - Stripe size value must be in multiples of 64 * 1024.
+ - Only applicable when I(state) is C(present).
+ type: int
+ encryption_types:
+ description:
+ - The following encryption types can be selected.
+ - C(ControllerAssisted) The volume is encrypted by the storage controller entity.
+ - C(NativeDriveEncryption) The volume utilizes the native drive encryption capabilities
+ of the drive hardware.
+ - C(SoftwareAssisted) The volume is encrypted by the software running
+ on the system or the operating system.
+ - Only applicable when I(state) is C(present).
+ type: str
+ choices: [NativeDriveEncryption, ControllerAssisted, SoftwareAssisted]
+ encrypted:
+ description:
+ - Indicates whether volume is currently utilizing encryption or not.
+ - Only applicable when I(state) is C(present).
+ type: bool
+ oem:
+ description:
+ - Includes OEM extended payloads.
+ - Only applicable when I(state) is C(present).
+ type: dict
+ initialize_type:
+ description:
+ - Initialization type of existing volume.
+ - Only applicable when I(command) is C(initialize).
+ type: str
+ choices: [Fast, Slow]
+ default: Fast
+ job_wait:
+ description: Determines whether to wait for the job completion or not.
+ type: bool
+ default: true
+ job_wait_timeout:
+ description:
+ - The maximum wait time of I(job_wait) in seconds. The job is tracked only for this duration.
+ - This option is applicable when I(job_wait) is C(True).
+ type: int
+ default: 1200
+ apply_time:
+ description:
+ - Apply time of the Volume configuration.
+ - C(Immediate) allows you to apply the volume configuration on the host server immediately. This is applicable for I(job_wait).
+ - C(OnReset) allows you to apply the changes on the next reboot of the host server.
+ - I(apply_time) has a default value based on the different types of the controller.
+ - For example, BOSS-S1 and BOSS-N1 controllers have a default value of I(apply_time) as C(OnReset).
+ - PERC controllers have a default value of I(apply_time) as C(Immediate).
+ type: str
+ choices: [Immediate, OnReset]
+ version_added: 8.5.0
+ reboot_server:
+ description:
+ - Reboot the server to apply the changes.
+ - I(reboot_server) is applicable only when I(apply_time) is C(OnReset) or
+ when the default value for the apply time of the controller is C(OnReset).
+ type: bool
+ default: false
+ version_added: 8.5.0
+ force_reboot:
+ description:
+ - Reboot the server forcefully to apply the changes when the normal reboot fails.
+ - I(force_reboot) is applicable only when I(reboot_server) is C(true).
+ type: bool
+ default: false
+ version_added: 8.5.0
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/main.yml
new file mode 100644
index 000000000..23e4223fa
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/meta/main.yml
@@ -0,0 +1,22 @@
+galaxy_info:
+ author: |
+ "Kritika-Bhateja
+ Lovepreet-Singh"
+ description: The role helps to manage the storage volume configuration.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.13"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+ galaxy_tags: []
+dependencies: []
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/converge.yml
new file mode 100644
index 000000000..f7cbf1ba7
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/converge.yml
@@ -0,0 +1,83 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID0"
+ redfish_storage_volume_minimum_drives: 1
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: To check the behaviour of RAID0.
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID0/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/converge.yml
new file mode 100644
index 000000000..8f87e24e4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/converge.yml
@@ -0,0 +1,83 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID1"
+ redfish_storage_volume_minimum_drives: 2
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: To check the behaviour of Mirrored.
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID1/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/converge.yml
new file mode 100644
index 000000000..2e0749638
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/converge.yml
@@ -0,0 +1,83 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID10"
+ redfish_storage_volume_minimum_drives: 4
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: To check the behaviour of SpannedMirrors.
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID10/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/converge.yml
new file mode 100644
index 000000000..c4a5c2591
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/converge.yml
@@ -0,0 +1,83 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID5"
+ redfish_storage_volume_minimum_drives: 3
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: To check the behaviour of StripedWithParity.
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID5/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/converge.yml
new file mode 100644
index 000000000..ef22a880c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/converge.yml
@@ -0,0 +1,83 @@
+---
+- name: Converge
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID50"
+ redfish_storage_volume_minimum_drives: 6
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: To check the behaviour of SpannedStripesWithParity.
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID50/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/converge.yml
new file mode 100644
index 000000000..0963a5eb3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/converge.yml
@@ -0,0 +1,80 @@
+---
+- name: TC-124007 - Ansible - Role- redfish_storage_volume - [Check Mode] [Idempotency] Validate creating RAID6 VD
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID6"
+ redfish_storage_volume_minimum_drives: 4
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for PERC, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_PERC"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID6/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/converge.yml
new file mode 100644
index 000000000..ae9e67945
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/converge.yml
@@ -0,0 +1,80 @@
+---
+- name: TC-124010 - Ansible - Role- redfish_storage_volume - [Check Mode] [Idempotency] Validate creating RAID60 VD
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID60"
+ redfish_storage_volume_minimum_drives: 8
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for PERC, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_PERC"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/RAID60/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__delete_virtual_drive.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__delete_virtual_drive.yml
new file mode 100644
index 000000000..df94bc905
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__delete_virtual_drive.yml
@@ -0,0 +1,33 @@
+---
+- name: Waiting for 10 seconds before fetching data
+ ansible.builtin.wait_for:
+ timeout: 10
+
+- name: Fetching Volume_id from iDRAC
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ url: "Systems/System.Embedded.1/Storage/{{ redfish_storage_volume_controller_id }}/Volumes"
+
+- name: Set fact for redfish_storage_volume_volume_id_list
+ ansible.builtin.set_fact:
+ redfish_storage_volume_volume_id_list: []
+
+- name: Extracting volume_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_volume_id_list: "{{ redfish_storage_volume_volume_id_list + [item['@odata.id'] | ansible.builtin.split('/') | last] }}"
+ loop: "{{ redfish_storage_volume_fetched_output.json.Members }}"
+
+- name: "Deleting virtual disk in {{ controller_id }}"
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: absent
+ volume_id: "{{ redfish_storage_volume_volume_id_list[0] }}"
+ reboot_server: true
+ job_wait: true
+ when: redfish_storage_volume_volume_id_list is defined and redfish_storage_volume_volume_id_list != []
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__extract_storage.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__extract_storage.yml
new file mode 100644
index 000000000..7be337e92
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__extract_storage.yml
@@ -0,0 +1,27 @@
+---
+- name: Fetching Storage controller from iDRAC
+ ansible.builtin.include_tasks:
+ file: ../__get_helper.yml
+ vars:
+ url: "Systems/System.Embedded.1/Storage?$expand=*($levels=1)"
+
+- name: Initializing set_fact variables
+ ansible.builtin.set_fact:
+ redfish_storage_volume_controller_id: ""
+ redfish_storage_volume_drive_list: []
+
+- name: Extracting Controller id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_controller_id: "{{ item.Id }}"
+ redfish_storage_volume_drive_list_odata: "{{ item.Drives }}"
+ when:
+ - redfish_storage_volume_search_in_name is defined and redfish_storage_volume_search_in_name in item.Name
+ - item.StorageControllers[0].SupportedRAIDTypes != []
+ - redfish_storage_volume_raid in item.StorageControllers[0].SupportedRAIDTypes
+ loop: "{{ redfish_storage_volume_fetched_output.json.Members }}"
+
+- name: Extracting Drives id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_drive_list: "{{ redfish_storage_volume_drive_list + [item['@odata.id'] | ansible.builtin.split('/') | last] }}"
+ loop: "{{ redfish_storage_volume_drive_list_odata }}"
+ when: redfish_storage_volume_drive_list_odata is defined
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__get_helper.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__get_helper.yml
new file mode 100644
index 000000000..ad8577667
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__get_helper.yml
@@ -0,0 +1,22 @@
+---
+- name: Waiting for LC status to be ready
+ ansible.builtin.include_tasks:
+ file: ../__lc_status.yml
+
+- name: Fetching data from iDRAC
+ ansible.builtin.uri:
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}/redfish/v1/{{ url }}"
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ method: GET
+ timeout: 30
+ body: {}
+ validate_certs: false
+ force_basic_auth: true
+ body_format: json
+ return_content: true
+ status_code: 200
+ headers: 'Accept=application/json'
+ register: redfish_storage_volume_fetched_output
+ when: url is defined
+ check_mode: false
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__idrac_reset.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__idrac_reset.yml
new file mode 100644
index 000000000..23f25ba5e
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__idrac_reset.yml
@@ -0,0 +1,23 @@
+---
+- name: GracefulRestart of iDRAC
+ ansible.builtin.import_role:
+ name: idrac_server_powerstate
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ reset_type: "GracefulRestart"
+ resource_id: "System.Embedded.1"
+
+- name: Waiting for 60 secs before LC tracking
+ ansible.builtin.wait_for:
+ timeout: 60
+ when:
+ - not ansible_check_mode
+ - idrac_server_powerstate_out is defined
+ - idrac_server_powerstate_out.changed
+
+- name: Waiting for LC status to be ready
+ ansible.builtin.include_tasks:
+ file: ../__lc_status.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__lc_status.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__lc_status.yml
new file mode 100644
index 000000000..9145e05af
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/__lc_status.yml
@@ -0,0 +1,22 @@
+---
+- name: Waiting for iDRAC to be in ready state
+ ansible.builtin.uri:
+ user: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ timeout: 30
+ validate_certs: false
+ return_content: true
+ force_basic_auth: true
+ body_format: "json"
+ headers:
+ Content-Type: "application/json"
+ Accept: "application/json"
+ OData-Version: "4.0"
+ url: "https://{{ lookup('env', 'IDRAC_IP') }}/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DellLCService/Actions/DellLCService.GetRemoteServicesAPIStatus"
+ method: POST
+ body: {}
+ until: status_result.json.Status == "Ready"
+ register: status_result
+ check_mode: false
+ retries: 120
+ delay: 30
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/converge.yml
new file mode 100644
index 000000000..934d2fb92
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/converge.yml
@@ -0,0 +1,156 @@
+---
+- name: TC-123778 - Ansible - Role - redfish_storage_volume - [Check Mode] [Idempotency] Validate creating RAID1 with apply_time with default values
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID1"
+ redfish_storage_volume_minimum_drives: 2
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for PERC, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_PERC"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Fetching data from iDRAC for BOSS
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'BOSS'
+
+ - name: Setting BOSS controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_boss_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for BOSS Controller
+ when: redfish_storage_volume_boss_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for BOSS, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_BOSS"
+ controller_id: "{{ redfish_storage_volume_boss_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ job_wait: false
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Waiting for idrac reset and LC tracking.
+ ansible.builtin.include_tasks:
+ file: ../__idrac_reset.yml
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Successfully submitted create volume task."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for BOSS
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'BOSS': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
+
+ - name: Executing if neither PERC nor BOSS is found.
+ ansible.builtin.debug:
+ msg: "iDRAC doesn't have PERC and BOSS raid controller."
+ when: redfish_storage_volume_perc_raid_controller_id is undefined and redfish_storage_volume_boss_raid_controller_id is undefined
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_default/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/converge.yml
new file mode 100644
index 000000000..abdb4792b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/converge.yml
@@ -0,0 +1,133 @@
+---
+- name: TC-123862 - Ansible - Role- redfish_storage_volume - [Check Mode] [Idempotency] Validate creating RAID1 with apply_time as Immediate
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID1"
+ redfish_storage_volume_minimum_drives: 2
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for PERC, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_PERC"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ optimum_io_size_bytes: 65536
+ apply_time: "Immediate"
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Fetching data from iDRAC for BOSS
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'BOSS'
+
+ - name: Setting BOSS controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_boss_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for BOSS Controller
+ when: redfish_storage_volume_boss_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "TC-123865 - Ansible - Role - redfish_storage_volume - Provide invalid value to apply_time"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_BOSS"
+ controller_id: "{{ redfish_storage_volume_boss_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ apply_time: "Immediate"
+ job_wait: true
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+ - name: Asserting after performing operation for invalid apply_time for BOSS controller.
+ ansible.builtin.assert:
+ that:
+              - redfish_storage_volume_out.msg == error_msg
+ vars:
+ error_msg: "Apply time Immediate is not supported. The supported values are ['OnReset']. Enter the valid values and retry the operation."
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
+
+    - name: Executing if neither PERC nor BOSS controller is found.
+      ansible.builtin.debug:
+        msg: "iDRAC doesn't have a PERC or BOSS RAID controller."
+ when: redfish_storage_volume_perc_raid_controller_id is undefined and redfish_storage_volume_boss_raid_controller_id is undefined
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_immediate/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/converge.yml
new file mode 100644
index 000000000..167f475ee
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/converge.yml
@@ -0,0 +1,157 @@
+---
+- name: TC-123863 - [Check Mode] [Idempotency] Validate creating RAID1 with apply_time(OnReset) with reboot_server(true) with force_reboot(false)
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID1"
+ redfish_storage_volume_minimum_drives: 2
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for PERC, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_PERC"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ apply_time: "OnReset"
+ reboot_server: true
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ vars:
+ redfish_storage_volume_reset: true
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Fetching data from iDRAC for BOSS
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'BOSS'
+
+ - name: Setting BOSS controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_boss_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for BOSS Controller
+ when: redfish_storage_volume_boss_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for BOSS, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_BOSS"
+ controller_id: "{{ redfish_storage_volume_boss_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ apply_time: "OnReset"
+ reboot_server: true
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for BOSS
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'BOSS': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
+
+    - name: Executing if neither PERC nor BOSS controller is found.
+      ansible.builtin.debug:
+        msg: "iDRAC doesn't have a PERC or BOSS RAID controller."
+ when: redfish_storage_volume_perc_raid_controller_id is undefined and redfish_storage_volume_boss_raid_controller_id is undefined
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/converge.yml
new file mode 100644
index 000000000..71014f248
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/converge.yml
@@ -0,0 +1,157 @@
+---
+- name: TC-123864 - [Check Mode] [Idempotency] Validate creating RAID1 with apply_time(OnReset) with reboot_server(true) with force_reboot(true)
+ hosts: all
+ vars:
+ redfish_storage_volume_failure: {}
+ redfish_storage_volume_raid: "RAID1"
+ redfish_storage_volume_minimum_drives: 2
+ gather_facts: false
+
+ tasks:
+ - name: Fetching data from iDRAC for PERC
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'PERC'
+
+ - name: Setting PERC controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_perc_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for PERC Controller
+ when: redfish_storage_volume_perc_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for PERC, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_PERC"
+ controller_id: "{{ redfish_storage_volume_perc_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ apply_time: "OnReset"
+ reboot_server: true
+ force_reboot: true
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for PERC
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'PERC': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Fetching data from iDRAC for BOSS
+ ansible.builtin.include_tasks:
+ file: ../__extract_storage.yml
+ vars:
+ redfish_storage_volume_search_in_name: 'BOSS'
+
+ - name: Setting BOSS controller_id
+ ansible.builtin.set_fact:
+ redfish_storage_volume_boss_raid_controller_id: "{{ redfish_storage_volume_controller_id }}"
+ when: redfish_storage_volume_controller_id != ""
+
+ - name: Running for BOSS Controller
+ when: redfish_storage_volume_boss_raid_controller_id is defined
+ block:
+ - name: "Checking minimum number of drives for {{ redfish_storage_volume_raid }}"
+ ansible.builtin.debug:
+ msg: "Minimum number of required drives: {{ redfish_storage_volume_minimum_drives }}, current: {{ redfish_storage_volume_drive_list | length }}"
+ when: redfish_storage_volume_drive_list | length < redfish_storage_volume_minimum_drives
+ failed_when: true
+
+ - name: "Create a volume for BOSS, raid_type {{ redfish_storage_volume_raid }}"
+ ansible.builtin.import_role:
+ name: dellemc.openmanage.redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ raid_type: "{{ redfish_storage_volume_raid }}"
+ volume_name: "VD_BOSS"
+ controller_id: "{{ redfish_storage_volume_boss_raid_controller_id }}"
+ drives: "{{ redfish_storage_volume_drive_list[:redfish_storage_volume_minimum_drives] }}"
+ apply_time: "OnReset"
+ reboot_server: true
+ force_reboot: true
+ job_wait: true
+
+ - name: Asserting operation with check mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Changes found to be applied."
+ when: ansible_check_mode
+
+ - name: Asserting operation with normal mode.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "The job is successfully completed."
+ when: not ansible_check_mode and redfish_storage_volume_out.changed
+
+ - name: Asserting operation with idempotence.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "No changes found to be applied."
+ when: not ansible_check_mode and not redfish_storage_volume_out.changed
+
+ rescue:
+ - name: Set the failure messages for BOSS
+ ansible.builtin.set_fact:
+ redfish_storage_volume_failure: "{{ redfish_storage_volume_failure | combine({'BOSS': {'msg': ansible_failed_result.msg,
+ 'failed_task_name': ansible_failed_task.name}}) }}"
+
+ always:
+ - name: Deleting VD
+ ansible.builtin.include_tasks:
+ file: ../__delete_virtual_drive.yml
+ when:
+ - not ansible_check_mode
+ - redfish_storage_volume_out is defined
+ - not redfish_storage_volume_out.changed
+
+ - name: Collecting failure
+ ansible.builtin.debug:
+ var: redfish_storage_volume_failure
+ when: redfish_storage_volume_failure
+ failed_when: true
+
+    - name: Executing if neither PERC nor BOSS controller is found.
+      ansible.builtin.debug:
+        msg: "iDRAC doesn't have a PERC or BOSS RAID controller."
+ when: redfish_storage_volume_perc_raid_controller_id is undefined and redfish_storage_volume_boss_raid_controller_id is undefined
+ failed_when: true
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/apply_time_onreset_reboot_server_true_force_reboot_true/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/converge.yml
new file mode 100644
index 000000000..72b2e5977
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/converge.yml
@@ -0,0 +1,132 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+
+ tasks:
+ - name: To check the behaviour of invalid hostname.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'INVALID_IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ volume_type: "NonRedundant"
+ name: "VD" # noqa: var-naming[no-reserved]
+ controller_id: "{{ lookup('env', 'CONTROLLER_ID') }}"
+ drives: "{{ lookup('env', 'PHYSICAL_DISK') }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: false
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+ - name: Asserting after performing operation.
+ ansible.builtin.assert:
+ that: |-
+ redfish_storage_volume_out.msg == "<urlopen error [Errno -2] Name or service not known>"
+
+ - name: To check the behaviour of invalid credentials.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'INVALID_IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ volume_type: "NonRedundant"
+ name: "VD" # noqa: var-naming[no-reserved]
+ controller_id: "{{ lookup('env', 'CONTROLLER_ID') }}"
+ drives: "{{ lookup('env', 'PHYSICAL_DISK') }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: false
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+ - name: Asserting after performing operation for invalid credentials.
+ ansible.builtin.assert:
+ that: |-
+ redfish_storage_volume_out.msg == "HTTP Error 401: Unauthorized"
+
+ - name: To check the behaviour of invalid span count.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ volume_type: "SpannedStripesWithParity"
+ name: "VD" # noqa: var-naming[no-reserved]
+ controller_id: "{{ lookup('env', 'CONTROLLER_ID') }}"
+ drives: "{{ lookup('env', 'PHYSICAL_DISK') }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: false
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+ - name: Asserting after performing operation for invalid span count.
+ ansible.builtin.assert:
+ that: |-
+ redfish_storage_volume_out.msg == "HTTP Error 400: Bad Request"
+
+ - name: To check the behaviour of invalid certificate path.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: true
+ ca_path: "{{ lookup('env', 'INVALID_CERT_PATH') }}"
+ state: present
+ volume_type: "SpannedStripesWithParity"
+ name: "VD" # noqa: var-naming[no-reserved]
+ controller_id: "{{ lookup('env', 'CONTROLLER_ID') }}"
+ drives: "{{ lookup('env', 'PHYSICAL_DISK') }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: false
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+ - name: Asserting after performing operation for invalid certificate path.
+ ansible.builtin.assert:
+ that: |-
+ "certificate verify failed" in redfish_storage_volume_out.msg
+
+ - name: To check the behaviour of invalid volume type.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ state: present
+ volume_type: "InvalidMirrored"
+ name: "VD" # noqa: var-naming[no-reserved]
+ controller_id: "{{ lookup('env', 'CONTROLLER_ID') }}"
+ drives: "{{ lookup('env', 'PHYSICAL_DISK') }}"
+ capacity_bytes: 214748364800
+ optimum_io_size_bytes: 65536
+ encrypted: false
+ job_wait: false
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+ - name: Asserting after performing operation for invalid volume type.
+ ansible.builtin.assert:
+ that: |-
+ redfish_storage_volume_out.msg == "value of volume_type must be one of: NonRedundant, Mirrored,
+ StripedWithParity, SpannedMirrors, SpannedStripesWithParity, got: InvalidMirrored"
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/default/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/converge.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/converge.yml
new file mode 100644
index 000000000..a76faebd4
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/converge.yml
@@ -0,0 +1,55 @@
+---
+- name: Converge
+ hosts: all
+ gather_facts: false
+
+ tasks:
+    - name: To check the behaviour of initialization with an invalid volume ID.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ command: initialize
+ volume_id: "{{ lookup('env', 'INVALID_VOLUME_ID') }}"
+ initialize_type: "Fast"
+ ignore_errors: true
+ register: redfish_storage_volume_result
+
+    - name: Asserting operation for initialization with an invalid volume ID.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Specified Volume Id Disk.Virtual.0:RAID.Mezzanine.1C-1-test does not exist in the System."
+
+ - name: To check the behaviour of Initialization type Fast.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ command: initialize
+ volume_id: "{{ lookup('env', 'VOLUME_ID') }}"
+ initialize_type: "Fast"
+
+ - name: Asserting operation for initialization type Fast.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Successfully submitted initialize volume task."
+
+ - name: To check the behaviour of Initialization type Slow.
+ ansible.builtin.import_role:
+ name: redfish_storage_volume
+ vars:
+ hostname: "{{ lookup('env', 'IDRAC_IP') }}"
+ username: "{{ lookup('env', 'IDRAC_USER') }}"
+ password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
+ validate_certs: false
+ command: initialize
+ volume_id: "{{ lookup('env', 'VOLUME_ID') }}"
+ initialize_type: "Slow"
+
+ - name: Asserting operation for initialization type Slow.
+ ansible.builtin.assert:
+ that: redfish_storage_volume_out.msg == "Successfully submitted initialize volume task."
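
The initialization scenario above drives the role's `command: initialize` path with the volume id taken from the environment. For reference, a hedged sketch of the same call with an explicit volume id (the id shown is a placeholder, not a value from this change):

```yaml
# Sketch only - the volume id is a placeholder, not a value from this change.
- name: Fast-initialize an existing virtual disk through the role
  ansible.builtin.import_role:
    name: dellemc.openmanage.redfish_storage_volume
  vars:
    hostname: "{{ lookup('env', 'IDRAC_IP') }}"
    username: "{{ lookup('env', 'IDRAC_USER') }}"
    password: "{{ lookup('env', 'IDRAC_PASSWORD') }}"
    validate_certs: false
    command: initialize
    volume_id: "Disk.Virtual.0:RAID.Slot.1-1"
    initialize_type: "Fast"
```
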
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/molecule.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/molecule.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/molecule/initialization/molecule.yml
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tasks/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tasks/main.yml
new file mode 100644
index 000000000..a6e2ff305
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+# tasks file for redfish_storage_volume
+- name: Setting up parameters
+ ansible.builtin.set_fact:
+ redfish_storage_volume_baseuri: "{{ hostname }}:{{ https_port }}"
+
+- name: Performing the operation on redfish storage volume
+ dellemc.openmanage.redfish_storage_volume:
+ baseuri: "{{ redfish_storage_volume_baseuri }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ timeout: "{{ https_timeout }}"
+ validate_certs: "{{ validate_certs }}"
+ ca_path: "{{ ca_path | default(omit) }}"
+ state: "{{ state | default(omit) }}"
+ controller_id: "{{ controller_id | default(omit) }}"
+ raid_type: "{{ raid_type | default(omit) }}"
+ drives: "{{ drives | default([]) }}"
+ volume_name: "{{ volume_name | default(name) | default(omit) }}"
+ volume_id: "{{ volume_id | default(omit) }}"
+ initialize_type: "{{ initialization_type | default(omit) }}"
+ command: "{{ command | default(omit) }}"
+ encryption_types: "{{ encryption_types | default(omit) }}"
+ encrypted: "{{ encrypted | default(omit) }}"
+ block_size_bytes: "{{ block_size_bytes | default(omit) }}"
+ capacity_bytes: "{{ capacity_bytes | default(omit) }}"
+ optimum_io_size_bytes: "{{ optimum_io_size_bytes | default(omit) }}"
+ apply_time: "{{ apply_time | default(omit) }}"
+ reboot_server: "{{ reboot_server | default(omit) }}"
+ force_reboot: "{{ force_reboot | default(omit) }}"
+ job_wait: "{{ job_wait | default(omit) }}"
+ job_wait_timeout: "{{ job_wait_timeout | default(omit) }}"
+ register: redfish_storage_volume_out
+ delegate_to: "{{ redfish_storage_volume_delegate }}"
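
The role's main task file forwards whatever the caller supplies to the `dellemc.openmanage.redfish_storage_volume` module, dropping unset options through `default(omit)`, registering the result as `redfish_storage_volume_out`, and delegating the call to `redfish_storage_volume_delegate`. A minimal, hedged consumer sketch follows; the host, credentials, controller id and drive ids are placeholders, and `https_port` and `https_timeout` are assumed to come from the role defaults, which are not part of this hunk:

```yaml
---
# Minimal consumer sketch; only the variables needed for a create operation are
# set here, everything else falls back to default(omit) inside the role's main task.
# The host, credentials, controller id and drive ids are placeholders.
- name: Create a RAID1 virtual disk with the redfish_storage_volume role
  hosts: idrac_hosts
  gather_facts: false
  tasks:
    - name: Create the volume
      ansible.builtin.import_role:
        name: dellemc.openmanage.redfish_storage_volume
      vars:
        hostname: "192.168.0.1"
        username: "admin"
        password: "password"
        validate_certs: false
        state: present
        raid_type: "RAID1"
        volume_name: "VD_EXAMPLE"
        controller_id: "RAID.Slot.1-1"
        drives:
          - "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
          - "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"
        job_wait: true

    - name: Show the module result registered by the role
      ansible.builtin.debug:
        var: redfish_storage_volume_out
```
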
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/inventory b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/inventory
new file mode 100644
index 000000000..878877b07
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/inventory
@@ -0,0 +1,2 @@
+localhost
+
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/test.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/test.yml
new file mode 100644
index 000000000..e392bcf21
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/tests/test.yml
@@ -0,0 +1,6 @@
+---
+- name: Executing redfish storage volume
+ hosts: localhost
+ remote_user: root
+ roles:
+ - redfish_storage_volume
diff --git a/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/vars/main.yml b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/vars/main.yml
new file mode 100644
index 000000000..b3717db2b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/roles/redfish_storage_volume/vars/main.yml
@@ -0,0 +1,4 @@
+---
+# vars file for redfish_storage_volume
+redfish_storage_volume_polling_interval: 30
+redfish_storage_volume_delegate: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
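
`vars/main.yml` resolves the delegation target from the `RUNON` environment variable on the controller, falling back to `localhost`. A hedged sketch of redirecting the module call to another inventory host by passing the variable as a role parameter instead (role parameters take precedence over the role's `vars/main.yml` under standard Ansible precedence; `jumphost01` and the connection variables are placeholders):

```yaml
# Sketch: pass the delegation target as a role parameter instead of exporting RUNON.
- name: Create a volume, delegating the module call to a jump host
  hosts: idrac_hosts
  gather_facts: false
  roles:
    - role: dellemc.openmanage.redfish_storage_volume
      redfish_storage_volume_delegate: jumphost01
      hostname: "{{ idrac_ip }}"
      username: "{{ idrac_user }}"
      password: "{{ idrac_password }}"
      validate_certs: false
      state: present
      raid_type: "RAID0"
      volume_name: "VD_DELEGATED"
      controller_id: "{{ controller_id }}"
      drives: "{{ drive_list }}"
```
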
diff --git a/ansible_collections/dellemc/openmanage/tests/README.md b/ansible_collections/dellemc/openmanage/tests/README.md
index f66cdd59d..a7d90ff01 100644
--- a/ansible_collections/dellemc/openmanage/tests/README.md
+++ b/ansible_collections/dellemc/openmanage/tests/README.md
@@ -1,5 +1,5 @@
### Overview
-Dell EMC OpenManage Ansible Modules unit test scripts are located under
+Dell OpenManage Ansible Modules unit test scripts are located under
[unit](./tests/unit) directory.
### Implementing the unit tests
@@ -10,27 +10,15 @@ Any contribution must have an associated unit test. This section covers the
addition to the tested module name. For example: test_ome_user
### Prerequisites
-* Dell EMC OpenManage collections - to install run `ansible-galaxy collection
+* Dell OpenManage collections - to install run `ansible-galaxy collection
install dellemc.openmanage`
* To run the unittest for iDRAC modules, install OpenManage Python Software Development Kit (OMSDK) using
-`pip install omsdk --upgrade` or from [Dell EMC OpenManage Python SDK](https://github.com/dell/omsdk)
+`pip install omsdk --upgrade` or from [Dell OpenManage Python SDK](https://github.com/dell/omsdk)
### Executing unit tests
You can execute them manually by using any tool of your choice, like `pytest` or `ansible-test`.
#### Executing with `ansible-test`
-* Clone [Ansible repository](https://github.com/ansible/ansible) from GitHub to local $ANSIBLE_DIR.
-* Copy `compat` directory from the cloned repository path.
- `$ANSIBLE_DIR/test/units/` to the location of the installed Dell EMC OpenManage collection `$ANSIBLE_COLLECTIONS_PATHS/ansible_collections/dellemc/openmanage/tests/unit`.
-* Copy `utils.py` file from `$ANSIBLE_DIR/test/units/modules` tests location to the location of the installed collection `$ANSIBLE_COLLECTIONS_PATHS/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules`
-* Edit the copied `utils.py` to refer the above `compat` package as below:
-```python
- from units.compat import unittest
-
- # Replace the above lines in utils.py as below
-
- from ansible_collections.dellemc.openmanage.tests.unit.compat import unittest
-```
* To install `ansible-test` requirements use
```
ansible-test units --requirements
diff --git a/ansible_collections/dellemc/openmanage/tests/config.yml b/ansible_collections/dellemc/openmanage/tests/config.yml
new file mode 100644
index 000000000..22131f4f5
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/config.yml
@@ -0,0 +1,2 @@
+modules:
+ python_requires: '>=3.9.6'
diff --git a/ansible_collections/dellemc/openmanage/tests/requirements.txt b/ansible_collections/dellemc/openmanage/tests/requirements.txt
deleted file mode 100644
index 3ea8227f8..000000000
--- a/ansible_collections/dellemc/openmanage/tests/requirements.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-omsdk
-pytest
-pytest-xdist==2.5.0
-mock
-pytest-mock
-pytest-cov
-# pytest-ansible==2.0.1
-coverage==4.5.4
-netaddr>=0.7.19
diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt
deleted file mode 100644
index f6fec0eb5..000000000
--- a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.10.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip
-plugins/modules/idrac_attributes.py compile-2.6!skip
-plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt
deleted file mode 100644
index f6fec0eb5..000000000
--- a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.11.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip
-plugins/modules/idrac_attributes.py compile-2.6!skip
-plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt
deleted file mode 100644
index f6fec0eb5..000000000
--- a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.12.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip
-plugins/modules/idrac_attributes.py compile-2.6!skip
-plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt
deleted file mode 100644
index 9d8f3ba14..000000000
--- a/ansible_collections/dellemc/openmanage/tests/sanity/ignore-2.9.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-plugins/modules/dellemc_get_firmware_inventory.py validate-modules:deprecation-mismatch
-plugins/modules/dellemc_get_firmware_inventory.py validate-modules:invalid-documentation
-plugins/modules/dellemc_get_system_inventory.py validate-modules:deprecation-mismatch
-plugins/modules/dellemc_get_system_inventory.py validate-modules:invalid-documentation
-tests/unit/plugins/modules/test_ome_server_interface_profiles.py compile-2.6!skip
-plugins/modules/idrac_attributes.py compile-2.6!skip
-plugins/modules/idrac_attributes.py import-2.6!skip \ No newline at end of file
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_idrac_redfish.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_idrac_redfish.py
new file mode 100644
index 000000000..fc3b3543d
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_idrac_redfish.py
@@ -0,0 +1,345 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2023 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.idrac_redfish import iDRACRedfishAPI, OpenURLResponse
+from mock import MagicMock
+import json
+import os
+
+MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.'
+OPEN_URL = 'idrac_redfish.open_url'
+TEST_PATH = "/testpath"
+INVOKE_REQUEST = 'idrac_redfish.iDRACRedfishAPI.invoke_request'
+JOB_COMPLETE = 'idrac_redfish.iDRACRedfishAPI.wait_for_job_complete'
+API_TASK = '/api/tasks'
+SLEEP_TIME = 'idrac_redfish.time.sleep'
+
+
+class TestIdracRedfishRest(object):
+
+ @pytest.fixture
+ def mock_response(self):
+ mock_response = MagicMock()
+ mock_response.getcode.return_value = 200
+ mock_response.headers = mock_response.getheaders.return_value = {
+ 'X-Auth-Token': 'token_id'}
+ mock_response.read.return_value = json.dumps({"value": "data"})
+ return mock_response
+
+ @pytest.fixture
+ def module_params(self):
+ module_parameters = {'idrac_ip': '192.168.0.1', 'idrac_user': 'username',
+ 'idrac_password': 'password', 'idrac_port': '443'}
+ return module_parameters
+
+ @pytest.fixture
+ def idrac_redfish_object(self, module_params):
+ idrac_redfish_obj = iDRACRedfishAPI(module_params)
+ return idrac_redfish_obj
+
+ def test_invoke_request_with_session(self, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ req_session = True
+ with iDRACRedfishAPI(module_params, req_session) as obj:
+ response = obj.invoke_request(TEST_PATH, "GET")
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_without_session(self, mock_response, mocker):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ module_params = {'idrac_ip': '2001:db8:3333:4444:5555:6666:7777:8888', 'idrac_user': 'username',
+ 'idrac_password': 'password', "idrac_port": '443'}
+ req_session = False
+ with iDRACRedfishAPI(module_params, req_session) as obj:
+ response = obj.invoke_request(TEST_PATH, "GET")
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_without_session_with_header(self, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ req_session = False
+ with iDRACRedfishAPI(module_params, req_session) as obj:
+ response = obj.invoke_request(TEST_PATH, "POST", headers={
+ "application": "octstream"})
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_with_session_connection_error(self, mocker, mock_response, module_params):
+ mock_response.success = False
+ mock_response.status_code = 500
+ mock_response.json_data = {}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ req_session = True
+ with pytest.raises(ConnectionError):
+ with iDRACRedfishAPI(module_params, req_session) as obj:
+ obj.invoke_request(TEST_PATH, "GET")
+
+ @pytest.mark.parametrize("exc", [URLError, SSLValidationError, ConnectionError])
+ def test_invoke_request_error_case_handling(self, exc, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ side_effect=exc("test"))
+ req_session = False
+ with pytest.raises(exc):
+ with iDRACRedfishAPI(module_params, req_session) as obj:
+ obj.invoke_request(TEST_PATH, "GET")
+
+ def test_invoke_request_http_error_handling(self, mock_response, mocker, module_params):
+ open_url_mock = mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ open_url_mock.side_effect = HTTPError('https://testhost.com/', 400,
+ 'Bad Request Error', {}, None)
+ req_session = False
+ with pytest.raises(HTTPError):
+ with iDRACRedfishAPI(module_params, req_session) as obj:
+ obj.invoke_request(TEST_PATH, "GET")
+
+ @pytest.mark.parametrize("query_params", [
+ {"inp": {"$filter": "UserName eq 'admin'"},
+ "out": "%24filter=UserName+eq+%27admin%27"},
+ {"inp": {"$top": 1, "$skip": 2, "$filter": "JobType/Id eq 8"}, "out":
+ "%24top=1&%24skip=2&%24filter=JobType%2FId+eq+8"},
+ {"inp": {"$top": 1, "$skip": 3}, "out": "%24top=1&%24skip=3"}
+ ])
+ def test_build_url(self, query_params, mocker, idrac_redfish_object):
+ """builds complete url"""
+ base_uri = 'https://192.168.0.1:443/api'
+ path = "/AccountService/Accounts"
+ mocker.patch(MODULE_UTIL_PATH + 'idrac_redfish.iDRACRedfishAPI._get_url',
+ return_value=base_uri + path)
+ inp = query_params["inp"]
+ out = query_params["out"]
+ url = idrac_redfish_object._build_url(
+ path, query_param=inp)
+ assert url == base_uri + path + "?" + out
+
+ def test_build_url_none(self, mocker, idrac_redfish_object):
+ """builds complete url"""
+ base_uri = 'https://192.168.0.1:443/api'
+ mocker.patch(MODULE_UTIL_PATH + 'redfish.Redfish._get_base_url',
+ return_value=base_uri)
+ url = idrac_redfish_object._build_url("", None)
+ assert url == ""
+
+ def test_invalid_json_openurlresp(self):
+ obj = OpenURLResponse({})
+ obj.body = 'invalid json'
+ with pytest.raises(ValueError) as e:
+ obj.json_data
+ assert e.value.args[0] == "Unable to parse json"
+
+ def test_reason(self):
+ def mock_read():
+ return "{}"
+
+ obj = MagicMock()
+ obj.reason = "returning reason"
+ obj.read = mock_read
+ ourl = OpenURLResponse(obj)
+ reason_ret = ourl.reason
+ assert reason_ret == "returning reason"
+
+ @pytest.mark.parametrize("task_inp", [{"job_wait": True, "job_status": {"TaskState": "Completed"}}])
+ def test_wait_for_job_complete(self, mocker, mock_response, task_inp, idrac_redfish_object):
+ mock_response.json_data = task_inp.get("job_status")
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + SLEEP_TIME,
+ return_value=None)
+ ret_resp = idrac_redfish_object.wait_for_job_complete(
+ API_TASK, task_inp.get("job_wait"))
+ assert ret_resp.json_data == mock_response.json_data
+
+ def test_wait_for_job_complete_false(self, mocker, mock_response, idrac_redfish_object):
+ mock_response.json_data = {"TaskState": "Completed"}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + SLEEP_TIME,
+ return_value=None)
+ ret_resp = idrac_redfish_object.wait_for_job_complete(API_TASK, False)
+ assert ret_resp is None
+
+ def test_wait_for_job_complete_value_error(self, mocker, mock_response, module_params):
+ mock_response.json_data = {"TaskState": "Completed"}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ side_effect=ValueError("test"))
+ with pytest.raises(ValueError):
+ with iDRACRedfishAPI(module_params, True) as obj:
+ obj.wait_for_job_complete(API_TASK, True)
+
+ @pytest.mark.parametrize("inp_data", [
+ {
+ "j_data": {"PercentComplete": 100, "JobState": "Completed"},
+ "job_wait": True,
+ "reboot": True,
+ "apply_update": True
+ },
+ {
+ "j_data": {"PercentComplete": 0, "JobState": "Starting"},
+ "job_wait": True,
+ "reboot": False,
+ "apply_update": True
+ },
+ {
+ "j_data": {"PercentComplete": 0, "JobState": "Starting"},
+ "job_wait": False,
+ "reboot": False,
+ "apply_update": True
+ },
+ ])
+ def test_wait_for_job_completion(self, mocker, mock_response, inp_data, idrac_redfish_object):
+ mock_response.json_data = inp_data.get("j_data")
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + SLEEP_TIME,
+ return_value=None)
+ ret_resp = idrac_redfish_object.wait_for_job_completion(API_TASK, inp_data.get(
+ "job_wait"), inp_data.get("reboot"), inp_data.get("apply_update"))
+ assert ret_resp.json_data is mock_response.json_data
+
+ @pytest.mark.parametrize("share_inp", [
+ {"share_ip": "share_ip", "share_name": "share_name", "share_type": "share_type",
+ "file_name": "file_name", "username": "username", "password": "password",
+ "ignore_certificate_warning": "ignore_certificate_warning",
+ "proxy_support": "proxy_support", "proxy_type": "proxy_type",
+ "proxy_port": "proxy_port", "proxy_server": "proxy_server",
+ "proxy_username": "proxy_username", "proxy_password": "proxy_password"}, {}, None])
+ def test_export_scp(self, mocker, mock_response, share_inp, idrac_redfish_object):
+ mock_response.json_data = {"Status": "Completed"}
+ mock_response.status_code = 202
+ mock_response.headers = {"Location": API_TASK}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + JOB_COMPLETE,
+ return_value={"Status": "Completed"})
+ job_wait = share_inp is not None
+ resp = idrac_redfish_object.export_scp("xml", "export_use",
+ "All", job_wait, share_inp)
+ if job_wait:
+ assert resp == {"Status": "Completed"}
+ else:
+ assert resp.json_data == {"Status": "Completed"}
+
+ @pytest.mark.parametrize("share_inp", [
+ {"share_ip": "share_ip", "share_name": "share_name", "share_type": "share_type",
+ "file_name": "file_name", "username": "username", "password": "password",
+ "ignore_certificate_warning": "ignore_certificate_warning",
+ "proxy_support": "proxy_support", "proxy_type": "proxy_type",
+ "proxy_port": "proxy_port", "proxy_server": "proxy_server",
+ "proxy_username": "proxy_username", "proxy_password": "proxy_password"}, {}, None])
+ def test_import_scp_share(self, mocker, mock_response, share_inp, idrac_redfish_object):
+ mock_response.json_data = {"Status": "Completed"}
+ mock_response.status_code = 202
+ mock_response.headers = {"Location": API_TASK}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ imp_buffer = "import_buffer"
+ if share_inp is not None:
+ imp_buffer = None
+ resp = idrac_redfish_object.import_scp_share(
+ "shutdown_type", "host_powerstate", True, "All", imp_buffer, share_inp)
+ assert resp.json_data == {"Status": "Completed"}
+
+ @pytest.mark.parametrize("share_inp", [
+ {"share_ip": "share_ip", "share_name": "share_name", "share_type": "share_type",
+ "file_name": "file_name", "username": "username", "password": "password",
+ "ignore_certificate_warning": "ignore_certificate_warning",
+ "proxy_support": "proxy_support", "proxy_type": "proxy_type",
+ "proxy_port": "proxy_port", "proxy_server": "proxy_server",
+ "proxy_username": "proxy_username", "proxy_password": "proxy_password"}, {}, None])
+ def test_import_preview(self, mocker, mock_response, share_inp, idrac_redfish_object):
+ mock_response.json_data = {"Status": "Completed"}
+ mock_response.status_code = 202
+ mock_response.headers = {"Location": API_TASK}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + JOB_COMPLETE,
+ return_value={"Status": "Completed"})
+ job_wait = True
+ imp_buffer = "import_buffer"
+ if share_inp is not None:
+ imp_buffer = None
+ job_wait = False
+ resp = idrac_redfish_object.import_preview(
+ imp_buffer, "All", share_inp, job_wait)
+ if job_wait:
+ assert resp == {"Status": "Completed"}
+ else:
+ assert resp.json_data == {"Status": "Completed"}
+
+ @pytest.mark.parametrize("status_code", [202, 200])
+ def test_import_scp(self, mocker, mock_response, status_code, idrac_redfish_object):
+ mock_response.json_data = {"Status": "Completed"}
+ mock_response.status_code = status_code
+ mock_response.headers = {"Location": "/tasks/1"}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + JOB_COMPLETE,
+ return_value=mock_response)
+ resp = idrac_redfish_object.import_scp("imp_buffer", "All", True)
+ assert resp.json_data == {"Status": "Completed"}
+
+ @pytest.mark.parametrize("status_code", [202, 200])
+ def test_import_preview_scp(self, mocker, mock_response, status_code, idrac_redfish_object):
+ mock_response.json_data = {"Status": "Completed"}
+ mock_response.status_code = status_code
+ mock_response.headers = {"Location": "/tasks/1"}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mocker.patch(MODULE_UTIL_PATH + JOB_COMPLETE,
+ return_value=mock_response)
+ resp = idrac_redfish_object.import_preview_scp(
+ "imp_buffer", "All", True)
+ assert resp.json_data == {"Status": "Completed"}
+
+ def test_requests_ca_bundle_set(self, mocker, mock_response, idrac_redfish_object):
+ os.environ["REQUESTS_CA_BUNDLE"] = "/path/to/requests_ca_bundle.pem"
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ result = idrac_redfish_object._get_omam_ca_env()
+ assert result == "/path/to/requests_ca_bundle.pem"
+ del os.environ["REQUESTS_CA_BUNDLE"]
+
+ def test_curl_ca_bundle_set(self, mocker, mock_response, idrac_redfish_object):
+ os.environ["CURL_CA_BUNDLE"] = "/path/to/curl_ca_bundle.pem"
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ result = idrac_redfish_object._get_omam_ca_env()
+ assert result == "/path/to/curl_ca_bundle.pem"
+ del os.environ["CURL_CA_BUNDLE"]
+
+ def test_omam_ca_bundle_set(self, mocker, mock_response, idrac_redfish_object):
+ os.environ["OMAM_CA_BUNDLE"] = "/path/to/omam_ca_bundle.pem"
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ result = idrac_redfish_object._get_omam_ca_env()
+ assert result == "/path/to/omam_ca_bundle.pem"
+ del os.environ["OMAM_CA_BUNDLE"]
+
+ def test_no_env_variable_set(self, mocker, mock_response, idrac_redfish_object):
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ result = idrac_redfish_object._get_omam_ca_env()
+ assert result is None
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py
index fc0f0be53..93892a744 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_ome.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2019-2022 Dell Inc.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2019-2023 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
@@ -17,268 +17,403 @@ __metaclass__ = type
import pytest
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
-from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME
+from ansible_collections.dellemc.openmanage.plugins.module_utils.ome import RestOME, OpenURLResponse
from mock import MagicMock
import json
MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.'
+OME_OPENURL = 'ome.open_url'
+TEST_PATH = "/testpath"
+INVOKE_REQUEST = 'ome.RestOME.invoke_request'
+JOB_SUBMISSION = 'ome.RestOME.job_submission'
+DEVICE_API = "DeviceService/Devices"
+TEST_HOST = 'https://testhost.com/'
+BAD_REQUEST = 'Bad Request Error'
+ODATA_COUNT = "@odata.count"
+ODATA_TYPE = "@odata.type"
+DDEVICE_TYPE = "#DeviceService.DeviceType"
-class TestRestOME(object):
-
- @pytest.fixture
- def ome_response_mock(self, mocker):
- set_method_result = {'json_data': {}}
- response_class_mock = mocker.patch(
- MODULE_UTIL_PATH + 'ome.OpenURLResponse',
- return_value=set_method_result)
- response_class_mock.success = True
- response_class_mock.status_code = 200
- return response_class_mock
+class TestOMERest(object):
@pytest.fixture
def mock_response(self):
mock_response = MagicMock()
mock_response.getcode.return_value = 200
- mock_response.headers = mock_response.getheaders.return_value = {'X-Auth-Token': 'token_id'}
+ mock_response.headers = mock_response.getheaders.return_value = {
+ 'X-Auth-Token': 'token_id'}
mock_response.read.return_value = json.dumps({"value": "data"})
return mock_response
+ @pytest.fixture
+ def module_params(self):
+ module_parameters = {'hostname': '192.168.0.1', 'username': 'username',
+ 'password': 'password', "port": 443}
+ return module_parameters
+
+ @pytest.fixture
+ def ome_object(self, module_params):
+ ome_obj = RestOME(module_params=module_params)
+ return ome_obj
+
def test_invoke_request_with_session(self, mock_response, mocker):
- mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+
+ mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
+ module_params = {'hostname': '[2001:db8:3333:4444:5555:6666:7777:8888]', 'username': 'username',
'password': 'password', "port": 443}
req_session = True
with RestOME(module_params, req_session) as obj:
- response = obj.invoke_request("/testpath", "GET")
+
+ response = obj.invoke_request(TEST_PATH, "GET")
assert response.status_code == 200
assert response.json_data == {"value": "data"}
assert response.success is True
- def test_invoke_request_without_session(self, mock_response, mocker):
- mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+ def test_invoke_request_without_session(self, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
req_session = False
with RestOME(module_params, req_session) as obj:
- response = obj.invoke_request("/testpath", "GET")
+ response = obj.invoke_request(TEST_PATH, "GET")
assert response.status_code == 200
assert response.json_data == {"value": "data"}
assert response.success is True
- def test_invoke_request_without_session_with_header(self, mock_response, mocker):
- mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+ def test_invoke_request_without_session_with_header(self, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
req_session = False
with RestOME(module_params, req_session) as obj:
- response = obj.invoke_request("/testpath", "POST", headers={"application": "octstream"})
+ response = obj.invoke_request(TEST_PATH, "POST", headers={
+ "application": "octstream"})
assert response.status_code == 200
assert response.json_data == {"value": "data"}
assert response.success is True
- def test_invoke_request_with_session_connection_error(self, mocker, mock_response):
+ def test_invoke_request_with_session_connection_error(self, mocker, mock_response, module_params):
mock_response.success = False
mock_response.status_code = 500
mock_response.json_data = {}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
req_session = True
with pytest.raises(ConnectionError):
with RestOME(module_params, req_session) as obj:
- obj.invoke_request("/testpath", "GET")
+ obj.invoke_request(TEST_PATH, "GET")
@pytest.mark.parametrize("exc", [URLError, SSLValidationError, ConnectionError])
- def test_invoke_request_error_case_handling(self, exc, mock_response, mocker):
- open_url_mock = mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+ def test_invoke_request_error_case_handling(self, exc, mock_response, mocker, module_params):
+ open_url_mock = mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
open_url_mock.side_effect = exc("test")
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
req_session = False
- with pytest.raises(exc) as e:
+ with pytest.raises(exc):
with RestOME(module_params, req_session) as obj:
- obj.invoke_request("/testpath", "GET")
+ obj.invoke_request(TEST_PATH, "GET")
- def test_invoke_request_http_error_handling(self, mock_response, mocker):
- open_url_mock = mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+ def test_invoke_request_http_error_handling(self, mock_response, mocker, module_params):
+ open_url_mock = mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
- open_url_mock.side_effect = HTTPError('http://testhost.com/', 400,
- 'Bad Request Error', {}, None)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
+ open_url_mock.side_effect = HTTPError(TEST_HOST, 400,
+ BAD_REQUEST, {}, None)
req_session = False
- with pytest.raises(HTTPError) as e:
+ with pytest.raises(HTTPError):
with RestOME(module_params, req_session) as obj:
- obj.invoke_request("/testpath", "GET")
+ obj.invoke_request(TEST_PATH, "GET")
- def test_get_all_report_details(self, mock_response, mocker):
+ def test_get_all_report_details(self, mock_response, mocker, module_params):
mock_response.success = True
mock_response.status_code = 200
- mock_response.json_data = {"@odata.count": 50, "value": list(range(51))}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mock_response.json_data = {ODATA_COUNT: 53, "value": list(range(50))}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
with RestOME(module_params, True) as obj:
- reports = obj.get_all_report_details("DeviceService/Devices")
- assert reports == {"resp_obj": mock_response, "report_list": list(range(51))}
+ reports = obj.get_all_report_details(DEVICE_API)
+ assert reports == {"resp_obj": mock_response,
+ "report_list": list(range(50)) + (list(range(50)))}
- def test_get_report_list_error_case(self, mock_response, mocker):
- mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+ def test_get_report_list_error_case(self, mock_response, mocker, ome_object):
+ mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
- invoke_obj = mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
- side_effect=HTTPError('http://testhost.com/', 400, 'Bad Request Error', {}, None))
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with pytest.raises(HTTPError) as e:
- with RestOME(module_params, False) as obj:
- obj.get_all_report_details("DeviceService/Devices")
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ side_effect=HTTPError(TEST_HOST, 400, BAD_REQUEST, {}, None))
+ with pytest.raises(HTTPError):
+ ome_object.get_all_report_details(DEVICE_API)
@pytest.mark.parametrize("query_param", [
- {"inp": {"$filter": "UserName eq 'admin'"}, "out": "%24filter=UserName%20eq%20%27admin%27"},
+ {"inp": {"$filter": "UserName eq 'admin'"},
+ "out": "%24filter=UserName%20eq%20%27admin%27"},
{"inp": {"$top": 1, "$skip": 2, "$filter": "JobType/Id eq 8"}, "out":
"%24top=1&%24skip=2&%24filter=JobType%2FId%20eq%208"},
{"inp": {"$top": 1, "$skip": 3}, "out": "%24top=1&%24skip=3"}
])
- def test_build_url(self, query_param, mocker):
+ def test_build_url(self, query_param, mocker, module_params):
"""builds complete url"""
base_uri = 'https://192.168.0.1:443/api'
path = "AccountService/Accounts"
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME._get_base_url',
return_value=base_uri)
inp = query_param["inp"]
out = query_param["out"]
- url = RestOME(module_params=module_params)._build_url(path, query_param=inp)
+ url = RestOME(module_params=module_params)._build_url(
+ path, query_param=inp)
assert url == base_uri + "/" + path + "?" + out
assert "+" not in url
- def test_get_job_type_id(self, mock_response, mocker):
+ def test_get_job_type_id(self, mock_response, mocker, ome_object):
mock_response.success = True
mock_response.status_code = 200
- mock_response.json_data = {"@odata.count": 50, "value": [{"Name": "PowerChange", "Id": 11}]}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mock_response.json_data = {ODATA_COUNT: 50,
+ "value": [{"Name": "PowerChange", "Id": 11}]}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
jobtype_name = "PowerChange"
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with RestOME(module_params, True) as obj:
- job_id = obj.get_job_type_id(jobtype_name)
+ job_id = ome_object.get_job_type_id(jobtype_name)
assert job_id == 11
- def test_get_job_type_id_null_case(self, mock_response, mocker):
+ def test_get_job_type_id_null_case(self, mock_response, mocker, ome_object):
mock_response.success = True
mock_response.status_code = 200
- mock_response.json_data = {"@odata.count": 50, "value": [{"Name": "PowerChange", "Id": 11}]}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mock_response.json_data = {ODATA_COUNT: 50,
+ "value": [{"Name": "PowerChange", "Id": 11}]}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
jobtype_name = "FirmwareUpdate"
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with RestOME(module_params, True) as obj:
- job_id = obj.get_job_type_id(jobtype_name)
+ job_id = ome_object.get_job_type_id(jobtype_name)
assert job_id is None
- def test_get_device_id_from_service_tag_ome_case01(self, mocker, mock_response):
+ def test_get_device_id_from_service_tag_ome_case01(self, mocker, mock_response, ome_object):
mock_response.success = True
mock_response.status_code = 200
- mock_response.json_data = {"@odata.count": 1, "value": [{"Name": "xyz", "Id": 11}]}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mock_response.json_data = {ODATA_COUNT: 1,
+ "value": [{"Name": "xyz", "Id": 11}]}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
- ome_default_args = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with RestOME(ome_default_args, True) as obj:
- details = obj.get_device_id_from_service_tag("xyz")
+ details = ome_object.get_device_id_from_service_tag("xyz")
assert details["Id"] == 11
assert details["value"] == {"Name": "xyz", "Id": 11}
- def test_get_device_id_from_service_tag_ome_case02(self, mocker, mock_response):
+ def test_get_device_id_from_service_tag_ome_case02(self, mocker, mock_response, ome_object):
mock_response.success = True
mock_response.status_code = 200
- mock_response.json_data = {"@odata.count": 0, "value": []}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mock_response.json_data = {ODATA_COUNT: 0, "value": []}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
- ome_default_args = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with RestOME(ome_default_args, True) as obj:
- details = obj.get_device_id_from_service_tag("xyz")
+ details = ome_object.get_device_id_from_service_tag("xyz")
assert details["Id"] is None
assert details["value"] == {}
- def test_get_all_items_with_pagination(self, mock_response, mocker):
+ def test_get_all_items_with_pagination(self, mock_response, mocker, ome_object):
mock_response.success = True
mock_response.status_code = 200
- mock_response.json_data = {"@odata.count": 50, "value": list(range(51))}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
- return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with RestOME(module_params, True) as obj:
- reports = obj.get_all_items_with_pagination("DeviceService/Devices")
- assert reports == {"total_count": 50, "value": list(range(51))}
+ mock_response.json_data = {ODATA_COUNT: 100, "value": list(
+ range(50)), '@odata.nextLink': '/api/DeviceService/Devices2'}
- def test_get_all_items_with_pagination_error_case(self, mock_response, mocker):
- mocker.patch(MODULE_UTIL_PATH + 'ome.open_url',
+ mock_response_page2 = MagicMock()
+ mock_response_page2.success = True
+ mock_response_page2.status_code = 200
+ mock_response_page2.json_data = {
+ ODATA_COUNT: 100, "value": list(range(50, 100))}
+
+ def mock_invoke_request(*args, **kwargs):
+ if args[1] == DEVICE_API:
+ return mock_response
+ return mock_response_page2
+
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ side_effect=mock_invoke_request)
+ reports = ome_object.get_all_items_with_pagination(DEVICE_API)
+ assert reports == {"total_count": 100, "value": list(range(100))}
+
+ def test_get_all_items_with_pagination_error_case(self, mock_response, mocker, ome_object):
+ mocker.patch(MODULE_UTIL_PATH + OME_OPENURL,
return_value=mock_response)
- invoke_obj = mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
- side_effect=HTTPError('http://testhost.com/', 400, 'Bad Request Error', {}, None))
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with pytest.raises(HTTPError) as e:
- with RestOME(module_params, False) as obj:
- obj.get_all_items_with_pagination("DeviceService/Devices")
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ side_effect=HTTPError(TEST_HOST, 400, BAD_REQUEST, {}, None))
+ with pytest.raises(HTTPError):
+ ome_object.get_all_items_with_pagination(DEVICE_API)
- def test_get_device_type(self, mock_response, mocker):
+ def test_get_device_type(self, mock_response, mocker, ome_object):
mock_response.success = True
mock_response.status_code = 200
mock_response.json_data = {
"@odata.context": "/api/$metadata#Collection(DeviceService.DeviceType)",
- "@odata.count": 5,
+ ODATA_COUNT: 5,
"value": [
{
- "@odata.type": "#DeviceService.DeviceType",
+ ODATA_TYPE: DDEVICE_TYPE,
"DeviceType": 1000,
"Name": "SERVER",
"Description": "Server Device"
},
{
- "@odata.type": "#DeviceService.DeviceType",
+ ODATA_TYPE: DDEVICE_TYPE,
"DeviceType": 2000,
"Name": "CHASSIS",
"Description": "Chassis Device"
},
{
- "@odata.type": "#DeviceService.DeviceType",
+ ODATA_TYPE: DDEVICE_TYPE,
"DeviceType": 3000,
"Name": "STORAGE",
"Description": "Storage Device"
},
{
- "@odata.type": "#DeviceService.DeviceType",
+ ODATA_TYPE: DDEVICE_TYPE,
"DeviceType": 4000,
"Name": "NETWORK_IOM",
"Description": "NETWORK IO Module Device"
},
{
- "@odata.type": "#DeviceService.DeviceType",
+ ODATA_TYPE: DDEVICE_TYPE,
"DeviceType": 8000,
"Name": "STORAGE_IOM",
"Description": "Storage IOM Device"
}
]
}
- mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.invoke_request',
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
return_value=mock_response)
- module_params = {'hostname': '192.168.0.1', 'username': 'username',
- 'password': 'password', "port": 443}
- with RestOME(module_params, False) as obj:
- type_map = obj.get_device_type()
+ type_map = ome_object.get_device_type()
assert type_map == {1000: "SERVER", 2000: "CHASSIS", 3000: "STORAGE",
4000: "NETWORK_IOM", 8000: "STORAGE_IOM"}
+
+ def test_invalid_json_openurlresp(self):
+ obj = OpenURLResponse({})
+ obj.body = 'invalid json'
+ with pytest.raises(ValueError) as e:
+ obj.json_data
+ assert e.value.args[0] == "Unable to parse json"
+
+ @pytest.mark.parametrize("status_assert", [
+ {'id': 2060, 'exist_poll': True, 'job_failed': False,
+ 'message': "Job Completed successfully."},
+ {'id': 2070, 'exist_poll': True, 'job_failed': True,
+ 'message': "Job is in Failed state, and is not completed."},
+ {'id': 1234, 'exist_poll': False, 'job_failed': False, 'message': None}])
+ def test_get_job_info(self, mocker, mock_response, status_assert, ome_object):
+
+ mock_response.success = True
+ mock_response.status_code = 200
+ mock_response.json_data = {
+ 'LastRunStatus': {'Id': status_assert['id']}
+ }
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ exit_poll, job_failed, message = ome_object.get_job_info(12345)
+
+ assert exit_poll is status_assert['exist_poll']
+ assert job_failed is status_assert['job_failed']
+ assert message == status_assert['message']
+
+ def test_get_job_exception(self, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ side_effect=HTTPError(TEST_HOST, 400,
+ BAD_REQUEST, {}, None))
+ with pytest.raises(HTTPError):
+ with RestOME(module_params, True) as obj:
+ obj.get_job_info(12345)
+
+ @pytest.mark.parametrize("ret_val", [
+ (True, False, "My Message"),
+ (False, True, "The job is not complete after 2 seconds.")])
+ def test_job_tracking(self, mocker, mock_response, ret_val, ome_object):
+ mocker.patch(MODULE_UTIL_PATH + 'ome.time.sleep',
+ return_value=())
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+
+ mocker.patch(MODULE_UTIL_PATH + 'ome.RestOME.get_job_info',
+ return_value=ret_val)
+ job_failed, message = ome_object.job_tracking(12345, 2, 1)
+ assert job_failed is ret_val[1]
+ assert message == ret_val[2]
+
+ def test_strip_substr_dict(self, mocker, mock_response, ome_object):
+ data_dict = {"@odata.context": "/api/$metadata#Collection(DeviceService.DeviceType)",
+ ODATA_COUNT: 5,
+ "value": [
+ {
+ ODATA_TYPE: DDEVICE_TYPE,
+ "DeviceType": 1000,
+ "Name": "SERVER",
+ "Description": "Server Device"
+ },
+ {
+ ODATA_TYPE: DDEVICE_TYPE,
+ "DeviceType": 2000,
+ "Name": "CHASSIS",
+ "Description": "Chassis Device"
+ }
+ ]}
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ ret = ome_object.strip_substr_dict(data_dict)
+ assert ret == {'value': [{'@odata.type': '#DeviceService.DeviceType', 'Description': 'Server Device', 'DeviceType': 1000, 'Name': 'SERVER'}, {
+ '@odata.type': '#DeviceService.DeviceType', 'Description': 'Chassis Device', 'DeviceType': 2000, 'Name': 'CHASSIS'}]}
+
+ def test_job_submission(self, mocker, mock_response, ome_object):
+ mock_response.success = True
+ mock_response.status_code = 200
+ mock_response.json_data = {
+ 'JobStatus': "Completed"
+ }
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ ret = ome_object.job_submission(
+ "job_name", "job_desc", "targets", "params", "job_type")
+ assert ret.json_data == mock_response.json_data
+
+ def test_test_network_connection(self, mocker, mock_response, ome_object):
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mock_response.success = True
+ mock_response.status_code = 200
+ mock_response.json_data = {
+ 'JobStatus': "Completed"
+ }
+ mocker.patch(MODULE_UTIL_PATH + JOB_SUBMISSION,
+ return_value=mock_response)
+ ret = ome_object.test_network_connection(
+ "share_address", "share_path", "share_type", "share_user", "share_password", "share_domain")
+ assert ret.json_data == mock_response.json_data
+
+ ret = ome_object.test_network_connection(
+ "share_address", "share_path", "share_type")
+ assert ret.json_data == mock_response.json_data
+
+ def test_check_existing_job_state(self, mocker, mock_response, ome_object):
+ mocker.patch(MODULE_UTIL_PATH + INVOKE_REQUEST,
+ return_value=mock_response)
+ mock_response.success = True
+ mock_response.status_code = 200
+ mock_response.json_data = {
+ 'value': [{"JobType": {"Name": "Job_Name_1"}}]
+ }
+ mocker.patch(MODULE_UTIL_PATH + JOB_SUBMISSION,
+ return_value=mock_response)
+ job_allowed, available_jobs = ome_object.check_existing_job_state(
+ "Job_Name_1")
+ assert job_allowed is False
+ assert available_jobs == {"JobType": {"Name": "Job_Name_1"}}
+
+ mock_response.json_data = {
+ 'value': []
+ }
+ mocker.patch(MODULE_UTIL_PATH + JOB_SUBMISSION,
+ return_value=mock_response)
+ job_allowed, available_jobs = ome_object.check_existing_job_state(
+ "Job_Name_1")
+ assert job_allowed is True
+ assert available_jobs == []
+
+ mock_response.json_data = {
+ 'value': [{"JobType": {"Name": "Job_Name_2"}}]
+ }
+ mocker.patch(MODULE_UTIL_PATH + JOB_SUBMISSION,
+ return_value=mock_response)
+ job_allowed, available_jobs = ome_object.check_existing_job_state(
+ "Job_Name_1")
+ assert job_allowed is True
+ assert available_jobs == [{'JobType': {'Name': 'Job_Name_2'}}]
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_redfish.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_redfish.py
new file mode 100644
index 000000000..2e092af15
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/module_utils/test_redfish.py
@@ -0,0 +1,171 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2023 Dell Inc.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# All rights reserved. Dell, EMC, and other trademarks are trademarks of Dell Inc. or its subsidiaries.
+# Other trademarks may be trademarks of their respective owners.
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible_collections.dellemc.openmanage.plugins.module_utils.redfish import Redfish, OpenURLResponse
+from mock import MagicMock
+import json
+
+MODULE_UTIL_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.'
+OPEN_URL = 'redfish.open_url'
+TEST_PATH = "/testpath"
+
+
+class TestRedfishRest(object):
+
+ @pytest.fixture
+ def mock_response(self):
+ mock_response = MagicMock()
+ mock_response.getcode.return_value = 200
+ mock_response.headers = mock_response.getheaders.return_value = {
+ 'X-Auth-Token': 'token_id'}
+ mock_response.read.return_value = json.dumps({"value": "data"})
+ return mock_response
+
+ @pytest.fixture
+ def module_params(self):
+ module_parameters = {'baseuri': '192.168.0.1:443', 'username': 'username',
+ 'password': 'password'}
+ return module_parameters
+
+ @pytest.fixture
+ def redfish_object(self, module_params):
+ redfish_obj = Redfish(module_params=module_params)
+ return redfish_obj
+
+ def test_invoke_request_with_session(self, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ req_session = True
+ with Redfish(module_params, req_session) as obj:
+ response = obj.invoke_request(TEST_PATH, "GET")
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_without_session(self, mock_response, mocker):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ module_params = {'baseuri': '[2001:db8:3333:4444:5555:6666:7777:8888]:443', 'username': 'username',
+ 'password': 'password', "port": 443}
+ req_session = False
+ with Redfish(module_params, req_session) as obj:
+ response = obj.invoke_request(TEST_PATH, "GET")
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_without_session_with_header(self, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ req_session = False
+ with Redfish(module_params, req_session) as obj:
+ response = obj.invoke_request(TEST_PATH, "POST", headers={
+ "application": "octstream"})
+ assert response.status_code == 200
+ assert response.json_data == {"value": "data"}
+ assert response.success is True
+
+ def test_invoke_request_with_session_connection_error(self, mocker, mock_response, module_params):
+ mock_response.success = False
+ mock_response.status_code = 500
+ mock_response.json_data = {}
+ mocker.patch(MODULE_UTIL_PATH + 'redfish.Redfish.invoke_request',
+ return_value=mock_response)
+ req_session = True
+ with pytest.raises(ConnectionError):
+ with Redfish(module_params, req_session) as obj:
+ obj.invoke_request(TEST_PATH, "GET")
+
+ @pytest.mark.parametrize("exc", [URLError, SSLValidationError, ConnectionError])
+ def test_invoke_request_error_case_handling(self, exc, mock_response, mocker, module_params):
+ mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ side_effect=exc("test"))
+ req_session = False
+ with pytest.raises(exc):
+ with Redfish(module_params, req_session) as obj:
+ obj.invoke_request(TEST_PATH, "GET")
+
+ def test_invoke_request_http_error_handling(self, mock_response, mocker, module_params):
+ open_url_mock = mocker.patch(MODULE_UTIL_PATH + OPEN_URL,
+ return_value=mock_response)
+ open_url_mock.side_effect = HTTPError('https://testhost.com/', 400,
+ 'Bad Request Error', {}, None)
+ req_session = False
+ with pytest.raises(HTTPError):
+ with Redfish(module_params, req_session) as obj:
+ obj.invoke_request(TEST_PATH, "GET")
+
+ @pytest.mark.parametrize("query_params", [
+ {"inp": {"$filter": "UserName eq 'admin'"},
+ "out": "%24filter=UserName+eq+%27admin%27"},
+ {"inp": {"$top": 1, "$skip": 2, "$filter": "JobType/Id eq 8"}, "out":
+ "%24top=1&%24skip=2&%24filter=JobType%2FId+eq+8"},
+ {"inp": {"$top": 1, "$skip": 3}, "out": "%24top=1&%24skip=3"}
+ ])
+ def test_build_url(self, query_params, mocker, redfish_object):
+ """builds complete url"""
+ base_uri = 'https://192.168.0.1:443/api'
+ path = "/AccountService/Accounts"
+ mocker.patch(MODULE_UTIL_PATH + 'redfish.Redfish._get_base_url',
+ return_value=base_uri)
+ inp = query_params["inp"]
+ out = query_params["out"]
+ url = redfish_object._build_url(
+ path, query_param=inp)
+ assert url == base_uri + path + "?" + out
+
+ def test_build_url_none(self, mocker, redfish_object):
+ """builds complete url"""
+ base_uri = 'https://192.168.0.1:443/api'
+ mocker.patch(MODULE_UTIL_PATH + 'redfish.Redfish._get_base_url',
+ return_value=base_uri)
+ url = redfish_object._build_url("", None)
+ assert url == ""
+
+ def test_strip_substr_dict(self, mocker, mock_response, redfish_object):
+ data_dict = {"@odata.context": "/api/$metadata#Collection(DeviceService.DeviceType)",
+ "@odata.count": 5,
+ "value": [
+ {
+ "@odata.type": "#DeviceService.DeviceType",
+ "DeviceType": 1000,
+ "Name": "SERVER",
+ "Description": "Server Device"
+ }
+ ]}
+ ret = redfish_object.strip_substr_dict(data_dict)
+ assert ret == {'value': [{'@odata.type': '#DeviceService.DeviceType',
+ 'Description': 'Server Device', 'DeviceType': 1000, 'Name': 'SERVER'}]}
+
+ def test_invalid_json_openurlresp(self):
+ obj = OpenURLResponse({})
+ obj.body = 'invalid json'
+ with pytest.raises(ValueError) as e:
+ obj.json_data
+ assert e.value.args[0] == "Unable to parse json"
+
+ def test_reason(self):
+ def mock_read():
+ return "{}"
+ obj = MagicMock()
+ obj.reason = "returning reason"
+ obj.read = mock_read
+ ourl = OpenURLResponse(obj)
+ reason_ret = ourl.reason
+ assert reason_ret == "returning reason"
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/common.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/common.py
index 0cc124f9b..ef7f8d4e3 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/common.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/common.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -26,8 +26,8 @@ class Constants:
device_id2 = 4321
service_tag1 = "MXL1234"
service_tag2 = "MXL5467"
- hostname1 = "192.168.0.1"
- hostname2 = "192.168.0.2"
+ hostname1 = "XX.XX.XX.XX"
+ hostname2 = "YY.YY.YY.YY"
class AnsibleFailJSonException(Exception):
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py
index e6f9ae46e..ff455d6d8 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/conftest.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -16,7 +16,7 @@ __metaclass__ = type
import pytest
from ansible.module_utils import basic
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.utils import set_module_args, exit_json, \
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.utils import exit_json, \
fail_json, AnsibleFailJson, AnsibleExitJson
from mock import MagicMock
@@ -57,7 +57,7 @@ def redfish_response_mock(mocker):
@pytest.fixture
def ome_default_args():
- default_args = {'hostname': '192.168.0.1', 'username': 'username', 'password': 'password', "ca_path": "/path/ca_bundle"}
+ default_args = {'hostname': 'XX.XX.XX.XX', 'username': 'username', 'password': 'password', "ca_path": "/path/ca_bundle"}
return default_args
@@ -70,7 +70,7 @@ def idrac_default_args():
@pytest.fixture
def redfish_default_args():
- default_args = {'baseuri': '192.168.0.1', 'username': 'username', 'password': 'password',
+ default_args = {'baseuri': 'XX.XX.XX.XX', 'username': 'username', 'password': 'password',
"ca_path": "/path/to/ca_cert.pem"}
return default_args
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py
index 0386269ec..fb361a38e 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_eventing.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,13 +14,20 @@ __metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_configure_idrac_eventing
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock, PropertyMock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock, Mock, PropertyMock
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
from pytest import importorskip
+from ansible.module_utils._text import to_text
+import json
+from io import StringIO
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
class TestConfigureEventing(FakeAnsibleModule):
module = dellemc_configure_idrac_eventing
@@ -90,7 +97,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
"email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
"enable_alerts": "Enabled", "authentication": "Enabled",
- "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
+ "smtp_ip_address": "XX.XX.XX.XX", "smtp_port": 443, "username": "uname",
"password": "pwd"})
message = {"changes_applicable": True, "message": "Changes found to commit!"}
idrac_connection_configure_eventing_mock.config_mgr.is_change_applicable.return_value = message
@@ -106,7 +113,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
"email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
"enable_alerts": "Enabled", "authentication": "Enabled",
- "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
+ "smtp_ip_address": "XX.XX.XX.XX", "smtp_port": 443, "username": "uname",
"password": "pwd"})
message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True,
"Status": "Success"}
@@ -123,7 +130,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"destination": "1.1.1.1", "snmp_v3_username": "snmpuser",
"snmp_trap_state": "Enabled", "alert_number": 4, "email_alert_state": "Enabled",
"address": "abc@xyz", "custom_message": "test", "enable_alerts": "Enabled",
- "authentication": "Enabled", "smtp_ip_address": "192.168.0.1", "smtp_port": 443,
+ "authentication": "Enabled", "smtp_ip_address": "XX.XX.XX.XX", "smtp_port": 443,
"username": "uname", "password": "pwd"})
message = {"changes_applicable": False, "Message": "No changes found to commit!", "changed": False,
"Status": "Success"}
@@ -140,7 +147,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
"email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
"enable_alerts": "Enabled", "authentication": "Enabled",
- "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
+ "smtp_ip_address": "XX.XX.XX.XX", "smtp_port": 443, "username": "uname",
"password": "pwd"})
message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
"Status": "Success"}
@@ -180,7 +187,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
"email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
"enable_alerts": "Enabled", "authentication": "Enabled",
- "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
+ "smtp_ip_address": "XX.XX.XX.XX", "smtp_port": 443, "username": "uname",
"password": "pwd"})
message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}}
idrac_connection_configure_eventing_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
@@ -197,7 +204,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"snmp_v3_username": "snmpuser", "snmp_trap_state": "Enabled", "alert_number": 4,
"email_alert_state": "Enabled", "address": "abc@xyz", "custom_message": "test",
"enable_alerts": "Enabled", "authentication": "Enabled",
- "smtp_ip_address": "192.168.0.1", "smtp_port": 443, "username": "uname",
+ "smtp_ip_address": "XX.XX.XX.XX", "smtp_port": 443, "username": "uname",
"password": "pwd"})
message = {"changes_applicable": False, "Message": "No changes were applied", "changed": False,
"Status": "failed"}
@@ -214,7 +221,7 @@ class TestConfigureEventing(FakeAnsibleModule):
"destination": "1.1.1.1", "snmp_v3_username": "snmpuser",
"snmp_trap_state": "Enabled", "alert_number": 4, "email_alert_state": "Enabled",
"address": "abc@xyz", "custom_message": "test", "enable_alerts": "Enabled",
- "authentication": "Enabled", "smtp_ip_address": "192.168.0.1",
+ "authentication": "Enabled", "smtp_ip_address": "XX.XX.XX.XX",
"smtp_port": 443, "username": "uname", "password": "pwd"})
message = {'Status': 'Failed', "Data": {'Message': "Failed to found changes"}}
idrac_connection_configure_eventing_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
@@ -224,14 +231,98 @@ class TestConfigureEventing(FakeAnsibleModule):
self.module.run_idrac_eventing_config(idrac_connection_configure_eventing_mock, f_module)
assert ex.value.args[0] == 'Failed to found changes'
- @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
- def test_main_configure_eventing_exception_handling_case(self, exc_type, mocker, idrac_default_args,
- idrac_connection_configure_eventing_mock,
- idrac_file_manager_config_eventing_mock):
- idrac_default_args.update({"share_name": None, 'share_password': None,
- 'share_mnt': None, 'share_user': None})
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
- 'dellemc_configure_idrac_eventing.run_idrac_eventing_config', side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
+ @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError, HTTPError, URLError, SSLValidationError, ConnectionError])
+ def test_main_dellemc_configure_idrac_eventing_handling_case(self, exc_type, idrac_connection_configure_eventing_mock,
+ idrac_file_manager_config_eventing_mock, idrac_default_args,
+ is_changes_applicable_eventing_mock, mocker):
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_configure_idrac_eventing.run_idrac_eventing_config',
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_configure_idrac_eventing.run_idrac_eventing_config',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ if exc_type != URLError:
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ else:
+ result = self._run_module(idrac_default_args)
assert 'msg' in result
+
+ def test_run_run_idrac_eventing_config_invalid_share(self, idrac_connection_configure_eventing_mock,
+ idrac_file_manager_config_eventing_mock, idrac_default_args,
+ is_changes_applicable_eventing_mock, mocker):
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = False
+ mocker.patch(
+ MODULE_PATH + "dellemc_configure_idrac_eventing.file_share_manager.create_share_obj", return_value=(obj))
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_eventing_config(
+ idrac_connection_configure_eventing_mock, f_module)
+ assert exc.value.args[0] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
+
+ def test_run_idrac_eventing_config_Error(self, idrac_connection_configure_eventing_mock,
+ idrac_file_manager_config_eventing_mock, idrac_default_args,
+ is_changes_applicable_eventing_mock, mocker):
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = True
+ mocker.patch(
+ MODULE_PATH + "dellemc_configure_idrac_eventing.file_share_manager.create_share_obj", return_value=(obj))
+ message = {'Status': 'Failed', 'Message': 'Key Error Expected', "Data1": {
+ 'Message': 'Status failed in checking data'}}
+ idrac_connection_configure_eventing_mock.config_mgr.set_liason_share.return_value = message
+ idrac_connection_configure_eventing_mock.config_mgr.apply_changes.return_value = "Returned on Key Error"
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_eventing_config(
+ idrac_connection_configure_eventing_mock, f_module)
+ assert exc.value.args[0] == "Key Error Expected"
+
+ def test_dellemc_configure_idrac_eventing_main_cases(self, idrac_connection_configure_eventing_mock,
+ idrac_file_manager_config_eventing_mock, idrac_default_args,
+ is_changes_applicable_eventing_mock, mocker):
+ status_msg = {"Status": "Success", "Message": "No changes found"}
+ mocker.patch(MODULE_PATH +
+ 'dellemc_configure_idrac_eventing.run_idrac_eventing_config', return_value=status_msg)
+ result = self._run_module(idrac_default_args)
+ assert result['changed'] is True
+ assert result['msg'] == "Successfully configured the iDRAC eventing settings."
+ assert result['eventing_status'].get("Message") == "No changes found"
+
+ status_msg = {"Status": "Failed", "Message": "No changes found"}
+ mocker.patch(MODULE_PATH +
+ 'dellemc_configure_idrac_eventing.run_idrac_eventing_config', return_value=status_msg)
+ result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
+ assert result['msg'] == "Failed to configure the iDRAC eventing settings"
+
+ def test_run_idrac_eventing_config_main_cases(self, idrac_connection_configure_eventing_mock,
+ idrac_file_manager_config_eventing_mock, idrac_default_args,
+ is_changes_applicable_eventing_mock, mocker):
+ idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
+ "share_password": None, "destination_number": 1,
+ "destination": None, "snmp_v3_username": None,
+ "snmp_trap_state": None, "alert_number": 4, "email_alert_state": None,
+ "address": None, "custom_message": None, "enable_alerts": "Enabled",
+ "authentication": "Enabled", "smtp_ip_address": "XX.XX.XX.XX",
+ "smtp_port": 443, "username": "uname", "password": "pwd"})
+
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ obj = MagicMock()
+ obj.IsValid = True
+ mocker.patch(
+ MODULE_PATH + "dellemc_configure_idrac_eventing.file_share_manager.create_share_obj", return_value=(obj))
+ message = {'Status': 'Success', 'Message': 'Message Success', "Data": {
+ 'Message': 'Status failed in checking data'}}
+ idrac_connection_configure_eventing_mock.config_mgr.set_liason_share.return_value = message
+ idrac_connection_configure_eventing_mock.config_mgr.is_change_applicable.return_value = {
+ "changes_applicable": False}
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_eventing_config(
+ idrac_connection_configure_eventing_mock, f_module)
+ assert exc.value.args[0] == "No changes found to commit!"
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py
index 2606a0343..f2f40c390 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_configure_idrac_services.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,13 +14,20 @@ __metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_configure_idrac_services
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from mock import MagicMock, Mock
from pytest import importorskip
+from ansible.module_utils._text import to_text
+import json
+from io import StringIO
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
class TestConfigServices(FakeAnsibleModule):
module = dellemc_configure_idrac_services
@@ -242,13 +249,106 @@ class TestConfigServices(FakeAnsibleModule):
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
- @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
- def test_main_idrac_configure_services_exception_handling_case(self, exc_type, mocker, idrac_default_args,
- idrac_connection_configure_services_mock,
- idrac_file_manager_config_services_mock):
- idrac_default_args.update({"share_name": None})
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
- 'dellemc_configure_idrac_services.run_idrac_services_config', side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
+ @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError, HTTPError, URLError, SSLValidationError, ConnectionError])
+ def test_main_dellemc_configure_idrac_services_handling_case(self, exc_type, mocker, idrac_default_args, idrac_connection_configure_services_mock,
+ idrac_file_manager_config_services_mock):
+ idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
+ "share_password": None, "enable_web_server": "Enabled", "http_port": 443,
+ "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher",
+ "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled",
+ "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445,
+ "discovery_port": 1000, "trap_format": "SNMPv1"})
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_configure_idrac_services.run_idrac_services_config',
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_configure_idrac_services.run_idrac_services_config',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ if exc_type != URLError:
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ else:
+ result = self._run_module(idrac_default_args)
assert 'msg' in result
- assert result['failed'] is True
+
+ def test_run_idrac_services_config_invalid_share(self, mocker, idrac_default_args, idrac_connection_configure_services_mock,
+ idrac_file_manager_config_services_mock):
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = False
+ mocker.patch(
+ MODULE_PATH + "dellemc_configure_idrac_services.file_share_manager.create_share_obj", return_value=(obj))
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module)
+ assert exc.value.args[0] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
+
+ def test_run_idrac_services_config_Error(self, mocker, idrac_default_args, idrac_connection_configure_services_mock,
+ idrac_file_manager_config_services_mock):
+ idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
+ "share_password": None, "enable_web_server": "Enabled", "http_port": 443,
+ "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher",
+ "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled",
+ "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445,
+ "discovery_port": 1000, "trap_format": "SNMPv1"})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = True
+ mocker.patch(
+ MODULE_PATH + "dellemc_configure_idrac_services.file_share_manager.create_share_obj", return_value=(obj))
+ message = {'Status': 'Failed', 'Message': 'Key Error Expected', "Data1": {
+ 'Message': 'Status failed in checking data'}}
+ idrac_connection_configure_services_mock.config_mgr.set_liason_share.return_value = message
+ idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = "Returned on Key Error"
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module)
+ assert exc.value.args[0] == "Key Error Expected"
+
+ def test_run_idrac_services_config_extra_coverage(self, mocker, idrac_default_args, idrac_connection_configure_services_mock,
+ idrac_file_manager_config_services_mock):
+ idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
+ "share_password": None, "enable_web_server": "Enabled", "http_port": 443,
+ "https_port": 343, "timeout": 10, "ssl_encryption": "T_128_Bit_or_higher",
+ "tls_protocol": "TLS_1_1_and_Higher", "snmp_enable": "Enabled",
+ "community_name": "communityname", "snmp_protocol": "All", "alert_port": 445,
+ "discovery_port": 1000, "trap_format": "SNMPv1", "ipmi_lan": {}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = True
+ mocker.patch(
+ MODULE_PATH + "dellemc_configure_idrac_services.file_share_manager.create_share_obj", return_value=(obj))
+ message = {'Status': 'Success', "Data": {
+ 'Message': 'Status failed in checking data'}}
+ idrac_connection_configure_services_mock.config_mgr.set_liason_share.return_value = message
+ idrac_connection_configure_services_mock.config_mgr.apply_changes.return_value = "Returned on community name none"
+ ret_data = self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module)
+ assert ret_data == "Returned on community name none"
+
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ idrac_connection_configure_services_mock.config_mgr.is_change_applicable.return_value = {
+ 'changes_applicable': False}
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_services_config(idrac_connection_configure_services_mock, f_module)
+ assert exc.value.args[0] == "No changes found to commit!"
+
+ idrac_default_args.update({"ipmi_lan": None})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ ret_data = self.module.run_idrac_services_config(
+ idrac_connection_configure_services_mock, f_module)
+ assert ret_data == "Returned on community name none"
+
+ def test_run_idrac_services_config_success_case06(self, idrac_connection_configure_services_mock,
+ idrac_default_args, idrac_file_manager_config_services_mock, mocker):
+ status_msg = {"Status": "Success", "Message": "No changes found"}
+ mocker.patch(
+ MODULE_PATH + 'dellemc_configure_idrac_services.run_idrac_services_config', return_value=status_msg)
+ resp = self._run_module(idrac_default_args)
+ assert resp['changed'] is True
+ assert resp['msg'] == "Successfully configured the iDRAC services settings."
+ assert resp['service_status'].get('Status') == "Success"
+ assert resp['service_status'].get('Message') == "No changes found"
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py
deleted file mode 100644
index 657f89e49..000000000
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_firmware_inventory.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-import pytest
-from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_get_firmware_inventory
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, PropertyMock
-from pytest import importorskip
-
-importorskip("omsdk.sdkfile")
-importorskip("omsdk.sdkcreds")
-
-
-class TestFirmware(FakeAnsibleModule):
- module = dellemc_get_firmware_inventory
-
- @pytest.fixture
- def idrac_firmware_mock(self, mocker):
- omsdk_mock = MagicMock()
- idrac_obj = MagicMock()
- omsdk_mock.update_mgr = idrac_obj
- type(idrac_obj).InstalledFirmware = PropertyMock(return_value="msg")
- return idrac_obj
-
- @pytest.fixture
- def idrac_get_firmware_inventory_connection_mock(self, mocker, idrac_firmware_mock):
- idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
- 'dellemc_get_firmware_inventory.iDRACConnection',
- return_value=idrac_firmware_mock)
- idrac_conn_class_mock.return_value.__enter__.return_value = idrac_firmware_mock
- return idrac_firmware_mock
-
- def test_main_idrac_get_firmware_inventory_success_case01(self, idrac_get_firmware_inventory_connection_mock,
- idrac_default_args):
- idrac_get_firmware_inventory_connection_mock.update_mgr.InstalledFirmware.return_value = {"Status": "Success"}
- result = self._run_module(idrac_default_args)
- assert result == {'ansible_facts': {
- idrac_get_firmware_inventory_connection_mock.ipaddr: {
- 'Firmware Inventory': idrac_get_firmware_inventory_connection_mock.update_mgr.InstalledFirmware}},
- "changed": False}
-
- def test_run_get_firmware_inventory_success_case01(self, idrac_get_firmware_inventory_connection_mock,
- idrac_default_args):
- obj2 = MagicMock()
- idrac_get_firmware_inventory_connection_mock.update_mgr = obj2
- type(obj2).InstalledFirmware = PropertyMock(return_value="msg")
- f_module = self.get_module_mock(params=idrac_default_args)
- msg, err = self.module.run_get_firmware_inventory(idrac_get_firmware_inventory_connection_mock, f_module)
- assert msg == {'failed': False,
- 'msg': idrac_get_firmware_inventory_connection_mock.update_mgr.InstalledFirmware}
- assert msg['failed'] is False
- assert err is False
-
- def test_run_get_firmware_inventory_failed_case01(self, idrac_get_firmware_inventory_connection_mock,
- idrac_default_args):
- f_module = self.get_module_mock(params=idrac_default_args)
- error_msg = "Error in Runtime"
- obj2 = MagicMock()
- idrac_get_firmware_inventory_connection_mock.update_mgr = obj2
- type(obj2).InstalledFirmware = PropertyMock(side_effect=Exception(error_msg))
- msg, err = self.module.run_get_firmware_inventory(idrac_get_firmware_inventory_connection_mock, f_module)
- assert msg['failed'] is True
- assert msg['msg'] == "Error: {0}".format(error_msg)
- assert err is True
-
- def test_run_get_firmware_inventory_failed_case02(self, idrac_get_firmware_inventory_connection_mock,
- idrac_default_args):
- message = {'Status': "Failed", "Message": "Fetched..."}
- obj2 = MagicMock()
- idrac_get_firmware_inventory_connection_mock.update_mgr = obj2
- type(obj2).InstalledFirmware = PropertyMock(return_value=message)
- f_module = self.get_module_mock(params=idrac_default_args)
- result = self.module.run_get_firmware_inventory(idrac_get_firmware_inventory_connection_mock, f_module)
- assert result == ({'msg': {'Status': 'Failed', 'Message': 'Fetched...'}, 'failed': True}, False)
- if "Status" in result[0]['msg']:
- if not result[0]['msg']['Status'] == "Success":
- assert result[0]['failed'] is True
-
- def test_main_idrac_get_firmware_inventory_faild_case01(self, idrac_get_firmware_inventory_connection_mock,
- idrac_default_args):
- error_msg = "Error occurs"
- obj2 = MagicMock()
- idrac_get_firmware_inventory_connection_mock.update_mgr = obj2
- type(obj2).InstalledFirmware = PropertyMock(side_effect=Exception(error_msg))
- result = self._run_module_with_fail_json(idrac_default_args)
- assert result['failed'] is True
- assert result['msg'] == "Error: {0}".format(error_msg)
-
- @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
- def test_main_idrac_get_firmware_inventory_exception_handling_case(self, exc_type, mocker,
- idrac_get_firmware_inventory_connection_mock,
- idrac_default_args):
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_firmware_inventory.'
- 'run_get_firmware_inventory', side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
- assert 'msg' in result
- assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py
deleted file mode 100644
index c398c9f8a..000000000
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_get_system_inventory.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-
-#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-import pytest
-from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_get_system_inventory
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, Mock
-from pytest import importorskip
-
-importorskip("omsdk.sdkfile")
-importorskip("omsdk.sdkcreds")
-
-
-class TestSystemInventory(FakeAnsibleModule):
- module = dellemc_get_system_inventory
-
- @pytest.fixture
- def idrac_system_inventory_mock(self, mocker):
- omsdk_mock = MagicMock()
- idrac_obj = MagicMock()
- omsdk_mock.get_entityjson = idrac_obj
- type(idrac_obj).get_json_device = Mock(return_value="msg")
- return idrac_obj
-
- @pytest.fixture
- def idrac_get_system_inventory_connection_mock(self, mocker, idrac_system_inventory_mock):
- idrac_conn_class_mock = mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.'
- 'dellemc_get_system_inventory.iDRACConnection',
- return_value=idrac_system_inventory_mock)
- idrac_conn_class_mock.return_value.__enter__.return_value = idrac_system_inventory_mock
- return idrac_system_inventory_mock
-
- def test_main_idrac_get_system_inventory_success_case01(self, idrac_get_system_inventory_connection_mock, mocker,
- idrac_default_args):
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_system_inventory.run_get_system_inventory',
- return_value=({"msg": "Success"}, False))
- msg = self._run_module(idrac_default_args)
- assert msg['changed'] is False
- assert msg['ansible_facts'] == {idrac_get_system_inventory_connection_mock.ipaddr:
- {'SystemInventory': "Success"}}
-
- def test_run_get_system_inventory_error_case(self, idrac_get_system_inventory_connection_mock, idrac_default_args,
- mocker):
- f_module = self.get_module_mock()
- idrac_get_system_inventory_connection_mock.get_json_device = {"msg": "Success"}
- result, err = self.module.run_get_system_inventory(idrac_get_system_inventory_connection_mock, f_module)
- assert result["failed"] is True
- assert err is True
-
- def test_main_error_case(self, idrac_get_system_inventory_connection_mock, idrac_default_args, mocker):
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_system_inventory.run_get_system_inventory',
- return_value=({"msg": "Failed"}, True))
- result = self._run_module_with_fail_json(idrac_default_args)
- assert result['failed'] is True
-
- @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
- def test_main_exception_handling_case(self, exc_type, mocker, idrac_default_args,
- idrac_get_system_inventory_connection_mock):
-
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_get_system_inventory.run_get_system_inventory',
- side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
- assert 'msg' in result
- assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py
index 1ae8b22c0..c1c3fd5d2 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_lc_attributes.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,13 +14,20 @@ __metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_idrac_lc_attributes
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from mock import MagicMock, Mock
from pytest import importorskip
+from ansible.module_utils._text import to_text
+import json
+from io import StringIO
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
class TestLcAttributes(FakeAnsibleModule):
module = dellemc_idrac_lc_attributes
@@ -58,7 +65,8 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_default_args, mocker, idrac_file_manager_lc_attribute_mock):
idrac_default_args.update({"share_name": None, 'share_password': None,
'csior': 'Enabled', 'share_mnt': None, 'share_user': None})
- message = {'changed': False, 'msg': {'Status': "Success", "message": "No changes found to commit!"}}
+ message = {'changed': False, 'msg': {
+ 'Status': "Success", "message": "No changes found to commit!"}}
mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior',
return_value=message)
with pytest.raises(Exception) as ex:
@@ -69,7 +77,8 @@ class TestLcAttributes(FakeAnsibleModule):
return_value=status_msg)
result = self._run_module(idrac_default_args)
assert result["msg"] == "Successfully configured the iDRAC LC attributes."
- status_msg = {"Status": "Success", "Message": "No changes were applied"}
+ status_msg = {"Status": "Success",
+ "Message": "No changes were applied"}
mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior',
return_value=status_msg)
result = self._run_module(idrac_default_args)
@@ -79,17 +88,23 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_file_manager_lc_attribute_mock):
idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
"share_password": None, "csior": "csior"})
- message = {"changes_applicable": True, "message": "changes are applicable"}
+ message = {"changes_applicable": True,
+ "message": "changes are applicable"}
idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = message
- f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
with pytest.raises(Exception) as ex:
- self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert ex.value.args[0] == "Changes found to commit!"
- status_msg = {"changes_applicable": False, "message": "no changes are applicable"}
+ status_msg = {"changes_applicable": False,
+ "message": "no changes are applicable"}
idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = status_msg
- f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
with pytest.raises(Exception) as ex:
- self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert ex.value.args[0] == "No changes found to commit!"
def test_run_setup_idrac_csior_success_case02(self, idrac_connection_lc_attribute_mock, idrac_default_args,
@@ -101,7 +116,8 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
- msg = self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ msg = self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert msg == {'changes_applicable': True, 'message': 'changes found to commit!',
'changed': True, 'Status': 'Success'}
@@ -114,7 +130,8 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
- msg = self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ msg = self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert msg == {'changes_applicable': True, 'Message': 'No changes found to commit!',
'changed': False, 'Status': 'Success'}
@@ -127,9 +144,11 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_connection_lc_attribute_mock.config_mgr = obj
type(obj).disable_csior = Mock(return_value=message)
idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = message
- f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
with pytest.raises(Exception) as ex:
- self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert ex.value.args[0] == "Changes found to commit!"
def test_run_setup_csior_enable_case(self, idrac_connection_lc_attribute_mock, idrac_default_args,
@@ -141,21 +160,25 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_connection_lc_attribute_mock.config_mgr = obj
type(obj).enable_csior = Mock(return_value='Enabled')
idrac_connection_lc_attribute_mock.config_mgr.is_change_applicable.return_value = message
- f_module = self.get_module_mock(params=idrac_default_args, check_mode=True)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
with pytest.raises(Exception) as ex:
- self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert ex.value.args[0] == "Changes found to commit!"
def test_run_setup_csior_failed_case01(self, idrac_connection_lc_attribute_mock, idrac_default_args,
idrac_file_manager_lc_attribute_mock):
idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
"share_password": None, "csior": "csior"})
- message = {'Status': 'Failed', "Data": {'Message': 'status failed in checking Data'}}
+ message = {'Status': 'Failed', "Data": {
+ 'Message': 'status failed in checking Data'}}
idrac_connection_lc_attribute_mock.file_share_manager.create_share_obj.return_value = "mnt/iso"
idrac_connection_lc_attribute_mock.config_mgr.set_liason_share.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
with pytest.raises(Exception) as ex:
- self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert ex.value.args[0] == "status failed in checking Data"
def test_run_setup_idrac_csior_failed_case03(self, idrac_connection_lc_attribute_mock, idrac_default_args,
@@ -167,19 +190,64 @@ class TestLcAttributes(FakeAnsibleModule):
idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = message
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = False
- msg = self.module.run_setup_idrac_csior(idrac_connection_lc_attribute_mock, f_module)
+ msg = self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
assert msg == {'changes_applicable': False, 'Message': 'Failed to found changes',
'changed': False, 'Status': 'Failed', "failed": True}
assert msg['changed'] is False
assert msg['failed'] is True
- @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
+ @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError, HTTPError, URLError, SSLValidationError, ConnectionError])
def test_main_lc_attribute_exception_handling_case(self, exc_type, mocker, idrac_connection_lc_attribute_mock,
idrac_default_args, idrac_file_manager_lc_attribute_mock):
idrac_default_args.update({"share_name": None, 'share_password': None,
'csior': 'Enabled', 'share_mnt': None, 'share_user': None})
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_idrac_lc_attributes.run_setup_idrac_csior',
- side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_idrac_lc_attributes.run_setup_idrac_csior',
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_idrac_lc_attributes.run_setup_idrac_csior',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ if exc_type != URLError:
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ else:
+ result = self._run_module(idrac_default_args)
assert 'msg' in result
- assert result['failed'] is True
+
+ def test_run_setup_idrac_csior_invalid_share(self, idrac_connection_lc_attribute_mock, idrac_default_args,
+ idrac_file_manager_lc_attribute_mock, mocker):
+ idrac_default_args.update({"share_name": None, 'share_password': None,
+ 'csior': 'Enabled', 'share_mnt': None, 'share_user': None})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = False
+ mocker.patch(
+ MODULE_PATH + "dellemc_idrac_lc_attributes.file_share_manager.create_share_obj", return_value=(obj))
+ with pytest.raises(Exception) as exc:
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
+ assert exc.value.args[0] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
+
+ @pytest.mark.parametrize("exc_type", [KeyError])
+ def test_run_setup_idrac_csior_Error(self, exc_type, idrac_connection_lc_attribute_mock, idrac_default_args,
+ idrac_file_manager_lc_attribute_mock, mocker):
+ idrac_default_args.update({"share_name": None, 'share_password': None,
+ 'csior': 'Enabled', 'share_mnt': None, 'share_user': None})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = True
+ mocker.patch(
+ MODULE_PATH + "dellemc_idrac_lc_attributes.file_share_manager.create_share_obj", return_value=(obj))
+ message = {'Status': 'Failed', 'Message': 'Key Error Expected', "Data1": {
+ 'Message': 'Status failed in checking data'}}
+ idrac_connection_lc_attribute_mock.config_mgr.set_liason_share.return_value = message
+ idrac_connection_lc_attribute_mock.config_mgr.apply_changes.return_value = "Returned on Key Error"
+ with pytest.raises(Exception) as exc:
+ self.module.run_setup_idrac_csior(
+ idrac_connection_lc_attribute_mock, f_module)
+ assert exc.value.args[0] == "Key Error Expected"
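The expanded exception-handling test above builds a real urllib HTTPError, complete with a JSON body, so the module's error path can read a response payload the same way it would from a live Redfish endpoint. A short sketch of that construction (host name and payload are placeholders):

import json
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError

json_str = to_text(json.dumps({"data": "out"}))
# HTTPError(url, code, msg, hdrs, fp): the StringIO body is what the module
# parses when it formats the failure message.
http_err = HTTPError('https://testhost.com', 400, 'http error message',
                     {"accept-type": "application/json"}, StringIO(json_str))

URLError is handled separately in these tests: the module appears to report it as a non-failing message, so the test calls _run_module() instead of _run_module_with_fail_json() and only checks that 'msg' is present in the result.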
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py
index c3a0dff19..c95fccf01 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_idrac_storage_volume.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,8 +15,8 @@ __metaclass__ = type
import pytest
import os
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_idrac_storage_volume
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock, Mock
from pytest import importorskip
importorskip("omsdk.sdkfile")
@@ -221,7 +221,7 @@ class TestStorageVolume(FakeAnsibleModule):
mocker):
idrac_default_args.update({"share_name": "sharename", "state": "create", "controller_id": "XYZ123",
"capacity": 1.4, "stripe_size": 1, "volumes": [{"drives": {"id": ["data"],
- "location":[1]}}]})
+ "location": [1]}}]})
with pytest.raises(ValueError) as ex:
self.module._validate_options(idrac_default_args)
assert "Either {0} or {1} is allowed".format("id", "location") == str(ex.value)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py
index 768c62bfc..5ee3c9201 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_dellemc_system_lockdown_mode.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,13 +14,20 @@ __metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import dellemc_system_lockdown_mode
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from mock import MagicMock
from pytest import importorskip
+from ansible.module_utils._text import to_text
+import json
+from io import StringIO
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
class TestSysytemLockdownMode(FakeAnsibleModule):
module = dellemc_system_lockdown_mode
@@ -77,17 +84,27 @@ class TestSysytemLockdownMode(FakeAnsibleModule):
self._run_module_with_fail_json(idrac_default_args)
assert ex.value.args[0]['msg'] == "Failed to complete the lockdown mode operations."
- @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError])
+ @pytest.mark.parametrize("exc_type", [ImportError, ValueError, RuntimeError, HTTPError, URLError, SSLValidationError, ConnectionError])
def test_main_exception_handling_case(self, exc_type, mocker, idrac_connection_system_lockdown_mode_mock,
idrac_file_manager_system_lockdown_mock, idrac_default_args):
idrac_default_args.update({"share_name": None, "share_password": None,
"lockdown_mode": "Enabled"})
- idrac_connection_system_lockdown_mode_mock.config_mgr.set_liason_share.return_value = {"Status": "Failed"}
- mocker.patch('ansible_collections.dellemc.openmanage.plugins.modules.dellemc_system_lockdown_mode.run_system_lockdown_mode',
- side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_system_lockdown_mode.run_system_lockdown_mode',
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH +
+ 'dellemc_system_lockdown_mode.run_system_lockdown_mode',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ if exc_type != URLError:
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ else:
+ result = self._run_module(idrac_default_args)
assert 'msg' in result
- assert result['failed'] is True
def test_run_system_lockdown_mode_success_case01(self, idrac_connection_system_lockdown_mode_mock, mocker,
idrac_file_manager_system_lockdown_mock, idrac_default_args):
@@ -124,3 +141,31 @@ class TestSysytemLockdownMode(FakeAnsibleModule):
with pytest.raises(Exception) as ex:
self.module.run_system_lockdown_mode(idrac_connection_system_lockdown_mode_mock, f_module)
assert ex.value.args[0] == "message inside data"
+
+ def test_run_system_lockdown_mode_invalid_share(self, idrac_connection_system_lockdown_mode_mock, mocker,
+ idrac_file_manager_system_lockdown_mock, idrac_default_args):
+ idrac_default_args.update({"share_name": None, "share_password": None,
+ "lockdown_mode": "EnabledDisabled", "share_mnt": None, "share_user": None})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ obj = MagicMock()
+ obj.IsValid = False
+
+ mocker.patch(
+ MODULE_PATH + "dellemc_system_lockdown_mode.tempfile.gettempdir", return_value=(obj))
+ message = {"Message": "message inside data"}
+ idrac_connection_system_lockdown_mode_mock.config_mgr.disable_system_lockdown.return_value = message
+ msg = self.module.run_system_lockdown_mode(idrac_connection_system_lockdown_mode_mock, f_module)
+ assert msg == {'changed': False, 'failed': False, 'msg': "Successfully completed the lockdown mode operations."}
+
+ idrac_default_args.update({"lockdown_mode": "Disabled"})
+ message = {"Message": "message inside data"}
+ idrac_connection_system_lockdown_mode_mock.config_mgr.disable_system_lockdown.return_value = message
+ msg = self.module.run_system_lockdown_mode(idrac_connection_system_lockdown_mode_mock, f_module)
+ assert msg['system_lockdown_status']['Message'] == "message inside data"
+
+ mocker.patch(
+ MODULE_PATH + "dellemc_system_lockdown_mode.file_share_manager.create_share_obj", return_value=(obj))
+ with pytest.raises(Exception) as exc:
+ self.module.run_system_lockdown_mode(
+ idrac_connection_system_lockdown_mode_mock, f_module)
+ assert exc.value.args[0] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
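The invalid-share cases added above follow one recipe: a MagicMock with IsValid set to False stands in for a network share the SDK could not validate, and the module is expected to abort with the share-access error asserted in the test. A sketch of that recipe as a standalone test body (the test name and the reliance on the mocker fixture are assumptions):

from unittest.mock import MagicMock

MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'

def test_invalid_share_sketch(mocker):
    share_obj = MagicMock()
    share_obj.IsValid = False  # simulate a share that failed validation
    mocker.patch(MODULE_PATH + 'dellemc_system_lockdown_mode.file_share_manager.create_share_obj',
                 return_value=share_obj)
    # The module function under test is then expected to raise with:
    # "Unable to access the share. Ensure that the share name, share mount,
    # and share credentials provided are correct."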
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py
index d5c225230..42bb58f62 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_attributes.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -13,14 +13,11 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
-import os
-import tempfile
from io import StringIO
import pytest
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_attributes
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock
@@ -35,31 +32,20 @@ IDRAC_URI = "/redfish/v1/Managers/{res_id}/Oem/Dell/DellAttributes/{attr_id}"
MANAGERS_URI = "/redfish/v1/Managers"
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_attributes.'
UTILS_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.utils.'
+SNMP_ADDRESS = "SNMP.1.IPAddress"
@pytest.fixture
-def idrac_redfish_mock_for_attr(mocker, ome_response_mock):
+def idrac_redfish_mock_for_attr(mocker, redfish_response_mock):
connection_class_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI')
- ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
- ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
- return ome_connection_mock_obj
+ idrac_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ idrac_connection_mock_obj.invoke_request.return_value = redfish_response_mock
+ return idrac_connection_mock_obj
class TestIdracAttributes(FakeAnsibleModule):
module = idrac_attributes
- @pytest.fixture
- def idrac_attributes_mock(self):
- idrac_obj = MagicMock()
- return idrac_obj
-
- @pytest.fixture
- def idrac_connection_attributes_mock(self, mocker, idrac_attributes_mock):
- idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
- return_value=idrac_attributes_mock)
- idrac_conn_mock.return_value.__enter__.return_value = idrac_attributes_mock
- return idrac_conn_mock
-
@pytest.mark.parametrize("params", [{"id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'},
"uri_dict":
{"iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1",
@@ -72,55 +58,6 @@ class TestIdracAttributes(FakeAnsibleModule):
diff, response_attr = self.module.get_response_attr(idrac_redfish_mock_for_attr, params["id"], params["attr"], params["uri_dict"])
assert response_attr.keys() == params["response_attr"].keys()
- @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'public'},
- "uri_dict": {
- "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1",
- "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1",
- "LifecycleController.Embedded.1":
- "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"},
- "response_attr": {"SNMP.1.AgentCommunity": "public"},
- "mparams": {'idrac_attributes': {"SNMP.1.AgentCommunity": "public"}
- }
- }])
- def _test_fetch_idrac_uri_attr(self, params, idrac_redfish_mock_for_attr, idrac_default_args):
- idrac_default_args.update(params.get('mparams'))
- f_module = self.get_module_mock(params=idrac_default_args)
- diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr =\
- self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, params["res_id"])
- assert idrac_response_attr.keys() == params["response_attr"].keys()
-
- @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'},
- "uri_dict": {
- "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1",
- "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1",
- "LifecycleController.Embedded.1":
- "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"},
- "response_attr": {"ThermalSettings.1.ThermalProfile": "Sound Cap"},
- "mparams": {'system_attributes': {"ThermalSettings.1.ThermalProfile": "Sound Cap"}
- }}])
- def _test_fetch_idrac_uri_attr_succes_case01(self, params, idrac_redfish_mock_for_attr, idrac_default_args):
- idrac_default_args.update(params.get('mparams'))
- f_module = self.get_module_mock(params=idrac_default_args)
- diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = self.module.fetch_idrac_uri_attr(
- idrac_redfish_mock_for_attr, f_module, params["res_id"])
- assert system_response_attr.keys() == params["response_attr"].keys()
-
- @pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'},
- "uri_dict": {
- "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1",
- "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1",
- "LifecycleController.Embedded.1":
- "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"},
- "response_attr": {"LCAttributes.1.AutoUpdate": "Enabled"},
- "mparams": {'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"}
- }}])
- def _test_fetch_idrac_uri_attr_succes_case02(self, params, idrac_redfish_mock_for_attr, idrac_default_args):
- idrac_default_args.update(params.get('mparams'))
- f_module = self.get_module_mock(params=idrac_default_args)
- diff, uri_dict, idrac_response_attr, system_response_attr, lc_response_attr = self.module.fetch_idrac_uri_attr(
- idrac_redfish_mock_for_attr, f_module, params["res_id"])
- assert lc_response_attr.keys() == params["response_attr"].keys()
-
@pytest.mark.parametrize("params", [{"res_id": "iDRAC.Embedded.1", "attr": {'SNMP.1.AgentCommunity': 'Disabled'},
"uri_dict": {
"iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1",
@@ -257,51 +194,217 @@ class TestIdracAttributes(FakeAnsibleModule):
params["lc_response_attr"])
assert resp.keys() == params["resp"].keys()
- @pytest.mark.parametrize("params",
- [{"json_data": {},
- "diff": 1,
- "uri_dict": {
- "iDRAC.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/iDRAC.Embedded.1",
- "System.Embedded.1": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/System.Embedded.1",
- "LifecycleController.Embedded.1":
- "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DellAttributes/LifecycleController.Embedded.1"},
- "system_response_attr": {"ThermalSettings.1.ThermalProfile": "Sound Cap"},
- "mparams": {'system_attributes': {"ThermalSettings.1.ThermalProfile": "Sound Cap"}},
- "idrac_response_attr": {},
- "lc_response_attr": {},
- "message": "Successfully updated the attributes."
- }])
- def _test_idrac_attributes(self, params, idrac_connection_attributes_mock, idrac_default_args, mocker):
- idrac_connection_attributes_mock.success = params.get("success", True)
- idrac_connection_attributes_mock.json_data = params.get('json_data')
- idrac_default_args.update(params.get('mparams'))
- f_module = self.get_module_mock(params=idrac_default_args)
- mocker.patch(UTILS_PATH + 'get_manager_res_id', return_value=MANAGER_ID)
- mocker.patch(MODULE_PATH + 'fetch_idrac_uri_attr', return_value=(params["diff"],
- params["uri_dict"],
- params["idrac_response_attr"],
- params["system_response_attr"],
- params["lc_response_attr"]))
- mocker.patch(MODULE_PATH + 'update_idrac_attributes', return_value=params["resp"])
- result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False))
- assert result['msg'] == params['message']
-
- @pytest.mark.parametrize("exc_type", [HTTPError, URLError])
- def _test_main_idrac_attributes_exception_handling_case(self, exc_type, idrac_connection_attributes_mock, idrac_default_args, mocker):
+ @pytest.mark.parametrize("exc_type", [HTTPError, URLError, IOError, ValueError, TypeError, ConnectionError,
+ AttributeError, IndexError, KeyError])
+ def test_main_idrac_attributes_exception_handling_case(self, exc_type, idrac_redfish_mock_for_attr,
+ idrac_default_args, mocker):
idrac_default_args.update({'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"}})
json_str = to_text(json.dumps({"data": "out"}))
if exc_type not in [HTTPError]:
- mocker.patch(
- MODULE_PATH + 'update_idrac_attributes',
- side_effect=exc_type('test'))
+ mocker.patch(MODULE_PATH + 'update_idrac_attributes', side_effect=exc_type('test'))
else:
- mocker.patch(
- MODULE_PATH + 'update_idrac_attributes',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
- {"accept-type": "application/json"}, StringIO(json_str)))
- if not exc_type == URLError:
- result = self._run_module_with_fail_json(idrac_default_args)
+ mocker.patch(MODULE_PATH + 'update_idrac_attributes',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ if exc_type != URLError:
+ result = self._run_module(idrac_default_args)
assert result['failed'] is True
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
+
+ def test_xml_data_conversion(self, idrac_redfish_mock_for_attr, idrac_default_args):
+ attribute = {"Time.1.Timezone": "CST6CDT", "SNMP.1.SNMPProtocol": "All",
+ "LCAttributes.1.AutoUpdate": "Disabled"}
+ result = self.module.xml_data_conversion(attribute, "System.Embedded.1")
+ assert isinstance(result[0], str)
+ assert isinstance(result[1], dict)
+
+ def test_validate_attr_name(self, idrac_redfish_mock_for_attr, idrac_default_args):
+ attribute = [{"Name": "Time.1.Timezone", "Value": "CST6CDT"}, {"Name": "SNMP.1.SNMPProtocol", "Value": "All"},
+ {"Name": "LCAttributes.1.AutoUpdate", "Value": "Disabled"}]
+ req_data = {"Time.1.Timezone": "CST6CDT", "SNMP.1.SNMPProtocol": "All",
+ "LCAttributes.1.AutoUpdate": "Disabled"}
+ result = self.module.validate_attr_name(attribute, req_data)
+ assert result[0] == {'Time.1.Timezone': 'CST6CDT', 'SNMP.1.SNMPProtocol': 'All',
+ 'LCAttributes.1.AutoUpdate': 'Disabled'}
+ assert result[1] == {}
+ req_data = {"Time.2.Timezone": "CST6CDT", "SNMP.2.SNMPProtocol": "All"}
+ result = self.module.validate_attr_name(attribute, req_data)
+ assert result[0] == {}
+ assert result[1] == {'Time.2.Timezone': 'Attribute does not exist.',
+ 'SNMP.2.SNMPProtocol': 'Attribute does not exist.'}
+
+ def test_process_check_mode(self, idrac_redfish_mock_for_attr, idrac_default_args):
+ idrac_default_args.update({'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ with pytest.raises(Exception) as exc:
+ self.module.process_check_mode(f_module, False)
+ assert exc.value.args[0] == "No changes found to be applied."
+ f_module.check_mode = True
+ with pytest.raises(Exception) as exc:
+ self.module.process_check_mode(f_module, True)
+ assert exc.value.args[0] == "Changes found to be applied."
+
+ def test_scp_idrac_attributes(self, idrac_redfish_mock_for_attr, redfish_response_mock, idrac_default_args, mocker):
+ idrac_default_args.update({'lifecycle_controller_attributes': {"LCAttributes.1.AutoUpdate": "Enabled"}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + 'get_check_mode', return_value=None)
+ mocker.patch(MODULE_PATH + 'xml_data_conversion', return_value=("<components></components>",
+ {"LCAttributes.1.AutoUpdate": "Enabled"}))
+ idrac_redfish_mock_for_attr.wait_for_job_completion.return_value = {"JobStatus": "Success"}
+ result = self.module.scp_idrac_attributes(f_module, idrac_redfish_mock_for_attr, "LC.Embedded.1")
+ assert result["JobStatus"] == "Success"
+ idrac_default_args.update({'idrac_attributes': {"User.1.UserName": "username"}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + 'xml_data_conversion', return_value=("<components></components>",
+ {"User.1.UserName": "username"}))
+ idrac_redfish_mock_for_attr.wait_for_job_completion.return_value = {"JobStatus": "Success"}
+ result = self.module.scp_idrac_attributes(f_module, idrac_redfish_mock_for_attr, MANAGER_ID)
+ assert result["JobStatus"] == "Success"
+ idrac_default_args.update({'system_attributes': {SNMP_ADDRESS: "XX.XX.XX.XX"}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + 'xml_data_conversion', return_value=("<components></components>",
+ {SNMP_ADDRESS: "XX.XX.XX.XX"}))
+ idrac_redfish_mock_for_attr.wait_for_job_completion.return_value = {"JobStatus": "Success"}
+ result = self.module.scp_idrac_attributes(f_module, idrac_redfish_mock_for_attr, "System.Embedded.1")
+ assert result["JobStatus"] == "Success"
+
+ def test_get_check_mode(self, idrac_redfish_mock_for_attr, redfish_response_mock, idrac_default_args, mocker):
+ idrac_json = {SNMP_ADDRESS: "XX.XX.XX.XX"}
+ idrac_default_args.update({'idrac_attributes': idrac_json})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ response_obj = MagicMock()
+ idrac_redfish_mock_for_attr.export_scp.return_value = response_obj
+ response_obj.json_data = {
+ "SystemConfiguration": {"Components": [
+ {"FQDD": MANAGER_ID, "Attributes": {"Name": SNMP_ADDRESS, "Value": "XX.XX.XX.XX"}}
+ ]}}
+ mocker.patch(MODULE_PATH + 'validate_attr_name', return_value=(
+ idrac_json, {"SNMP.10.IPAddress": "Attribute does not exists."}))
+ with pytest.raises(Exception) as exc:
+ self.module.get_check_mode(f_module, idrac_redfish_mock_for_attr, idrac_json, {}, {})
+ assert exc.value.args[0] == "Attributes have invalid values."
+ system_json = {"System.1.Attr": "Value"}
+ idrac_default_args.update({'system_attributes': system_json})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ response_obj.json_data = {
+ "SystemConfiguration": {"Components": [
+ {"FQDD": "System.Embedded.1", "Attributes": {"Name": "System.1.Attr", "Value": "Value"}}
+ ]}}
+ mocker.patch(MODULE_PATH + 'validate_attr_name', return_value=(
+ system_json, {"System.10.Attr": "Attribute does not exists."}))
+ with pytest.raises(Exception) as exc:
+ self.module.get_check_mode(f_module, idrac_redfish_mock_for_attr, {}, system_json, {})
+ assert exc.value.args[0] == "Attributes have invalid values."
+ lc_json = {"LCAttributes.1.AutoUpdate": "Enabled"}
+ idrac_default_args.update({'lifecycle_controller_attributes': lc_json})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ response_obj.json_data = {
+ "SystemConfiguration": {"Components": [
+ {"FQDD": "LifecycleController.Embedded.1", "Attributes": {"Name": "LCAttributes.1.AutoUpdate",
+ "Value": "Enabled"}}
+ ]}}
+ mocker.patch(MODULE_PATH + 'validate_attr_name', return_value=(
+ lc_json, {"LCAttributes.10.AutoUpdate": "Attribute does not exists."}))
+ with pytest.raises(Exception) as exc:
+ self.module.get_check_mode(f_module, idrac_redfish_mock_for_attr, {}, {}, lc_json)
+ assert exc.value.args[0] == "Attributes have invalid values."
+ lc_json = {"LCAttributes.1.AutoUpdate": "Enabled"}
+ idrac_default_args.update({'lifecycle_controller_attributes': lc_json})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ f_module.check_mode = True
+ mocker.patch(MODULE_PATH + 'validate_attr_name', return_value=(lc_json, None))
+ with pytest.raises(Exception) as exc:
+ self.module.get_check_mode(f_module, idrac_redfish_mock_for_attr, {}, {}, lc_json)
+ assert exc.value.args[0] == "No changes found to be applied."
+ mocker.patch(MODULE_PATH + 'validate_attr_name', return_value=({"LCAttributes.1.AutoUpdate": "Disabled"}, None))
+ with pytest.raises(Exception) as exc:
+ self.module.get_check_mode(f_module, idrac_redfish_mock_for_attr, {}, {}, lc_json)
+ assert exc.value.args[0] == "Changes found to be applied."
+
+ def test_fetch_idrac_uri_attr(self, idrac_redfish_mock_for_attr, redfish_response_mock, idrac_default_args, mocker):
+ idrac_json = {SNMP_ADDRESS: "XX.XX.XX.XX"}
+ idrac_default_args.update({'idrac_attributes': idrac_json})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ response_obj = MagicMock()
+ idrac_redfish_mock_for_attr.invoke_request.return_value = response_obj
+ response_obj.json_data = {"Links": {"Oem": {"Dell": {"DellAttributes": {}}}},
+ "Message": "None", "MessageId": "SYS069"}
+ response_obj.status_code = 200
+ mocker.patch(MODULE_PATH + "scp_idrac_attributes", return_value=response_obj)
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, MANAGER_ID)
+ assert exc.value.args[0] == "No changes found to be applied."
+ response_obj.json_data = {"Links": {"Oem": {"Dell": {"DellAttributes": {}}}},
+ "Message": "None", "MessageId": "SYS053"}
+ mocker.patch(MODULE_PATH + "scp_idrac_attributes", return_value=response_obj)
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, MANAGER_ID)
+ assert exc.value.args[0] == "Successfully updated the attributes."
+ response_obj.json_data = {"Links": {"Oem": {"Dell": {"DellAttributes": {}}}},
+ "Message": "Unable to complete application of configuration profile values.",
+ "MessageId": "SYS080"}
+ mocker.patch(MODULE_PATH + "scp_idrac_attributes", return_value=response_obj)
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, MANAGER_ID)
+ assert exc.value.args[0] == "Application of some of the attributes failed due to invalid value or enumeration."
+
+ response_obj.json_data = {"Links": {"Oem": {"Dell": {"DellAttributes": {}}}},
+ "Message": "Unable to complete the task.", "MessageId": "SYS080"}
+ mocker.patch(MODULE_PATH + "scp_idrac_attributes", return_value=response_obj)
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, MANAGER_ID)
+ assert exc.value.args[0] == "Unable to complete the task."
+
+ def test_main_success(self, idrac_redfish_mock_for_attr, redfish_response_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"resource_id": "System.Embedded.1", "idrac_attributes": {"Attr": "Value"}})
+ mocker.patch(MODULE_PATH + "fetch_idrac_uri_attr", return_value=(None, None, None, None, None))
+ mocker.patch(MODULE_PATH + "process_check_mode", return_value=None)
+ mocker.patch(MODULE_PATH + "update_idrac_attributes", return_value=None)
+ result = self._run_module(idrac_default_args)
+ assert result["changed"]
+ assert result["msg"] == "Successfully updated the attributes."
+
+ def test_validate_vs_registry(self, idrac_redfish_mock_for_attr, redfish_response_mock, idrac_default_args):
+ idrac_default_args.update({"resource_id": "System.Embedded.1", "idrac_attributes": {"Attr": "Value"}})
+ attr_dict = {"attr": "value", "attr1": "value1", "attr2": 3}
+ registry = {"attr": {"Readonly": True},
+ "attr1": {"Type": "Enumeration", "Value": [{"ValueDisplayName": "Attr"}]},
+ "attr2": {"Type": "Integer", "LowerBound": 1, "UpperBound": 2}}
+ result = self.module.validate_vs_registry(registry, attr_dict)
+ assert result["attr"] == "Read only Attribute cannot be modified."
+ assert result["attr1"] == "Invalid value for Enumeration."
+ assert result["attr2"] == "Integer out of valid range."
+
+ def test_fetch_idrac_uri_attr_dell_attr(self, idrac_redfish_mock_for_attr, redfish_response_mock,
+ idrac_default_args, mocker):
+ idrac_default_args.update({"resource_id": "System.Embedded.1", "idrac_attributes": {"Attr": "Value"}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + "get_response_attr", return_value=(1, None))
+ mocker.patch(MODULE_PATH + "validate_vs_registry", return_value={"Attr": "Attribute does not exists"})
+ response_obj = MagicMock()
+ idrac_redfish_mock_for_attr.invoke_request.return_value = response_obj
+ response_obj.json_data = {"Links": {"Oem": {"Dell": {
+ "DellAttributes": [
+ {"@odata.id": "/api/services/"}
+ ]
+ }}}}
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, "System.Embedded.1")
+ assert exc.value.args[0] == "Attributes have invalid values."
+
+ idrac_default_args.update({"resource_id": "System.Embedded.1", "system_attributes": {"Attr": "Value"}})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + "get_response_attr", return_value=(1, None))
+ mocker.patch(MODULE_PATH + "validate_vs_registry", return_value={"Attr": "Attribute does not exists"})
+ response_obj = MagicMock()
+ idrac_redfish_mock_for_attr.invoke_request.return_value = response_obj
+ response_obj.json_data = {"Links": {"Oem": {"Dell": {
+ "DellAttributes": [
+ {"@odata.id": "/api/services/"}
+ ]
+ }}}}
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_idrac_uri_attr(idrac_redfish_mock_for_attr, f_module, "System.Embedded.1")
+ assert exc.value.args[0] == "Attributes have invalid values."
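The renamed fixture at the top of this file shows the Redfish-flavoured variant of the same mocking idea: iDRACRedfishAPI is patched so every invoke_request() call returns a canned response object. A small self-contained sketch (fixture and variable names are illustrative):

import pytest
from unittest.mock import MagicMock

ATTR_MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_attributes.'

@pytest.fixture
def idrac_redfish_mock(mocker):
    response = MagicMock()   # canned Redfish response; tests set .json_data / .status_code
    conn_class = mocker.patch(ATTR_MODULE_PATH + 'iDRACRedfishAPI')
    conn = conn_class.return_value.__enter__.return_value
    conn.invoke_request.return_value = response
    return conn

Individual tests then shape response.json_data (for example the SystemConfiguration export consumed by get_check_mode) to steer the module down the branch being verified.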
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py
index 3ea74c90a..edbb5b4ea 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_bios.py
@@ -402,7 +402,7 @@ class TestConfigBios(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'run_server_bios_config',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
@@ -585,3 +585,10 @@ class TestConfigBios(FakeAnsibleModule):
]
result = self.module.check_params(params.get('each'), fields)
assert result == params.get('message')
+
+ def test_validate_negative_job_time_out(self, idrac_default_args):
+ idrac_default_args.update({"job_wait": True, "job_wait_timeout": -5})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ with pytest.raises(Exception) as ex:
+ self.module.validate_negative_job_time_out(f_module)
+ assert ex.value.args[0] == "The parameter job_wait_timeout value cannot be negative or zero."
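The new timeout test asserts the message produced when job_wait_timeout is zero or negative. The guard it exercises presumably looks something like the following; this is a sketch of the expected behaviour, not the module's actual implementation:

def validate_negative_job_time_out(module):
    # Abort early when job_wait is requested with a non-positive timeout.
    if module.params.get('job_wait') and module.params.get('job_wait_timeout') <= 0:
        module.fail_json(msg='The parameter job_wait_timeout value cannot be negative or zero.')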
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py
index 2e754888f..d5f43360f 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_boot.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2022-23 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -15,13 +15,12 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_boot
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
-from mock import PropertyMock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from mock import MagicMock
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
@@ -47,9 +46,14 @@ class TestConfigBios(FakeAnsibleModule):
"BootSourceOverrideEnabled": "Disabled", "BootSourceOverrideMode": "Legacy",
"BootSourceOverrideTarget": "None", "UefiTargetBootSourceOverride": None,
"BootSourceOverrideTarget@Redfish.AllowableValues": []},
- "Actions": {"#ComputerSystem.Reset": {"ResetType@Redfish.AllowableValues": ["GracefulShutdown"]}}}
+ "Actions": {"#ComputerSystem.Reset": {"ResetType@Redfish.AllowableValues": ["ForceRestart"]}}}
result = self.module.get_response_attributes(f_module, boot_connection_mock, "System.Embedded.1")
assert result["BootSourceOverrideEnabled"] == "Disabled"
+
+ redfish_response_mock.json_data.pop("Actions")
+ result = self.module.get_response_attributes(f_module, boot_connection_mock, "System.Embedded.1")
+ assert result["BootSourceOverrideEnabled"] == "Disabled"
+
redfish_response_mock.json_data["Boot"].pop("BootOptions", None)
with pytest.raises(Exception) as err:
self.module.get_response_attributes(f_module, boot_connection_mock, "System.Embedded.1")
@@ -74,7 +78,7 @@ class TestConfigBios(FakeAnsibleModule):
def test_system_reset(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
mocker.patch(MODULE_PATH + 'idrac_boot.idrac_system_reset', return_value=(True, False, "Completed", {}))
- idrac_default_args.update({"boot_source_override_mode": "uefi", "reset_type": "graceful_restart"})
+ idrac_default_args.update({"boot_source_override_mode": "uefi", "reset_type": "force_restart"})
f_module = self.get_module_mock(params=idrac_default_args)
reset, track_failed, reset_msg, resp_data = self.module.system_reset(f_module, boot_connection_mock,
"System.Embedded.1")
@@ -90,9 +94,30 @@ class TestConfigBios(FakeAnsibleModule):
status, job = self.module.get_scheduled_job(boot_connection_mock)
assert status is True
+ def test_get_scheduled_job_job_state_not_none(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
+ mocker.patch(MODULE_PATH + 'idrac_boot.time', return_value=None)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"Members": []}
+ is_job, progress_job = self.module.get_scheduled_job(boot_connection_mock, ["Scheduled", "New", "Running"])
+ print(progress_job)
+ assert is_job is False
+
+ def test_get_scheduled_job_progress_job_none(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
+ mocker.patch(MODULE_PATH + 'idrac_boot.time', return_value=None)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"Members": [{
+ "Description": "Job Instance", "EndTime": "TIME_NA", "Id": "JID_609237056489", "JobState": "Completed",
+ "JobType": "BIOSConfiguration", "Message": "Job scheduled successfully.", "MessageArgs": [],
+ "MessageId": "PR19", "Name": "Configure: BIOS.Setup.1-1", "PercentComplete": 10}]}
+ status, job = self.module.get_scheduled_job(boot_connection_mock)
+ assert status is False
+
def test_configure_boot_options(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
idrac_default_args.update({"boot_source_override_mode": "uefi", "job_wait": True, "reset_type": "none",
"job_wait_timeout": 900})
+ obj = MagicMock()
+ obj.json_data = {"JobState": "Reset Successful"}
+
f_module = self.get_module_mock(params=idrac_default_args)
mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(True, {}))
resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled",
@@ -119,7 +144,7 @@ class TestConfigBios(FakeAnsibleModule):
"BootSourceOverrideMode": "UEFI", "BootSourceOverrideTarget": "UefiTarget",
"UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"}
mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data)
- idrac_default_args.update({"boot_source_override_mode": "legacy"})
+ idrac_default_args.update({"boot_source_override_mode": "legacy", "reset_type": "force_restart"})
f_module = self.get_module_mock(params=idrac_default_args)
redfish_response_mock.json_data = {"Attributes": {"UefiBootSeq": [
{"Name": "Boot001", "Id": 0, "Enabled": True}, {"Name": "Boot000", "Id": 1, "Enabled": True}]}}
@@ -127,6 +152,40 @@ class TestConfigBios(FakeAnsibleModule):
self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False})
assert err.value.args[0] == "This job is not complete after 900 seconds."
+ mocker.patch(MODULE_PATH + 'idrac_boot.system_reset', return_value=(False, False, "Completed Reset", None))
+ with pytest.raises(Exception) as err:
+ self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False})
+ assert err.value.args[0] == "Completed Reset"
+
+ redfish_response_mock.status_code = 200
+ redfish_response_mock.success = True
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(False, {}))
+ job_data = self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False})
+ assert job_data == {}
+
+ def test_configure_boot_options_v2(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"boot_source_override_mode": "uefi", "job_wait": True, "reset_type": "none",
+ "job_wait_timeout": 900})
+ obj = MagicMock()
+ obj.json_data = {"JobState": "Reset Successful"}
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(True, {}))
+ resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "Legacy", "BootSourceOverrideTarget": "UefiTarget",
+ "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"}
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data)
+ redfish_response_mock.status_code = 202
+ redfish_response_mock.success = True
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"}
+ redfish_response_mock.json_data = {"Attributes": {"BootSeq": [{"Name": "Boot001", "Id": 0, "Enabled": True},
+ {"Name": "Boot000", "Id": 1, "Enabled": True}]}}
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(False, {}))
+ mocker.patch(MODULE_PATH + 'idrac_boot.system_reset', return_value=(True, False, "Completed", obj))
+ mocker.patch(MODULE_PATH + 'idrac_boot.wait_for_idrac_job_completion',
+ return_value=(obj, ""))
+ f_module = self.get_module_mock(params=idrac_default_args)
+ job_data = self.module.configure_boot_options(f_module, boot_connection_mock, "System.Embedded.1", {"Boot001": False})
+ assert job_data == {"JobState": "Reset Successful"}
+
def test_apply_boot_settings(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
idrac_default_args.update({"boot_source_override_mode": "uefi", "job_wait": True, "reset_type": "none",
"job_wait_timeout": 900})
@@ -142,6 +201,32 @@ class TestConfigBios(FakeAnsibleModule):
self.module.apply_boot_settings(f_module, boot_connection_mock, payload, "System.Embedded.1")
assert err.value.args[0] == "This job is not complete after 900 seconds."
+ redfish_response_mock.status_code = 400
+ job_data = self.module.apply_boot_settings(f_module, boot_connection_mock, payload, "System.Embedded.1")
+ assert job_data == {}
+
+ def test_apply_boot_settings_reset_type(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"boot_source_override_mode": "uefi", "job_wait": True, "reset_type": "graceful_restart",
+ "job_wait_timeout": 900})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ payload = {"Boot": {"BootSourceOverrideMode": "UEFI"}}
+ redfish_response_mock.success = True
+ redfish_response_mock.status_code = 200
+
+ obj = MagicMock()
+ obj.json_data = {"JobState": "Reset Successful"}
+ mocker.patch(MODULE_PATH + 'idrac_boot.system_reset', return_value=(False, False, "Completed", obj))
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(False, [{"Id": "JID_123456789"}]))
+ job_data = self.module.apply_boot_settings(f_module, boot_connection_mock, payload, "System.Embedded.1")
+ assert job_data == {"JobState": "Reset Successful"}
+
+ mocker.patch(MODULE_PATH + 'idrac_boot.system_reset', return_value=(True, False, "Completed", {}))
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_scheduled_job', return_value=(True, [{"Id": "JID_123456789"}]))
+ mocker.patch(MODULE_PATH + 'idrac_boot.wait_for_idrac_job_completion',
+ return_value=(obj, ""))
+ job_data = self.module.apply_boot_settings(f_module, boot_connection_mock, payload, "System.Embedded.1")
+ assert job_data == {"JobState": "Reset Successful"}
+
def test_configure_boot_settings(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
idrac_default_args.update({"boot_order": ["Boot005", "Boot001"], "job_wait": True, "reset_type": "none",
"job_wait_timeout": 900, "boot_source_override_mode": "uefi",
@@ -170,6 +255,37 @@ class TestConfigBios(FakeAnsibleModule):
self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1")
assert err.value.args[0] == "Changes found to be applied."
+ def test_configure_boot_settings_v2(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"boot_order": ["Boot001", "Boot002", "Boot003"], "job_wait": True, "reset_type": "none",
+ "job_wait_timeout": 900, "boot_source_override_mode": "uefi",
+ "boot_source_override_enabled": "once", "boot_source_override_target": "cd",
+ "uefi_target_boot_source_override": "test_uefi_path"})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ resp_data = {"BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "Legacy", "BootSourceOverrideTarget": "UefiTarget",
+ "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01", "BootOrder": ["Boot001", "Boot002", "Boot003"]}
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data)
+ mocker.patch(MODULE_PATH + 'idrac_boot.apply_boot_settings', return_value={"JobStatus": "Completed"})
+
+ job_resp = self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1")
+ assert job_resp["JobStatus"] == "Completed"
+
+ idrac_default_args.update({"boot_order": []})
+ with pytest.raises(Exception) as err:
+ self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1")
+ assert err.value.args[0] == "Unable to complete the operation because all boot devices are required for this operation."
+
+ idrac_default_args.pop("boot_order")
+ idrac_default_args.pop("boot_source_override_mode")
+ idrac_default_args.pop("boot_source_override_enabled")
+ job_resp = self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1")
+ assert job_resp["JobStatus"] == "Completed"
+
+ idrac_default_args.update({"boot_source_override_target": "uefi_target"})
+ resp_data.update({"BootSourceOverrideTarget": "cd"})
+ job_resp = self.module.configure_boot_settings(f_module, boot_connection_mock, "System.Embedded.1")
+ assert job_resp["JobStatus"] == "Completed"
+
def test_configure_idrac_boot(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
idrac_default_args.update({"job_wait": True, "reset_type": "none", "job_wait_timeout": 900,
"boot_options": [{"boot_option_reference": "HardDisk.List.1-1", "enabled": True}]})
@@ -208,7 +324,12 @@ class TestConfigBios(FakeAnsibleModule):
self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1")
assert err.value.args[0] == "Changes found to be applied."
- @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ f_module = self.get_module_mock(params=idrac_default_args)
+ idrac_default_args.pop("boot_options")
+ job_resp = self.module.configure_idrac_boot(f_module, boot_connection_mock, "System.Embedded.1")
+ assert job_resp == {"JobType": "Completed"}
+
+ @pytest.mark.parametrize("exc_type", [HTTPError, RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError])
def test_main_exception(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker, exc_type):
idrac_default_args.update({"boot_source_override_mode": "legacy"})
@@ -217,9 +338,9 @@ class TestConfigBios(FakeAnsibleModule):
mocker.patch(MODULE_PATH + 'idrac_boot.get_system_res_id', side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + 'idrac_boot.get_system_res_id',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 401, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
- if not exc_type == URLError:
+ if exc_type != URLError:
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
else:
@@ -254,3 +375,32 @@ class TestConfigBios(FakeAnsibleModule):
with pytest.raises(Exception) as err:
self._run_module(idrac_default_args)
assert err.value.args[0]["msg"] == "Failed"
+
+ def test_manin_success_v2(self, boot_connection_mock, redfish_response_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"boot_source_override_mode": "legacy", "resource_id": "System.Embedded.1"})
+ redfish_response_mock.success = True
+ job_resp = {"Description": "Job Instance", "EndTime": "TIME_NA", "Id": "JID_609237056489",
+ "JobState": "Failed", "JobType": "BIOSConfiguration", "MessageId": "PR19",
+ "Message": "Job scheduled successfully.", "MessageArgs": [],
+ "Name": "Configure: BIOS.Setup.1-1", "PercentComplete": 100}
+ mocker.patch(MODULE_PATH + 'idrac_boot.configure_idrac_boot', return_value=job_resp)
+ boot_return_data = {"Members": [{"BootOptionEnabled": False, "BootOptionReference": "HardDisk.List.1-1",
+ "Description": "Current settings of the Legacy Boot option",
+ "DisplayName": "Hard drive C:", "Id": "HardDisk.List.1-1",
+ "Name": "Legacy Boot option", "UefiDevicePath": "VenHw(D6C0639F-823DE6)"}],
+ "Name": "Boot Options Collection", "Description": "Collection of BootOptions"}
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_existing_boot_options', return_value=boot_return_data)
+ resp_data = {"BootOrder": ["Boot001", "Boot002", "Boot003"], "BootSourceOverrideEnabled": "Disabled",
+ "BootSourceOverrideMode": "Legacy", "BootSourceOverrideTarget": "UefiTarget",
+ "UefiTargetBootSourceOverride": "/0x31/0x33/0x01/0x01"}
+ mocker.patch(MODULE_PATH + 'idrac_boot.get_response_attributes', return_value=resp_data)
+ mocker.patch(MODULE_PATH + 'idrac_boot.strip_substr_dict', return_value=job_resp)
+ with pytest.raises(Exception) as err:
+ self._run_module(idrac_default_args)
+ assert err.value.args[0]["msg"] == "Failed to update the boot settings."
+
+ idrac_default_args.update({"job_wait": False, "reset_type": "none"})
+ job_resp.update({"JobState": "Running"})
+ # with pytest.raises(Exception) as err:
+ module_return = self._run_module(idrac_default_args)
+ assert module_return["msg"] == "The boot settings job is triggered successfully."
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py
index c5ee0dc8f..5e94faf91 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_certificates.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.6.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -25,39 +25,50 @@ from ansible_collections.dellemc.openmanage.plugins.modules import idrac_certifi
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock
-NOT_SUPPORTED_ACTION = "Certificate {op} not supported for the specified certificate type {certype}."
-SUCCESS_MSG = "Successfully performed the '{command}' operation."
+IMPORT_SSL_CERTIFICATE = "#DelliDRACCardService.ImportSSLCertificate"
+EXPORT_SSL_CERTIFICATE = "#DelliDRACCardService.ExportSSLCertificate"
+IDRAC_CARD_SERVICE_ACTION_URI = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions"
+IDRAC_CARD_SERVICE_ACTION_URI_RES_ID = "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions"
+
+NOT_SUPPORTED_ACTION = "Certificate '{operation}' not supported for the specified certificate type '{cert_type}'."
+SUCCESS_MSG = "Successfully performed the '{command}' certificate operation."
+SUCCESS_MSG_SSL = "Successfully performed the SSL key upload and '{command}' certificate operation."
NO_CHANGES_MSG = "No changes found to be applied."
CHANGES_MSG = "Changes found to be applied."
-NO_RESET = " Reset iDRAC to apply new certificate. Until iDRAC is reset, the old certificate will be active."
+WAIT_NEGATIVE_OR_ZERO_MSG = "The value for the `wait` parameter cannot be negative or zero."
+SSL_KEY_MSG = "Unable to locate the SSL key file at {ssl_key}."
+SSK_KEY_NOT_SUPPORTED = "Upload of SSL key not supported"
+NO_RESET = "Reset iDRAC to apply the new certificate. Until the iDRAC is reset, the old certificate will remain active."
RESET_UNTRACK = " iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
-RESET_SUCCESS = " iDRAC has been reset successfully."
+RESET_SUCCESS = "iDRAC has been reset successfully."
RESET_FAIL = " Unable to reset the iDRAC. For changes to reflect, manually reset the iDRAC."
SYSTEM_ID = "System.Embedded.1"
MANAGER_ID = "iDRAC.Embedded.1"
SYSTEMS_URI = "/redfish/v1/Systems"
MANAGERS_URI = "/redfish/v1/Managers"
-IDRAC_SERVICE = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService"
+IDRAC_SERVICE = "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService"
CSR_SSL = "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR"
-IMPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"
-EXPORT_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate"
-RESET_SSL = "/redfish/v1/Dell/Managers/{res_id}/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg"
+IMPORT_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ImportSSLCertificate"
+UPLOAD_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.UploadSSLKey"
+EXPORT_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ExportSSLCertificate"
+RESET_SSL = f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.SSLResetCfg"
IDRAC_RESET = "/redfish/v1/Managers/{res_id}/Actions/Manager.Reset"
idrac_service_actions = {
- "#DelliDRACCardService.DeleteCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.DeleteCertificate",
- "#DelliDRACCardService.ExportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportCertificate",
- "#DelliDRACCardService.ExportSSLCertificate": EXPORT_SSL,
+ "#DelliDRACCardService.DeleteCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.DeleteCertificate",
+ "#DelliDRACCardService.ExportCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ExportCertificate",
+ EXPORT_SSL_CERTIFICATE: EXPORT_SSL,
"#DelliDRACCardService.FactoryIdentityCertificateGenerateCSR":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityCertificateGenerateCSR",
"#DelliDRACCardService.FactoryIdentityExportCertificate":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityExportCertificate",
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityExportCertificate",
"#DelliDRACCardService.FactoryIdentityImportCertificate":
- "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.FactoryIdentityImportCertificate",
- "#DelliDRACCardService.GenerateSEKMCSR": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.GenerateSEKMCSR",
- "#DelliDRACCardService.ImportCertificate": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportCertificate",
- "#DelliDRACCardService.ImportSSLCertificate": IMPORT_SSL,
- "#DelliDRACCardService.SSLResetCfg": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg",
- "#DelliDRACCardService.iDRACReset": "/redfish/v1/Managers/{res_id}/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.iDRACReset"
+ f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.FactoryIdentityImportCertificate",
+ "#DelliDRACCardService.GenerateSEKMCSR": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.GenerateSEKMCSR",
+ "#DelliDRACCardService.ImportCertificate": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.ImportCertificate",
+ IMPORT_SSL_CERTIFICATE: IMPORT_SSL,
+ "#DelliDRACCardService.UploadSSLKey": UPLOAD_SSL,
+ "#DelliDRACCardService.SSLResetCfg": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.SSLResetCfg",
+ "#DelliDRACCardService.iDRACReset": f"{IDRAC_CARD_SERVICE_ACTION_URI}/DelliDRACCardService.iDRACReset"
}
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_certificates.'
@@ -79,7 +90,8 @@ class TestIdracCertificates(FakeAnsibleModule):
return idrac_obj
@pytest.fixture
- def idrac_connection_certificates_mock(self, mocker, idrac_certificates_mock):
+ def idrac_connection_certificates_mock(
+ self, mocker, idrac_certificates_mock):
idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
return_value=idrac_certificates_mock)
idrac_conn_mock.return_value.__enter__.return_value = idrac_certificates_mock
@@ -99,9 +111,15 @@ class TestIdracCertificates(FakeAnsibleModule):
{"json_data": {"CertificateFile": b'Hello world!'}, 'message': CHANGES_MSG, "success": True,
"reset_idrac": (True, False, RESET_SUCCESS), 'check_mode': True,
'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'reset': False}},
+ {"json_data": {"CertificateFile": b'Hello world!', "ssl_key": b'Hello world!'}, 'message': CHANGES_MSG, "success": True,
+ "reset_idrac": (True, False, RESET_SUCCESS), 'check_mode': True,
+ 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', "ssl_key": '.pem', 'reset': False}},
{"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="import"), NO_RESET), "success": True,
"reset_idrac": (True, False, RESET_SUCCESS),
'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'reset': False}},
+ {"json_data": {}, 'message': "{0} {1}".format(SUCCESS_MSG_SSL.format(command="import"), NO_RESET), "success": True,
+ "reset_idrac": (True, False, RESET_SUCCESS),
+ 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', "ssl_key": '.pem', 'reset': False}},
{"json_data": {}, 'message': SUCCESS_MSG.format(command="generate_csr"),
"success": True,
"get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS),
@@ -117,7 +135,7 @@ class TestIdracCertificates(FakeAnsibleModule):
"subject_alt_name": [
"emc"
]}}},
- {"json_data": {}, 'message': NOT_SUPPORTED_ACTION.format(op="generate_csr", certype="CA"),
+ {"json_data": {}, 'message': NOT_SUPPORTED_ACTION.format(operation="generate_csr", cert_type="CA"),
"success": True,
"get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS),
'mparams': {'command': 'generate_csr', 'certificate_type': "CA", 'certificate_path': tempfile.gettempdir(),
@@ -141,49 +159,84 @@ class TestIdracCertificates(FakeAnsibleModule):
"success": True,
"get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS),
'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem'}},
+ {"json_data": {}, 'message': "{0} {1}".format(SUCCESS_MSG_SSL.format(command="import"), RESET_SUCCESS),
+ "success": True,
+ "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS),
+ 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'ssl_key': '.pem'}},
{"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="import"), RESET_SUCCESS),
"success": True,
"reset_idrac": (True, False, RESET_SUCCESS),
'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem'}},
+ {"json_data": {}, 'message': "{0} {1}".format(SUCCESS_MSG_SSL.format(command="import"), RESET_SUCCESS),
+ "success": True,
+ "reset_idrac": (True, False, RESET_SUCCESS),
+ 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', "ssl_key": '.pem'}},
{"json_data": {}, 'message': SUCCESS_MSG.format(command="export"), "success": True, "get_cert_url": "url",
'mparams': {'command': 'export', 'certificate_type': "HTTPS", 'certificate_path': tempfile.gettempdir()}},
{"json_data": {}, 'message': "{0}{1}".format(SUCCESS_MSG.format(command="reset"), RESET_SUCCESS),
"success": True, "get_cert_url": "url", "reset_idrac": (True, False, RESET_SUCCESS),
'mparams': {'command': 'reset', 'certificate_type': "HTTPS"}
- }
+ },
+ {"json_data": {}, 'message': WAIT_NEGATIVE_OR_ZERO_MSG, "success": True,
+ 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'wait': -1}},
+ {"json_data": {}, 'message': WAIT_NEGATIVE_OR_ZERO_MSG, "success": True,
+ 'mparams': {'command': 'reset', 'certificate_type': "HTTPS", 'wait': 0}},
+ {"json_data": {}, 'message': f"{SSL_KEY_MSG.format(ssl_key='/invalid/path')}", "success": True,
+ 'mparams': {'command': 'import', 'certificate_type': "HTTPS", 'certificate_path': '.pem', 'ssl_key': '/invalid/path'}}
])
- def test_idrac_certificates(self, params, idrac_connection_certificates_mock, idrac_default_args, mocker):
- idrac_connection_certificates_mock.success = params.get("success", True)
+ def test_idrac_certificates(
+ self, params, idrac_connection_certificates_mock, idrac_default_args, mocker):
+ idrac_connection_certificates_mock.success = params.get(
+ "success", True)
idrac_connection_certificates_mock.json_data = params.get('json_data')
- if params.get('mparams').get('certificate_path') and params.get('mparams').get('command') == 'import':
+ if params.get('mparams').get('certificate_path') and params.get(
+ 'mparams').get('command') == 'import':
sfx = params.get('mparams').get('certificate_path')
temp = tempfile.NamedTemporaryFile(suffix=sfx, delete=False)
temp.write(b'Hello')
temp.close()
params.get('mparams')['certificate_path'] = temp.name
+ if params.get('mparams').get('ssl_key') == '.pem':
+ temp = tempfile.NamedTemporaryFile(suffix=sfx, delete=False)
+ temp.write(b'Hello')
+ temp.close()
+ params.get('mparams')['ssl_key'] = temp.name
mocker.patch(MODULE_PATH + 'get_res_id', return_value=MANAGER_ID)
- mocker.patch(MODULE_PATH + 'get_idrac_service', return_value=IDRAC_SERVICE.format(res_id=MANAGER_ID))
- mocker.patch(MODULE_PATH + 'get_actions_map', return_value=idrac_service_actions)
+ mocker.patch(
+ MODULE_PATH + 'get_idrac_service',
+ return_value=IDRAC_SERVICE.format(
+ res_id=MANAGER_ID))
+ mocker.patch(
+ MODULE_PATH + 'get_actions_map',
+ return_value=idrac_service_actions)
# mocker.patch(MODULE_PATH + 'get_cert_url', return_value=params.get('get_cert_url'))
# mocker.patch(MODULE_PATH + 'write_to_file', return_value=params.get('write_to_file'))
- mocker.patch(MODULE_PATH + 'reset_idrac', return_value=params.get('reset_idrac'))
+ mocker.patch(
+ MODULE_PATH + 'reset_idrac',
+ return_value=params.get('reset_idrac'))
idrac_default_args.update(params.get('mparams'))
- result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False))
+ result = self._run_module(
+ idrac_default_args,
+ check_mode=params.get(
+ 'check_mode',
+ False))
if params.get('mparams').get('command') == 'import' and params.get('mparams').get(
'certificate_path') and os.path.exists(temp.name):
os.remove(temp.name)
assert result['msg'] == params['message']
@pytest.mark.parametrize("params", [{"json_data": {"Members": [{"@odata.id": '/redfish/v1/Mangers/iDRAC.1'}]},
- "certype": 'Server', "res_id": "iDRAC.1"},
+ "cert_type": 'Server', "res_id": "iDRAC.1"},
{"json_data": {"Members": []},
- "certype": 'Server', "res_id": MANAGER_ID}
+ "cert_type": 'Server', "res_id": MANAGER_ID}
])
def test_res_id(
self, params, idrac_redfish_mock_for_certs, ome_response_mock):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
- res_id = self.module.get_res_id(idrac_redfish_mock_for_certs, params.get('certype'))
+ res_id = self.module.get_res_id(
+ idrac_redfish_mock_for_certs,
+ params.get('cert_type'))
assert res_id == params['res_id']
@pytest.mark.parametrize("params", [{"json_data": {
@@ -196,62 +249,97 @@ class TestIdracCertificates(FakeAnsibleModule):
"VirtualMedia": {
"@odata.id": "/redfish/v1/Managers/iDRAC.Embedded.1/VirtualMedia"}
},
- "idrac_srv": '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService', "res_id": "iDRAC.1"},
- {"json_data": {"Members": []},
- "idrac_srv": '/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DelliDRACCardService', "res_id": MANAGER_ID}
+ "idrac_srv": '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService', "res_id": "iDRAC.1"}
])
def test_get_idrac_service(
self, params, idrac_redfish_mock_for_certs, ome_response_mock):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
- idrac_srv = self.module.get_idrac_service(idrac_redfish_mock_for_certs, params.get('res_id'))
+ idrac_srv = self.module.get_idrac_service(
+ idrac_redfish_mock_for_certs, params.get('res_id'))
assert idrac_srv == params['idrac_srv']
+ def test_write_to_file(self, idrac_default_args):
+ inv_dir = "invalid_temp_dir"
+ idrac_default_args.update({"certificate_path": inv_dir})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ with pytest.raises(Exception) as ex:
+ self.module.write_to_file(f_module, {}, "dkey")
+ assert ex.value.args[0] == f"Provided directory path '{inv_dir}' is not valid."
+ temp_dir = tempfile.mkdtemp()
+ os.chmod(temp_dir, 0o000)
+ idrac_default_args.update({"certificate_path": temp_dir})
+ with pytest.raises(Exception) as ex:
+ self.module.write_to_file(f_module, {}, "dkey")
+ assert ex.value.args[0] == f"Provided directory path '{temp_dir}' is not writable. Please check if you have appropriate permissions."
+ os.removedirs(temp_dir)
+
+ def test_upload_ssl_key(self, idrac_default_args):
+ temp_ssl = tempfile.NamedTemporaryFile(delete=False)
+ temp_ssl.write(b'ssl_key')
+ temp_ssl.close()
+ f_module = self.get_module_mock(params=idrac_default_args)
+ with pytest.raises(Exception) as ex:
+ self.module.upload_ssl_key(f_module, {}, {}, temp_ssl.name, "res_id")
+ assert ex.value.args[0] == "Upload of SSL key not supported"
+ os.chmod(temp_ssl.name, 0o000)
+ with pytest.raises(Exception) as ex:
+ self.module.upload_ssl_key(f_module, {}, {}, temp_ssl.name, "res_id")
+ assert "Permission denied" in ex.value.args[0]
+ os.remove(temp_ssl.name)
+
@pytest.mark.parametrize("params", [{"json_data": {
"Actions": {
- "#DelliDRACCardService.ExportSSLCertificate": {
- "SSLCertType@Redfish.AllowableValues": ["CA", "CSC", "ClientTrustCertificate", "Server"],
+ EXPORT_SSL_CERTIFICATE: {
+ "SSLCertType@Redfish.AllowableValues": ["CA", "CSC", "CustomCertificate", "ClientTrustCertificate", "Server"],
"target":
- "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate"
+ f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.ExportSSLCertificate"
},
- "#DelliDRACCardService.ImportSSLCertificate": {
- "CertificateType@Redfish.AllowableValues": ["CA", "CSC", "ClientTrustCertificate", "Server"],
+ IMPORT_SSL_CERTIFICATE: {
+ "CertificateType@Redfish.AllowableValues": ["CA", "CSC", "CustomCertificate", "ClientTrustCertificate", "Server"],
"target":
- "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"
+ f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.ImportSSLCertificate"
},
"#DelliDRACCardService.SSLResetCfg": {
- "target": "/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg"
+ "target": f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.SSLResetCfg"
},
+ "#DelliDRACCardService.UploadSSLKey": {
+ "target": f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.UploadSSLKey"}
},
},
"idrac_service_uri": '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService',
"actions": {
- '#DelliDRACCardService.ExportSSLCertificate':
- '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ExportSSLCertificate',
- '#DelliDRACCardService.ImportSSLCertificate':
- '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate',
+ EXPORT_SSL_CERTIFICATE:
+ f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.ExportSSLCertificate",
+ IMPORT_SSL_CERTIFICATE:
+ f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.ImportSSLCertificate",
'#DelliDRACCardService.SSLResetCfg':
- '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService/Actions/DelliDRACCardService.SSLResetCfg'}},
+ f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.SSLResetCfg",
+ '#DelliDRACCardService.UploadSSLKey':
+ f"{IDRAC_CARD_SERVICE_ACTION_URI_RES_ID}/DelliDRACCardService.UploadSSLKey"}},
{"json_data": {"Members": []},
- "idrac_service_uri": '/redfish/v1/Dell/Managers/iDRAC.Embedded.1/DelliDRACCardService',
+ "idrac_service_uri": '/redfish/v1/Managers/iDRAC.Embedded.1/Oem/Dell/DelliDRACCardService',
"actions": idrac_service_actions}
])
def test_get_actions_map(
self, params, idrac_redfish_mock_for_certs, ome_response_mock):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
- actions = self.module.get_actions_map(idrac_redfish_mock_for_certs, params.get('idrac_service_uri'))
+ actions = self.module.get_actions_map(
+ idrac_redfish_mock_for_certs,
+ params.get('idrac_service_uri'))
assert actions == params['actions']
- @pytest.mark.parametrize("params", [{"actions": {}, "op": "generate_csr",
- "certype": 'Server', "res_id": "iDRAC.1",
+ @pytest.mark.parametrize("params", [{"actions": {}, "operation": "generate_csr",
+ "cert_type": 'Server', "res_id": "iDRAC.1",
"dynurl": "/redfish/v1/CertificateService/Actions/CertificateService.GenerateCSR"},
- {"actions": {}, "op": "import",
- "certype": 'Server', "res_id": "iDRAC.1",
- "dynurl": "/redfish/v1/Dell/Managers/iDRAC.1/DelliDRACCardService/Actions/DelliDRACCardService.ImportSSLCertificate"}
+ {"actions": {}, "operation": "import",
+ "cert_type": 'Server', "res_id": "iDRAC.1",
+ "dynurl": "/redfish/v1/Managers/iDRAC.1/Oem/Dell/DelliDRACCardService/Actions/"
+ "DelliDRACCardService.ImportSSLCertificate"}
])
def test_get_cert_url(self, params):
- dynurl = self.module.get_cert_url(params.get('actions'), params.get('op'), params.get('certype'),
+ dynurl = self.module.get_cert_url(params.get('actions'), params.get('operation'), params.get('cert_type'),
params.get('res_id'))
assert dynurl == params['dynurl']
@@ -269,6 +357,21 @@ class TestIdracCertificates(FakeAnsibleModule):
'Resolution': 'No response action is required.',
'Severity': 'Informational'}]},
"mparams": {'command': 'export', 'certificate_type': "HTTPS",
+ 'certificate_path': tempfile.gettempdir(), 'reset': False}
+ },
+ {"cert_data": {"CertificateFile": 'Hello world!',
+ "@Message.ExtendedInfo": [{
+ "Message": "Successfully exported SSL Certificate.",
+ "MessageId": "IDRAC.2.5.LC067",
+ "Resolution": "No response action is required.",
+ "Severity": "Informational"}
+ ]},
+ "result": {'@Message.ExtendedInfo': [
+ {'Message': 'Successfully exported SSL Certificate.',
+ 'MessageId': 'IDRAC.2.5.LC067',
+ 'Resolution': 'No response action is required.',
+ 'Severity': 'Informational'}]},
+ "mparams": {'command': 'generate_csr', 'certificate_type': "HTTPS",
'certificate_path': tempfile.gettempdir(), 'reset': False}}])
def test_format_output(self, params, idrac_default_args):
idrac_default_args.update(params.get('mparams'))
@@ -280,18 +383,20 @@ class TestIdracCertificates(FakeAnsibleModule):
@pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError,
ConnectionError, HTTPError, ImportError, RuntimeError])
- def test_main_exceptions(self, exc_type, idrac_connection_certificates_mock, idrac_default_args, mocker):
- idrac_default_args.update({"command": "export", "certificate_path": "mypath"})
+ def test_main_exceptions(
+ self, exc_type, idrac_connection_certificates_mock, idrac_default_args, mocker):
+ idrac_default_args.update(
+ {"command": "export", "certificate_path": "mypath"})
json_str = to_text(json.dumps({"data": "out"}))
if exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(MODULE_PATH + "get_res_id",
side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + "get_res_id",
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
- result = self._run_module_with_fail_json(idrac_default_args)
+ result = self._run_module(idrac_default_args)
assert result['failed'] is True
else:
result = self._run_module(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py
index c30ce409e..6d9fdd51b 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -15,26 +15,38 @@ __metaclass__ = type
import json
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_firmware
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from mock import MagicMock, patch, Mock
+from mock import MagicMock, Mock
from io import StringIO
from ansible.module_utils._text import to_text
-from ansible.module_utils.six.moves.urllib.parse import urlparse, ParseResult
+from ansible.module_utils.six.moves.urllib.parse import ParseResult
from pytest import importorskip
importorskip("omsdk.sdkfile")
importorskip("omsdk.sdkcreds")
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+CATALOG = "Catalog.xml"
+DELL_SHARE = "https://downloads.dell.com"
+GET_JOBID = "idrac_firmware.get_jobid"
+CONVERT_XML_JSON = "idrac_firmware._convert_xmltojson"
+SUCCESS_MSG = "Successfully updated the firmware."
+UPDATE_URL = "idrac_firmware.update_firmware_url_redfish"
+WAIT_FOR_JOB = "idrac_firmware.wait_for_job_completion"
+TIME_SLEEP = "idrac_firmware.time.sleep"
+VALIDATE_CATALOG = "idrac_firmware._validate_catalog_file"
+SHARE_PWD = "share_pwd"
+USER_PWD = "user_pwd"
+TEST_HOST = "'https://testhost.com'"
class TestidracFirmware(FakeAnsibleModule):
module = idrac_firmware
@pytest.fixture
- def idrac_firmware_update_mock(self, mocker):
+ def idrac_firmware_update_mock(self):
omsdk_mock = MagicMock()
idrac_obj = MagicMock()
omsdk_mock.update_mgr = idrac_obj
@@ -123,351 +135,72 @@ class TestidracFirmware(FakeAnsibleModule):
idrac_conn_class_mock.return_value.__enter__.return_value = idrac_firmware_job_mock
return idrac_firmware_job_mock
- def test_main_idrac_firmware_success_case(self, idrac_connection_firmware_mock,
- idrac_connection_firmware_redfish_mock,
- idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "sharename", "catalog_file_name": "Catalog.xml",
- "share_user": "sharename", "share_password": "sharepswd",
- "share_mnt": "sharmnt",
- "reboot": True, "job_wait": True
- })
- message = {"Status": "Success", "update_msg": "Successfully updated the firmware.",
- "update_status": "Success", 'changed': False, 'failed': False}
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {}
- mocker.patch(MODULE_PATH + 'idrac_firmware.update_firmware_redfish', return_value=message)
- result = self._run_module(idrac_default_args)
- assert result == {'msg': 'Successfully updated the firmware.', 'update_status': 'Success',
- 'changed': False, 'failed': False}
-
- @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
- ImportError, ValueError, TypeError])
- def test_main_idrac_firmware_exception_handling_case(self, exc_type, mocker, idrac_default_args,
- idrac_connection_firmware_redfish_mock,
- idrac_connection_firmware_mock):
- idrac_default_args.update({"share_name": "sharename", "catalog_file_name": "Catalog.xml",
- "share_user": "sharename", "share_password": "sharepswd",
- "share_mnt": "sharmnt",
- "reboot": True, "job_wait": True
- })
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
- mocker.patch(MODULE_PATH +
- 'idrac_firmware._validate_catalog_file', return_value="catalog_file_name")
- mocker.patch(MODULE_PATH +
- 'idrac_firmware.update_firmware_omsdk', side_effect=exc_type('test'))
- result = self._run_module_with_fail_json(idrac_default_args)
- assert 'msg' in result
- assert result['failed'] is True
-
- def test_main_HTTPError_case(self, idrac_connection_firmware_mock, idrac_default_args,
- idrac_connection_firmware_redfish_mock, mocker):
- idrac_default_args.update({"share_name": "sharename", "catalog_file_name": "Catalog.xml",
- "share_user": "sharename", "share_password": "sharepswd",
- "share_mnt": "sharmnt",
- "reboot": True, "job_wait": True
- })
- json_str = to_text(json.dumps({"data": "out"}))
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
- mocker.patch(MODULE_PATH + 'idrac_firmware.update_firmware_omsdk',
- side_effect=HTTPError('http://testhost.com', 400, 'http error message',
- {"accept-type": "application/json"},
- StringIO(json_str)))
- result = self._run_module_with_fail_json(idrac_default_args)
- assert 'msg' in result
- assert result['failed'] is True
-
- def test_update_firmware_omsdk_success_case01(self, idrac_connection_firmware_mock,
- idrac_connection_firmware_redfish_mock, idrac_default_args, mocker,
- re_match_mock):
- idrac_default_args.update({"share_name": "https://downloads.dell.com", "catalog_file_name": "Catalog.xml",
- "share_user": "UserName", "share_password": "sharepswd",
- "share_mnt": "shrmnt",
- "reboot": True, "job_wait": True, "ignore_cert_warning": True,
- "apply_update": True})
- mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk",
- return_value=({"update_status": {"job_details": {"Data": {"StatusCode": 200,
- "body": {"PackageList": [{}]}}}}},
- {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}))
-
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson",
- return_value=({"BaseLocation": None,
- "ComponentID": "18981",
- "ComponentType": "APAC",
- "Criticality": "3",
- "DisplayName": "Dell OS Driver Pack",
- "JobID": None,
- "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64"
- "_19.10.12_A00.EXE",
- "PackagePath": "FOLDER05902898M/1/Drivers-for-"
- "OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackageVersion": "19.10.12",
- "RebootType": "NONE",
- "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1"
- }, True, False))
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_connection_firmware_mock.match.return_value = "2.70"
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
- idrac_connection_firmware_mock.ServerGeneration.return_value = "13"
- idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = {
- "job_details": {"Data": {"StatusCode": 200, "GetRepoBasedUpdateList_OUTPUT": {},
- "body": {"PackageList1": [{}]}}}
- }
- result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
- assert result["update_status"]["job_details"]["Data"]["StatusCode"] == 200
-
- def test_update_firmware_omsdk_success_case02(self, idrac_connection_firmware_mock,
- idrac_connection_firmware_redfish_mock, idrac_default_args, mocker,
- re_match_mock, fileonshare_idrac_firmware_mock):
- idrac_default_args.update({"share_name": "mhttps://downloads.dell.com", "catalog_file_name": "Catalog.xml",
- "share_user": "UserName", "share_password": "sharepswd",
- "share_mnt": "shrmnt",
- "reboot": True, "job_wait": True, "ignore_cert_warning": True,
- "apply_update": True
- })
- mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk",
- return_value=({"update_status": {"job_details": {"data": {"StatusCode": 200,
- "body": {"PackageList": [{}]}}}}},
- {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}))
-
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson",
- return_value=({"BaseLocation": None,
- "ComponentID": "18981",
- "ComponentType": "APAC",
- "Criticality": "3",
- "DisplayName": "Dell OS Driver Pack",
- "JobID": None,
- "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64"
- "_19.10.12_A00.EXE",
- "PackagePath": "FOLDER05902898M/1/Drivers-for-"
- "OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackageVersion": "19.10.12",
- "RebootType": "NONE",
- "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1"
- }, True))
-
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_connection_firmware_mock.match.return_value = "2.70"
- idrac_connection_firmware_mock.ServerGeneration.return_value = "13"
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=("INSTANCENAME", False, False))
- idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = {
- "job_details": {"Data": {"StatusCode": 200, "GetRepoBasedUpdateList_OUTPUT": {},
- "body": {"PackageList": [{}]}}}}
- upd_share = fileonshare_idrac_firmware_mock
- upd_share.IsValid = True
- result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
- assert result["update_status"]["job_details"]["Data"]["StatusCode"] == 200
-
- def test_update_firmware_redfish_success_case03(self, idrac_connection_firmware_mock,
- idrac_connection_firmware_redfish_mock,
- idrac_default_args, mocker, re_match_mock):
- idrac_default_args.update({"share_name": "https://downloads.dell.com", "catalog_file_name": "Catalog.xml",
- "share_user": "UserName", "share_password": "sharepswd",
- "share_mnt": "shrmnt",
- "reboot": True, "job_wait": False, "ignore_cert_warning": True,
- "apply_update": True
- })
- mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_redfish",
- return_value=(
- {"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}},
- {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}))
-
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson",
- return_value=({"BaseLocation": None,
- "ComponentID": "18981",
- "ComponentType": "APAC",
- "Criticality": "3",
- "DisplayName": "Dell OS Driver Pack",
- "JobID": None,
- "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_"
- "19.10.12_A00.EXE",
- "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-"
- "Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackageVersion": "19.10.12",
- "RebootType": "NONE",
- "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1"
- }, True))
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_connection_firmware_mock.re_match_mock.group = Mock(return_value="3.30")
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "3.30"}
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=("INSTANCENAME", False, False))
- idrac_connection_firmware_mock.ServerGeneration = "14"
- result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
- assert result["changed"] is False
- assert result["update_msg"] == "Successfully triggered the job to update the firmware."
-
- def test_update_firmware_omsdk_status_success_case01(self, idrac_connection_firmware_mock,
- idrac_connection_firmware_redfish_mock, idrac_default_args,
- mocker, re_match_mock, fileonshare_idrac_firmware_mock):
- idrac_default_args.update({"share_name": "mhttps://downloads.dell.com", "catalog_file_name": "Catalog.xml",
- "share_user": "UserName", "share_password": "sharepswd",
- "share_mnt": "sharemnt",
- "reboot": True, "job_wait": True, "ignore_cert_warning": True,
- "apply_update": True
- })
- mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk",
- return_value=({"update_status": {"job_details": {"data": {"StatusCode": 200,
- "body": {"PackageList": [{}]}}}}},
- {"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}}))
-
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson",
- return_value={
- "BaseLocation": None,
- "ComponentID": "18981",
- "ComponentType": "APAC",
- "Criticality": "3",
- "DisplayName": "Dell OS Driver Pack",
- "JobID": None,
- "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-Deployment_"
- "Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackageVersion": "19.10.12",
- "RebootType": "NONE",
- "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1"
- })
- f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
- idrac_connection_firmware_mock.match.return_value = "2.70"
- idrac_connection_firmware_mock.ServerGeneration.return_value = "13"
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
- idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = {"job_details": {
- "Data": {"StatusCode": 200, "body": {}, "GetRepoBasedUpdateList_OUTPUT": {}}, "Status": "Success"},
- "Status": "Success"}
- upd_share = fileonshare_idrac_firmware_mock
- upd_share.IsValid = True
- result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
- assert result == {'changed': False, 'failed': False,
- 'update_msg': 'Successfully triggered the job to update the firmware.',
- 'update_status': {'Status': 'Success',
- 'job_details': {'Data': {'StatusCode': 200, 'body': {},
- "GetRepoBasedUpdateList_OUTPUT": {}},
- 'Status': 'Success'}}}
-
- def test_update_firmware_omsdk_status_failed_case01(self, idrac_connection_firmware_mock,
- idrac_connection_firmware_redfish_mock,
- idrac_default_args, mocker, re_match_mock):
- idrac_default_args.update({"share_name": "mhttps://downloads.dell.com", "catalog_file_name": "Catalog.xml",
- "share_user": "UserName", "share_password": "sharepswd",
- "share_mnt": "sharemnt",
- "reboot": True, "job_wait": True, "ignore_cert_warning": True,
- "apply_update": True})
- mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk",
- return_value=({"update_status": {"job_details": {"data": {"StatusCode": 200,
- "body": {"PackageList": [{}]}}}}},
- {"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}}))
-
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson",
- return_value={
- "BaseLocation": None,
- "ComponentID": "18981",
- "ComponentType": "APAC",
- "Criticality": "3",
- "DisplayName": "Dell OS Driver Pack",
- "JobID": None,
- "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-Deployment_"
- "Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackageVersion": "19.10.12",
- "RebootType": "NONE",
- "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1"
- })
+ @pytest.fixture
+ def idrac_connection_firm_mock(self, mocker, redfish_response_mock):
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_connection_firmware_mock.match.return_value = "2.70"
- idrac_connection_firmware_mock.ServerGeneration.return_value = "13"
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
- idrac_connection_firmware_mock.update_mgr.update_from_repo.return_value = {"job_details": {"Data": {
- "StatusCode": 200, "body": {}, "GetRepoBasedUpdateList_OUTPUT": {}}, "Status": "Failed"},
- "Status": "Failed"}
- with pytest.raises(Exception) as ex:
- self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
- assert ex.value.args[0] == "Firmware update failed."
+ connection_class_mock = mocker.patch(MODULE_PATH + 'idrac_firmware.iDRACRedfishAPI')
+ redfish_connection_obj = connection_class_mock.return_value.__enter__.return_value
+ redfish_connection_obj.invoke_request.return_value = redfish_response_mock
+ return redfish_connection_obj
- def test__validate_catalog_file_case01(self, idrac_connection_firmware_mock, idrac_default_args):
+ def test__validate_catalog_file_case01(self, idrac_default_args):
idrac_default_args.update({"catalog_file_name": ""})
with pytest.raises(ValueError) as exc:
self.module._validate_catalog_file("")
assert exc.value.args[0] == 'catalog_file_name should be a non-empty string.'
- def test__validate_catalog_file_case02(self, idrac_connection_firmware_mock, idrac_default_args):
+ def test__validate_catalog_file_case02(self, idrac_default_args):
idrac_default_args.update({"catalog_file_name": "Catalog.json"})
with pytest.raises(ValueError) as exc:
self.module._validate_catalog_file("Catalog.json")
assert exc.value.args[0] == 'catalog_file_name should be an XML file.'
- def test_convert_xmltojson_case01(self, mocker, idrac_connection_firmware_mock,
- idrac_default_args, ET_convert_mock):
- idrac_default_args.update({"PackageList": [{
- "BaseLocation": None,
- "ComponentID": "18981",
- "ComponentType": "APAC",
- "Criticality": "3",
- "DisplayName": "Dell OS Driver Pack",
- "JobID": None,
- "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackagePath":
- "FOLDER05902898M/1/Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
- "PackageVersion": "19.10.12"}]})
- mocker.patch(MODULE_PATH + "idrac_firmware.get_job_status", return_value=("Component", False))
- mocker.patch(MODULE_PATH + 'idrac_firmware.ET')
- result = self.module._convert_xmltojson({"PackageList": [{"INSTANCENAME": {"PROPERTY": {"NAME": "abc"}}}]},
- MagicMock(), None)
- assert result == ([], True, False)
-
- def test_convert_xmltojson_case02(self, mocker, idrac_connection_firmware_mock, idrac_default_args):
- idrac_default_args.update({"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}})
- packagelist = {"PackageList": "INSTANCENAME"}
+ def test_convert_xmltojson(self, mocker, idrac_default_args, idrac_connection_firmware_redfish_mock):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
mocker.patch(MODULE_PATH + "idrac_firmware.get_job_status", return_value=("Component", False))
- mocker.patch(MODULE_PATH + 'idrac_firmware.ET')
- result = self.module._convert_xmltojson(packagelist, MagicMock(), None)
+ job_details = {"PackageList": """<?xml version="1.0" encoding="UTF-8" ?><root><BaseLocation /><ComponentID>18981</ComponentID></root>"""}
+ result = self.module._convert_xmltojson(f_module, job_details, idrac_connection_firmware_redfish_mock)
assert result == ([], True, False)
-
- def test_get_jobid_success_case01(self, idrac_connection_firmware_mock, idrac_default_args,
- idrac_firmware_job_mock,
- idrac_connection_firmware_redfish_mock):
- idrac_default_args.update({"Location": "https://jobmnager/jid123"})
- idrac_firmware_job_mock.status_code = 202
- idrac_firmware_job_mock.Success = True
- idrac_connection_firmware_redfish_mock.update_mgr.headers.get().split().__getitem__().return_value = "jid123"
- f_module = self.get_module_mock(params=idrac_default_args)
- result = self.module.get_jobid(f_module, idrac_firmware_job_mock)
- assert result == idrac_connection_firmware_redfish_mock.headers.get().split().__getitem__()
-
- def test_get_jobid_fail_case01(self, idrac_connection_firmware_mock, idrac_default_args,
- idrac_firmware_job_mock):
- idrac_firmware_job_mock.status_code = 202
- idrac_firmware_job_mock.headers = {"Location": None}
- f_module = self.get_module_mock(params=idrac_default_args)
- with pytest.raises(Exception) as exc:
- self.module.get_jobid(f_module, idrac_firmware_job_mock)
- assert exc.value.args[0] == "Failed to update firmware."
-
- def test_get_jobid_fail_case02(self, idrac_connection_firmware_mock, idrac_default_args,
- idrac_firmware_job_mock):
- idrac_firmware_job_mock.status_code = 400
+ et_mock = MagicMock()
+ et_mock.iter.return_value = [et_mock, et_mock]
+ mocker.patch(MODULE_PATH + "idrac_firmware.ET.fromstring", return_value=et_mock)
+ mocker.patch(MODULE_PATH + "idrac_firmware.get_job_status", return_value=("Component", True))
+ result = self.module._convert_xmltojson(f_module, job_details, idrac_connection_firmware_redfish_mock)
+ assert result[0] == ['Component', 'Component']
+ assert result[1]
+ assert result[2]
+
+ def test_update_firmware_url_omsdk(self, idrac_connection_firmware_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"share_name": DELL_SHARE, "catalog_file_name": CATALOG,
+ "share_user": "shareuser", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": False, "ignore_cert_warning": True,
+ "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user",
+ "idrac_password": "idrac_password", "idrac_port": 443, "proxy_support": "Off"})
+ mocker.patch(MODULE_PATH + GET_JOBID, return_value="23451")
+ mocker.patch(MODULE_PATH + "idrac_firmware.get_check_mode_status")
+ idrac_connection_firmware_mock.use_redfish = True
+ idrac_connection_firmware_mock.job_mgr.get_job_status_redfish.return_value = "23451"
+ idrac_connection_firmware_mock.update_mgr.update_from_dell_repo_url.return_value = {"InstanceID": "JID_12345678"}
f_module = self.get_module_mock(params=idrac_default_args)
- with pytest.raises(Exception) as exc:
- self.module.get_jobid(f_module, idrac_firmware_job_mock)
- assert exc.value.args[0] == "Failed to update firmware."
+ payload = {"ApplyUpdate": "True", "CatalogFile": CATALOG, "IgnoreCertWarning": "On",
+ "RebootNeeded": True, "UserName": "username", "Password": USER_PWD}
+ result = self.module.update_firmware_url_omsdk(f_module, idrac_connection_firmware_mock,
+ DELL_SHARE, CATALOG, True, True, True, True, payload)
+ assert result[0] == {"InstanceID": "JID_12345678"}
def test_update_firmware_url_omsdk_success_case02(self, idrac_connection_firmware_mock, idrac_default_args,
mocker, idrac_connection_firmware_redfish_mock):
- idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml",
- "share_user": "shareuser", "share_password": "sharepswd",
+ idrac_default_args.update({"share_name": DELL_SHARE, "catalog_file_name": CATALOG,
+ "share_user": "shareuser", "share_password": SHARE_PWD,
"share_mnt": "sharmnt",
"reboot": True, "job_wait": False, "ignore_cert_warning": True,
"share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user",
- "idrac_password": "idrac_password", "idrac_port": 443
+ "idrac_password": "idrac_password", "idrac_port": 443, "proxy_support": "Off",
})
- mocker.patch(MODULE_PATH + "idrac_firmware.get_jobid",
- return_value="23451")
-
+ mocker.patch(MODULE_PATH + GET_JOBID, return_value="23451")
mocker.patch(MODULE_PATH + "idrac_firmware.urlparse",
return_value=ParseResult(scheme='http', netloc='downloads.dell.com',
path='/%7Eguido/Python.html',
@@ -478,148 +211,353 @@ class TestidracFirmware(FakeAnsibleModule):
idrac_connection_firmware_redfish_mock.get_job_status_redfish = "Status"
idrac_connection_firmware_redfish_mock.update_mgr.job_mgr.job_wait.return_value = "12345"
idrac_connection_firmware_mock.update_mgr.update_from_repo_url.return_value = {
- "update_status": {"job_details": {"data": {
- "StatusCode": 200,
- "body": {
- "PackageList": [
- {}]
- }
- }
- }
- }
+ "update_status": {"job_details": {"data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}}
}
idrac_connection_firmware_mock.update_mgr.update_from_dell_repo_url.return_value = {"job_details": {"Data": {
- "GetRepoBasedUpdateList_OUTPUT": {
- "Message": [
- {}]
- }
- }
+ "GetRepoBasedUpdateList_OUTPUT": {"Message": [{}]}}}
}
- }
- payload = {"ApplyUpdate": "True",
- "CatalogFile": "Catalog.xml",
- "IgnoreCertWarning": "On",
- "RebootNeeded": True,
- "UserName": "username",
- "Password": "psw"
- }
+ payload = {"ApplyUpdate": "True", "CatalogFile": CATALOG, "IgnoreCertWarning": "On", "RebootNeeded": True,
+ "UserName": "username", "Password": USER_PWD}
result = self.module.update_firmware_url_omsdk(f_module, idrac_connection_firmware_mock,
- "http://downloads.dell.com", "catalog.xml", True, True, True,
+ DELL_SHARE, CATALOG, True, True, True,
False, payload)
- assert result == (
- {'job_details': {'Data': {'GetRepoBasedUpdateList_OUTPUT': {'Message': [{}]}}}}, {})
-
- def test_update_firmware_url_omsdk(self, idrac_connection_firmware_mock, idrac_default_args, mocker,
- idrac_connection_firmware_redfish_mock):
- idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml",
- "share_user": "shareuser", "share_password": "sharepswd",
- "share_mnt": "sharmnt",
- "reboot": True, "job_wait": False, "ignore_cert_warning": True,
- "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user",
- "idrac_password": "idrac_password", "idrac_port": 443
- })
- mocker.patch(MODULE_PATH + "idrac_firmware.get_jobid",
- return_value="23451")
- mocker.patch(MODULE_PATH + "idrac_firmware.get_check_mode_status")
- idrac_connection_firmware_mock.use_redfish = True
- idrac_connection_firmware_mock.job_mgr.get_job_status_redfish.return_value = "23451"
- idrac_connection_firmware_mock.update_mgr.update_from_dell_repo_url.return_value = {
- "InstanceID": "JID_12345678"}
- f_module = self.get_module_mock(params=idrac_default_args)
- payload = {"ApplyUpdate": "True", "CatalogFile": "Catalog.xml", "IgnoreCertWarning": "On",
- "RebootNeeded": True, "UserName": "username", "Password": "psw"}
- result = self.module.update_firmware_url_omsdk(f_module, idrac_connection_firmware_mock,
- "http://downloads.dell.com/repo",
- "catalog.xml", True, True, True, True, payload)
- assert result[0] == {"InstanceID": "JID_12345678"}
-
- def _test_update_firmware_redfish(self, idrac_connection_firmware_mock, idrac_default_args, re_match_mock,
- mocker, idrac_connection_firmware_redfish_mock,
- fileonshare_idrac_firmware_mock):
- idrac_default_args.update({"share_name": "192.168.0.1:/share_name", "catalog_file_name": "catalog.xml",
- "share_user": "shareuser", "share_password": "sharepswd",
- "share_mnt": "sharmnt",
- "reboot": True, "job_wait": False, "ignore_cert_warning": True,
- "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user",
- "idrac_password": "idrac_password", "idrac_port": 443, 'apply_update': True
- })
- mocker.patch(MODULE_PATH + "idrac_firmware.SHARE_TYPE",
- return_value={"NFS": "NFS"})
- mocker.patch(MODULE_PATH + "idrac_firmware.eval",
- return_value={"PackageList": []})
- mocker.patch(MODULE_PATH + "idrac_firmware.wait_for_job_completion", return_value=({}, None))
- f_module = self.get_module_mock(params=idrac_default_args)
- re_mock = mocker.patch(MODULE_PATH + "idrac_firmware.re",
- return_value=MagicMock())
- re_mock.match(MagicMock(), MagicMock()).group.return_value = "3.60"
- mocker.patch(MODULE_PATH + "idrac_firmware.get_jobid",
- return_value="23451")
- idrac_connection_firmware_mock.idrac.update_mgr.job_mgr.get_job_status_redfish.return_value = "23451"
- idrac_connection_firmware_mock.ServerGeneration = "14"
- upd_share = fileonshare_idrac_firmware_mock
- upd_share.remote_addr.return_value = "192.168.0.1"
- upd_share.remote.share_name.return_value = "share_name"
- upd_share.remote_share_type.name.lower.return_value = "NFS"
- result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module)
- assert result['update_msg'] == "Successfully triggered the job to update the firmware."
-
- def _test_get_job_status(self, idrac_connection_firmware_mock, idrac_default_args,
- mocker, idrac_connection_firmware_redfish_mock):
- idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml",
- "share_user": "shareuser", "share_password": "sharepswd",
- "share_mnt": "sharmnt", "apply_update": False,
- "reboot": True, "job_wait": False, "ignore_cert_warning": True,
- "share_type": "http", "idrac_ip": "idrac_ip", "idrac_user": "idrac_user",
- "idrac_password": "idrac_password", "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_connection_firmware_redfish_mock.success = True
- idrac_connection_firmware_redfish_mock.json_data = {"JobStatus": "OK"}
- each_comp = {"JobID": "JID_1234567", "Messages": [{"Message": "test_message"}], "JobStatus": "Completed"}
- result = self.module.get_job_status(f_module, each_comp, None)
- assert result[1] is False
+ assert result == ({'job_details': {'Data': {'GetRepoBasedUpdateList_OUTPUT': {'Message': [{}]}}}}, {})
def test_message_verification(self, idrac_connection_firmware_mock, idrac_connection_firmware_redfish_mock,
idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "http://downloads.dell.com", "catalog_file_name": "catalog.xml",
- "share_user": "shareuser", "share_password": "sharepswd",
+ idrac_default_args.update({"share_name": DELL_SHARE, "catalog_file_name": CATALOG,
+ "share_user": "shareuser", "share_password": SHARE_PWD,
"share_mnt": "sharmnt", "apply_update": False,
"reboot": False, "job_wait": True, "ignore_cert_warning": True,
"idrac_ip": "idrac_ip", "idrac_user": "idrac_user",
- "idrac_password": "idrac_password", "idrac_port": 443})
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=("INSTANCENAME", False, False))
- # mocker.patch(MODULE_PATH + "idrac_firmware.re")
+ "idrac_password": "idrac_password", "idrac_port": 443, "proxy_support": "Off", })
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=("INSTANCENAME", False, False))
idrac_connection_firmware_redfish_mock.success = True
idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
assert result['update_msg'] == "Successfully fetched the applicable firmware update package list."
-
idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": False})
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
assert result['update_msg'] == "Successfully triggered the job to stage the firmware."
-
idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": True})
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
assert result['update_msg'] == "Successfully staged the applicable firmware update packages."
-
idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": True})
mocker.patch(MODULE_PATH + "idrac_firmware.update_firmware_url_omsdk",
return_value=({"Status": "Success"}, {"PackageList": []}))
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=({}, True, True))
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({}, True, True))
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
assert result['update_msg'] == "Successfully staged the applicable firmware update packages with error(s)."
-
idrac_default_args.update({"apply_update": True, "reboot": True, "job_wait": True})
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=({}, True, False))
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({}, True, False))
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
- assert result['update_msg'] == "Successfully updated the firmware."
-
+ assert result['update_msg'] == SUCCESS_MSG
idrac_default_args.update({"apply_update": True, "reboot": True, "job_wait": True})
- mocker.patch(MODULE_PATH + "idrac_firmware._convert_xmltojson", return_value=({}, True, True))
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({}, True, True))
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module.update_firmware_omsdk(idrac_connection_firmware_mock, f_module)
assert result['update_msg'] == "Firmware update failed."
+
+ def test_update_firmware_redfish_success_case03(self, idrac_connection_firmware_mock,
+ idrac_connection_firmware_redfish_mock,
+ idrac_default_args, mocker):
+ idrac_default_args.update({"share_name": DELL_SHARE, "catalog_file_name": CATALOG,
+ "share_user": "UserName", "share_password": SHARE_PWD, "share_mnt": "shrmnt",
+ "reboot": True, "job_wait": False, "ignore_cert_warning": True, "apply_update": True})
+ mocker.patch(MODULE_PATH + UPDATE_URL,
+ return_value=({"job_details": {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}},
+ {"Data": {"StatusCode": 200, "body": {"PackageList": [{}]}}}))
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON,
+ return_value=({"BaseLocation": None, "ComponentID": "18981", "ComponentType": "APAC", "Criticality": "3",
+ "DisplayName": "Dell OS Driver Pack", "JobID": None,
+ "PackageName": "Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
+ "PackagePath": "FOLDER05902898M/1/Drivers-for-OS-Deployment_Application_X0DW6_WN64_19.10.12_A00.EXE",
+ "PackageVersion": "19.10.12", "RebootType": "NONE",
+ "Target": "DCIM:INSTALLED#802__DriverPack.Embedded.1:LC.Embedded.1"}, True))
+ f_module = self.get_module_mock(params=idrac_default_args)
+ idrac_connection_firmware_mock.re_match_mock.group = Mock(return_value="3.30")
+ idrac_connection_firmware_redfish_mock.success = True
+ idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "3.30"}
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=("INSTANCENAME", False, False))
+ idrac_connection_firmware_mock.ServerGeneration = "14"
+ result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert result["changed"] is False
+ assert result["update_msg"] == "Successfully triggered the job to update the firmware."
+ idrac_default_args.update({"proxy_support": "ParametersProxy", "proxy_server": "127.0.0.2", "proxy_port": 3128,
+ "proxy_type": "HTTP", "proxy_uname": "username", "proxy_passwd": "pwd", "apply_update": False})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ f_module.check_mode = True
+ mocker.patch(MODULE_PATH + WAIT_FOR_JOB, return_value=({"JobStatus": "Ok"}, ""))
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({"PackageList": []}, False, False))
+ mocker.patch(MODULE_PATH + UPDATE_URL,
+ return_value=({"JobStatus": "Ok"}, {"Status": "Success", "JobStatus": "Ok",
+ "Data": {"GetRepoBasedUpdateList_OUTPUT": {}}}))
+ with pytest.raises(Exception) as exc:
+ self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert exc.value.args[0] == 'Unable to complete the firmware repository download.'
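+ # CIFS share where no job ID is returned: the repository download is again reported as incomplete.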
+ idrac_default_args.update({"share_name": "\\\\127.0.0.1\\cifsshare"})
+ idrac_connection_firmware_mock.json_data = {"Status": "Success"}
+ mocker.patch(MODULE_PATH + GET_JOBID, return_value=None)
+ mocker.patch(MODULE_PATH + WAIT_FOR_JOB,
+ return_value=({"JobStatus": "Ok"}, {"job_details": "", "JobStatus": "Ok"}))
+ with pytest.raises(Exception) as exc:
+ self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert exc.value.args[0] == 'Unable to complete the firmware repository download.'
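+ # With applicable updates pending in check mode, the module reports changes found to commit.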
+ idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": True})
+ mocker.patch(MODULE_PATH + WAIT_FOR_JOB,
+ return_value=({"JobStatus": "OK"}, {"job_details": "", "JobStatus": "OK"}))
+ with pytest.raises(Exception) as exc:
+ self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert exc.value.args[0] == 'Changes found to commit!'
+ f_module.check_mode = False
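+ # Outside check mode over an HTTPS share: first the successful update path, then the failure path.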
+ idrac_default_args.update({"apply_update": True, "reboot": True, "job_wait": True, "share_name": "https://127.0.0.2/httpshare"})
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({"PackageList": []}, True, False))
+ mocker.patch(MODULE_PATH + UPDATE_URL, return_value=(
+ {"JobStatus": "Ok"}, {"Status": "Success", "JobStatus": "Ok", "PackageList": [],
+ "Data": {"GetRepoBasedUpdateList_OUTPUT": {}}}))
+ result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert result["update_msg"] == SUCCESS_MSG
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({"PackageList": []}, True, True))
+ result = self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert result["update_msg"] == "Firmware update failed."
+ idrac_default_args.update({"apply_update": False})
+ mocker.patch(MODULE_PATH + UPDATE_URL,
+ return_value=({"JobStatus": "Critical"}, {"Status": "Success", "JobStatus": "Critical", "PackageList": [],
+ "Data": {"GetRepoBasedUpdateList_OUTPUT": {}}}))
+ with pytest.raises(Exception) as exc:
+ self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert exc.value.args[0] == 'Unable to complete the repository update.'
+ idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": True, "share_name": "https://127.0.0.3/httpshare"})
+ with pytest.raises(Exception) as exc:
+ self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert exc.value.args[0] == 'Firmware update failed.'
+ idrac_default_args.update({"apply_update": True, "reboot": False, "job_wait": False, "share_name": "https://127.0.0.4/httpshare"})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ f_module.check_mode = True
+ mocker.patch(MODULE_PATH + CONVERT_XML_JSON, return_value=({"PackageList": []}, True, False))
+ mocker.patch(MODULE_PATH + UPDATE_URL,
+ return_value=({"JobStatus": "OK"}, {"Status": "Success", "JobStatus": "OK", "PackageList": ['test'],
+ "Data": {"key": "value"}}))
+ with pytest.raises(Exception) as exc:
+ self.module.update_firmware_redfish(idrac_connection_firmware_mock, f_module, {})
+ assert exc.value.args[0] == 'Changes found to commit!'
+
+ def test_main_idrac_firmware_success_case(self, idrac_connection_firmware_redfish_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True})
+ message = {"Status": "Success", "update_msg": SUCCESS_MSG,
+ "update_status": "Success", 'changed': False, 'failed': False}
+ idrac_connection_firmware_redfish_mock.success = True
+ idrac_connection_firmware_redfish_mock.json_data = {}
+ mocker.patch(MODULE_PATH + 'idrac_firmware.update_firmware_redfish', return_value=message)
+ result = self._run_module(idrac_default_args)
+ assert result == {'msg': 'Successfully updated the firmware.', 'update_status': 'Success',
+ 'changed': False, 'failed': False}
+
+ def test_main_HTTPError_case(self, idrac_default_args, idrac_connection_firmware_redfish_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt",
+ "reboot": True, "job_wait": True})
+ json_str = to_text(json.dumps({"data": "out"}))
+ idrac_connection_firmware_redfish_mock.success = True
+ idrac_connection_firmware_redfish_mock.json_data = {"FirmwareVersion": "2.70"}
+ mocker.patch(MODULE_PATH + 'idrac_firmware.update_firmware_omsdk',
+ side_effect=HTTPError('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str)))
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert 'msg' in result
+ assert result['failed'] is True
+
+ def test_get_jobid(self, idrac_connection_firmware_mock, idrac_default_args):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ idrac_connection_firmware_mock.status_code = 202
+ idrac_connection_firmware_mock.headers = {"Location": "/uri/JID_123456789"}
+ result = self.module.get_jobid(f_module, idrac_connection_firmware_mock)
+ assert result == "JID_123456789"
+ idrac_connection_firmware_mock.headers = {"Location": None}
+ with pytest.raises(Exception) as exc:
+ self.module.get_jobid(f_module, idrac_connection_firmware_mock)
+ assert exc.value.args[0] == 'Failed to update firmware.'
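+ # A non-202 status code also raises the firmware update failure message.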
+ idrac_connection_firmware_mock.status_code = 200
+ with pytest.raises(Exception) as exc:
+ self.module.get_jobid(f_module, idrac_connection_firmware_mock)
+ assert exc.value.args[0] == 'Failed to update firmware.'
+
+ def test_handle_HTTP_error(self, idrac_default_args, mocker):
+ error_message = {"error": {"@Message.ExtendedInfo": [{"Message": "Http error message", "MessageId": "SUP029"}]}}
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + 'idrac_firmware.json.load', return_value=error_message)
+ with pytest.raises(Exception) as exc:
+ self.module.handle_HTTP_error(f_module, error_message)
+ assert exc.value.args[0] == 'Http error message'
+
+ def test_get_job_status(self, idrac_default_args, idrac_connection_firmware_redfish_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ each_comp = {"JobID": "JID_123456789", "Message": "Invalid", "JobStatus": "Ok"}
+ idrac_connection_firmware_redfish_mock.job_mgr.job_wait.return_value = {"JobStatus": "Completed", "Message": "Invalid"}
+ comp, failed = self.module.get_job_status(f_module, each_comp, idrac_connection_firmware_redfish_mock)
+ assert comp == {'JobID': 'JID_123456789', 'Message': 'Invalid', 'JobStatus': 'Critical'}
+ assert failed
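+ # A component already marked Critical takes its message from the tracked job response and is still reported as failed.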
+ mocker.patch(MODULE_PATH + WAIT_FOR_JOB,
+ return_value=(idrac_connection_firmware_redfish_mock, ""))
+ each_comp = {"JobID": "JID_123456789", "Message": "Invalid", "JobStatus": "Critical"}
+ idrac_connection_firmware_redfish_mock.json_data = {"Messages": [{"Message": "Success"}], "JobStatus": "Critical"}
+ comp, failed = self.module.get_job_status(f_module, each_comp, None)
+ assert comp == {'JobID': 'JID_123456789', 'Message': 'Success', 'JobStatus': 'Critical'}
+ assert failed
+
+ def test_wait_for_job_completion(self, idrac_default_args, idrac_connection_firm_mock, redfish_response_mock):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ result, msg = self.module.wait_for_job_completion(f_module, "JobService/Jobs/JID_1234567890")
+ assert msg is None
+ redfish_response_mock.json_data = {"Members": {}, "JobState": "Completed", "PercentComplete": 100}
+ result, msg = self.module.wait_for_job_completion(f_module, "JobService/Jobs/JID_12345678", job_wait=True)
+ assert result.json_data["JobState"] == "Completed"
+ redfish_response_mock.json_data = {"Members": {}, "JobState": "New", "PercentComplete": 0}
+ result, msg = self.module.wait_for_job_completion(f_module, "JobService/Jobs/JID_123456789", job_wait=True, apply_update=True)
+ assert result.json_data["JobState"] == "New"
+
+ @pytest.mark.parametrize("exc_type", [TypeError])
+ def test_wait_for_job_completion_exception(self, exc_type, idrac_default_args, idrac_connection_firmware_redfish_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + TIME_SLEEP, return_value=None)
+ if exc_type == TypeError:
+ idrac_connection_firmware_redfish_mock.invoke_request.side_effect = exc_type("exception message")
+ result, msg = self.module.wait_for_job_completion(f_module, "JobService/Jobs/JID_123456789", job_wait=True)
+ assert msg == "Job wait timed out after 120.0 minutes"
+
+ def test_get_check_mode_status_check_mode(self, idrac_default_args):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ f_module.check_mode = True
+ status = {"job_details": {"Data": {"GetRepoBasedUpdateList_OUTPUT": {
+ "Message": "Firmware versions on server match catalog, applicable updates are not present in the repository"}}},
+ "JobStatus": "Completed"}
+ with pytest.raises(Exception) as ex:
+ self.module.get_check_mode_status(status, f_module)
+ assert ex.value.args[0] == "No changes found to commit!"
+ f_module.check_mode = False
+ with pytest.raises(Exception) as ex:
+ self.module.get_check_mode_status(status, f_module)
+ assert ex.value.args[0] == "The catalog in the repository specified in the operation has the same firmware versions as currently present on the server."
+
+ def test_update_firmware_url_redfish(self, idrac_default_args, idrac_connection_firmware_redfish_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + TIME_SLEEP, return_value=None)
+ mocker.patch(MODULE_PATH + 'idrac_firmware.get_error_syslog', return_value=(True, "Failed to update firmware."))
+ mocker.patch(MODULE_PATH + WAIT_FOR_JOB, return_value=None)
+ mocker.patch(MODULE_PATH + 'idrac_firmware.get_jobid', return_value="JID_123456789")
+ mocker.patch(MODULE_PATH + 'idrac_firmware.handle_HTTP_error', return_value=None)
+ actions = {"Actions": {"#DellSoftwareInstallationService.InstallFromRepository": {"target": "/api/installRepository"},
+ "#DellSoftwareInstallationService.GetRepoBasedUpdateList": {"target": "/api/getRepoBasedUpdateList"}}}
+ idrac_connection_firmware_redfish_mock.json_data = {"Entries": {"@odata.id": "/api/log"}, "DateTime": "2023-10-05"}
+ with pytest.raises(Exception) as ex:
+ self.module.update_firmware_url_redfish(f_module, idrac_connection_firmware_redfish_mock,
+ "https://127.0.0.1/httpshare", True, True, True, {}, actions)
+ assert ex.value.args[0] == "Failed to update firmware."
+ mocker.patch(MODULE_PATH + 'idrac_firmware.get_error_syslog', return_value=(False, ""))
+ mocker.patch(MODULE_PATH + WAIT_FOR_JOB, return_value=(None, "Successfully updated."))
+ result, msg = self.module.update_firmware_url_redfish(f_module, idrac_connection_firmware_redfish_mock,
+ "https://127.0.0.1/httpshare", True, True, True, {}, actions)
+ assert result["update_msg"] == "Successfully updated."
+
+ def test_get_error_syslog(self, idrac_default_args, idrac_connection_firm_mock, redfish_response_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ self.get_module_mock(params=idrac_default_args)
+ redfish_response_mock.json_data = {"Members": [{"MessageId": "SYS229"}], "Entries": {"@odata.id": "/api/log"}}
+ mocker.patch(MODULE_PATH + TIME_SLEEP, return_value=None)
+ result = self.module.get_error_syslog(idrac_connection_firm_mock, "", "/api/service")
+ assert result[0]
+
+ def test_update_firmware_omsdk(self, idrac_default_args, idrac_connection_firmware_redfish_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD, "ignore_cert_warning": False,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ mocker.patch(MODULE_PATH + 'idrac_firmware.FileOnShare', return_value=None)
+ mocker.patch(MODULE_PATH + 'idrac_firmware.get_check_mode_status', return_value=None)
+ mocker.patch(MODULE_PATH + 'idrac_firmware._convert_xmltojson', return_value=([], True, False))
+ status = {"job_details": {"Data": {"GetRepoBasedUpdateList_OUTPUT": {"PackageList": []}}}, "JobStatus": "Completed"}
+ idrac_connection_firmware_redfish_mock.update_mgr.update_from_repo.return_value = status
+ result = self.module.update_firmware_omsdk(idrac_connection_firmware_redfish_mock, f_module)
+ assert result['update_msg'] == 'Successfully triggered the job to update the firmware.'
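+ # In check mode, pending applicable updates are reported as changes to commit.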
+ f_module.check_mode = True
+ with pytest.raises(Exception) as ex:
+ self.module.update_firmware_omsdk(idrac_connection_firmware_redfish_mock, f_module)
+ assert ex.value.args[0] == "Changes found to commit!"
+ status.update({"JobStatus": "InProgress"})
+ with pytest.raises(Exception) as ex:
+ self.module.update_firmware_omsdk(idrac_connection_firmware_redfish_mock, f_module)
+ assert ex.value.args[0] == "Unable to complete the firmware repository download."
+ status = {"job_details": {"Data": {}, "PackageList": []}, "JobStatus": "Completed", "Status": "Failed"}
+ idrac_connection_firmware_redfish_mock.update_mgr.update_from_repo.return_value = status
+ with pytest.raises(Exception) as ex:
+ self.module.update_firmware_omsdk(idrac_connection_firmware_redfish_mock, f_module)
+ assert ex.value.args[0] == "No changes found to commit!"
+ idrac_default_args.update({"apply_update": False})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ f_module.check_mode = False
+ with pytest.raises(Exception) as ex:
+ self.module.update_firmware_omsdk(idrac_connection_firmware_redfish_mock, f_module)
+ assert ex.value.args[0] == "Unable to complete the repository update."
+
+ @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, IOError, AssertionError, OSError])
+ def test_main(self, idrac_default_args, idrac_connection_firmware_redfish_mock, mocker, exc_type):
+ idrac_default_args.update({"share_name": "sharename", "catalog_file_name": CATALOG,
+ "share_user": "sharename", "share_password": SHARE_PWD, "ignore_cert_warning": False,
+ "share_mnt": "sharmnt", "reboot": True, "job_wait": True, "apply_update": True})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ f_module.check_mode = True
+ idrac_connection_firmware_redfish_mock.status_code = 400
+ idrac_connection_firmware_redfish_mock.success = False
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + VALIDATE_CATALOG,
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH + VALIDATE_CATALOG,
+ side_effect=exc_type(TEST_HOST, 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ if exc_type == HTTPError:
+ result = self._run_module(idrac_default_args)
+ assert result['failed'] is True
+ elif exc_type == URLError:
+ result = self._run_module(idrac_default_args)
+ assert result['unreachable'] is True
+ else:
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ if exc_type == HTTPError:
+ assert 'error_info' in result
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py
index 787dba2c7..b821c9556 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_firmware_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,7 +15,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_firmware_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock, PropertyMock
from pytest import importorskip
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -67,7 +67,7 @@ class TestFirmware(FakeAnsibleModule):
if exc_type not in [HTTPError, SSLValidationError]:
type(obj2).InstalledFirmware = PropertyMock(side_effect=exc_type('test'))
else:
- type(obj2).InstalledFirmware = PropertyMock(side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ type(obj2).InstalledFirmware = PropertyMock(side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_license.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_license.py
new file mode 100644
index 000000000..a07cc1eb1
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_license.py
@@ -0,0 +1,746 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.7.0
+# Copyright (C) 2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+from io import StringIO
+import json
+import tempfile
+import os
+
+import pytest
+from urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils._text import to_text
+from ansible_collections.dellemc.openmanage.plugins.modules import idrac_license
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock
+from ansible_collections.dellemc.openmanage.plugins.modules.idrac_license import main
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.idrac_license.'
+MODULE_UTILS_PATH = 'ansible_collections.dellemc.openmanage.plugins.module_utils.utils.'
+
+INVALID_LICENSE_MSG = "License with ID '{license_id}' does not exist on the iDRAC."
+SUCCESS_EXPORT_MSG = "Successfully exported the license."
+SUCCESS_DELETE_MSG = "Successfully deleted the license."
+SUCCESS_IMPORT_MSG = "Successfully imported the license."
+FAILURE_MSG = "Unable to '{operation}' the license with id '{license_id}' as it does not exist."
+FAILURE_IMPORT_MSG = "Unable to import the license."
+NO_FILE_MSG = "License file not found."
+UNSUPPORTED_FIRMWARE_MSG = "iDRAC firmware version is not supported."
+NO_OPERATION_SKIP_MSG = "Task is skipped as none of import, export or delete is specified."
+INVALID_FILE_MSG = "File extension is invalid. Supported extensions for local 'share_type' " \
+ "are: .txt and .xml, and for network 'share_type' is: .xml."
+INVALID_DIRECTORY_MSG = "Provided directory path '{path}' is not valid."
+INSUFFICIENT_DIRECTORY_PERMISSION_MSG = "Provided directory path '{path}' is not writable. " \
+ "Please check if the directory has appropriate permissions"
+MISSING_FILE_NAME_PARAMETER_MSG = "Missing required parameter 'file_name'."
+REDFISH = "/redfish/v1"
+
+LIC_GET_LICENSE_URL = "License.get_license_url"
+REDFISH_LICENSE_URL = "/redfish/v1/license"
+REDFISH_BASE_API = '/redfish/v1/api'
+MANAGER_URI_ONE = "/redfish/v1/managers/1"
+API_ONE = "/local/action"
+EXPORT_URL_MOCK = '/redfish/v1/export_license'
+IMPORT_URL_MOCK = '/redfish/v1/import_license'
+API_INVOKE_MOCKER = "iDRACRedfishAPI.invoke_request"
+ODATA = "@odata.id"
+IDRAC_ID = "iDRAC.Embedded.1"
+LIC_FILE_NAME = 'test_lic.txt'
+HTTPS_PATH = "https://testhost.com"
+HTTP_ERROR = "http error message"
+APPLICATION_JSON = "application/json"
+
+
+class TestLicense(FakeAnsibleModule):
+ module = idrac_license
+
+ @pytest.fixture
+ def idrac_license_mock(self):
+ idrac_obj = MagicMock()
+ return idrac_obj
+
+ @pytest.fixture
+ def idrac_connection_license_mock(self, mocker, idrac_license_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
+ return_value=idrac_license_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = idrac_license_mock
+ return idrac_conn_mock
+
+ def test_check_license_id(self, idrac_default_args, idrac_connection_license_mock,
+ idrac_license_mock, mocker):
+ mocker.patch(MODULE_PATH + LIC_GET_LICENSE_URL,
+ return_value=REDFISH_LICENSE_URL)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ lic_obj = self.module.License(
+ idrac_connection_license_mock, f_module)
+
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ data = lic_obj.check_license_id(license_id="1234")
+ assert data.json_data == {"license_id": "1234"}
+
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ side_effect=HTTPError(HTTPS_PATH, 400,
+ HTTP_ERROR,
+ {"accept-type": APPLICATION_JSON},
+ StringIO("json_str")))
+ with pytest.raises(Exception) as exc:
+ lic_obj.check_license_id(license_id="1234")
+ assert exc.value.args[0] == INVALID_LICENSE_MSG.format(license_id="1234")
+
+ def test_get_license_url(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ v1_resp = {"LicenseService": {ODATA: "/redfish/v1/LicenseService"},
+ "Licenses": {ODATA: "/redfish/v1/LicenseService/Licenses"}}
+ mocker.patch(MODULE_PATH + "get_dynamic_uri",
+ return_value=v1_resp)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ lic_obj = self.module.License(
+ idrac_connection_license_mock, f_module)
+ data = lic_obj.get_license_url()
+ assert data == "/redfish/v1/LicenseService/Licenses"
+
+ def test_get_job_status_success(self, mocker, idrac_license_mock):
+ # Mocking necessary objects and functions
+ module_mock = self.get_module_mock()
+ license_job_response_mock = mocker.MagicMock()
+ license_job_response_mock.headers.get.return_value = HTTPS_PATH + "/job_tracking/12345"
+
+ mocker.patch(MODULE_PATH + "remove_key", return_value={"job_details": "mocked_job_details"})
+ mocker.patch(MODULE_PATH + "validate_and_get_first_resource_id_uri", return_value=[MANAGER_URI_ONE])
+
+ # Creating an instance of the class
+ obj_under_test = self.module.License(idrac_license_mock, module_mock)
+
+ # Mocking the idrac_redfish_job_tracking function to simulate successful job tracking
+ mocker.patch(MODULE_PATH + "idrac_redfish_job_tracking", return_value=(False, "mocked_message", {"job_details": "mocked_job_details"}, 0))
+
+ # Calling the method under test
+ result = obj_under_test.get_job_status(license_job_response_mock)
+
+ # Assertions
+ assert result == {"job_details": "mocked_job_details"}
+
+ def test_get_job_status_failure(self, mocker, idrac_license_mock):
+ # Mocking necessary objects and functions
+ module_mock = self.get_module_mock()
+ license_job_response_mock = mocker.MagicMock()
+ license_job_response_mock.headers.get.return_value = HTTPS_PATH + "/job_tracking/12345"
+
+ mocker.patch(MODULE_PATH + "remove_key", return_value={"Message": "None"})
+ mocker.patch(MODULE_PATH + "validate_and_get_first_resource_id_uri", return_value=[MANAGER_URI_ONE])
+
+ # Creating an instance of the class
+ obj_under_test = self.module.License(idrac_license_mock, module_mock)
+
+ # Mocking the idrac_redfish_job_tracking function to simulate failed job tracking
+ mocker.patch(MODULE_PATH + "idrac_redfish_job_tracking", return_value=(True, "None", {"Message": "None"}, 0))
+
+ # Mocking module.exit_json
+ exit_json_mock = mocker.patch.object(module_mock, "exit_json")
+
+ # Calling the method under test
+ result = obj_under_test.get_job_status(license_job_response_mock)
+
+ # Assertions
+ exit_json_mock.assert_called_once_with(msg="None", failed=True, job_details={"Message": "None"})
+ assert result == {"Message": "None"}
+
+ def test_get_share_details(self, idrac_connection_license_mock):
+ # Create a mock module object
+ module_mock = MagicMock()
+ module_mock.params.get.return_value = {
+ 'ip_address': 'XX.XX.XX.XX',
+ 'share_name': 'my_share',
+ 'username': 'my_user',
+ 'password': 'my_password'
+ }
+
+ # Create an instance of the License class
+ lic_obj = self.module.License(idrac_connection_license_mock, module_mock)
+
+ # Call the get_share_details method
+ result = lic_obj.get_share_details()
+
+ # Assert the result
+ assert result == {
+ 'IPAddress': 'XX.XX.XX.XX',
+ 'ShareName': 'my_share',
+ 'UserName': 'my_user',
+ 'Password': 'my_password'
+ }
+
+ def test_get_proxy_details(self, idrac_connection_license_mock):
+ # Create a mock module object
+ module_mock = MagicMock()
+ module_mock.params.get.return_value = {
+ 'ip_address': 'XX.XX.XX.XX',
+ 'share_name': 'my_share',
+ 'username': 'my_user',
+ 'password': 'my_password',
+ 'share_type': 'http',
+ 'ignore_certificate_warning': 'off',
+ 'proxy_support': 'parameters_proxy',
+ 'proxy_type': 'http',
+ 'proxy_server': 'proxy.example.com',
+ 'proxy_port': 8080,
+ 'proxy_username': 'my_username',
+ 'proxy_password': 'my_password'
+ }
+
+ # Create an instance of the License class
+ lic_obj = self.module.License(idrac_connection_license_mock, module_mock)
+
+ # Call the get_proxy_details method
+ result = lic_obj.get_proxy_details()
+
+ # Define the expected result
+ expected_result = {
+ 'IPAddress': 'XX.XX.XX.XX',
+ 'ShareName': 'my_share',
+ 'UserName': 'my_user',
+ 'Password': 'my_password',
+ 'ShareType': 'HTTP',
+ 'IgnoreCertWarning': 'Off',
+ 'ProxySupport': 'ParametersProxy',
+ 'ProxyType': 'HTTP',
+ 'ProxyServer': 'proxy.example.com',
+ 'ProxyPort': '8080',
+ 'ProxyUname': 'my_username',
+ 'ProxyPasswd': 'my_password'
+ }
+
+ # Assert the result
+ assert result == expected_result
+
+
+class TestDeleteLicense:
+ @pytest.fixture
+ def delete_license_mock(self):
+ delete_license_obj = MagicMock()
+ return delete_license_obj
+
+ @pytest.fixture
+ def idrac_connection_license_mock(self, mocker, delete_license_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
+ return_value=delete_license_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = delete_license_mock
+ return idrac_conn_mock
+
+ def test_execute_delete_license_success(self, mocker, idrac_connection_license_mock):
+ mocker.patch(MODULE_PATH + LIC_GET_LICENSE_URL,
+ return_value=REDFISH_LICENSE_URL)
+ f_module = MagicMock()
+ f_module.params = {'license_id': '1234'}
+ delete_license_obj = idrac_license.DeleteLicense(idrac_connection_license_mock, f_module)
+ delete_license_obj.idrac.invoke_request.return_value.status_code = 204
+ delete_license_obj.execute()
+ f_module.exit_json.assert_called_once_with(msg=SUCCESS_DELETE_MSG, changed=True)
+
+ def test_execute_delete_license_failure(self, mocker, idrac_connection_license_mock):
+ mocker.patch(MODULE_PATH + LIC_GET_LICENSE_URL,
+ return_value=REDFISH_LICENSE_URL)
+ f_module = MagicMock()
+ f_module.params = {'license_id': '5678'}
+ delete_license_obj = idrac_license.DeleteLicense(idrac_connection_license_mock, f_module)
+ delete_license_obj.idrac.invoke_request.return_value.status_code = 404
+ delete_license_obj.execute()
+ f_module.exit_json.assert_called_once_with(msg=FAILURE_MSG.format(operation="delete", license_id="5678"), failed=True)
+
+
+class TestExportLicense(FakeAnsibleModule):
+ module = idrac_license
+
+ @pytest.fixture
+ def idrac_license_mock(self):
+ idrac_obj = MagicMock()
+ return idrac_obj
+
+ @pytest.fixture
+ def idrac_connection_license_mock(self, mocker, idrac_license_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
+ return_value=idrac_license_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = idrac_license_mock
+ return idrac_conn_mock
+
+ def test_export_license_local(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ tmp_path = tempfile.gettempdir()
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'share_name': str(tmp_path),
+ 'file_name': 'test_lic'
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ export_license_obj = self.module.ExportLicense(idrac_connection_license_mock, f_module)
+ result = export_license_obj._ExportLicense__export_license_local(EXPORT_URL_MOCK)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+ assert os.path.exists(f"{tmp_path}/test_lic_iDRAC_license.txt")
+ if os.path.exists(f"{tmp_path}/test_lic_iDRAC_license.txt"):
+ os.remove(f"{tmp_path}/test_lic_iDRAC_license.txt")
+
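+ # Without an explicit file_name, the exported file name falls back to the license_id.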
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'share_name': str(tmp_path),
+ }
+ }
+ idrac_default_args.update(export_params)
+ result = export_license_obj._ExportLicense__export_license_local(EXPORT_URL_MOCK)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+ assert os.path.exists(f"{tmp_path}/test_license_id_iDRAC_license.txt")
+ if os.path.exists(f"{tmp_path}/test_license_id_iDRAC_license.txt"):
+ os.remove(f"{tmp_path}/test_license_id_iDRAC_license.txt")
+
+ def test_export_license_http(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'http',
+ 'ignore_certificate_warning': 'off'
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ export_license_obj = self.module.ExportLicense(idrac_connection_license_mock, f_module)
+ result = export_license_obj._ExportLicense__export_license_http(EXPORT_URL_MOCK)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+
+ def test_export_license_cifs(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'cifs',
+ 'ignore_certificate_warning': 'off',
+ 'workgroup': "mydomain"
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ export_license_obj = self.module.ExportLicense(idrac_connection_license_mock, f_module)
+ result = export_license_obj._ExportLicense__export_license_cifs(EXPORT_URL_MOCK)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+
+ def test_export_license_nfs(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'nfs',
+ 'ignore_certificate_warning': 'off'
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ export_license_obj = self.module.ExportLicense(idrac_connection_license_mock, f_module)
+ result = export_license_obj._ExportLicense__export_license_nfs(EXPORT_URL_MOCK)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+
+ def test_get_export_license_url(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'local',
+ 'ignore_certificate_warning': 'off'
+ }
+ }
+ mocker.patch(MODULE_PATH + "validate_and_get_first_resource_id_uri",
+ return_value=(REDFISH, None))
+ mocker.patch(MODULE_PATH + "get_dynamic_uri",
+ return_value={"Links": {"Oem": {"Dell": {"DellLicenseManagementService": {ODATA: "/LicenseService"}}}},
+ "Actions": {"#DellLicenseManagementService.ExportLicense": {"target": API_ONE}}})
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ export_license_obj = self.module.ExportLicense(idrac_connection_license_mock, f_module)
+ result = export_license_obj._ExportLicense__get_export_license_url()
+ assert result == API_ONE
+
+ mocker.patch(MODULE_PATH + "validate_and_get_first_resource_id_uri",
+ return_value=(REDFISH, "error"))
+ with pytest.raises(Exception) as exc:
+ export_license_obj._ExportLicense__get_export_license_url()
+ assert exc.value.args[0] == "error"
+
+ def test_execute(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ share_type = 'local'
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': share_type
+ }
+ }
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ mocker.patch(MODULE_PATH + "License.check_license_id")
+ mocker.patch(MODULE_PATH + "ExportLicense._ExportLicense__get_export_license_url",
+ return_value="/License/url")
+ mocker.patch(MODULE_PATH + "ExportLicense.get_job_status",
+ return_value={"JobId": "JID1234"})
+ idr_obj = MagicMock()
+ idr_obj.status_code = 200
+
+ mocker.patch(MODULE_PATH + "ExportLicense._ExportLicense__export_license_local",
+ return_value=idr_obj)
+ export_license_obj = self.module.ExportLicense(idrac_connection_license_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ export_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_EXPORT_MSG
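+ # The http, cifs, and nfs share types follow the same successful export path;
+ # the final https case with a 400 response exercises the export failure message.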
+
+ export_params.get('share_parameters')["share_type"] = "http"
+ mocker.patch(MODULE_PATH + "ExportLicense._ExportLicense__export_license_http",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ export_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_EXPORT_MSG
+
+ export_params.get('share_parameters')["share_type"] = "cifs"
+ mocker.patch(MODULE_PATH + "ExportLicense._ExportLicense__export_license_cifs",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ export_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_EXPORT_MSG
+
+ export_params.get('share_parameters')["share_type"] = "nfs"
+ mocker.patch(MODULE_PATH + "ExportLicense._ExportLicense__export_license_nfs",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ export_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_EXPORT_MSG
+
+ export_params.get('share_parameters')["share_type"] = "https"
+ idr_obj.status_code = 400
+ mocker.patch(MODULE_PATH + "ExportLicense._ExportLicense__export_license_http",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ export_license_obj.execute()
+ assert exc.value.args[0] == FAILURE_MSG.format(operation="export", license_id="test_license_id")
+
+
+class TestImportLicense(FakeAnsibleModule):
+ module = idrac_license
+
+ @pytest.fixture
+ def idrac_license_mock(self):
+ idrac_obj = MagicMock()
+ return idrac_obj
+
+ @pytest.fixture
+ def idrac_connection_license_mock(self, mocker, idrac_license_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
+ return_value=idrac_license_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = idrac_license_mock
+ return idrac_conn_mock
+
+ def test_execute(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ share_type = 'local'
+ import_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic.xml',
+ 'share_type': share_type
+ }
+ }
+ idrac_default_args.update(import_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ mocker.patch(MODULE_PATH + "ImportLicense._ImportLicense__get_import_license_url",
+ return_value="/License/url")
+ mocker.patch(MODULE_PATH + "get_manager_res_id",
+ return_value=IDRAC_ID)
+ mocker.patch(MODULE_PATH + "ImportLicense.get_job_status",
+ return_value={"JobId": "JID1234"})
+ idr_obj = MagicMock()
+ idr_obj.status_code = 200
+
+ mocker.patch(MODULE_PATH + "ImportLicense._ImportLicense__import_license_local",
+ return_value=idr_obj)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ import_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_IMPORT_MSG
+
+ import_params.get('share_parameters')["share_type"] = "http"
+ mocker.patch(MODULE_PATH + "ImportLicense._ImportLicense__import_license_http",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ import_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_IMPORT_MSG
+
+ import_params.get('share_parameters')["share_type"] = "cifs"
+ mocker.patch(MODULE_PATH + "ImportLicense._ImportLicense__import_license_cifs",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ import_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_IMPORT_MSG
+
+ import_params.get('share_parameters')["share_type"] = "nfs"
+ mocker.patch(MODULE_PATH + "ImportLicense._ImportLicense__import_license_nfs",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ import_license_obj.execute()
+ assert exc.value.args[0] == SUCCESS_IMPORT_MSG
+
+ import_params.get('share_parameters')["share_type"] = "https"
+ idr_obj.status_code = 400
+ mocker.patch(MODULE_PATH + "ImportLicense._ImportLicense__import_license_http",
+ return_value=idr_obj)
+ with pytest.raises(Exception) as exc:
+ import_license_obj.execute()
+ assert exc.value.args[0] == FAILURE_IMPORT_MSG
+
+ def test_import_license_local(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ tmp_path = tempfile.gettempdir()
+ import_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'share_name': 'doesnotexistpath',
+ 'file_name': LIC_FILE_NAME
+ }
+ }
+ idrac_default_args.update(import_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ import_license_obj._ImportLicense__import_license_local(EXPORT_URL_MOCK, IDRAC_ID)
+ assert exc.value.args[0] == INVALID_DIRECTORY_MSG.format(path='doesnotexistpath')
+
+ import_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'share_name': str(tmp_path),
+ 'file_name': LIC_FILE_NAME
+ }
+ }
+ file_name = os.path.join(tmp_path, LIC_FILE_NAME)
+ with open(file_name, "w") as fp:
+ fp.writelines("license_file")
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(import_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ result = import_license_obj._ImportLicense__import_license_local(EXPORT_URL_MOCK, IDRAC_ID)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+ assert os.path.exists(file_name)
+
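+ # An LIC018 error response from iDRAC surfaces its extended-info message directly.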
+ json_str = to_text(json.dumps({"error": {'@Message.ExtendedInfo': [
+ {
+ 'MessageId': "LIC018",
+ "Message": "Already imported"
+ }
+ ]}}))
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ side_effect=HTTPError(HTTPS_PATH, 400, HTTP_ERROR,
+ {"accept-type": APPLICATION_JSON}, StringIO(json_str)))
+ with pytest.raises(Exception) as exc:
+ import_license_obj._ImportLicense__import_license_local(EXPORT_URL_MOCK, IDRAC_ID)
+ assert exc.value.args[0] == "Already imported"
+
+ if os.path.exists(file_name):
+ os.remove(file_name)
+
+ def test_import_license_http(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ import_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'http',
+ 'ignore_certificate_warning': 'off'
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(import_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ result = import_license_obj._ImportLicense__import_license_http(IMPORT_URL_MOCK, IDRAC_ID)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+
+ def test_import_license_cifs(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ import_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'cifs',
+ 'ignore_certificate_warning': 'off',
+ 'workgroup': 'mydomain'
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(import_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ result = import_license_obj._ImportLicense__import_license_cifs(IMPORT_URL_MOCK, IDRAC_ID)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+
+ def test_import_license_nfs(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ import_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'nfs',
+ 'ignore_certificate_warning': 'off',
+ 'workgroup': 'mydomain'
+ }
+ }
+ idr_obj = MagicMock()
+ idr_obj.json_data = {"license_id": "1234", "LicenseFile": "test_license_content"}
+ mocker.patch(MODULE_PATH + API_INVOKE_MOCKER,
+ return_value=idr_obj)
+ idrac_default_args.update(import_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ result = import_license_obj._ImportLicense__import_license_nfs(IMPORT_URL_MOCK, IDRAC_ID)
+ assert result.json_data == {'LicenseFile': 'test_license_content', 'license_id': '1234'}
+
+ def test_get_import_license_url(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ export_params = {
+ 'license_id': 'test_license_id',
+ 'share_parameters': {
+ 'file_name': 'test_lic',
+ 'share_type': 'local',
+ 'ignore_certificate_warning': 'off'
+ }
+ }
+ mocker.patch(MODULE_PATH + "validate_and_get_first_resource_id_uri",
+ return_value=(REDFISH, None))
+ mocker.patch(MODULE_PATH + "get_dynamic_uri",
+ return_value={"Links": {"Oem": {"Dell": {"DellLicenseManagementService": {ODATA: "/LicenseService"}}}},
+ "Actions": {"#DellLicenseManagementService.ImportLicense": {"target": API_ONE}}})
+ idrac_default_args.update(export_params)
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+ result = import_license_obj._ImportLicense__get_import_license_url()
+ assert result == API_ONE
+
+ def test_get_job_status(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ mocker.patch(MODULE_PATH + "validate_and_get_first_resource_id_uri", return_value=[MANAGER_URI_ONE])
+ lic_job_resp_obj = MagicMock()
+ lic_job_resp_obj.headers = {"Location": "idrac_internal"}
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ import_license_obj = self.module.ImportLicense(idrac_connection_license_mock, f_module)
+
+ mocker.patch(MODULE_PATH + "idrac_redfish_job_tracking", return_value=(False, "None", {"JobId": "JID1234"}, 0))
+ result = import_license_obj.get_job_status(lic_job_resp_obj)
+ assert result == {"JobId": "JID1234"}
+
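+ # Failed job tracking with LIC018 or LIC019 message IDs raises the job's message.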
+ mocker.patch(MODULE_PATH + "idrac_redfish_job_tracking", return_value=(True, "None", {"Message": "Got LIC018",
+ "MessageId": "LIC018"}, 0))
+ with pytest.raises(Exception) as exc:
+ import_license_obj.get_job_status(lic_job_resp_obj)
+ assert exc.value.args[0] == "Got LIC018"
+
+ mocker.patch(MODULE_PATH + "idrac_redfish_job_tracking", return_value=(True, "None", {"Message": "Got LIC019",
+ "MessageId": "LIC019"}, 0))
+ with pytest.raises(Exception) as exc:
+ import_license_obj.get_job_status(lic_job_resp_obj)
+ assert exc.value.args[0] == "Got LIC019"
+
+
+class TestLicenseType(FakeAnsibleModule):
+ module = idrac_license
+
+ @pytest.fixture
+ def idrac_license_mock(self):
+ idrac_obj = MagicMock()
+ return idrac_obj
+
+ @pytest.fixture
+ def idrac_connection_license_mock(self, mocker, idrac_license_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'iDRACRedfishAPI',
+ return_value=idrac_license_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = idrac_license_mock
+ return idrac_conn_mock
+
+ def test_license_operation(self, idrac_default_args, idrac_connection_license_mock, mocker):
+ idrac_default_args.update({"import": False, "export": False, "delete": True})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ lic_class = self.module.LicenseType.license_operation(idrac_connection_license_mock, f_module)
+ assert isinstance(lic_class, self.module.DeleteLicense)
+
+ idrac_default_args.update({"import": False, "export": True, "delete": False})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ lic_class = self.module.LicenseType.license_operation(idrac_connection_license_mock, f_module)
+ assert isinstance(lic_class, self.module.ExportLicense)
+
+ idrac_default_args.update({"import": True, "export": False, "delete": False})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ lic_class = self.module.LicenseType.license_operation(idrac_connection_license_mock, f_module)
+ assert isinstance(lic_class, self.module.ImportLicense)
+
+ @pytest.mark.parametrize("exc_type",
+ [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
+ def test_idrac_license_main_exception_handling_case(self, exc_type, mocker, idrac_default_args, idrac_connection_license_mock):
+ idrac_default_args.update({"delete": True, "license_id": "1234"})
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + "get_idrac_firmware_version",
+ side_effect=exc_type(HTTPS_PATH, 400,
+ HTTP_ERROR,
+ {"accept-type": APPLICATION_JSON},
+ StringIO(json_str)))
+ else:
+ mocker.patch(MODULE_PATH + "get_idrac_firmware_version",
+ side_effect=exc_type('test'))
+ result = self._run_module(idrac_default_args)
+ if exc_type == URLError:
+ assert result['unreachable'] is True
+ else:
+ assert result['failed'] is True
+ assert 'msg' in result
+
+ def test_main(self, mocker):
+ module_mock = mocker.MagicMock()
+ idrac_mock = mocker.MagicMock()
+ license_mock = mocker.MagicMock()
+
+ # Mock the necessary functions and objects
+ mocker.patch(MODULE_PATH + 'get_argument_spec', return_value={})
+ mocker.patch(MODULE_PATH + 'idrac_auth_params', {})
+ mocker.patch(MODULE_PATH + 'AnsibleModule', return_value=module_mock)
+ mocker.patch(MODULE_PATH + 'iDRACRedfishAPI', return_value=idrac_mock)
+ mocker.patch(MODULE_PATH + 'get_idrac_firmware_version', return_value='3.1')
+ mocker.patch(MODULE_PATH + 'LicenseType.license_operation', return_value=license_mock)
+ main()
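+ # Re-run main with an older firmware version to cover the firmware version handling branch.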
+ mocker.patch(MODULE_PATH + 'get_idrac_firmware_version', return_value='2.9')
+ main()
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py
index 39df4e4c6..b5690b037 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_job_status_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -14,7 +14,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_job_status_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock, PropertyMock
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -69,7 +69,7 @@ class TestLcJobStatus(FakeAnsibleModule):
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
else:
- idrac_get_lc_job_status_connection_mock.job_mgr.get_job_status.side_effect = exc_type('http://testhost.com', 400,
+ idrac_get_lc_job_status_connection_mock.job_mgr.get_job_status.side_effect = exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str))
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py
index 491932673..e4920f199 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_jobs.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,9 +15,9 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_jobs
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import SSLValidationError
from mock import MagicMock, PropertyMock
from io import StringIO
from ansible.module_utils._text import to_text
@@ -76,10 +76,10 @@ class TestDeleteLcJob(FakeAnsibleModule):
idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job.side_effect = exc_type('test')
else:
idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_all_jobs.side_effect = \
- exc_type('http://testhost.com', 400, 'http error message', {"accept-type": "application/json"},
+ exc_type('https://testhost.com', 400, 'http error message', {"accept-type": "application/json"},
StringIO(json_str))
idrac_connection_delete_lc_job_queue_mock.job_mgr.delete_job.side_effect = \
- exc_type('http://testhost.com', 400, 'http error message', {"accept-type": "application/json"},
+ exc_type('https://testhost.com', 400, 'http error message', {"accept-type": "application/json"},
StringIO(json_str))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py
index c1a0894e2..2802c3ed5 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_logs.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,8 +14,8 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_logs
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
@@ -67,23 +67,16 @@ class TestExportLcLogs(FakeAnsibleModule):
result = self._run_module(idrac_default_args)
assert result["msg"] == "Successfully exported the lifecycle controller logs."
- def test_run_export_lc_logs_success_case01(self, idrac_connection_export_lc_logs_mock, idrac_default_args,
- idrac_file_manager_export_lc_logs_mock):
- idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
- "share_password": "sharepassword", "job_wait": True})
- idrac_connection_export_lc_logs_mock.log_mgr.lclog_export.return_value = {"Status": "Success"}
- f_module = self.get_module_mock(params=idrac_default_args)
- msg = self.module.run_export_lc_logs(idrac_connection_export_lc_logs_mock, f_module)
- assert msg == {'Status': 'Success'}
-
- def test_run_export_lc_logs_status_fail_case01(self, idrac_connection_export_lc_logs_mock, idrac_default_args,
- idrac_file_manager_export_lc_logs_mock):
- idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
- "share_password": "sharepassword", "job_wait": True})
- idrac_connection_export_lc_logs_mock.log_mgr.lclog_export.return_value = {"Status": "failed"}
- f_module = self.get_module_mock(params=idrac_default_args)
- msg = self.module.run_export_lc_logs(idrac_connection_export_lc_logs_mock, f_module)
- assert msg == {'Status': 'failed'}
+ idrac_default_args.update({"job_wait": False})
+ mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.run_export_lc_logs', return_value=message)
+ result = self._run_module(idrac_default_args)
+ assert result["msg"] == "The export lifecycle controller log job is submitted successfully."
+
+ message = {"Status": "Failed", "JobStatus": "Failed"}
+ mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.run_export_lc_logs', return_value=message)
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result["msg"] == "Unable to export the lifecycle controller logs."
+ assert result["failed"] is True
@pytest.mark.parametrize("exc_type", [RuntimeError, SSLValidationError, ConnectionError, KeyError,
ImportError, ValueError, TypeError, HTTPError, URLError])
@@ -98,11 +91,61 @@ class TestExportLcLogs(FakeAnsibleModule):
side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + 'idrac_lifecycle_controller_logs.run_export_lc_logs',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
- if not exc_type == URLError:
+ if exc_type != URLError:
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
+
+ @pytest.mark.parametrize("args_update", [{"share_user": "share@user"}, {"share_user": "shareuser"}, {"share_user": "share\\user"}])
+ def test_get_user_credentials(self, args_update, idrac_connection_export_lc_logs_mock, idrac_default_args, idrac_file_manager_export_lc_logs_mock, mocker):
+ idrac_default_args.update({"share_name": "sharename",
+ "share_password": "sharepassword", "job_wait": True})
+ obj = MagicMock()
+ obj.IsValid = True
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.file_share_manager.create_share_obj", return_value=(obj))
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idrac_default_args.update(args_update)
+ share = self.module.get_user_credentials(f_module)
+ assert share.IsValid is True
+
+ def test_run_export_lc_logs(self, idrac_connection_export_lc_logs_mock, idrac_default_args, idrac_file_manager_export_lc_logs_mock, mocker):
+ idrac_default_args.update({"idrac_port": 443, "share_name": "sharename", "share_user": "share@user",
+ "share_password": "sharepassword", "job_wait": True})
+ obj = MagicMock()
+ obj._name_ = "AF_INET6"
+ my_share = MagicMock()
+ my_share.new_file.return_value = "idrac_ip_file"
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.file_share_manager.create_share_obj", return_value=(my_share))
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.get_user_credentials", return_value=(my_share))
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.socket.getaddrinfo", return_value=([[obj]]))
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.copy.deepcopy", return_value=("idrac_ip"))
+ # mocker.patch(
+ # MODULE_PATH + "idrac_lifecycle_controller_logs.myshare.new_file", return_value=("idrac_ip_file"))
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.copy.deepcopy", return_value=("idrac_ip"))
+ idrac_connection_export_lc_logs_mock.log_mgr.lclog_export.return_value = {
+ "Status": "Success"}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ msg = self.module.run_export_lc_logs(
+ idrac_connection_export_lc_logs_mock, f_module)
+ assert msg['Status'] == "Success"
+
+ idrac_default_args.update({"idrac_port": 443, "share_name": "sharename", "share_user": "shareuser",
+ "share_password": "sharepassword", "job_wait": True})
+ obj._name_ = "AF_INET4"
+ mocker.patch(
+ MODULE_PATH + "idrac_lifecycle_controller_logs.socket.getaddrinfo", return_value=([[obj]]))
+ msg = self.module.run_export_lc_logs(
+ idrac_connection_export_lc_logs_mock, f_module)
+ assert msg['Status'] == "Success"
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py
index d00e2bc06..431dc4b8e 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_lifecycle_controller_status_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -14,8 +14,8 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_lifecycle_controller_status_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock, Mock
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from mock import PropertyMock
@@ -74,7 +74,7 @@ class TestLcStatus(FakeAnsibleModule):
assert result['failed'] is True
assert 'msg' in result
else:
- type(obj2).LCReady = PropertyMock(side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ type(obj2).LCReady = PropertyMock(side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
result = self._run_module_with_fail_json(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py
index 10f7183f6..4037c8d05 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,8 +15,8 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_network
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock, Mock
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
@@ -90,7 +90,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -109,7 +109,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -133,7 +133,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -157,7 +157,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -209,7 +209,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -229,7 +229,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -251,7 +251,7 @@ class TestConfigNetwork(FakeAnsibleModule):
"enable_nic": "Enabled", "nic_selection": "Dedicated",
"failover_network": "ALL", "auto_detect": "Enabled", "auto_negotiation": "Enabled",
"network_speed": "T_10", "duplex_mode": "Full", "nic_mtu": "nicmtu",
- "enable_dhcp": "Enabled", "ip_address": "100.100.102.114", "enable_ipv4": "Enabled",
+ "enable_dhcp": "Enabled", "ip_address": "XX.XX.XX.XX", "enable_ipv4": "Enabled",
"dns_from_dhcp": "Enabled", "static_dns_1": "staticdns1",
"static_dns_2": "staticdns2", "static_gateway": "staticgateway",
"static_net_mask": "staticnetmask"})
@@ -276,7 +276,7 @@ class TestConfigNetwork(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'idrac_network.run_idrac_network_config',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network_attributes.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network_attributes.py
new file mode 100644
index 000000000..e9a6eada2
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_network_attributes.py
@@ -0,0 +1,1011 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+import json
+from io import StringIO
+
+import pytest
+from ansible.module_utils._text import to_text
+from urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import \
+ idrac_network_attributes
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import \
+ FakeAnsibleModule
+from mock import MagicMock
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
+SUCCESS_MSG = "Successfully updated the network attributes."
+SUCCESS_CLEAR_PENDING_ATTR_MSG = "Successfully cleared the pending network attributes."
+SCHEDULE_MSG = "Successfully scheduled the job for network attributes update."
+TIMEOUT_NEGATIVE_OR_ZERO_MSG = "The value for the `job_wait_timeout` parameter cannot be negative or zero."
+MAINTENACE_OFFSET_DIFF_MSG = "The maintenance time must be post-fixed with local offset to {0}."
+MAINTENACE_OFFSET_BEHIND_MSG = "The specified maintenance time window occurs in the past, provide a future time to schedule the maintenance window."
+APPLY_TIME_NOT_SUPPORTED_MSG = "Apply time {0} is not supported."
+INVALID_ATTR_MSG = "Unable to update the network attributes because invalid values are entered. " + \
+ "Enter the valid values for the network attributes and retry the operation."
+VALID_AND_INVALID_ATTR_MSG = "Successfully updated the network attributes for valid values. " + \
+ "Unable to update other attributes because invalid values are entered. Enter the valid values and retry the operation."
+NO_CHANGES_FOUND_MSG = "No changes found to be applied."
+CHANGES_FOUND_MSG = "Changes found to be applied."
+INVALID_ID_MSG = "Unable to complete the operation because the value `{0}` for the input `{1}` parameter is invalid."
+JOB_RUNNING_CLEAR_PENDING_ATTR = "{0} Config job is running. Wait for the job to complete. Currently can not clear pending attributes."
+ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE = 'Attribute is not valid.'
+CLEAR_PENDING_NOT_SUPPORTED_WITHOUT_ATTR_IDRAC8 = "Clear pending is not supported."
+WAIT_TIMEOUT_MSG = "The job is not complete after {0} seconds."
+
+
+class TestIDRACNetworkAttributes(FakeAnsibleModule):
+ module = idrac_network_attributes
+ uri = '/redfish/v1/api'
+ links = {
+ "Oem": {
+ "Dell": {
+ "DellNetworkAttributes": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions/NIC.Mezzanine.1A-1-1/Oem/" +
+ "Dell/DellNetworkAttributes/NIC.Mezzanine.1A-1-1"
+ }
+ }
+ }
+ }
+ redfish_settings = {"@Redfish.Settings": {
+ "SettingsObject": {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions/NIC.Mezzanine.1A-1-1/Oem/Dell/" +
+ "DellNetworkAttributes/NIC.Mezzanine.1A-1-1/Settings"
+ }
+ }
+ }
+
+ @pytest.fixture
+ def idrac_ntwrk_attr_mock(self):
+ idrac_obj = MagicMock()
+ return idrac_obj
+
+ @pytest.fixture
+ def idrac_connection_ntwrk_attr_mock(self, mocker, idrac_ntwrk_attr_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'idrac_network_attributes.iDRACRedfishAPI',
+ return_value=idrac_ntwrk_attr_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = idrac_ntwrk_attr_mock
+ return idrac_conn_mock
+
+ def test_get_registry_fw_less_than_6_more_than_3(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ registry_list = [
+ {
+ "@odata.id": "/redfish/v1/Registries/BaseMessages"
+ },
+ {
+ "@odata.id": "/redfish/v1/Registries/NetworkAttributesRegistry_NIC.Mezzanine.1A-1-1"
+ }]
+ location = [{'Uri': self.uri}]
+ registry_response = {'Attributes': [{
+ "AttributeName": "DeviceName",
+ "CurrentValue": None
+ },
+ {"AttributeName": "ChipMdl",
+ "CurrentValue": None
+ }
+ ]}
+ # Scenario 1: Got the registry Members list, Got Location, Got Attributes
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if args[2] == 'Members':
+ return registry_list
+ elif args[2] == 'Location':
+ return location
+ else:
+ return registry_response
+
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1A',
+ 'network_device_function_id': 'NIC.Mezzanine.1A-1-1',
+ 'apply_time': 'Immediate'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_registry_fw_less_than_6_more_than_3()
+ assert data == {'ChipMdl': None, 'DeviceName': None}
+
+        # Scenario 2: Got the registry Members list empty
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if args[2] == 'Members':
+ return {}
+ elif args[2] == 'Location':
+ return location
+ else:
+ return registry_response
+
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1A',
+ 'network_device_function_id': 'NIC.Mezzanine.1A-1-1',
+ 'apply_time': 'Immediate'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_registry_fw_less_than_6_more_than_3()
+ assert data == {}
+
+        # Scenario 3: Got the registry Members list but it does not contain Location
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if args[2] == 'Members':
+ return registry_list
+ elif args[2] == 'Location':
+ return {}
+ else:
+ return registry_response
+
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1A',
+ 'network_device_function_id': 'NIC.Mezzanine.1A-1-1',
+ 'apply_time': 'Immediate'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_registry_fw_less_than_6_more_than_3()
+ assert data == {}
+
+ def test_validate_time(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ resp = ("2022-09-14T05:59:35-05:00", "-05:00")
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_current_time",
+ return_value=resp)
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1A',
+ 'network_device_function_id': 'NIC.Mezzanine.1A-1-1',
+ 'apply_time': 'Immediate'})
+ # Scenario 1: When mtime does not end with offset
+ m_time = "2022-09-14T05:59:35+05:00"
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__validate_time(m_time)
+ assert exc.value.args[0] == MAINTENACE_OFFSET_DIFF_MSG.format(resp[1])
+
+ # Scenario 2: When mtime is less than current time
+ m_time = "2021-09-14T05:59:35-05:00"
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__validate_time(m_time)
+ assert exc.value.args[0] == MAINTENACE_OFFSET_BEHIND_MSG
+
+        # Scenario 3: When mtime is greater than current time
+ m_time = "2024-09-14T05:59:35-05:00"
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__validate_time(m_time)
+ assert data is None
+
+ def test_get_redfish_apply_time(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ resp = ("2022-09-14T05:59:35-05:00", "-05:00")
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes._IDRACNetworkAttributes__validate_time",
+ return_value=resp)
+ rf_settings = [
+ "OnReset",
+ "Immediate"
+ ]
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1A',
+ 'network_device_function_id': 'NIC.Mezzanine.1A-1-1',
+ 'apply_time': 'AtMaintenanceWindowStart',
+ 'maintenance_window': {"start_time": "2022-09-14T06:59:35-05:00",
+ "duration": 600}})
+
+ # Scenario 1: When Maintenance is not supported but 'AtMaintenanceWindowStart' is passed
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__get_redfish_apply_time(
+ 'AtMaintenanceWindowStart', rf_settings)
+ assert exc.value.args[0] == APPLY_TIME_NOT_SUPPORTED_MSG.format(
+ 'AtMaintenanceWindowStart')
+
+ # Scenario 2: When Maintenance is not supported but 'InMaintenanceWindowOnReset' is passed
+ idrac_default_args.update({'apply_time': 'InMaintenanceWindowOnReset'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__get_redfish_apply_time(
+ 'InMaintenanceWindowOnReset', rf_settings)
+ assert exc.value.args[0] == APPLY_TIME_NOT_SUPPORTED_MSG.format(
+ 'InMaintenanceWindowOnReset')
+
+        # Scenario 3: When ApplyTime supports 'InMaintenanceWindowOnReset'
+ rf_settings.append('InMaintenanceWindowOnReset')
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_redfish_apply_time(
+ 'InMaintenanceWindowOnReset', rf_settings)
+ assert data == {'ApplyTime': 'InMaintenanceWindowOnReset',
+ 'MaintenanceWindowDurationInSeconds': 600,
+ 'MaintenanceWindowStartTime': '2022-09-14T06:59:35-05:00'}
+
+ # Scenario 4: When ApplyTime is Immediate
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_redfish_apply_time(
+ 'Immediate', rf_settings)
+ assert data == {'ApplyTime': 'Immediate'}
+
+ # Scenario 5: When ApplyTime does not support Immediate
+ rf_settings.remove('Immediate')
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__get_redfish_apply_time(
+ 'Immediate', rf_settings)
+ assert exc.value.args[0] == APPLY_TIME_NOT_SUPPORTED_MSG.format(
+ 'Immediate')
+
+        # Scenario 6: When the supported ApplyTime list is empty
+ rf_settings = []
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_redfish_apply_time(
+ 'Immediate', rf_settings)
+ assert data == {}
+
+ def test_get_registry_fw_less_than_3(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ obj = MagicMock()
+ obj.json_data = {'SystemConfiguration': {
+ "Components": [
+ {'FQDD': 'NIC.Mezzanine.1A-1-1',
+ 'Attributes': [{
+ 'Name': 'VLanId',
+ 'Value': '10'
+ }]}
+ ]
+ }}
+ idrac_default_args.update(
+ {'network_device_function_id': 'NIC.Mezzanine.1A-1-1'})
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.export_scp",
+ return_value=obj)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__get_registry_fw_less_than_3()
+ assert data == {'VLanId': '10'}
+
+ def test_get_current_server_registry(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ reg_greater_than_6 = {'abc': False}
+ reg_less_than_6 = {'xyz': True}
+ reg_less_than_3 = {'Qwerty': False}
+ redfish_resp = {'Ethernet': {'abc': 123},
+ 'FibreChannel': {},
+ 'iSCSIBoot': {'ghi': 789}
+ }
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if len(args) > 2:
+ if args[2] == 'Links':
+ return self.links
+ elif args[2] == 'Attributes':
+ return reg_greater_than_6
+ return redfish_resp
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes._IDRACNetworkAttributes__get_registry_fw_less_than_6_more_than_3",
+ return_value=reg_less_than_6)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes._IDRACNetworkAttributes__get_registry_fw_less_than_3",
+ return_value=reg_less_than_3)
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1A',
+ 'network_device_function_id': 'NIC.Mezzanine.1A-1-1',
+ 'apply_time': 'AtMaintenanceWindowStart',
+ 'maintenance_window': {"start_time": "2022-09-14T06:59:35-05:00",
+ "duration": 600}})
+
+        # Scenario 1: When Firmware version is greater than or equal to 6.0 and oem_network_attributes is not given
+ firm_ver = '6.1'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value=firm_ver)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_current_server_registry()
+ assert data == {}
+
+        # Scenario 2: When Firmware version is greater than or equal to 6.0 and oem_network_attributes is given
+ firm_ver = '6.1'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value=firm_ver)
+ idrac_default_args.update({'oem_network_attributes': 'some value'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_current_server_registry()
+ assert data == {'abc': False}
+
+ # Scenario 3: When Firmware version is less than 6.0 and oem_network_attributes is given
+ firm_ver = '4.0'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value=firm_ver)
+ idrac_default_args.update({'oem_network_attributes': 'some value'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_current_server_registry()
+ assert data == {'xyz': True}
+
+ # Scenario 4: When Firmware version is less than 3.0 and oem_network_attributes is given
+ firm_ver = '2.9'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value=firm_ver)
+ idrac_default_args.update({'oem_network_attributes': 'some value'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_current_server_registry()
+ assert data == {'Qwerty': False}
+
+ # Scenario 5: When network_attributes is given
+ firm_ver = '7.0'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value=firm_ver)
+ idrac_default_args.update({'network_attributes': 'some value',
+ 'oem_network_attributes': None})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_current_server_registry()
+ assert data == redfish_resp
+
+ def test_extract_error_msg(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ error_info = {
+ "error": {
+ "@Message.ExtendedInfo": [
+ {
+ "Message": "AttributeValue cannot be changed to read only AttributeName BusDeviceFunction.",
+ "MessageArgs": [
+ "BusDeviceFunction"
+ ]
+ },
+ {
+ "Message": "AttributeValue cannot be changed to read only AttributeName ChipMdl.",
+ "MessageArgs": [
+ "ChipMdl"
+ ]
+ },
+ {
+ "Message": "AttributeValue cannot be changed to read only AttributeName ControllerBIOSVersion.",
+ "MessageArgs": [
+ "ControllerBIOSVersion"
+ ]
+ },
+ {
+ "Message": "some random message",
+ "MessageArgs": [
+ "ControllerBIOSVersion"
+ ]
+ }]}}
+ obj = MagicMock()
+ # Scenario 1: When response code is 202 and has response body
+ obj.body = obj.json_data = error_info
+ obj.status_code = 202
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.extract_error_msg(obj)
+ assert data == {'BusDeviceFunction': 'AttributeValue cannot be changed to read only AttributeName BusDeviceFunction.',
+ 'ChipMdl': 'AttributeValue cannot be changed to read only AttributeName ChipMdl.',
+ 'ControllerBIOSVersion': 'AttributeValue cannot be changed to read only AttributeName ControllerBIOSVersion.'
+ }
+
+ # Scenario 2: When response code is 200 and no response body
+ obj.body = obj.json_data = ''
+ obj.status_code = 200
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.extract_error_msg(obj)
+ assert data == {}
+
+ def test_get_diff_between_current_and_module_input(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ module_attr = {'a': 123, 'b': 456}
+ server_attr = {'c': 789, 'b': 456}
+ # Scenario 1: Simple attribute which does not contain nested values
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_diff_between_current_and_module_input(
+ module_attr, server_attr)
+ assert data == (0, {'a': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE})
+
+        # Scenario 2: Complex attribute which contains nested values
+ module_attr = {'a': 123, 'b': 456, 'c': {'d': 789}}
+ server_attr = {'c': 789, 'b': 457, 'd': {'e': 123}}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_diff_between_current_and_module_input(
+ module_attr, server_attr)
+ assert data == (2, {'a': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE})
+
+        # Scenario 3: Complex attribute which contains nested values and the value matches
+ module_attr = {'a': 123, 'b': 456, 'c': {'d': 789}}
+ server_attr = {'c': {'d': 789}, 'b': 457, 'd': {'e': 123}}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_diff_between_current_and_module_input(
+ module_attr, server_attr)
+ assert data == (1, {'a': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE})
+
+        # Scenario 4: module attr is None
+ module_attr = None
+ server_attr = {'a': 123}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.get_diff_between_current_and_module_input(
+ module_attr, server_attr)
+ assert data == (0, {})
+
+ def test_perform_validation_for_network_adapter_id(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ netwkr_adapters = {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters"
+ }
+ network_adapter_list = [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1B"
+ }
+ ]
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if args[2] == 'NetworkInterfaces':
+ return netwkr_adapters
+ return network_adapter_list
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.validate_and_get_first_resource_id_uri",
+ return_value=('System.Embedded.1', ''))
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+
+ # Scenario 1: When network_adapter_id is in server network adapter list
+ idrac_default_args.update({'network_adapter_id': 'NIC.Mezzanine.1B'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__perform_validation_for_network_adapter_id()
+ assert data == "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1B"
+
+ # Scenario 2: When network_adapter_id is not in server network adapter list
+ network_adapter_id = 'random value'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.validate_and_get_first_resource_id_uri",
+ return_value=('System.Embedded.1', ''))
+ idrac_default_args.update({'network_adapter_id': network_adapter_id})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__perform_validation_for_network_adapter_id()
+ assert exc.value.args[0] == INVALID_ID_MSG.format(network_adapter_id,
+ 'network_adapter_id')
+
+ # Scenario 3: When validate_and_get_first_resource_id_uri is returning error_msg
+ network_adapter_id = 'random value'
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.validate_and_get_first_resource_id_uri",
+ return_value=('System.Embedded.1', 'error_msg'))
+ idrac_default_args.update({'network_adapter_id': network_adapter_id})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__perform_validation_for_network_adapter_id()
+ assert exc.value.args[0] == 'error_msg'
+
+ def test_perform_validation_for_network_device_function_id(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ netwkr_devices = {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions"
+ }
+ network_device_function_list = [
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions/NIC.Mezzanine.1A-1-1"
+ },
+ {
+ "@odata.id": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions/NIC.Mezzanine.1A-2-1"
+ }
+ ]
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if args[2] == 'NetworkDeviceFunctions':
+ return netwkr_devices
+ return network_device_function_list
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.validate_and_get_first_resource_id_uri",
+ return_value=('System.Embedded.1', ''))
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes._IDRACNetworkAttributes__perform_validation_for_network_adapter_id",
+ return_value=self.uri)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+
+        # Scenario 1: When network_device_function_id is in the server network device function list
+ device_uri = "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions/NIC.Mezzanine.1A-2-1"
+ idrac_default_args.update(
+ {'network_device_function_id': 'NIC.Mezzanine.1A-2-1'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj._IDRACNetworkAttributes__perform_validation_for_network_device_function_id()
+ assert data == device_uri
+
+        # Scenario 2: When network_device_function_id is not in the server network device function list
+ network_device_function_id = 'random value'
+ idrac_default_args.update(
+ {'network_device_function_id': network_device_function_id})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj._IDRACNetworkAttributes__perform_validation_for_network_device_function_id()
+ assert exc.value.args[0] == INVALID_ID_MSG.format(
+ network_device_function_id, 'network_device_function_id')
+
+ def test_validate_job_timeout(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+
+        # Scenario 1: when job_wait is True and job_wait_timeout is negative
+ idrac_default_args.update({'job_wait': True, 'job_wait_timeout': -120})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.validate_job_timeout()
+ assert exc.value.args[0] == TIMEOUT_NEGATIVE_OR_ZERO_MSG
+
+ # Scenario 2: when job_wait is False
+ idrac_default_args.update(
+ {'job_wait': False, 'job_wait_timeout': -120})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.validate_job_timeout()
+ assert data is None
+
+ def test_apply_time(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ return_value=self.redfish_settings)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes._IDRACNetworkAttributes__get_redfish_apply_time",
+ return_value={'AppyTime': "OnReset"})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ rf_set = idr_obj.apply_time(self.uri)
+ assert rf_set == {'AppyTime': "OnReset"}
+
+ def test_set_dynamic_base_uri_and_validate_ids(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ tmp_dict = {}
+ tmp_dict.update({'Links': self.links,
+ '@Redfish.Settings': self.redfish_settings.get('@Redfish.Settings')})
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ return_value=tmp_dict)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes._IDRACNetworkAttributes__perform_validation_for_network_device_function_id",
+ return_value=self.uri)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.IDRACNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.set_dynamic_base_uri_and_validate_ids()
+ assert data is None
+
+ def test_clear_pending(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ action_setting_uri_resp = {
+ "Actions": {
+ "#DellManager.ClearPending": {
+ "target": "/redfish/v1/Chassis/System.Embedded.1/NetworkAdapters/NIC.Mezzanine.1A/NetworkDeviceFunctions/NIC.Mezzanine.1A-1-1/Oem/Dell/" +
+ "DellNetworkAttributes/NIC.Mezzanine.1A-1-1/Settings/Actions/DellManager.ClearPending"
+ }
+ },
+ "Attributes": {}
+ }
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if len(args) > 2 and args[2] == '@Redfish.Settings':
+ return self.redfish_settings.get('@Redfish.Settings')
+ return action_setting_uri_resp
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='6.1')
+
+        # Scenario 1: When there are no pending attributes
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == NO_CHANGES_FOUND_MSG
+
+        # Scenario 2: When there are pending attributes and the scheduled job is running in normal mode
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_scheduled_job_resp",
+ return_value={'Id': 'JIDXXXXXX', 'JobState': 'Running'})
+ action_setting_uri_resp.update({'Attributes': {'VLanId': 10}})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == JOB_RUNNING_CLEAR_PENDING_ATTR.format(
+ 'NICConfiguration')
+
+        # Scenario 3: When there are pending attributes and the scheduled job is Starting in normal mode
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_scheduled_job_resp",
+ return_value={'Id': 'JIDXXXXXX', 'JobState': 'Starting'})
+ action_setting_uri_resp.update({'Attributes': {'VLanId': 10}})
+ g_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr__obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, g_module)
+ with pytest.raises(Exception) as exc:
+ idr__obj.clear_pending()
+ assert exc.value.args[0] == SUCCESS_CLEAR_PENDING_ATTR_MSG
+
+ # Scenario 4: Scenario 3 in check mode
+ g_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ idr__obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, g_module)
+ with pytest.raises(Exception) as exc:
+ idr__obj.clear_pending()
+ assert exc.value.args[0] == CHANGES_FOUND_MSG
+
+        # Scenario 5: When there are pending attributes but no job ID is present in normal mode
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_scheduled_job_resp",
+ return_value={'Id': '', 'JobState': 'Starting'})
+ g_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, g_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == SUCCESS_CLEAR_PENDING_ATTR_MSG
+
+ # Scenario 6: Scenario 5 in check_mode
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == CHANGES_FOUND_MSG
+
+        # Scenario 7: When the job is completed in check mode; ideally this condition is not reached
+        # because the function returns only scheduled jobs
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_scheduled_job_resp",
+ return_value={'Id': 'JIDXXXXXX', 'JobState': 'Completed'})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == CHANGES_FOUND_MSG
+
+        # Scenario 8: When Firmware version is less than 3 and oem_network_attributes is not given
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='2.9')
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == CLEAR_PENDING_NOT_SUPPORTED_WITHOUT_ATTR_IDRAC8
+
+        # Scenario 9: When Firmware version is less than 3 and oem_network_attributes is given
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='2.9')
+ idrac_default_args.update(
+ {'oem_network_attributes': {'somedata': 'somevalue'}})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.clear_pending()
+ assert data is None
+
+        # Scenario 10: When Firmware version is greater than 3, a scheduled job exists in Starting state, in normal mode, without oem_network_attributes
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='3.1')
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_scheduled_job_resp",
+ return_value={'Id': 'JIDXXXXXX', 'JobState': 'Starting'})
+ idrac_default_args.update({'oem_network_attributes': None})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ with pytest.raises(Exception) as exc:
+ idr_obj.clear_pending()
+ assert exc.value.args[0] == SUCCESS_CLEAR_PENDING_ATTR_MSG
+
+ def test_perform_operation_OEMNetworkAttributes(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ obj = MagicMock()
+ obj.headers = {'Location': self.uri}
+ obj.json_data = {'data': 'some value'}
+ apply_time = {'ApplyTime': 'Immediate'}
+ error_info = {'abc': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE}
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if len(args) > 2 and args[2] == 'Links':
+ return self.links
+ return self.redfish_settings
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.invoke_request",
+ return_value=obj)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.import_scp",
+ return_value=obj)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes.apply_time",
+ return_value=apply_time)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes.extract_error_msg",
+ return_value=error_info)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='6.1')
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.idrac_redfish_job_tracking",
+ return_value=(False, 'msg', obj.json_data, 600))
+
+ idrac_default_args.update({'oem_network_attributes': {'VlanId': 1},
+ 'job_wait': True,
+ 'job_wait_timeout': 1200})
+        # Scenario 1: When the job has returned successfully and no error message is present
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.OEMNetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ data = idr_obj.perform_operation()
+ assert data == (obj, {
+ 'abc': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE}, False)
+
+ def test_perform_operation_NetworkAttributes(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ obj = MagicMock()
+ obj.headers = {'Location': self.uri}
+ obj.json_data = {'data': 'some value'}
+ apply_time = {'ApplyTime': 'Immediate'}
+ error_info = {'abc': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE}
+
+ def mock_get_dynamic_uri_request(*args, **kwargs):
+ if len(args) > 2 and args[2] == 'Links':
+ return self.links
+ return self.redfish_settings
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_dynamic_uri",
+ side_effect=mock_get_dynamic_uri_request)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.invoke_request",
+ return_value=obj)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.import_scp",
+ return_value=obj)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes.apply_time",
+ return_value=apply_time)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes.extract_error_msg",
+ return_value=error_info)
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.idrac_redfish_job_tracking",
+ return_value=(False, 'msg', obj.json_data, 500))
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='6.1')
+
+ idrac_default_args.update({'network_attributes': {'VlanId': 1},
+ 'job_wait': True,
+ 'job_wait_timeout': 1200})
+        # Scenario 1: When the job has returned successfully and no error message is present
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ idr_obj = self.module.NetworkAttributes(
+ idrac_connection_ntwrk_attr_mock, f_module)
+ idr_obj.redfish_uri = self.uri
+ data = idr_obj.perform_operation()
+ assert data == (obj, {
+ 'abc': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE}, False)
+
+ def test_perform_operation_for_main(self, idrac_default_args, idrac_connection_ntwrk_attr_mock,
+ idrac_ntwrk_attr_mock, mocker):
+ obj = MagicMock()
+ obj.json_data = {'some': 'value'}
+ job_state = {'JobState': "Completed"}
+ invalid_attr = {'a': ATTRIBUTE_NOT_EXIST_CHECK_IDEMPOTENCY_MODE}
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.idrac_redfish_job_tracking",
+ return_value=(False, 'some msg', job_state, 700))
+ # Scenario 1: When diff is false
+ diff = 0
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == NO_CHANGES_FOUND_MSG
+
+ # Scenario 2: When diff is True and check mode is True
+ diff = ({'a': 123}, {'c': 789})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == CHANGES_FOUND_MSG
+
+ # Scenario 3: When diff is True and JobState is completed and
+ # There is invalid_attr in normal mode
+ resp = MagicMock()
+ resp.headers = {'Location': self.uri}
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.get_idrac_firmware_version",
+ return_value='6.1')
+
+ def return_data():
+ return (resp, invalid_attr, False)
+ obj.perform_operation = return_data
+ obj.json_data = {'JobState': 'Completed'}
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.invoke_request",
+ return_value=obj)
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == VALID_AND_INVALID_ATTR_MSG
+
+ # Scenario 4: When diff is True and JobState is completed and
+ # There is no invalid_attr in normal mode
+ invalid_attr = {}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == SUCCESS_MSG
+
+ # Scenario 5: When diff is True and JobState is not completed and
+ # There is no invalid_attr in normal mode
+ invalid_attr = {}
+
+ def return_data():
+ return (resp, invalid_attr, False)
+ obj.json_data = {'JobState': "Scheduled"}
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.iDRACRedfishAPI.invoke_request",
+ return_value=obj)
+ obj.perform_operation = return_data
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == SCHEDULE_MSG
+
+        # Scenario 6: When diff is False and check mode is enabled
+ diff = 0
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == NO_CHANGES_FOUND_MSG
+
+        # Scenario 7: When diff is False, check mode is False, and invalid_attr is empty
+ diff = 0
+ invalid_attr = {}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ f_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == NO_CHANGES_FOUND_MSG
+
+        # Scenario 8: When job_wait is True and the job does not complete within job_wait_timeout
+ diff = 1
+ invalid_attr = {}
+ resp = MagicMock()
+ resp.headers = {'Location': self.uri}
+
+ def return_data():
+ return (resp, invalid_attr, True)
+ obj.perform_operation = return_data
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.idrac_redfish_job_tracking",
+ return_value=(False, 'msg', obj.json_data, 1200))
+ idrac_default_args.update({'job_wait_timeout': 1000})
+ h_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_operation_for_main(idrac_connection_ntwrk_attr_mock,
+ h_module, obj, diff, invalid_attr)
+ assert exc.value.args[0] == WAIT_TIMEOUT_MSG.format(1000)
+
+ @pytest.mark.parametrize("exc_type",
+ [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
+ def test_idrac_network_attributes_main_exception_handling_case(self, exc_type, mocker, idrac_default_args,
+ idrac_connection_ntwrk_attr_mock, idrac_ntwrk_attr_mock):
+ obj = MagicMock()
+ obj.perform_validation_for_network_adapter_id.return_value = None
+ obj.perform_validation_for_network_device_function_id.return_value = None
+ obj.get_diff_between_current_and_module_input.return_value = (
+ None, None)
+ obj.validate_job_timeout.return_value = None
+ obj.clear_pending.return_value = None
+ idrac_default_args.update({'apply_time': "Immediate",
+ 'network_adapter_id': 'Some_adapter_id',
+ 'network_device_function_id': 'some_device_id',
+ 'clear_pending': True if exc_type == URLError else False})
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type in [HTTPError, SSLValidationError]:
+ tmp = {'network_attributes': {'VlanId': 10}}
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes.set_dynamic_base_uri_and_validate_ids",
+ side_effect=exc_type('https://testhost.com', 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str)))
+ else:
+
+ tmp = {'oem_network_attributes': {'VlanId': 10}}
+ mocker.patch(MODULE_PATH + "idrac_network_attributes.IDRACNetworkAttributes.set_dynamic_base_uri_and_validate_ids",
+ side_effect=exc_type('test'))
+ idrac_default_args.update(tmp)
+ result = self._run_module(idrac_default_args)
+ if exc_type == URLError:
+ assert result['unreachable'] is True
+ else:
+ assert result['failed'] is True
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py
index d89673566..741aa83a3 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_os_deployment.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -14,10 +14,9 @@ __metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_os_deployment
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.utils import set_module_args, exit_json, \
- fail_json, AnsibleFailJson, AnsibleExitJson
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.utils import set_module_args
from pytest import importorskip
importorskip("omsdk.sdkfile")
@@ -49,7 +48,7 @@ class TestOsDeployment(FakeAnsibleModule):
@pytest.fixture
def omsdk_mock(self, mocker):
mocker.patch(MODULE_UTIL_PATH + 'dellemc_idrac.UserCredentials')
- mocker.patch(MODULE_UTIL_PATH + 'dellemc_idrac.WsManOptions')
+ mocker.patch(MODULE_UTIL_PATH + 'dellemc_idrac.ProtoPreference')
@pytest.fixture
def fileonshare_mock(self, mocker):
@@ -100,7 +99,7 @@ class TestOsDeployment(FakeAnsibleModule):
idrac_mock.config_mgr.boot_to_network_iso.return_value = {"Status": "Success"}
params = {"idrac_ip": "idrac_ip", "idrac_user": "idrac_user", "idrac_password": "idrac_password",
"ca_path": "/path/to/ca_cert.pem",
- "share_name": None, "share_password": "dummy_share_password",
+ "share_name": "", "share_password": "dummy_share_password",
"iso_image": "dummy_iso_image", "expose_duration": "100"
}
set_module_args(params)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py
index 99185a933..342bd51fe 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_redfish_storage_controller.py
@@ -2,8 +2,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -15,14 +15,15 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_redfish_storage_controller
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
from ansible.module_utils._text import to_text
-from ansible.module_utils.urls import urllib_error
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+HTTPS_ADDRESS = 'https://testhost.com'
+HTTP_ERROR_MSG = 'http error message'
@pytest.fixture
@@ -38,7 +39,7 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
module = idrac_redfish_storage_controller
def test_check_id_exists(self, redfish_str_controller_conn, redfish_response_mock):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password"}
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password"}
uri = "/redfish/v1/Dell/Systems/{system_id}/Storage/DellController/{controller_id}"
f_module = self.get_module_mock(params=param)
redfish_response_mock.success = True
@@ -46,6 +47,7 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
result = self.module.check_id_exists(f_module, redfish_str_controller_conn, "controller_id",
"RAID.Integrated.1-1", uri)
assert result is None
+
redfish_response_mock.success = False
redfish_response_mock.status_code = 400
with pytest.raises(Exception) as ex:
@@ -53,13 +55,25 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
"RAID.Integrated.1-1", uri)
assert ex.value.args[0] == "controller_id with id 'RAID.Integrated.1-1' not found in system"
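+ # An HTTPError raised by invoke_request is expected to surface the same "not found" failure message.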
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.check_id_exists(f_module, redfish_str_controller_conn, "controller_id",
+ "RAID.Integrated.1-1", uri)
+ assert ex.value.args[0] == "controller_id with id 'RAID.Integrated.1-1' not found in system"
+
def test_validate_inputs(self, redfish_str_controller_conn, redfish_response_mock):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "ReKey", "mode": "LKM"}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as ex:
self.module.validate_inputs(f_module)
assert ex.value.args[0] == "All of the following: key, key_id and old_key are required for 'ReKey' operation."
+
param.update({"command": "AssignSpare", "target": ["Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1",
"Disk.Bay.1:Enclosure.Internal.0-2:RAID.Integrated.1-1"]})
f_module = self.get_module_mock(params=param)
@@ -67,18 +81,21 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
self.module.validate_inputs(f_module)
assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target " \
"physical disk must be only one."
+
param.update({"volume_id": ["Disk.Virtual.0:RAID.Mezzanine.1C-0",
"Disk.Virtual.0:RAID.Mezzanine.1C-1"], "target": None})
with pytest.raises(Exception) as ex:
self.module.validate_inputs(f_module)
assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target " \
"virtual drive must be only one."
+
param.update({"command": "EnableControllerEncryption"})
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as ex:
self.module.validate_inputs(f_module)
assert ex.value.args[0] == "All of the following: key, key_id are " \
"required for 'EnableControllerEncryption' operation."
+
param.update({"command": "ChangePDStateToOnline",
"target": ["Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1",
"Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1"]})
@@ -87,8 +104,37 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target " \
"physical disk must be only one."
+ param.update({"key": "Key@123", "key_id": 123, "old_key": "abc",
+ "command": "ReKey", "mode": "LKM"})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.validate_inputs(f_module)
+ assert result is None
+
+ param.update({"key": "Key@123", "key_id": 123,
+ "command": "EnableControllerEncryption", "mode": "LKM"})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.validate_inputs(f_module)
+ assert result is None
+
+ param.update({"volume_id": None, "command": "AssignSpare",
+ "target": ["Disk.Bay.0:Enclosure.Internal.0-2:RAID.Integrated.1-1"]})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.validate_inputs(f_module)
+ assert result is None
+
+ param.update({"command": "ChangePDStateToOnline",
+ "target": None})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.validate_inputs(f_module)
+ assert result is None
+
+ param.update({"command": "NoCommand"})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.validate_inputs(f_module)
+ assert result is None
+
def test_target_identify_pattern(self, redfish_str_controller_conn, redfish_response_mock):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "BlinkTarget", "target": "Disk.Bay.1:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1",
"volume_id": "Disk.Virtual.0:RAID.Mezzanine.1C-1"}
f_module = self.get_module_mock(params=param)
@@ -96,13 +142,29 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
redfish_response_mock.status_code = 200
result = self.module.target_identify_pattern(f_module, redfish_str_controller_conn)
assert result.status_code == 200
+
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.target_identify_pattern(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+ param.update({"volume_id": None})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.target_identify_pattern(f_module, redfish_str_controller_conn)
+ assert result.status_code == 200
+
+ param.update({"target": None})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.target_identify_pattern(f_module, redfish_str_controller_conn)
+ assert result.status_code == 200
+
+ param.update({"volume_id": "Disk.Virtual.0:RAID.Mezzanine.1C-1"})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.target_identify_pattern(f_module, redfish_str_controller_conn)
+ assert result.status_code == 200
+
def test_ctrl_reset_config(self, redfish_str_controller_conn, redfish_response_mock, mocker):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"controller_id": "RAID.Mezzanine.1C-1", "command": "ResetConfig"}
f_module = self.get_module_mock(params=param)
mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
@@ -120,24 +182,41 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
assert ex.value.args[0] == "No changes found to be applied."
def test_hot_spare_config(self, redfish_str_controller_conn, redfish_response_mock):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
- "command": "AssignSpare", "target": "Disk.Bay.1:Enclosure.Internal.0-2:RAID.Integrated.1-1"}
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "command": "AssignSpare", "target": ["Disk.Bay.1:Enclosure.Internal.0-2:RAID.Integrated.1-1"]}
f_module = self.get_module_mock(params=param)
redfish_response_mock.json_data = {"HotspareType": "None"}
redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
result = self.module.hot_spare_config(f_module, redfish_str_controller_conn)
assert result[2] == "JID_XXXXXXXXXXXXX"
+
+ param.update({"volume_id": 'Disk.Virtual.0:RAID.Slot.1-1'})
+ f_module = self.get_module_mock(params=param)
+ result = self.module.hot_spare_config(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.hot_spare_config(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
redfish_response_mock.json_data = {"HotspareType": "Global"}
with pytest.raises(Exception) as ex:
self.module.hot_spare_config(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.hot_spare_config(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Unable to locate the physical disk with the ID: Disk.Bay.1:Enclosure.Internal.0-2:RAID.Integrated.1-1"
+
def test_ctrl_key(self, redfish_str_controller_conn, redfish_response_mock, mocker):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "SetControllerKey", "controller_id": "RAID.Integrated.1-1", "mode": "LKM"}
mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
f_module = self.get_module_mock(params=param)
@@ -145,49 +224,81 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "The storage controller 'RAID.Integrated.1-1' does not support encryption."
+
f_module.check_mode = True
redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None}
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": "Key@123"}
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
+
+ param.update({"command": "ReKey"})
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
- param.update({"command": "ReKey"})
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
+ f_module.check_mode = False
+ redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None}
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
+ result = self.module.ctrl_key(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
+ param.update({"mode": "LKM_"})
+ f_module.check_mode = False
+ redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None}
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
+ result = self.module.ctrl_key(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
param.update({"command": "RemoveControllerKey"})
+ redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": 'Key@123'}
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None}
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
- param.update({"command": "EnableControllerEncryption"})
+
+ param.update({"command": "EnableControllerEncryption", "mode": "LKM"})
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
redfish_response_mock.json_data = {"SecurityStatus": "SecurityKeyAssigned", "KeyID": None}
with pytest.raises(Exception) as ex:
self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
+
f_module.check_mode = False
redfish_response_mock.json_data = {"SecurityStatus": "EncryptionCapable", "KeyID": None}
redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
result = self.module.ctrl_key(f_module, redfish_str_controller_conn)
assert result[2] == "JID_XXXXXXXXXXXXX"
+ param.update({"mode": "LKM_"})
+ result = self.module.ctrl_key(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
+ param.update({"command": "wrongCommand", "mode": "LKM"})
+ f_module = self.get_module_mock(params=param)
+ f_module.check_mode = True
+ result = self.module.ctrl_key(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
def test_convert_raid_status(self, redfish_str_controller_conn, redfish_response_mock):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "ConvertToRAID", "target": ["Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1",
"Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"]}
f_module = self.get_module_mock(params=param)
@@ -195,18 +306,30 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
result = self.module.convert_raid_status(f_module, redfish_str_controller_conn)
assert result[2] == "JID_XXXXXXXXXXXXX"
+
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.convert_raid_status(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
f_module.check_mode = False
redfish_response_mock.json_data = {"Oem": {"Dell": {"DellPhysicalDisk": {"RaidStatus": "Ready"}}}}
with pytest.raises(Exception) as ex:
self.module.convert_raid_status(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.convert_raid_status(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Unable to locate the physical disk with the ID: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+
def test_change_pd_status(self, redfish_str_controller_conn, redfish_response_mock):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "ChangePDStateToOnline",
"target": ["Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1",
"Disk.Bay.1:Enclosure.Internal.0-1:RAID.Slot.1-1"]}
@@ -215,41 +338,602 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
result = self.module.change_pd_status(f_module, redfish_str_controller_conn)
assert result[2] == "JID_XXXXXXXXXXXXX"
+
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.change_pd_status(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
f_module.check_mode = False
redfish_response_mock.json_data = {"Oem": {"Dell": {"DellPhysicalDisk": {"RaidStatus": "Online"}}}}
with pytest.raises(Exception) as ex:
self.module.change_pd_status(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.change_pd_status(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Unable to locate the physical disk with the ID: Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"
+
def test_lock_virtual_disk(self, redfish_str_controller_conn, redfish_response_mock, mocker):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "LockVirtualDisk",
- "volume_id": "Disk.Virtual.0:RAID.SL.3-1"}
+ "volume_id": ["Disk.Virtual.0:RAID.SL.3-1"]
+ }
f_module = self.get_module_mock(params=param)
mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
redfish_response_mock.json_data = {"Oem": {"Dell": {"DellVolume": {"LockStatus": "Unlocked"}}}}
redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
result = self.module.lock_virtual_disk(f_module, redfish_str_controller_conn)
assert result[2] == "JID_XXXXXXXXXXXXX"
+
f_module.check_mode = True
with pytest.raises(Exception) as ex:
self.module.lock_virtual_disk(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "Changes found to be applied."
+
f_module.check_mode = False
redfish_response_mock.json_data = {"Oem": {"Dell": {"DellVolume": {"LockStatus": "Locked"}}}}
with pytest.raises(Exception) as ex:
self.module.lock_virtual_disk(f_module, redfish_str_controller_conn)
assert ex.value.args[0] == "No changes found to be applied."
+ redfish_response_mock.json_data = {"Oem": {"Dell": {"DellVolume": {"LockStatus": "Unlocked"}}},
+ "Links": {
+ "Drives": [
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/"
+ },
+ {
+ "@odata.id": "/redfish/v1/Systems/System.Embedded.1/"
+ }],
+ "Drives@odata.count": 2}}
+ with pytest.raises(Exception) as ex:
+ self.module.lock_virtual_disk(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Volume is not encryption capable."
+
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.lock_virtual_disk(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Unable to locate the physical disk with the ID: RAID.SL.3-1"
+
+ def test_online_capacity_expansion_raid_type_error(self, redfish_str_controller_conn, redfish_response_mock, mocker):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "command": "OnlineCapacityExpansion",
+ "volume_id": ["Disk.Virtual.0:RAID.SL.3-1"],
+ "target": ["Disk.Bay.2:Enclosure.Internal.0-0:RAID.Integrated.1-1"]}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
+ redfish_response_mock.json_data = {"RAIDType": "RAID50"}
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Online Capacity Expansion is not supported for RAID50 virtual disks."
+
+ redfish_response_mock.json_data = {"RAIDType": "RAID1"}
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Cannot add more than two disks to RAID1 virtual disk."
+
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Unable to locate the virtual disk with the ID: Disk.Virtual.0:RAID.SL.3-1"
+
+ def test_online_capacity_expansion_empty_target(self, redfish_str_controller_conn, redfish_response_mock, mocker):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "command": "OnlineCapacityExpansion",
+ "volume_id": ["Disk.Virtual.0:RAID.SL.3-1"],
+ "target": []}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
+ redfish_response_mock.json_data = {"Links": {"Drives": [{"@odata.id": "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"}]}}
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Provided list of targets is empty."
+
+ param.update({"volume_id": [], "target": None, "size": 3500})
+ f_module = self.get_module_mock(params=param)
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "The Fully Qualified Device Descriptor (FQDD) of the target virtual drive must be only one."
+
+ def test_online_capacity_expansion_valid_target(self, redfish_str_controller_conn, redfish_response_mock, mocker):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "command": "OnlineCapacityExpansion",
+ "volume_id": "Disk.Virtual.0:RAID.SL.3-1",
+ "target": ["Disk.Bay.2:Enclosure.Internal.0-0:RAID.Integrated.1-1",
+ "Disk.Bay.3:Enclosure.Internal.0-0:RAID.Integrated.1-1",
+ "Disk.Bay.4:Enclosure.Internal.0-0:RAID.Integrated.1-1"]}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
+ redfish_response_mock.json_data = {"Links": {"Drives": [{"@odata.id": "/Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"}]},
+ "RAIDType": "RAID0"}
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
+ f_module.check_mode = True
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Changes found to be applied."
+
+ f_module.check_mode = False
+ result = self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
+ param.update({"target": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"]})
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "No changes found to be applied."
+
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.json_data = {"RAIDType": "RAID10"}
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "No changes found to be applied."
+
+ def test_online_capacity_expansion_size(self, redfish_str_controller_conn, redfish_response_mock, mocker):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "command": "OnlineCapacityExpansion",
+ "volume_id": ["Disk.Virtual.0:RAID.SL.3-1"],
+ "size": 3010}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "idrac_redfish_storage_controller.check_id_exists", return_value=None)
+ redfish_response_mock.json_data = {"CapacityBytes": 3145728000}
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
+ with pytest.raises(Exception) as ex:
+ self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert ex.value.args[0] == "Minimum Online Capacity Expansion size must be greater than 100 MB of the current size 3000."
+
+ param.update({"size": 3500})
+ result = self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
+ param.update({"size": None})
+ result = self.module.online_capacity_expansion(f_module, redfish_str_controller_conn)
+ assert result[2] == "JID_XXXXXXXXXXXXX"
+
+ def test_get_current_time(self, redfish_str_controller_conn, redfish_response_mock):
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"DateTime": "2023-01-09T01:23:40-06:00", "DateTimeLocalOffset": "-06:00"}
+ resp = self.module.get_current_time(redfish_str_controller_conn)
+ assert resp[0] == "2023-01-09T01:23:40-06:00"
+ assert resp[1] == "-06:00"
+
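+ # On HTTPError, get_current_time is expected to return (None, None) rather than raise.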
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ resp = self.module.get_current_time(redfish_str_controller_conn)
+ assert resp[0] is None
+ assert resp[1] is None
+
+ def test_validate_time(self, redfish_str_controller_conn, redfish_response_mock, redfish_default_args):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1",
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "InMaintenanceWindowOnReset",
+ "maintenance_window": {"start_time": "2023-09-30T05:15:40-06:00", "duration": 900}}
+ redfish_default_args.update(param)
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"DateTime": "2023-01-09T01:23:40-06:00", "DateTimeLocalOffset": "-06:00"}
+ with pytest.raises(Exception):
+ result = self.module.validate_time(f_module, redfish_str_controller_conn, "2023-01-09T01:23:40-05:00")
+ assert result["msg"] == "The maintenance time must be post-fixed with local offset to -05:00."
+
+ redfish_response_mock.json_data = {"DateTime": "2023-01-09T01:23:40-06:00", "DateTimeLocalOffset": "-06:00"}
+ with pytest.raises(Exception):
+ result = self.module.validate_time(f_module, redfish_str_controller_conn, "2022-01-09T01:23:40-05:00")
+ assert result["msg"] == "The specified maintenance time window occurs in the past, provide a future time" \
+ " to schedule the maintenance window."
+
+ redfish_response_mock.json_data = {"DateTime": "2023-10-09T01:23:40+06:00", "DateTimeLocalOffset": "+06:00"}
+ with pytest.raises(Exception):
+ result = self.module.validate_time(f_module, redfish_str_controller_conn, "2023-09-09T01:23:40+06:00")
+ assert result["msg"] == "The specified maintenance time window occurs in the past, provide a future time" \
+ " to schedule the maintenance window."
+
+ def test_check_attr_exists(self, redfish_str_controller_conn, redfish_response_mock):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1",
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "InMaintenanceWindowOnReset",
+ "maintenance_window": {"start_time": "2023-09-30T05:15:40-06:00", "duration": 900}}
+ curr_attr = {"ControllerMode": "RAID", "CheckConsistencyMode": "StopOnError", "LoadBalanceMode": "Automatic"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.status_code = 200
+ result = self.module.check_attr_exists(f_module, curr_attr, param["attributes"])
+ assert result["CheckConsistencyMode"] == "Normal"
+ f_module.check_mode = True
+ with pytest.raises(Exception) as ex:
+ self.module.check_attr_exists(f_module, curr_attr, param["attributes"])
+ assert ex.value.args[0] == "Changes found to be applied."
+ f_module.check_mode = False
+ with pytest.raises(Exception) as ex:
+ self.module.check_attr_exists(f_module, curr_attr, {"ControllerMode": "RAID",
+ "CheckConsistencyMode": "StopOnError"})
+ assert ex.value.args[0] == "No changes found to be applied."
+ f_module.check_mode = False
+ with pytest.raises(Exception) as ex:
+ self.module.check_attr_exists(f_module, curr_attr, {"ControllerMode": "RAID",
+ "CheckConsistency": "StopOnError"})
+ assert ex.value.args[0] == "The following attributes are invalid: ['CheckConsistency']"
+
+ def test_get_attributes(self, redfish_str_controller_conn, redfish_response_mock):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1",
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "InMaintenanceWindowOnReset",
+ "maintenance_window": {"start_time": "2023-09-30T05:15:40-06:00", "duration": 900}}
+ resp = {"@Redfish.Settings": {"SupportedApplyTimes": ["Immediate", "OnReset", "AtMaintenanceWindowStart",
+ "InMaintenanceWindowOnReset"]},
+ "Id": "RAID.Integrated.1-1",
+ "Oem": {
+ "Dell": {
+ "DellStorageController": {
+ "AlarmState": "AlarmNotPresent",
+ "AutoConfigBehavior": "NotApplicable",
+ "BackgroundInitializationRatePercent": 30,
+ "BatteryLearnMode": "null",
+ "BootVirtualDiskFQDD": "null",
+ "CacheSizeInMB": 2048,
+ "CachecadeCapability": "NotSupported",
+ "CheckConsistencyMode": "StopOnError",
+ "ConnectorCount": 2,
+ "ControllerBootMode": "ContinueBootOnError",
+ "ControllerFirmwareVersion": "25.5.9.0001",
+ "ControllerMode": "RAID",
+ "CopybackMode": "OnWithSMART",
+ "CurrentControllerMode": "RAID",
+ "Device": "0",
+ "DeviceCardDataBusWidth": "Unknown",
+ "DeviceCardSlotLength": "Unknown",
+ "DeviceCardSlotType": "Unknown",
+ "DriverVersion": "6.706.06.00",
+ "EncryptionCapability": "LocalKeyManagementCapable",
+ "EncryptionMode": "LocalKeyManagement",
+ "EnhancedAutoImportForeignConfigurationMode": "Disabled",
+ "KeyID": "MyNewKey@123",
+ "LastSystemInventoryTime": "2022-12-23T04:59:41+00:00",
+ "LastUpdateTime": "2022-12-23T17:59:44+00:00",
+ "LoadBalanceMode": "Automatic",
+ "MaxAvailablePCILinkSpeed": "Generation 3",
+ "MaxDrivesInSpanCount": 32,
+ "MaxPossiblePCILinkSpeed": "Generation 3",
+ "MaxSpansInVolumeCount": 8,
+ "MaxSupportedVolumesCount": 64,
+ "PCISlot": "null",
+ "PatrolReadIterationsCount": 0,
+ "PatrolReadMode": "Automatic",
+ "PatrolReadRatePercent": 30,
+ "PatrolReadState": "Stopped",
+ "PatrolReadUnconfiguredAreaMode": "Enabled",
+ "PersistentHotspare": "Disabled",
+ "PersistentHotspareMode": "Disabled",
+ "RAIDMode": "None",
+ "RealtimeCapability": "Capable",
+ "ReconstructRatePercent": 30,
+ "RollupStatus": "OK",
+ "SASAddress": "54CD98F0760C3D00",
+ "SecurityStatus": "SecurityKeyAssigned",
+ "SharedSlotAssignmentAllowed": "NotApplicable",
+ "SlicedVDCapability": "Supported",
+ "SpindownIdleTimeSeconds": 30,
+ "SupportControllerBootMode": "Supported",
+ "SupportEnhancedAutoForeignImport": "Supported",
+ "SupportRAID10UnevenSpans": "Supported",
+ "SupportedInitializationTypes": [
+ "Slow",
+ "Fast"],
+ "SupportedInitializationTypes@odata.count": 2,
+ "SupportsLKMtoSEKMTransition": "No",
+ "T10PICapability": "NotSupported"
+ }
+ }}}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = resp
+ result = self.module.get_attributes(f_module, redfish_str_controller_conn)
+ assert result == resp
+
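+ # On HTTPError, get_attributes is expected to return an empty dictionary.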
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ resp = self.module.get_attributes(f_module, redfish_str_controller_conn)
+ assert resp == {}
+
+ def test_get_redfish_apply_time(self, redfish_str_controller_conn, redfish_response_mock):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1",
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "InMaintenanceWindowOnReset",
+ "maintenance_window": {"start_time": "2023-09-30T05:15:40-06:00", "duration": 900}}
+ time_settings = ["Immediate", "OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"DateTime": "2023-01-09T01:23:40-06:00", "DateTimeLocalOffset": "-06:00"}
+ result = self.module.get_redfish_apply_time(f_module, redfish_str_controller_conn, param["apply_time"],
+ time_settings)
+ assert result['ApplyTime'] == param['apply_time']
+ assert result['MaintenanceWindowDurationInSeconds'] == 900
+ assert result['MaintenanceWindowStartTime'] == '2023-09-30T05:15:40-06:00'
+
+ param1 = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1",
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "InMaintenanceWindowOnReset",
+ "maintenance_window": {"start_time": "2023-09-30T05:15:40-06:00", "duration": 900}}
+ f_module = self.get_module_mock(params=param1)
+ redfish_response_mock.json_data = {"DateTime": "2023-01-09T01:23:40-06:00", "DateTimeLocalOffset": "-06:00"}
+ result = self.module.get_redfish_apply_time(f_module, redfish_str_controller_conn, param1["apply_time"],
+ time_settings)
+ assert result['ApplyTime'] == param1['apply_time']
+
+ result = self.module.get_redfish_apply_time(f_module, redfish_str_controller_conn,
+ param1["apply_time"], [])
+ assert result == {}
+
+ with pytest.raises(Exception):
+ result = self.module.get_redfish_apply_time(f_module, redfish_str_controller_conn,
+ param1["apply_time"], ['NotEmpty'])
+ assert result["status_msg"] == "Apply time InMaintenanceWindowOnReset is not supported."
+
+ def test_apply_attributes(self, redfish_str_controller_conn, redfish_response_mock):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1",
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "Immediate"}
+ time_settings = ["Immediate", "OnReset", "AtMaintenanceWindowStart", "InMaintenanceWindowOnReset"]
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"DateTime": "2023-01-09T01:23:40-06:00", "DateTimeLocalOffset": "-06:00"}
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
+ job_id, time_set = self.module.apply_attributes(f_module, redfish_str_controller_conn,
+ {"CheckConsistencyMode": "StopOnError"},
+ time_settings)
+ assert job_id == "JID_XXXXXXXXXXXXX"
+ assert time_set == {'ApplyTime': "Immediate"}
+
+ redfish_response_mock.status_code = 202
+ redfish_response_mock.json_data = {"error": {"@Message.ExtendedInfo": [
+ {"Message": "The value 'abcd' for the property PatrolReadMode is not in the list of acceptable values.",
+ "MessageArgs": ["abcd", "PatrolReadMode"], "MessageArgs@odata.count": 2,
+ "MessageId": "Base.1.12.PropertyValueNotInList",
+ "RelatedProperties": ["#/Oem/Dell/DellStorageController/PatrolReadMode"],
+ "RelatedProperties@odata.count": 1,
+ "Resolution": "Choose a value from the enumeration list that the implementation can support and"
+ "resubmit the request if the operation failed.", "Severity": "Warning"}
+ ]}}
+ with pytest.raises(Exception) as ex:
+ self.module.apply_attributes(f_module, redfish_str_controller_conn, {"CheckConsistencyMode": "StopOnError"},
+ time_settings)
+ assert ex.value.args[0] == "Unable to configure the controller attribute(s) settings."
+
+ time_settings = []
+ with pytest.raises(Exception) as ex:
+ job_id, time_set = self.module.apply_attributes(f_module, redfish_str_controller_conn,
+ {"CheckConsistencyMode": "StopOnError"},
+ time_settings)
+ assert job_id == "JID_XXXXXXXXXXXXX"
+ assert time_set == {}
+
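+ # An HTTPError from invoke_request is also expected to surface the controller attribute configuration failure message.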
+ json_str = to_text(json.dumps({"data": "out"}))
+ redfish_str_controller_conn.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ HTTP_ERROR_MSG,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as ex:
+ self.module.apply_attributes(f_module, redfish_str_controller_conn, {"CheckConsistencyMode": "StopOnError"},
+ time_settings)
+ assert ex.value.args[0] == "Unable to configure the controller attribute(s) settings."
+
+ def test_set_attributes(self, redfish_str_controller_conn, redfish_response_mock):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": "RAID.Integrated.1-1", "attributes": {"ControllerMode": "HBA"},
+ "job_wait": True, "apply_time": "Immediate"}
+ resp = {"@Redfish.Settings": {"SupportedApplyTimes": ["Immediate", "OnReset", "AtMaintenanceWindowStart",
+ "InMaintenanceWindowOnReset"]},
+ "Id": "RAID.Integrated.1-1",
+ "Oem": {
+ "Dell": {
+ "DellStorageController": {
+ "AlarmState": "AlarmNotPresent",
+ "AutoConfigBehavior": "NotApplicable",
+ "BackgroundInitializationRatePercent": 30,
+ "BatteryLearnMode": "null",
+ "BootVirtualDiskFQDD": "null",
+ "CacheSizeInMB": 2048,
+ "CachecadeCapability": "NotSupported",
+ "CheckConsistencyMode": "StopOnError",
+ "ConnectorCount": 2,
+ "ControllerBootMode": "ContinueBootOnError",
+ "ControllerFirmwareVersion": "25.5.9.0001",
+ "ControllerMode": "RAID",
+ "CopybackMode": "OnWithSMART",
+ "CurrentControllerMode": "RAID",
+ "Device": "0",
+ "DeviceCardDataBusWidth": "Unknown",
+ "DeviceCardSlotLength": "Unknown",
+ "DeviceCardSlotType": "Unknown",
+ "DriverVersion": "6.706.06.00",
+ "EncryptionCapability": "LocalKeyManagementCapable",
+ "EncryptionMode": "LocalKeyManagement",
+ "EnhancedAutoImportForeignConfigurationMode": "Disabled",
+ "KeyID": "MyNewKey@123",
+ "LastSystemInventoryTime": "2022-12-23T04:59:41+00:00",
+ "LastUpdateTime": "2022-12-23T17:59:44+00:00",
+ "LoadBalanceMode": "Automatic",
+ "MaxAvailablePCILinkSpeed": "Generation 3",
+ "MaxDrivesInSpanCount": 32,
+ "MaxPossiblePCILinkSpeed": "Generation 3",
+ "MaxSpansInVolumeCount": 8,
+ "MaxSupportedVolumesCount": 64,
+ "PCISlot": "null",
+ "PatrolReadIterationsCount": 0,
+ "PatrolReadMode": "Automatic",
+ "PatrolReadRatePercent": 30,
+ "PatrolReadState": "Stopped",
+ "PatrolReadUnconfiguredAreaMode": "Enabled",
+ "PersistentHotspare": "Disabled",
+ "PersistentHotspareMode": "Disabled",
+ "RAIDMode": "None",
+ "RealtimeCapability": "Capable",
+ "ReconstructRatePercent": 30,
+ "RollupStatus": "OK",
+ "SASAddress": "54CD98F0760C3D00",
+ "SecurityStatus": "SecurityKeyAssigned",
+ "SharedSlotAssignmentAllowed": "NotApplicable",
+ "SlicedVDCapability": "Supported",
+ "SpindownIdleTimeSeconds": 30,
+ "SupportControllerBootMode": "Supported",
+ "SupportEnhancedAutoForeignImport": "Supported",
+ "SupportRAID10UnevenSpans": "Supported",
+ "SupportedInitializationTypes": [
+ "Slow",
+ "Fast"
+ ],
+ "SupportedInitializationTypes@odata.count": 2,
+ "SupportsLKMtoSEKMTransition": "No",
+ "T10PICapability": "NotSupported"
+ }
+ }}}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = resp
+ redfish_response_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/Jobs/JID_XXXXXXXXXXXXX"}
+ job_id, time_set = self.module.set_attributes(f_module, redfish_str_controller_conn)
+ assert job_id == "JID_XXXXXXXXXXXXX"
+ assert time_set == {'ApplyTime': "Immediate"}
+
+ param.update({"attributes": {"ControllerMode": "HBA", 'RandomKey': 123}})
+ f_module = self.get_module_mock(params=param)
+ with pytest.raises(Exception):
+ result = self.module.set_attributes(f_module, redfish_str_controller_conn)
+ assert result['msg'] == "Other attributes cannot be updated when ControllerMode is provided as input."
+
+ def test_main_success_attributes(self, redfish_str_controller_conn, redfish_response_mock, redfish_default_args, mocker):
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
+ "controller_id": None,
+ "attributes": {"ControllerMode": "RAID", "CheckConsistencyMode": "Normal"},
+ "job_wait": True, "apply_time": "Immediate"}
+ resp = {"@Redfish.Settings": {"SupportedApplyTimes": ["Immediate", "OnReset", "AtMaintenanceWindowStart",
+ "InMaintenanceWindowOnReset"]},
+ "Id": "RAID.Integrated.1-1",
+ "Oem": {
+ "Dell": {
+ "DellStorageController": {
+ "AlarmState": "AlarmNotPresent",
+ "AutoConfigBehavior": "NotApplicable",
+ "BackgroundInitializationRatePercent": 30,
+ "BatteryLearnMode": "null",
+ "BootVirtualDiskFQDD": "null",
+ "CacheSizeInMB": 2048,
+ "CachecadeCapability": "NotSupported",
+ "CheckConsistencyMode": "StopOnError",
+ "ConnectorCount": 2,
+ "ControllerBootMode": "ContinueBootOnError",
+ "ControllerFirmwareVersion": "25.5.9.0001",
+ "ControllerMode": "RAID",
+ "CopybackMode": "OnWithSMART",
+ "CurrentControllerMode": "RAID",
+ "Device": "0",
+ "DeviceCardDataBusWidth": "Unknown",
+ "DeviceCardSlotLength": "Unknown",
+ "DeviceCardSlotType": "Unknown",
+ "DriverVersion": "6.706.06.00",
+ "EncryptionCapability": "LocalKeyManagementCapable",
+ "EncryptionMode": "LocalKeyManagement",
+ "EnhancedAutoImportForeignConfigurationMode": "Disabled",
+ "KeyID": "MyNewKey@123",
+ "LastSystemInventoryTime": "2022-12-23T04:59:41+00:00",
+ "LastUpdateTime": "2022-12-23T17:59:44+00:00",
+ "LoadBalanceMode": "Automatic",
+ "MaxAvailablePCILinkSpeed": "Generation 3",
+ "MaxDrivesInSpanCount": 32,
+ "MaxPossiblePCILinkSpeed": "Generation 3",
+ "MaxSpansInVolumeCount": 8,
+ "MaxSupportedVolumesCount": 64,
+ "PCISlot": "null",
+ "PatrolReadIterationsCount": 0,
+ "PatrolReadMode": "Automatic",
+ "PatrolReadRatePercent": 30,
+ "PatrolReadState": "Stopped",
+ "PatrolReadUnconfiguredAreaMode": "Enabled",
+ "PersistentHotspare": "Disabled",
+ "PersistentHotspareMode": "Disabled",
+ "RAIDMode": "None",
+ "RealtimeCapability": "Capable",
+ "ReconstructRatePercent": 30,
+ "RollupStatus": "OK",
+ "SASAddress": "54CD98F0760C3D00",
+ "SecurityStatus": "SecurityKeyAssigned",
+ "SharedSlotAssignmentAllowed": "NotApplicable",
+ "SlicedVDCapability": "Supported",
+ "SpindownIdleTimeSeconds": 30,
+ "SupportControllerBootMode": "Supported",
+ "SupportEnhancedAutoForeignImport": "Supported",
+ "SupportRAID10UnevenSpans": "Supported",
+ "SupportedInitializationTypes": [
+ "Slow",
+ "Fast"
+ ],
+ "SupportedInitializationTypes@odata.count": 2,
+ "SupportsLKMtoSEKMTransition": "No",
+ "T10PICapability": "NotSupported"
+ }
+ }}}
+ redfish_default_args.update(param)
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.check_id_exists', return_value=None)
+ result = self._run_module(redfish_default_args)
+ assert result['msg'] == "controller_id is required to perform this operation."
+ param.update({"controller_id": "RAID.Integrated.1-1"})
+ param.update({"job_wait": False})
+ redfish_default_args.update(param)
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.check_id_exists', return_value=None)
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.set_attributes',
+ return_value=("JID_XXXXXXXXXXXXX", {'ApplyTime': "Immediate"}))
+ result = self._run_module(redfish_default_args)
+ assert result["task"]["id"] == "JID_XXXXXXXXXXXXX"
+ param.update({"job_wait": True})
+ redfish_default_args.update(param)
+ redfish_response_mock.json_data = {"JobState": "Completed"}
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.check_id_exists', return_value=None)
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.set_attributes',
+ return_value=("JID_XXXXXXXXXXXXX", {'ApplyTime': "Immediate"}))
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.wait_for_job_completion',
+ return_value=(redfish_response_mock, "Success"))
+ result = self._run_module(redfish_default_args)
+ assert result['msg'] == "Successfully applied the controller attributes."
+
+ redfish_response_mock.json_data = {"JobState": "Failed"}
+ result = self._run_module(redfish_default_args)
+ assert result['msg'] == "Successfully applied the controller attributes."
+
@pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
- ImportError, ValueError, TypeError])
+ ImportError, ValueError, TypeError, HTTPError])
def test_main_error(self, redfish_str_controller_conn, redfish_response_mock, mocker,
exc_type, redfish_default_args):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "ResetConfig", "controller_id": "RAID.Integrated.1-1"}
redfish_default_args.update(param)
mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.validate_inputs', return_value=None)
@@ -268,14 +952,14 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.ctrl_reset_config',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type(HTTPS_ADDRESS, 400, HTTP_ERROR_MSG,
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(redfish_default_args)
assert result['failed'] is True
assert 'msg' in result
def test_main_success(self, redfish_str_controller_conn, redfish_response_mock, redfish_default_args, mocker):
- param = {"baseuri": "192.168.0.1", "username": "username", "password": "password",
+ param = {"baseuri": "XX.XX.XX.XX", "username": "username", "password": "password",
"command": "SetControllerKey", "key": "Key@123", "key_id": "keyid@123",
"controller_id": "RAID.Integrated.1-1",
"target": ["Disk.Bay.0:Enclosure.Internal.0-1:RAID.Slot.1-1"]}
@@ -314,3 +998,19 @@ class TestIdracRedfishStorageController(FakeAnsibleModule):
return_value={"JobState": "Failed"})
result = self._run_module(redfish_default_args)
assert result["task"]["id"] == "JID_XXXXXXXXXXXXX"
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.strip_substr_dict',
+ return_value={"JobState": "Completed"})
+ result = self._run_module(redfish_default_args)
+ assert result["task"]["id"] == "JID_XXXXXXXXXXXXX"
+ param.update({"command": "OnlineCapacityExpansion", "job_wait": True, "volume_id": ['123']})
+ redfish_default_args.update(param)
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.online_capacity_expansion',
+ return_value=("", "", "JID_XXXXXXXXXXXXX"))
+ result = self._run_module(redfish_default_args)
+ assert result["task"]["id"] == "JID_XXXXXXXXXXXXX"
+ param.update({"command": "LockVirtualDisk", "job_wait": True, "volume_id": ['123']})
+ redfish_default_args.update(param)
+ mocker.patch(MODULE_PATH + 'idrac_redfish_storage_controller.lock_virtual_disk',
+ return_value=("", "", "JID_XXXXXXXXXXXXX"))
+ result = self._run_module(redfish_default_args)
+ assert result["task"]["id"] == "JID_XXXXXXXXXXXXX"
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py
index 3f4ca4977..a6fbb1d04 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_reset.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -14,10 +14,10 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_reset
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from mock import MagicMock, patch, Mock
+from mock import MagicMock, Mock
from io import StringIO
from ansible.module_utils._text import to_text
@@ -85,7 +85,7 @@ class TestReset(FakeAnsibleModule):
mocker.patch(MODULE_PATH + 'idrac_reset.run_idrac_reset', side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + 'idrac_reset.run_idrac_reset',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py
index 16d5b0307..bae1de38e 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_server_config_profile.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.4.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.4.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -12,345 +12,198 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
-import sys
+import mock
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_server_config_profile
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants,\
- AnsibleExitJson
-from mock import MagicMock, patch, Mock, mock_open
-from pytest import importorskip
-from ansible.module_utils.six.moves.urllib.parse import urlparse, ParseResult
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
-
-importorskip("omsdk.sdkfile")
-importorskip("omsdk.sdkcreds")
+SUCCESS_MSG = 'Successfully {0}ed the Server Configuration Profile'
+JOB_SUCCESS_MSG = 'Successfully triggered the job to {0} the Server Configuration Profile'
+PREVIEW_SUCCESS_MSG = 'Successfully previewed the Server Configuration Profile'
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+REDFISH_JOB_TRACKING = "idrac_server_config_profile.idrac_redfish_job_tracking"
class TestServerConfigProfile(FakeAnsibleModule):
module = idrac_server_config_profile
@pytest.fixture
- def idrac_server_configure_profile_mock(self, mocker):
- omsdk_mock = MagicMock()
+ def idrac_server_configure_profile_mock(self):
idrac_obj = MagicMock()
- omsdk_mock.file_share_manager = idrac_obj
- omsdk_mock.config_mgr = idrac_obj
return idrac_obj
@pytest.fixture
- def idrac_file_manager_server_config_profile_mock(self, mocker):
- try:
- file_manager_obj = mocker.patch(
- MODULE_PATH + 'idrac_server_config_profile.file_share_manager')
- except AttributeError:
- file_manager_obj = MagicMock()
- obj = MagicMock()
- file_manager_obj.create_share_obj.return_value = obj
- return file_manager_obj
-
- @pytest.fixture
def idrac_scp_redfish_mock(self, mocker, idrac_server_configure_profile_mock):
idrac_conn_class_mock = mocker.patch(MODULE_PATH + 'idrac_server_config_profile.iDRACRedfishAPI',
return_value=idrac_server_configure_profile_mock)
idrac_conn_class_mock.return_value.__enter__.return_value = idrac_server_configure_profile_mock
return idrac_server_configure_profile_mock
- def test_run_export_import_http(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "export",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML", "export_use": "Default"})
- f_module = self.get_module_mock(params=idrac_default_args)
- export_response = {"msg": "Successfully exported the Server Configuration Profile.",
- "scp_status": {"Name": "Export: Server Configuration Profile", "PercentComplete": 100,
- "TaskState": "Completed", "TaskStatus": "OK", "Id": "JID_236654661194"}}
- mocker.patch(MODULE_PATH + "idrac_server_config_profile.urlparse",
- return_value=ParseResult(scheme='http', netloc='192.168.0.1',
- path='/share/',
- params='', query='', fragment=''))
- mocker.patch(MODULE_PATH + "idrac_server_config_profile.response_format_change",
- return_value=export_response)
- result = self.module.run_export_import_scp_http(idrac_scp_redfish_mock, f_module)
- assert result["msg"] == "Successfully exported the Server Configuration Profile."
- idrac_default_args.update({"command": "import"})
- f_module = self.get_module_mock(params=idrac_default_args)
- import_response = {"msg": "Successfully imported the Server Configuration Profile.",
- "scp_status": {"Name": "Import: Server Configuration Profile", "PercentComplete": 100,
- "TaskState": "Completed", "TaskStatus": "OK", "Id": "JID_236654661194"}}
- mocker.patch(MODULE_PATH + "idrac_server_config_profile.response_format_change",
- return_value=import_response)
- result = self.module.run_export_import_scp_http(idrac_scp_redfish_mock, f_module)
- assert result["msg"] == "Successfully imported the Server Configuration Profile."
+ @pytest.fixture
+ def idrac_redfish_job_tracking_mock(self, mocker, idrac_server_configure_profile_mock):
+ idrac_conn_class_mock = mocker.patch(MODULE_PATH + REDFISH_JOB_TRACKING,
+ return_value=idrac_server_configure_profile_mock)
+ idrac_conn_class_mock.return_value.__enter__.return_value = idrac_server_configure_profile_mock
+ idrac_conn_class_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"}
+ return idrac_server_configure_profile_mock
- def test_http_share_msg_main(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "http://192.168.0.1:/share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": False, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False})
- share_return = {"Oem": {"Dell": {"MessageId": "SYS069"}}}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http',
- return_value=share_return)
- result = self._run_module(idrac_default_args)
- assert result["msg"] == "Successfully triggered the job to import the Server Configuration Profile."
- share_return = {"Oem": {"Dell": {"MessageId": "SYS053"}}}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http',
- return_value=share_return)
+ @pytest.mark.parametrize("params", [
+ {"message": SUCCESS_MSG.format("export"),
+ "mparams": {"share_name": "\\{SCP SHARE IP}\\share", "job_wait": True,
+ "scp_components": "IDRAC", "scp_file": "scp_file.xml",
+ "proxy_port": 80, "export_format": "XML"}},
+ {"message": SUCCESS_MSG.format("export"),
+ "mparams": {"share_name": "https://{SCP SHARE IP}/myshare/", "proxy_type": "socks4",
+ "proxy_support": True, "job_wait": True, "scp_components": "IDRAC",
+ "proxy_port": 80, "export_format": "JSON", "proxy_server": "PROXY_SERVER_IP",
+ "proxy_username": "proxy_username"}},
+ {"message": JOB_SUCCESS_MSG.format("export"),
+ "mparams": {"share_name": "{SCP SHARE IP}:/nfsshare", "job_wait": False,
+ "scp_components": "IDRAC", "scp_file": "scp_file.txt"}},
+ {"message": JOB_SUCCESS_MSG.format("export"),
+ "mparams": {"share_name": "/share", "job_wait": False,
+ "scp_components": "IDRAC", "scp_file": "scp_file.json"}},
+ ])
+ def test_run_export_scp(self, params, idrac_scp_redfish_mock, idrac_redfish_job_tracking_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"share_user": "sharename", "command": "export",
+ "export_use": "Default", "include_in_export": "default"})
+ idrac_default_args.update(params['mparams'])
+ mocker.patch("builtins.open", mocker.mock_open())
+ idrac_redfish_job_tracking_mock.status_code = 202
+ idrac_redfish_job_tracking_mock.success = True
+ mocker.patch(MODULE_PATH + REDFISH_JOB_TRACKING,
+ return_value=(False, False, {"Status": "Completed"}, {}))
+ result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False))
+ assert params['message'] in result['msg']
+
+ @pytest.mark.parametrize("params", [
+ {"message": CHANGES_FOUND,
+ "json_data": {"Id": "JID_932024672685", "Message": SUCCESS_MSG.format("import"), "MessageId": "SYS081",
+ "PercentComplete": 100, "file": "https://{SCP SHARE PATH}/{SCP FILE NAME}.json"},
+ "check_mode": True,
+ "mparams": {"share_name": "{SCP SHARE IP}:/nfsshare", "share_user": "sharename",
+ "job_wait": False, "scp_components": "IDRAC",
+ "scp_file": "scp_file1.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ {"message": NO_CHANGES_FOUND,
+ "json_data": {"Id": "JID_932024672685", "Message": SUCCESS_MSG.format("import"), "MessageId": "SYS069",
+ "PercentComplete": 100, "file": "https://{SCP SHARE PATH}/{SCP FILE NAME}.json"},
+ "check_mode": True,
+ "mparams": {"share_name": "\\{SCP SHARE IP}\\share", "share_user": "sharename",
+ "job_wait": False, "scp_components": "IDRAC",
+ "scp_file": "scp_file1.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ {"message": SUCCESS_MSG.format("import"),
+ "json_data": {"Id": "JID_932024672685", "Message": NO_CHANGES_FOUND, "MessageId": "SYS043",
+ "PercentComplete": 100, "file": "https://{SCP SHARE PATH}/{SCP FILE NAME}.json"},
+ "mparams": {"share_name": "/share", "share_user": "sharename",
+ "job_wait": True, "scp_components": "IDRAC",
+ "scp_file": "scp_file1.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ {"message": SUCCESS_MSG.format("import"),
+ "json_data": {"Id": "JID_932024672685", "Message": SUCCESS_MSG.format("import"), "MessageId": "SYS069",
+ "PercentComplete": 100, "file": "https://{SCP SHARE PATH}/{SCP FILE NAME}.json"},
+ "mparams": {"share_name": "https://{SCP SHARE IP}/share", "share_user": "sharename",
+ "job_wait": True, "scp_components": "IDRAC",
+ "scp_file": "scp_file1.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ {"message": SUCCESS_MSG.format("import"),
+ "json_data": {"Id": "JID_932024672685", "Message": SUCCESS_MSG.format("import"), "MessageId": "SYS053",
+ "PercentComplete": 100, "file": "https://{SCP SHARE PATH}/{SCP FILE NAME}.json"},
+ "mparams": {"share_name": "https://{SCP SHARE IP}/share", "share_user": "sharename",
+ "job_wait": True, "scp_components": "IDRAC",
+ "scp_file": "scp_file1.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ {"message": SUCCESS_MSG.format("import"),
+ "json_data": {"Id": "JID_932024672685", "Message": NO_CHANGES_FOUND, "MessageId": "SYS069",
+ "PercentComplete": 100, "file": "https://{SCP SHARE PATH}/{SCP FILE NAME}.json"},
+ "mparams": {"command": "import", "job_wait": True, "scp_components": "IDRAC",
+ "import_buffer": "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'> \
+ <Value>Disabled</Value></Attribute></Component><Component FQDD='iDRAC.Embedded.1'>"}},
+ ])
+ @mock.patch(MODULE_PATH + "idrac_server_config_profile.exists", return_value=True)
+ def test_run_import_scp(self, mock_exists, params, idrac_scp_redfish_mock, idrac_redfish_job_tracking_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"command": "import"})
+ idrac_default_args.update(params['mparams'])
+ mocker.patch("builtins.open", mocker.mock_open())
+ if params.get('check_mode'):
+ mocker.patch(MODULE_PATH + 'idrac_server_config_profile.preview_scp_redfish',
+ return_value=params['json_data'])
+ elif params['mparams']['job_wait']:
+ mocker.patch(MODULE_PATH + REDFISH_JOB_TRACKING,
+ return_value=(False, False, {"Status": "Completed"}, {}))
+ else:
+ idrac_scp_redfish_mock.import_scp.return_value = params['json_data']
+ result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False))
+ assert params['message'] in result['msg']
+
+ @pytest.mark.parametrize("params", [
+ {"message": PREVIEW_SUCCESS_MSG,
+ "check_mode": True,
+ "mparams": {"share_name": "{SCP SHARE IP}:/nfsshare", "share_user": "sharename",
+ "command": "preview", "job_wait": True,
+ "scp_components": "IDRAC", "scp_file": "scp_file4.xml"}},
+ {"message": PREVIEW_SUCCESS_MSG,
+ "mparams": {"share_name": "https://{SCP SHARE IP}/nfsshare", "share_user": "sharename",
+ "command": "preview", "job_wait": True,
+ "scp_components": "IDRAC", "scp_file": "scp_file4.xml"}},
+ ])
+ def test_preview_scp(self, params, idrac_scp_redfish_mock, idrac_redfish_job_tracking_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"command": "preview"})
+ idrac_default_args.update(params['mparams'])
+ mocker.patch(MODULE_PATH + REDFISH_JOB_TRACKING,
+ return_value=(False, False, {"Status": "Completed"}, {}))
+ result = self._run_module(idrac_default_args, check_mode=params.get('check_mode', False))
+ assert params['message'] in result['msg']
+
+ def test_preview_scp_redfish_throws_ex(self, idrac_scp_redfish_mock, idrac_redfish_job_tracking_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"share_name": "{SCP SHARE IP}:/nfsshare", "share_user": "sharename",
+ "command": "preview", "job_wait": True,
+ "scp_components": "IDRAC", "scp_file": "scp_file5.xml"})
+ idrac_redfish_job_tracking_mock.headers = {"Location": "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"}
+ mocker.patch(MODULE_PATH + 'idrac_server_config_profile.idrac_redfish_job_tracking',
+ return_value=(True, False, {"Status": "Failed"}, {}))
result = self._run_module(idrac_default_args)
- assert result["msg"] == "Successfully triggered the job to import the Server Configuration Profile."
- idrac_default_args.update({"command": "export"})
- share_return = {"Oem": {"Dell": {"MessageId": "SYS043"}}}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http',
- return_value=share_return)
+ assert result['failed']
+
+ def test_import_scp_http_throws_exception(self, idrac_scp_redfish_mock, idrac_redfish_job_tracking_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"share_name": "https://{SCP SHARE IP}/myshare/", "share_user": "sharename",
+ "command": "import", "job_wait": True, "scp_components": "IDRAC",
+ "scp_file": "scp_file2.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"})
+ mocker.patch(MODULE_PATH + REDFISH_JOB_TRACKING,
+ return_value=(True, False, {"Status": "Failed"}, {}))
result = self._run_module(idrac_default_args)
- assert result["msg"] == "Successfully triggered the job to export the Server Configuration Profile."
-
- def test_export_scp_redfish(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": False, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False})
- f_module = self.get_module_mock(params=idrac_default_args)
- share_return = {"Oem": {"Dell": {"MessageId": "SYS069"}}}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http',
- return_value=share_return)
- f_module.check_mode = False
- result = self.module.export_scp_redfish(f_module, idrac_scp_redfish_mock)
- assert result["file"] == "192.168.0.1:/share/scp_file.xml"
- idrac_default_args.update({"share_name": "\\\\100.96.16.123\\cifsshare"})
- result = self.module.export_scp_redfish(f_module, idrac_scp_redfish_mock)
- assert result["file"] == "\\\\100.96.16.123\\cifsshare\\scp_file.xml"
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change',
- return_value={"TaskStatus": "Critical"})
- with pytest.raises(Exception) as ex:
- self.module.export_scp_redfish(f_module, idrac_scp_redfish_mock)
- assert ex.value.args[0] == "Failed to import scp."
-
- def test_response_format_change(self, idrac_scp_redfish_mock, idrac_default_args):
- idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False})
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_scp_redfish_mock.json_data = {"Oem": {"Dell": {"key": "value"}}}
- result = self.module.response_format_change(idrac_scp_redfish_mock, f_module, "export_scp.yml")
- assert result["key"] == "value"
- idrac_default_args.update({"command": "export"})
- f_module = self.get_module_mock(params=idrac_default_args)
- result = self.module.response_format_change(idrac_scp_redfish_mock, f_module, "export_scp.yml")
- assert result["key"] == "value"
-
- def test_preview_scp_redfish(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "192.168.0.1:/nfsshare", "share_user": "sharename",
- "share_password": "sharepswd", "command": "preview", "job_wait": True,
- "scp_components": "IDRAC", "scp_file": "scp_file.xml",
- "end_host_power_state": "On", "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- share = {"share_ip": "192.168.0.1", "share_user": "sharename", "share_password": "password",
- "job_wait": True}
- f_module.check_mode = False
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_share_details',
- return_value=(share, "scp_file.xml"))
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change',
- return_value={"Status": "Success"})
- result = self.module.preview_scp_redfish(f_module, idrac_scp_redfish_mock, True, import_job_wait=False)
- assert result["Status"] == "Success"
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change',
- return_value={"TaskStatus": "Critical"})
- with pytest.raises(Exception) as ex:
- self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True)
- assert ex.value.args[0] == "Failed to preview scp."
- idrac_default_args.update({"share_name": "192.168.0.1:/nfsshare", "share_user": "sharename",
- "share_password": "sharepswd", "command": "preview", "job_wait": True,
- "scp_components": "IDRAC", "scp_file": "scp_file.xml",
- "end_host_power_state": "On", "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- f_module.check_mode = False
- share = {"share_ip": "192.168.0.1", "share_user": "sharename", "share_password": "password",
- "job_wait": True, "share_type": "LOCAL", "share_name": "share_name"}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_share_details',
- return_value=(share, "scp_file.xml"))
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.exists',
- return_value=False)
- with pytest.raises(Exception) as ex:
- self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, False)
- assert ex.value.args[0] == "Invalid file path provided."
-
- def test_import_scp_redfish(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- f_module.check_mode = True
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.preview_scp_redfish',
- return_value={"MessageId": "SYS081"})
- with pytest.raises(Exception) as ex:
- self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True)
- assert ex.value.args[0] == "Changes found to be applied."
- idrac_default_args.update({"share_name": "http://192.168.0.1/http-share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- f_module.check_mode = False
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change',
- return_value={"Status": "Success"})
- result = self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True)
- assert result["Status"] == "Success"
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.response_format_change',
- return_value={"TaskStatus": "Critical"})
- with pytest.raises(Exception) as ex:
- self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, True)
- assert ex.value.args[0] == "Failed to import scp."
- idrac_default_args.update({"share_name": "local-share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- f_module.check_mode = False
- share = {"share_ip": "192.168.0.1", "share_user": "sharename", "share_password": "password",
- "job_wait": True, "share_type": "LOCAL", "share_name": "share_name"}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_share_details',
- return_value=(share, "scp_file.xml"))
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.exists',
- return_value=False)
+ assert result['failed']
+
+ @pytest.mark.parametrize("params", [
+ {"message": "Invalid file path provided.",
+ "mparams": {"share_name": "/share/", "share_user": "sharename",
+ "command": "import", "job_wait": False, "scp_components": "IDRAC",
+ "scp_file": "scp_file3.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ {"message": "proxy_support is True but all of the following are missing: proxy_server",
+ "mparams": {"share_name": "https://{SCP SHARE IP}/myshare/", "proxy_type": "http",
+ "proxy_support": True, "job_wait": True, "scp_components": "IDRAC",
+ "proxy_port": 80, "export_format": "JSON",
+ "proxy_username": "proxy_username"}},
+ {"message": "import_buffer is mutually exclusive with share_name",
+ "mparams": {"share_name": "{SCP SHARE IP}:/nfsshare", "command": "preview", "job_wait": False,
+ "import_buffer": "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'> \
+ <Value>Disabled</Value></Attribute></Component><Component FQDD='iDRAC.Embedded.1'>"}},
+ {"message": "import_buffer is mutually exclusive with scp_file",
+ "mparams": {"scp_file": "example.json", "job_wait": False, "command": "import",
+ "import_buffer": "<SystemConfiguration><Component FQDD='iDRAC.Embedded.1'><Attribute Name='IPMILan.1#Enable'> \
+ <Value>Disabled</Value></Attribute></Component><Component FQDD='iDRAC.Embedded.1'>"}},
+ {"message": "The option ALL cannot be used with options IDRAC, BIOS, NIC, or RAID.",
+ "mparams": {"share_name": "https://{SCP SHARE IP}/myshare/", "share_user": "sharename",
+ "command": "import", "job_wait": True, "scp_components": ["IDRAC", "ALL"],
+ "scp_file": "scp_file2.xml", "end_host_power_state": "On",
+ "shutdown_type": "Graceful"}},
+ ])
+ def test_scp_invalid(self, params, idrac_scp_redfish_mock, idrac_default_args):
+ idrac_default_args.update(params['mparams'])
with pytest.raises(Exception) as ex:
- self.module.import_scp_redfish(f_module, idrac_scp_redfish_mock, False)
- assert ex.value.args[0] == "Invalid file path provided."
-
- def test_get_scp_file_format(self, idrac_scp_redfish_mock, idrac_default_args):
- idrac_default_args.update({"share_name": "192.168.0.1:/share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- result = self.module.get_scp_file_format(f_module)
- assert result == "scp_file.xml"
- idrac_default_args.update({"scp_file": None})
- f_module = self.get_module_mock(params=idrac_default_args)
- result = self.module.get_scp_file_format(f_module)
- assert result.startswith("idrac_ip_") is True
-
- def test_main_success_case(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "http://192.168.0.1/http-share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "import",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http',
- return_value={"MessageId": "SYS069"})
- result = self._run_module(idrac_default_args)
- assert result["scp_status"] == {'MessageId': 'SYS069'}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.run_export_import_scp_http',
- return_value={"MessageId": "SYS053"})
- result = self._run_module(idrac_default_args)
- assert result["scp_status"] == {'MessageId': 'SYS053'}
- idrac_default_args.update({"share_name": "192.168.0.1:/nfsshare"})
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.import_scp_redfish',
- return_value={"Message": "No changes were applied since the current component configuration "
- "matched the requested configuration"})
- result = self._run_module(idrac_default_args)
- assert result["changed"] is False
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.import_scp_redfish',
- return_value={"MessageId": "SYS043"})
- result = self._run_module(idrac_default_args)
- assert result["scp_status"] == {'MessageId': 'SYS043'}
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.import_scp_redfish',
- return_value={"MessageId": "SYS069"})
- result = self._run_module(idrac_default_args)
- assert result["scp_status"] == {'MessageId': 'SYS069'}
- idrac_default_args.update({"command": "export"})
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.export_scp_redfish',
- return_value={"Status": "Success"})
- result = self._run_module(idrac_default_args)
- assert result["scp_status"] == {'Status': 'Success'}
- idrac_default_args.update({"command": "preview"})
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.preview_scp_redfish',
- return_value={"MessageId": "SYS081"})
- result = self._run_module(idrac_default_args)
- assert result["scp_status"] == {"MessageId": "SYS081"}
-
- def test_get_scp_share_details(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "/local-share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "export",
- "job_wait": True, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- mocker.patch(MODULE_PATH + 'idrac_server_config_profile.get_scp_file_format',
- return_value="export_scp.xml")
- result = self.module.get_scp_share_details(f_module)
- assert result[1] == "export_scp.xml"
-
- def test_wait_for_response(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "/local-share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "export",
- "job_wait": False, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "XML",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- idrac_scp_redfish_mock.headers = {"Location": "/redfish/v1/TaskService/Tasks/JID_123456789"}
- resp_return_value = {"return_data": b"<SystemConfiguration Model='PowerEdge MX840c'>"
- b"<Component FQDD='System.Embedded.1'>"
- b"<Attribute Name='Backplane.1#BackplaneSplitMode'>0</Attribute>"
- b"</Component> </SystemConfiguration>",
- "return_job": {"JobState": "Completed", "JobType": "ExportConfiguration",
- "PercentComplete": 100, "Status": "Success"}}
- idrac_scp_redfish_mock.wait_for_job_complete.return_value = resp_return_value["return_data"]
- idrac_scp_redfish_mock.job_resp = resp_return_value["return_job"]
- share = {"share_name": "/local_share", "file_name": "export_file.xml"}
- if sys.version_info.major == 3:
- builtin_module_name = 'builtins'
- else:
- builtin_module_name = '__builtin__'
- with patch("{0}.open".format(builtin_module_name), mock_open(read_data=resp_return_value["return_data"])) as mock_file:
- result = self.module.wait_for_response(idrac_scp_redfish_mock, f_module, share, idrac_scp_redfish_mock)
- assert result.job_resp == resp_return_value["return_job"]
-
- def test_wait_for_response_json(self, idrac_scp_redfish_mock, idrac_default_args, mocker):
- idrac_default_args.update({"share_name": "/local-share", "share_user": "sharename",
- "share_password": "sharepswd", "command": "export",
- "job_wait": False, "scp_components": "IDRAC",
- "scp_file": "scp_file.xml", "end_host_power_state": "On",
- "shutdown_type": "Graceful", "export_format": "JSON",
- "export_use": "Default", "validate_certs": False, "idrac_port": 443})
- f_module = self.get_module_mock(params=idrac_default_args)
- resp_return_value = {"return_data": {
- "SystemConfiguration": {"Components": [
- {"FQDD": "SupportAssist.Embedded.1",
- "Attributes": [{"Name": "SupportAssist.1#SupportAssistEULAAccepted"}]
- }]}
- },
- "return_job": {"JobState": "Completed", "JobType": "ExportConfiguration",
- "PercentComplete": 100, "Status": "Success"}}
- mock_scp_json_data = idrac_scp_redfish_mock
- mock_scp_json_data.json_data = resp_return_value["return_data"]
- idrac_scp_redfish_mock.wait_for_job_complete.return_value = mock_scp_json_data
- idrac_scp_redfish_mock.job_resp = resp_return_value["return_job"]
- share = {"share_name": "/local_share", "file_name": "export_file.xml"}
- if sys.version_info.major == 3:
- builtin_module_name = 'builtins'
- else:
- builtin_module_name = '__builtin__'
- with patch("{0}.open".format(builtin_module_name), mock_open(read_data=str(resp_return_value["return_data"]))) as mock_file:
- result = self.module.wait_for_response(idrac_scp_redfish_mock, f_module, share, idrac_scp_redfish_mock)
- assert result.job_resp == resp_return_value["return_job"]
+ self._run_module(idrac_default_args)
+ assert params['message'] in ex.value.args[0]['msg']
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py
index ae89c2808..a0cf954b9 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_syslog.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2018-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2018-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -17,8 +17,8 @@ import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_syslog
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock
from io import StringIO
from ansible.module_utils._text import to_text
from pytest import importorskip
@@ -73,6 +73,17 @@ class TestSetupSyslog(FakeAnsibleModule):
'msg': {'Status': 'Success', 'message': 'No changes found to commit!'}},
'changed': False}
+ @pytest.mark.parametrize("mock_message", [{"Status": "Success", "Message": "No changes found to commit!"},
+ {"Status": "Success", "Message": "No changes found"}])
+ def test_main_setup_syslog_success_case01_extra(self, mock_message, idrac_connection_setup_syslog_mock, idrac_default_args, mocker,
+ idrac_file_manager_mock):
+ idrac_default_args.update({"share_name": "sharename", 'share_password': None, "syslog": "Enabled",
+ 'share_mnt': None, 'share_user': None})
+ mocker.patch(
+ MODULE_PATH + 'idrac_syslog.run_setup_idrac_syslog', return_value=mock_message)
+ result = self._run_module(idrac_default_args)
+ assert result['msg'] == "Successfully fetch the syslogs."
+
def test_run_setup_idrac_syslog_success_case01(self, idrac_connection_setup_syslog_mock, idrac_default_args,
idrac_file_manager_mock):
idrac_default_args.update({"share_name": "sharename", "share_mnt": "mountname", "share_user": "shareuser",
@@ -187,7 +198,7 @@ class TestSetupSyslog(FakeAnsibleModule):
else:
mocker.patch(MODULE_PATH +
'idrac_syslog.run_setup_idrac_syslog',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
@@ -195,3 +206,63 @@ class TestSetupSyslog(FakeAnsibleModule):
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
+
+ def test_run_setup_idrac_syslog_invalid_share(self, idrac_connection_setup_syslog_mock, idrac_default_args,
+ idrac_file_manager_mock, mocker):
+ idrac_default_args.update(
+ {"share_name": "dummy_share_name", "share_mnt": "mountname", "share_user": "shareuser",
+ "syslog": "Disabled", "share_password": "sharepassword"})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ obj = MagicMock()
+ obj.IsValid = True
+
+ mocker.patch(
+ MODULE_PATH + "idrac_syslog.file_share_manager.create_share_obj", return_value=(obj))
+ message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True,
+ "Status": "Success"}
+ idrac_connection_setup_syslog_mock.config_mgr.disable_syslog.return_value = message
+ msg = self.module.run_setup_idrac_syslog(
+ idrac_connection_setup_syslog_mock, f_module)
+ assert msg == {'changes_applicable': True,
+ 'message': 'changes found to commit!', 'changed': True, 'Status': 'Success'}
+
+ obj.IsValid = False
+ mocker.patch(
+ MODULE_PATH + "idrac_syslog.file_share_manager.create_share_obj", return_value=(obj))
+ with pytest.raises(Exception) as exc:
+ self.module.run_setup_idrac_syslog(
+ idrac_connection_setup_syslog_mock, f_module)
+ assert exc.value.args[0] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
+
+ def test_run_setup_idrac_syslog_disabled(self, idrac_connection_setup_syslog_mock, idrac_default_args,
+ idrac_file_manager_mock, mocker):
+ idrac_default_args.update(
+ {"share_name": "dummy_share_name", "share_mnt": "mountname", "share_user": "shareuser",
+ "syslog": "Disabled", "share_password": "sharepassword"})
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=True)
+ obj = MagicMock()
+ obj.IsValid = True
+
+ mocker.patch(
+ MODULE_PATH + "idrac_syslog.file_share_manager.create_share_obj", return_value=(obj))
+ message = {"changes_applicable": True, "message": "changes found to commit!", "changed": True,
+ "Status": "Success"}
+ idrac_connection_setup_syslog_mock.config_mgr.is_change_applicable.return_value = message
+ idrac_connection_setup_syslog_mock.config_mgr.disable_syslog.return_value = message
+ msg = self.module.run_setup_idrac_syslog(
+ idrac_connection_setup_syslog_mock, f_module)
+ assert msg == {'changes_applicable': True,
+ 'message': 'changes found to commit!', 'changed': True, 'Status': 'Success'}
+
+ def test_main_idrac_configure_timezone_attr_exception_handling_case(self, idrac_connection_setup_syslog_mock, idrac_default_args,
+ idrac_file_manager_mock, mocker):
+ idrac_default_args.update(
+ {"share_name": "dummy_share_name", "share_mnt": "mountname", "share_user": "shareuser",
+ "syslog": "Disabled", "share_password": "sharepassword"})
+ mocker.patch(
+ MODULE_PATH + 'idrac_syslog.run_setup_idrac_syslog',
+ side_effect=AttributeError('NoneType'))
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['msg'] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_system_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_system_info.py
index dbbb130e9..6913cb908 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_system_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_system_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,7 +15,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_system_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock, Mock
from pytest import importorskip
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -65,7 +65,7 @@ class TestSystemInventory(FakeAnsibleModule):
if exc_type not in [HTTPError, SSLValidationError]:
idrac_system_info_connection_mock.get_json_device.side_effect = exc_type('test')
else:
- idrac_system_info_connection_mock.get_json_device.side_effect = exc_type('http://testhost.com', 400,
+ idrac_system_info_connection_mock.get_json_device.side_effect = exc_type('https://testhost.com', 400,
'http error message',
{
"accept-type": "application/json"},
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_timezone_ntp.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_timezone_ntp.py
index ee1d9d2e8..7358efed2 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_timezone_ntp.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_timezone_ntp.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 6.0.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -14,8 +14,8 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_timezone_ntp
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock, PropertyMock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock, Mock
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
@@ -79,6 +79,29 @@ class TestConfigTimezone(FakeAnsibleModule):
result = self._run_module(idrac_default_args)
assert result["msg"] == "Successfully configured the iDRAC time settings."
+ status_msg = {"Status": "Failure", "Message": "No changes found to commit!",
+ "msg": {"Status": "Success", "Message": "No changes found to commit!"}}
+ mocker.patch(MODULE_PATH +
+ 'idrac_timezone_ntp.run_idrac_timezone_config', return_value=status_msg)
+ result = self._run_module(idrac_default_args)
+ assert result["msg"] == "Successfully configured the iDRAC time settings."
+
+ status_msg = {"Status": "Success",
+ "msg": {"Status": "Success", "Message": "No changes found to commit!"}}
+ mocker.patch(MODULE_PATH +
+ 'idrac_timezone_ntp.run_idrac_timezone_config', return_value=status_msg)
+ result = self._run_module(idrac_default_args)
+ assert result["msg"] == "Successfully configured the iDRAC time settings."
+ assert result["changed"] is True
+
+ status_msg = {"Status": "Success", "Message": "No changes found",
+ "msg": {"Status": "Success", "Message": "No changes found to commit!"}}
+ mocker.patch(MODULE_PATH +
+ 'idrac_timezone_ntp.run_idrac_timezone_config', return_value=status_msg)
+ result = self._run_module(idrac_default_args)
+ assert result["msg"] == "Successfully configured the iDRAC time settings."
+ assert result["changed"] is True
+
def test_run_idrac_timezone_config_success_case01(self, idrac_connection_configure_timezone_mock,
idrac_default_args, idrac_file_manager_config_timesone_mock):
idrac_default_args.update({"share_name": None, "share_mnt": None, "share_user": None,
@@ -218,7 +241,7 @@ class TestConfigTimezone(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'idrac_timezone_ntp.run_idrac_timezone_config',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
@@ -226,3 +249,27 @@ class TestConfigTimezone(FakeAnsibleModule):
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
+
+ def test_run_idrac_timezone_config(self, mocker, idrac_default_args,
+ idrac_connection_configure_timezone_mock,
+ idrac_file_manager_config_timesone_mock):
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ obj = MagicMock()
+ obj.IsValid = False
+ mocker.patch(
+ MODULE_PATH + "idrac_timezone_ntp.file_share_manager.create_share_obj", return_value=(obj))
+ with pytest.raises(Exception) as exc:
+ self.module.run_idrac_timezone_config(
+ idrac_connection_configure_timezone_mock, f_module)
+ assert exc.value.args[0] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
+
+ def test_main_idrac_configure_timezone_attr_exception_handling_case(self, mocker, idrac_default_args,
+ idrac_connection_configure_timezone_mock,
+ idrac_file_manager_config_timesone_mock):
+ idrac_default_args.update({"share_name": None})
+ mocker.patch(
+ MODULE_PATH + 'idrac_timezone_ntp.run_idrac_timezone_config',
+ side_effect=AttributeError('NoneType'))
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['msg'] == "Unable to access the share. Ensure that the share name, share mount, and share credentials provided are correct."
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user.py
index 2fa528d0d..0ef6e6da3 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -16,8 +16,8 @@ import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_user
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock
from ansible.module_utils._text import to_text
from io import StringIO
@@ -50,6 +50,17 @@ class TestIDRACUser(FakeAnsibleModule):
resp = self.module.get_payload(f_module, 1, action="update")
assert resp["Users.1.UserName"] == idrac_default_args["new_user_name"]
+ def test_get_payload_2(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "custom_privilege": 17, "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ f_module = self.get_module_mock(params=idrac_default_args)
+ resp = self.module.get_payload(f_module, 1)
+ assert resp["Users.1.Privilege"] == idrac_default_args["custom_privilege"]
+
def test_convert_payload_xml(self, idrac_connection_user_mock, idrac_default_args, mocker):
idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
"user_name": "test", "user_password": "password",
@@ -134,6 +145,7 @@ class TestIDRACUser(FakeAnsibleModule):
response = self.module.get_user_account(f_module, idrac_connection_user_mock)
assert response[0]["Users.2#UserName"] == "test_user"
assert response[3] == 3
+ assert response[4] == "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/3"
def test_get_user_account_2(self, idrac_connection_user_mock, idrac_default_args, mocker):
idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
@@ -145,11 +157,23 @@ class TestIDRACUser(FakeAnsibleModule):
mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.export_scp",
return_value=MagicMock())
mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.get_idrac_local_account_attr",
- return_value={"Users.2#UserName": "test_user", "Users.3#UserName": ""})
+ return_value={"Users.2#UserName": "test_user", "Users.3#UserName": "test"})
f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
response = self.module.get_user_account(f_module, idrac_connection_user_mock)
- assert response[3] == 3
- assert response[4] == "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/3"
+ assert response[2] == 3
+ assert response[1] == "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/3"
+
+ def test_get_user_account_invalid_name(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
+ "user_name": "", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as err:
+ self.module.get_user_account(f_module, idrac_connection_user_mock)
+ assert err.value.args[0] == "User name is not valid."
def test_create_or_modify_account_1(self, idrac_connection_user_mock, idrac_default_args, mocker):
idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
@@ -325,9 +349,52 @@ class TestIDRACUser(FakeAnsibleModule):
None, None, user_attr)
assert response[1] == "Successfully updated user account."
+ def test_create_or_modify_account_both_slot_empty_input(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ idrac_connection_user_mock.get_server_generation = (14, "3.60.60.60")
+ mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
+ mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
+ return_value=("<xml-data>", {"Users.1#UserName": "test_user"}))
+ mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.invoke_request",
+ return_value={"Message": "Successfully created a request."})
+ slot_id = 2
+ slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
+ user_attr = {"User.2#UserName": "test_user"}
+ response = self.module.create_or_modify_account(f_module, idrac_connection_user_mock, slot_id, slot_uri,
+ slot_id, slot_uri, user_attr)
+ assert response[1] == "Successfully updated user account."
+
+ def test_create_or_modify_account_both_slot_empty_none_input(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ idrac_connection_user_mock.get_server_generation = (14, "3.60.60.60")
+ mocker.patch(MODULE_PATH + "idrac_user.get_payload", return_value={"Users.2#UserName": "test_user"})
+ mocker.patch(MODULE_PATH + "idrac_user.convert_payload_xml",
+ return_value=("<xml-data>", {"Users.1#UserName": "test_user"}))
+ mocker.patch(MODULE_PATH + "idrac_user.iDRACRedfishAPI.invoke_request",
+ return_value={"Message": "Successfully created a request."})
+ # slot_id = 2
+ # slot_uri = "/redfish/v1/Managers/iDRAC.Embedded.1/Accounts/{0}/".format(slot_id)
+ user_attr = {"User.2#UserName": "test_user"}
+ with pytest.raises(Exception) as exc:
+ self.module.create_or_modify_account(f_module, idrac_connection_user_mock, None, None,
+ None, None, user_attr)
+ assert exc.value.args[0] == "Maximum number of users reached. Delete a user account and retry the operation."
+
@pytest.mark.parametrize("exc_type", [SSLValidationError, URLError, ValueError, TypeError,
ConnectionError, HTTPError, ImportError, RuntimeError])
- def test_main(self, exc_type, idrac_connection_user_mock, idrac_default_args, mocker):
+    def test_main_exceptions(self, exc_type, idrac_connection_user_mock, idrac_default_args, mocker):
idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
"user_name": "test", "user_password": "password",
"privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
@@ -340,11 +407,96 @@ class TestIDRACUser(FakeAnsibleModule):
side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + "idrac_user.create_or_modify_account",
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
- if not exc_type == URLError:
+ if exc_type != URLError:
result = self._run_module_with_fail_json(idrac_default_args)
assert result['failed'] is True
else:
result = self._run_module(idrac_default_args)
assert 'msg' in result
+
+ def test_main_error(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "absent", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ obj = MagicMock()
+ obj.json_data = {"error": {"message": "Some Error Occured"}}
+ mocker.patch(MODULE_PATH + "idrac_user.remove_user_account", return_value=(obj, "error"))
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ assert result['msg'] == "Some Error Occured"
+
+ def test_main_error_oem(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "absent", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ obj = MagicMock()
+ obj.json_data = {"Oem": {"Dell": {"Message": "Unable to complete application of configuration profile values."}}}
+ mocker.patch(MODULE_PATH + "idrac_user.remove_user_account", return_value=(obj, "error"))
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ assert result['msg'] == "Unable to complete application of configuration profile values."
+
+ def test_main_create_oem(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ obj = MagicMock()
+ obj.json_data = {"Oem": {"Dell": {"Message": "This Message Does Not Exists"}}}
+ mocker.patch(MODULE_PATH + "idrac_user.create_or_modify_account", return_value=(obj, "created"))
+ # with pytest.raises(Exception) as exc:
+ result = self._run_module(idrac_default_args)
+ assert result['changed'] is True
+ assert result['msg'] == "created"
+
+ def test_main_state_some(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "some", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "privilege": "Administrator", "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ assert result['msg'] == "value of state must be one of: present, absent, got: some"
+
+ def test_validate_input(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ idrac_default_args.update({"state": "present", "new_user_name": "new_user_name",
+ "user_name": "test", "user_password": "password",
+ "custom_privilege": 512, "ipmi_lan_privilege": "Administrator",
+ "ipmi_serial_privilege": "Administrator", "enable": True,
+ "sol_enable": True, "protocol_enable": True,
+ "authentication_protocol": "SHA", "privacy_protocol": "AES"})
+ f_module = self.get_module_mock(params=idrac_default_args, check_mode=False)
+ with pytest.raises(Exception) as err:
+ self.module.validate_input(f_module)
+ assert err.value.args[0] == "custom_privilege value should be from 0 to 511."
+
+ idrac_default_args.update({"state": "absent"})
+ ret = self.module.validate_input(f_module)
+ assert ret is None
+
+ def test_compare_payload(self, idrac_connection_user_mock, idrac_default_args, mocker):
+ json_payload = {"Users.1#Password": "MyDummyPassword"}
+ is_change_required = self.module.compare_payload(json_payload, None)
+ assert is_change_required is True
+
+ json_payload = {"Users.1#Privilege": "123"}
+ idrac_attr = {"Users.1#Privilege": "123"}
+ is_change_required = self.module.compare_payload(json_payload, idrac_attr)
+ assert is_change_required is False
+
+ json_payload = {"Users.1#Privilege": "123"}
+ idrac_attr = {"Users.1#Privilege": "124"}
+ is_change_required = self.module.compare_payload(json_payload, idrac_attr)
+ assert is_change_required is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user_info.py
new file mode 100644
index 000000000..82121c2d9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_user_info.py
@@ -0,0 +1,231 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import json
+from ansible_collections.dellemc.openmanage.plugins.modules import idrac_user_info
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from mock import MagicMock
+from ansible.module_utils._text import to_text
+from io import StringIO
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+HTTPS_ADDRESS = 'https://testhost.com'
+
+
+class TestIDRACUserInfo(FakeAnsibleModule):
+ module = idrac_user_info
+
+ @pytest.fixture
+ def idrac_user_info_mock(self):
+ idrac_obj = MagicMock()
+ return idrac_obj
+
+ @pytest.fixture
+ def idrac_connection_user_info_mock(self, mocker, idrac_user_info_mock):
+ idrac_conn_mock = mocker.patch(MODULE_PATH + 'idrac_user_info.iDRACRedfishAPI',
+ return_value=idrac_user_info_mock)
+ idrac_conn_mock.return_value.__enter__.return_value = idrac_user_info_mock
+ return idrac_conn_mock
+
+ def test_fetch_all_accounts_success_case(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
+ obj = MagicMock()
+ obj.json_data = {"Members": [
+ {"UserName": "test", "Oem": {"Dell": "test"}}]}
+ mocker.patch(MODULE_PATH + "idrac_user_info.iDRACRedfishAPI.invoke_request",
+ return_value=(obj))
+ resp = self.module.fetch_all_accounts(idrac_connection_user_info_mock, "/acounts/accdetails")
+ assert resp[0].get("UserName") == "test"
+
+ def test_get_user_id_accounts(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
+ json_str = to_text(json.dumps({"data": "out"}))
+ idrac_default_args.update({"username": "test"})
+ obj = MagicMock()
+ obj.json_data = {"UserName": "test"}
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ mocker.patch(MODULE_PATH + "idrac_user_info.iDRACRedfishAPI.invoke_request",
+ return_value=(obj))
+ mocker.patch(MODULE_PATH + "idrac_user_info.strip_substr_dict",
+ return_value=({"UserName": "test"}))
+ resp = self.module.get_user_id_accounts(
+ idrac_connection_user_info_mock, f_module, "/acounts/accdetails", 1)
+ assert resp.get("UserName") == "test"
+
+ obj = MagicMock()
+ obj.json_data = {"UserName": "test", "Oem": {"Dell": "test"}}
+ mocker.patch(MODULE_PATH + "idrac_user_info.iDRACRedfishAPI.invoke_request",
+ return_value=(obj))
+ mocker.patch(MODULE_PATH + "idrac_user_info.strip_substr_dict",
+ return_value=({"UserName": "test", "Oem": {"Dell": "test"}}))
+ resp = self.module.get_user_id_accounts(
+ idrac_connection_user_info_mock, f_module, "/acounts/accdetails", 1)
+ assert resp.get("UserName") == "test"
+
+ idrac_connection_user_info_mock.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ with pytest.raises(Exception) as exc:
+ self.module.get_user_id_accounts(
+ idrac_connection_user_info_mock, f_module, "/acounts/accdetails", 1)
+ assert exc.value.args[0] == "'user_id' is not valid."
+
+ def test_get_user_name_accounts(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
+ idrac_default_args.update({"username": "test"})
+ mocker.patch(MODULE_PATH + "idrac_user_info.fetch_all_accounts",
+ return_value=([{"UserName": "test"}]))
+ mocker.patch(MODULE_PATH + "idrac_user_info.strip_substr_dict",
+ return_value=({"UserName": "test"}))
+ f_module = self.get_module_mock(
+ params=idrac_default_args, check_mode=False)
+ resp = self.module.get_user_name_accounts(
+ idrac_connection_user_info_mock, f_module, "/acounts/accdetails", "test")
+ assert resp.get("UserName") == "test"
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.strip_substr_dict",
+ return_value=({"UserName": "test", "Oem": {"Dell": "test"}}))
+ resp = self.module.get_user_name_accounts(
+ idrac_connection_user_info_mock, f_module, "/acounts/accdetails", "test")
+ assert resp.get("UserName") == "test"
+
+ with pytest.raises(Exception) as exc:
+ self.module.get_user_name_accounts(
+ idrac_connection_user_info_mock, f_module, "/acounts/accdetails", "test1")
+ assert exc.value.args[0] == "'username' is not valid."
+
+ def test_get_all_accounts_single(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
+ idrac_default_args.update({"username": "test"})
+ mocker.patch(MODULE_PATH + "idrac_user_info.fetch_all_accounts",
+ return_value=([{"UserName": "test", "Oem": {"Dell": "test"}}]))
+ mocker.patch(MODULE_PATH + "idrac_user_info.strip_substr_dict",
+ return_value=({"UserName": "test", "Oem": {"Dell": "test"}}))
+ resp = self.module.get_all_accounts(
+ idrac_connection_user_info_mock, "/acounts/accdetails")
+ assert resp[0].get("UserName") == "test"
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.fetch_all_accounts",
+ return_value=([{"UserName": ""}]))
+ resp = self.module.get_all_accounts(
+ idrac_connection_user_info_mock, "/acounts/accdetails")
+ assert resp == []
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.fetch_all_accounts",
+ return_value=([]))
+ resp = self.module.get_all_accounts(
+ idrac_connection_user_info_mock, "/acounts/accdetails")
+ assert resp == []
+
+ def test_get_all_accounts_multiple(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
+ def strip_substr_dict_mock(acc):
+ if acc.get("UserName") == "test":
+ return {"UserName": "test"}
+ else:
+ return {"UserName": "test1"}
+        mocker.patch(MODULE_PATH + "idrac_user_info.strip_substr_dict",
+                     side_effect=strip_substr_dict_mock)
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.fetch_all_accounts",
+ return_value=([{"UserName": "test"}, {"UserName": "test1"}]))
+ resp = self.module.get_all_accounts(
+ idrac_connection_user_info_mock, "/acounts/accdetails")
+ assert resp[0].get("UserName") == "test"
+ assert resp[1].get("UserName") == "test1"
+
+ def test_get_accounts_uri(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
+ acc_service_uri = MagicMock()
+ acc_service_uri.json_data = {"AccountService": {
+ "@odata.id": "/account"}, "Accounts": {"@odata.id": "/account/accountdetails"}}
+ acc_service = MagicMock()
+ acc_service.json_data = {"Accounts": {
+ "@odata.id": "/account/accountdetails"}}
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.iDRACRedfishAPI.invoke_request",
+ return_value=(acc_service_uri))
+ resp = self.module.get_accounts_uri(idrac_connection_user_info_mock)
+ assert resp == "/account/accountdetails"
+
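+        # When the AccountService lookup raises an HTTPError, the module should fall back
+        # to the default Accounts URI.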
+ json_str = to_text(json.dumps({"data": "out"}))
+ idrac_connection_user_info_mock.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+
+ resp = self.module.get_accounts_uri(idrac_connection_user_info_mock)
+ assert resp == "/redfish/v1/AccountService/Accounts"
+
+ def test_user_info_main_success_case_all(self, idrac_default_args, idrac_connection_user_info_mock,
+ idrac_user_info_mock, mocker):
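+        # Covers the three retrieval paths in turn: by username, by user_id, and all
+        # accounts, followed by the empty-result failure case.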
+ idrac_default_args.update({"username": "test"})
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_accounts_uri",
+ return_value=("/acounts/accdetails"))
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_user_name_accounts",
+ return_value=({"UserName": "test"}))
+ idrac_user_info_mock.status_code = 200
+ idrac_user_info_mock.success = True
+ resp = self._run_module(idrac_default_args)
+ assert resp['msg'] == "Successfully retrieved the user information."
+ assert resp['user_info'][0].get("UserName") == "test"
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_user_id_accounts",
+ return_value=({"UserName": "test"}))
+ idrac_default_args.update({"user_id": "1234"})
+ idrac_default_args.pop("username")
+ resp = self._run_module(idrac_default_args)
+ assert resp['msg'] == "Successfully retrieved the user information."
+ assert resp['user_info'][0].get("UserName") == "test"
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_all_accounts",
+ return_value=([{"UserName": "test"}]))
+ idrac_default_args.pop("user_id")
+ resp = self._run_module(idrac_default_args)
+ assert resp['msg'] == "Successfully retrieved the information of 1 user(s)."
+ assert resp['user_info'][0].get("UserName") == "test"
+
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_all_accounts",
+ return_value=([]))
+ resp = self._run_module_with_fail_json(idrac_default_args)
+ assert resp['failed'] is True
+ assert resp['msg'] == "Unable to retrieve the user information."
+
+ @pytest.mark.parametrize("exc_type",
+ [URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
+ def test_idrac_user_info_main_exception_handling_case(self, exc_type, mocker, idrac_default_args,
+ idrac_connection_user_info_mock, idrac_user_info_mock):
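+        # HTTPError and SSLValidationError need the full urllib constructor arguments;
+        # URLError is expected to exit without calling fail_json, so only 'msg' is checked.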
+ idrac_user_info_mock.status_code = 400
+ idrac_user_info_mock.success = False
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_accounts_uri",
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH + "idrac_user_info.get_accounts_uri",
+ side_effect=exc_type(HTTPS_ADDRESS, 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str)))
+ if exc_type != URLError:
+ result = self._run_module_with_fail_json(idrac_default_args)
+ assert result['failed'] is True
+ else:
+ result = self._run_module(idrac_default_args)
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py
index 94e620f3e..5c7c32b44 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_idrac_virtual_media.py
@@ -15,15 +15,15 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import idrac_virtual_media
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
-from mock import PropertyMock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from io import StringIO
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+ISO_PATH = "//XX.XX.XX.XX/path/file.iso"
+ISO_IMAGE_PATH = "//XX.XX.XX.XX/path/image_file.iso"
@pytest.fixture
@@ -40,17 +40,17 @@ class TestVirtualMedia(FakeAnsibleModule):
def test_validate_params(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args):
idrac_default_args.update(
- {"virtual_media": [{"index": 1, "insert": True, "image": "//192.168.0.1/path/image.iso"}]})
+ {"virtual_media": [{"index": 1, "insert": True, "image": "//XX.XX.XX.XX/path/image.iso"}]})
f_module = self.get_module_mock(params=idrac_default_args)
with pytest.raises(Exception) as err:
self.module._validate_params(f_module, {"index": 1, "insert": True,
- "image": "//192.168.0.1/path/image.iso"}, "140")
+ "image": "//XX.XX.XX.XX/path/image.iso"}, "140")
assert err.value.args[0] == "CIFS share required username and password."
idrac_default_args.update({"virtual_media": [{"index": 1, "insert": True, "username": "user", "password": "pwd",
- "image": "\\\\192.168.0.1\\path\\image.iso"}]})
+ "image": "\\\\XX.XX.XX.XX\\path\\image.iso"}]})
f_module = self.get_module_mock(params=idrac_default_args)
result = self.module._validate_params(f_module, {"password": "pwd", "insert": True, "username": "usr",
- "image": "\\\\192.168.0.1\\path\\image.iso", "index": 1},
+ "image": "\\\\XX.XX.XX.XX\\path\\image.iso", "index": 1},
"141")
assert result is None
@@ -59,7 +59,7 @@ class TestVirtualMedia(FakeAnsibleModule):
"RedfishVersion": "1.13.1",
"VirtualMedia": {"@odata.id": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia"},
"Members": [{"Inserted": False, "Image": None},
- {"Inserted": True, "Image": "//192.168.0.1/file_path/file.iso"}]
+ {"Inserted": True, "Image": "//XX.XX.XX.XX/file_path/file.iso"}]
}
resp, vr_id, rd_version = self.module.get_virtual_media_info(virtual_media_conn_mock)
assert vr_id == "system"
@@ -68,17 +68,17 @@ class TestVirtualMedia(FakeAnsibleModule):
assert vr_id == "manager"
def test_get_payload_data(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args):
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}]})
- each = {"insert": True, "image": "//192.168.0.1/path/file.iso", "index": 1, "media_type": "CD"}
- vr_member = [{"Inserted": True, "Image": "//192.168.0.1/path/image_file.iso",
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH}]})
+ each = {"insert": True, "image": ISO_PATH, "index": 1, "media_type": "CD"}
+ vr_member = [{"Inserted": True, "Image": ISO_IMAGE_PATH,
"UserName": "username", "Password": "password", "Id": "CD", "MediaTypes": ["CD", "DVD"]}]
is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager")
assert is_change is True
- assert input_vr_mem == {'Inserted': True, 'Image': '//192.168.0.1/path/file.iso'}
- assert vr_mem == {'Inserted': True, 'Image': '//192.168.0.1/path/image_file.iso', 'UserName': 'username',
+ assert input_vr_mem == {'Inserted': True, 'Image': '//XX.XX.XX.XX/path/file.iso'}
+ assert vr_mem == {'Inserted': True, 'Image': '//XX.XX.XX.XX/path/image_file.iso', 'UserName': 'username',
'Password': 'password', 'Id': 'CD', 'MediaTypes': ['CD', 'DVD']}
each.update({"username": "user_name", "password": "password", "domain": "domain",
- "image": "192.168.0.3:/file_path/image.iso"})
+ "image": "XX.XX.XX.XX:/file_path/image.iso"})
is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager")
assert is_change is True
each.update({"media_type": "USBStick"})
@@ -90,25 +90,25 @@ class TestVirtualMedia(FakeAnsibleModule):
is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "system")
assert is_change is True
each.update({"username": "user_name", "password": "password", "domain": "domain", "media_type": "CD",
- "image": "192.168.0.3:/file_path/image.img", "insert": True})
+ "image": "XX.XX.XX.XX:/file_path/image.img", "insert": True})
is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager")
assert unsup_media == 1
each.update({"username": "user_name", "password": "password", "domain": "domain", "media_type": "DVD",
- "image": "192.168.0.3:/file_path/image.img", "insert": True})
+ "image": "XX.XX.XX.XX:/file_path/image.img", "insert": True})
is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager")
assert unsup_media == 1
def test_domain_name(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args):
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}]})
- each = {"insert": True, "image": "//192.168.0.1/path/file.iso", "index": 1, "media_type": "CD",
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH}]})
+ each = {"insert": True, "image": ISO_PATH, "index": 1, "media_type": "CD",
"domain": "domain", "username": "user", "password": "pwd"}
- vr_member = [{"Inserted": True, "Image": "//192.168.0.1/path/image_file.iso", "domain": "domain",
+ vr_member = [{"Inserted": True, "Image": ISO_IMAGE_PATH, "domain": "domain",
"UserName": "username", "Password": "password", "Id": "CD", "MediaTypes": ["CD", "DVD"]}]
is_change, input_vr_mem, vr_mem, unsup_media = self.module.get_payload_data(each, vr_member, "manager")
assert is_change is True
def test_virtual_media_operation(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker):
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}],
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH}],
"force": True})
f_module = self.get_module_mock(params=idrac_default_args)
mocker.patch(MODULE_PATH + 'idrac_virtual_media.time.sleep', return_value=None)
@@ -119,8 +119,8 @@ class TestVirtualMedia(FakeAnsibleModule):
"#VirtualMedia.InsertMedia": {
"target": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia/1/Actions/VirtualMedia.InsertMedia"}
}},
- "payload": {"Inserted": True, "Image": "http://192.168.0.1/file_path/file.iso"},
- "input": {"index": 1, "insert": True, "image": "//192.168.0.1/path/file.iso", "force": True}
+ "payload": {"Inserted": True, "Image": "https://XX.XX.XX.XX/file_path/file.iso"},
+ "input": {"index": 1, "insert": True, "image": ISO_PATH, "force": True}
}]
result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "manager")
assert result == []
@@ -138,7 +138,7 @@ class TestVirtualMedia(FakeAnsibleModule):
@pytest.mark.parametrize("exc_type", [HTTPError])
def test_virtual_media_operation_http(self, virtual_media_conn_mock, redfish_response_mock,
idrac_default_args, mocker, exc_type):
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}],
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH}],
"force": True})
f_module = self.get_module_mock(params=idrac_default_args)
mocker.patch(MODULE_PATH + 'idrac_virtual_media.time.sleep', return_value=None)
@@ -149,8 +149,8 @@ class TestVirtualMedia(FakeAnsibleModule):
"#VirtualMedia.InsertMedia": {
"target": "/redfish/v1/Systems/System.Embedded.1/VirtualMedia/CD/Actions/VirtualMedia.InsertMedia"}
}},
- "payload": {"Inserted": True, "Image": "http://192.168.0.1/file_path/file.iso"},
- "input": {"index": 1, "insert": True, "image": "//192.168.0.1/path/file.iso", "force": True}
+ "payload": {"Inserted": True, "Image": "https://XX.XX.XX.XX/file_path/file.iso"},
+ "input": {"index": 1, "insert": True, "image": ISO_PATH, "force": True}
}]
if exc_type == HTTPError:
mocker.patch(MODULE_PATH + 'idrac_virtual_media.json.load', return_value={
@@ -159,25 +159,25 @@ class TestVirtualMedia(FakeAnsibleModule):
json_str = to_text(json.dumps({"data": "out"}))
mocker.patch(
MODULE_PATH + 'idrac_virtual_media.time.sleep',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self.module.virtual_media_operation(virtual_media_conn_mock, f_module, payload, "system")
assert result == [{'@Message.ExtendedInfo': [{'MessageId': 'VRM0012'}]}]
def test_virtual_media(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker):
- vr_member = [{"Inserted": True, "Image": "//192.168.0.1/path/image_file.iso",
+ vr_member = [{"Inserted": True, "Image": ISO_IMAGE_PATH,
"UserName": "username", "Password": "password", "Id": "CD", "MediaTypes": ["CD", "DVD"]}]
mocker.patch(MODULE_PATH + 'idrac_virtual_media.virtual_media_operation', return_value=[])
mocker.patch(MODULE_PATH + 'idrac_virtual_media._validate_params', return_value=None)
mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_payload_data', return_value=(True, {}, {}, 1))
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso"}],
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH}],
"force": True})
f_module = self.get_module_mock(params=idrac_default_args)
with pytest.raises(Exception) as ex:
self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141")
assert ex.value.args[0] == "Unable to complete the virtual media operation because unsupported " \
"media type provided for index 1"
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.img"}],
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//XX.XX.XX.XX/path/file.img"}],
"force": True})
f_module = self.get_module_mock(params=idrac_default_args)
with pytest.raises(Exception) as ex:
@@ -188,7 +188,7 @@ class TestVirtualMedia(FakeAnsibleModule):
self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "system", "141")
assert ex.value.args[0] == "Unable to complete the virtual media operation because " \
"unsupported media type provided for index 1"
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso",
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH,
"index": 1, "media_type": "CD"}], "force": True})
f_module = self.get_module_mock(params=idrac_default_args)
mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_payload_data', return_value=(True, {}, {}, None))
@@ -202,7 +202,7 @@ class TestVirtualMedia(FakeAnsibleModule):
with pytest.raises(Exception) as ex:
self.module.virtual_media(virtual_media_conn_mock, f_module, vr_member, "manager", "141")
assert ex.value.args[0] == "Changes found to be applied."
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "//192.168.0.1/path/file.iso",
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": ISO_PATH,
"index": 1, "media_type": "CD"}], "force": False})
f_module = self.get_module_mock(params=idrac_default_args)
f_module.check_mode = True
@@ -213,8 +213,8 @@ class TestVirtualMedia(FakeAnsibleModule):
def test_main_success(self, virtual_media_conn_mock, redfish_response_mock, idrac_default_args, mocker):
idrac_default_args.update({"virtual_media": [
- {"insert": True, "image": "http://192.168.0.1/path/file.iso"},
- {"insert": True, "image": "192.168.0.2:/file/file.iso"}], "force": True})
+ {"insert": True, "image": "https://XX.XX.XX.XX/path/file.iso"},
+ {"insert": True, "image": "YY.YY.YY.YY:/file/file.iso"}], "force": True})
mocker.patch(MODULE_PATH + 'idrac_virtual_media.get_virtual_media_info',
return_value=([{"Insert": True}, {"Insert": True}], "manager", "141"))
with pytest.raises(Exception) as ex:
@@ -222,7 +222,7 @@ class TestVirtualMedia(FakeAnsibleModule):
assert ex.value.args[0]["msg"] == "Unable to complete the operation because the virtual media settings " \
"provided exceeded the maximum limit."
mocker.patch(MODULE_PATH + 'idrac_virtual_media.virtual_media', return_value=[])
- idrac_default_args.update({"virtual_media": [{"insert": True, "image": "http://192.168.0.1/path/file.iso"}],
+ idrac_default_args.update({"virtual_media": [{"insert": True, "image": "https://XX.XX.XX.XX/path/file.iso"}],
"force": True})
result = self._run_module(idrac_default_args)
assert result == {'changed': True, 'msg': 'Successfully performed the virtual media operation.'}
@@ -241,7 +241,7 @@ class TestVirtualMedia(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'idrac_virtual_media.get_virtual_media_info',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
if not exc_type == URLError:
result = self._run_module_with_fail_json(idrac_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py
index 1722a3daa..5f141775a 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_active_directory.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.0.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -97,14 +97,14 @@ class TestOmeAD(FakeAnsibleModule):
@pytest.mark.parametrize("params", [{
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"],
"group_domain": "domain.com", "name": "domdev"},
- "get_ad": ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
+ "get_ad": ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["XX.XX.XX.XX"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120,
"ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": MODIFY_SUCCESS}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"],
"group_domain": "domain.com", "name": "domdev", "test_connection": True,
"domain_username": "user", "domain_password": "passwd"}, "get_ad":
- ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"], "DnsServer": [],
+ ({"Name": "ad_test", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["XX.XX.XX.XX"], "DnsServer": [],
"GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120, "ServerPort": 3269,
"CertificateValidation": False}, 1),
"msg": "{0}{1}".format(TEST_CONNECTION_SUCCESS, MODIFY_SUCCESS)},
@@ -116,7 +116,7 @@ class TestOmeAD(FakeAnsibleModule):
"msg": NO_CHANGES_MSG}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"],
"group_domain": "dellemcdomain.com", "name": "domdev"},
- "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
+ "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["XX.XX.XX.XX"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120,
"SearchTimeOut": 120, "ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": CHANGES_FOUND, "check_mode": True}
@@ -134,7 +134,7 @@ class TestOmeAD(FakeAnsibleModule):
@pytest.mark.parametrize("params", [{
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"],
"group_domain": "domain.com", "name": "domdev", "state": "absent"},
- "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
+ "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["XX.XX.XX.XX"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120, "SearchTimeOut": 120,
"ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": DELETE_SUCCESS},
@@ -143,7 +143,7 @@ class TestOmeAD(FakeAnsibleModule):
"msg": NO_CHANGES_MSG}, {
"module_args": {"domain_controller_lookup": "MANUAL", "domain_server": ["192.96.20.181"],
"group_domain": "dellemcdomain.com", "name": "domdev", "state": "absent"},
- "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["192.168.20.181"],
+ "get_ad": ({"Name": "domdev", "Id": 21789, "ServerType": "MANUAL", "ServerName": ["XX.XX.XX.XX"],
"DnsServer": [], "GroupDomain": "dellemcdomain.com", "NetworkTimeOut": 120,
"SearchTimeOut": 120, "ServerPort": 3269, "CertificateValidation": False}, 1),
"msg": CHANGES_FOUND, "check_mode": True}
@@ -215,7 +215,7 @@ class TestOmeAD(FakeAnsibleModule):
ome_connection_mock_obj = rest_obj_class_mock.return_value.__enter__.return_value
if params.get("is_http"):
json_str = to_text(json.dumps(params['error_info']))
- ome_connection_mock_obj.invoke_request.side_effect = HTTPError('http://testdellemcomead.com', 404,
+ ome_connection_mock_obj.invoke_request.side_effect = HTTPError('https://testdellemcomead.com', 404,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str))
@@ -242,7 +242,7 @@ class TestOmeAD(FakeAnsibleModule):
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
- mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ mocker.patch(MODULE_PATH + 'get_ad', side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies.py
new file mode 100644
index 000000000..1bf0c2e7c
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies.py
@@ -0,0 +1,1578 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+import os
+import tempfile
+from datetime import datetime, timedelta
+from io import StringIO
+
+import pytest
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_alert_policies
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_alert_policies.'
+
+SUCCESS_MSG = "Successfully {0}d the alert policy."
+NO_CHANGES_MSG = "No changes found to be applied."
+CHANGES_MSG = "Changes found to be applied."
+INVALID_TIME = "The specified {0} date or {0} time `{1}` to schedule the policy is not valid. Enter a valid date and time."
+END_START_TIME = "The end time `{0}` to schedule the policy must be greater than the start time `{1}`."
+CATEGORY_FETCH_FAILED = "Unable to retrieve the category details from OpenManage Enterprise."
+INVALID_TARGETS = "Specify target devices to apply the alert policy."
+INVALID_CATEGORY_MESSAGE = "Specify categories or message to create the alert policy."
+INVALID_SCHEDULE = "Specify a date and time to schedule the alert policy."
+INVALID_ACTIONS = "Specify alert actions for the alert policy."
+INVALID_SEVERITY = "Specify the severity to create the alert policy."
+MULTIPLE_POLICIES = "Unable to update the alert policies because the number of alert policies entered are more than " \
+ "one. The update policy operation supports only one alert policy at a time."
+DISABLED_ACTION = "Action {0} is disabled. Enable it before applying to the alert policy."
+ACTION_INVALID_PARAM = "The Action {0} attribute contains invalid parameter name {1}. The valid values are {2}."
+ACTION_INVALID_VALUE = "The Action {0} attribute contains invalid value for {1} for parameter name {2}. The valid " \
+ "values are {3}."
+ACTION_DIS_EXIST = "Action {0} does not exist."
+SUBCAT_IN_CATEGORY = "The subcategory {0} does not exist in the category {1}."
+CATEGORY_IN_CATALOG = "The category {0} does not exist in the catalog {1}."
+OME_DATA_MSG = "The {0} with the following {1} do not exist: {2}."
+CATALOG_DIS_EXIST = "The catalog {0} does not exist."
+CSV_PATH = "The message file {0} does not exist."
+DEFAULT_POLICY_DELETE = "The following default policies cannot be deleted: {0}."
+POLICY_ENABLE_MISSING = "Unable to {0} the alert policies {1} because the policy names are invalid. Enter the valid " \
+ "alert policy names and retry the operation."
+NO_POLICY_EXIST = "The alert policy does not exist."
+SEPARATOR = ", "
+
+
+@pytest.fixture
+def ome_connection_mock_for_alert_policies(mocker, ome_response_mock):
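+    # Replace RestOME with a mock whose invoke_request returns the canned ome_response_mock.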
+ connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeAlertPolicies(FakeAnsibleModule):
+ module = ome_alert_policies
+
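+    # Enable/disable and delete scenarios: json_data is the mocked OME response, mparams
+    # the module arguments, and message the expected result string.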
+ @pytest.mark.parametrize("params", [
+ {"message": SUCCESS_MSG.format("enable"), "success": True,
+ "json_data": {"value": [{'Name': "new alert policy", "Id": 12, "Enabled": False}]},
+ "mparams": {"name": "new alert policy", "enable": True}},
+ {"message": CHANGES_MSG, "success": True, "check_mode": True,
+ "json_data": {"value": [{'Name': "new alert policy", "Id": 12, "Enabled": False}]},
+ "mparams": {"name": "new alert policy", "enable": True}},
+ {"message": MULTIPLE_POLICIES, "success": True,
+ "json_data": {"value": [{'Name': "alert policy1", "Id": 12, "Enabled": True},
+ {'Name': "alert policy2", "Id": 13, "Enabled": True}]},
+ "mparams": {"name": ["alert policy1", "alert policy2"], "enable": False, "description": 'Update case failed'}},
+ {"message": POLICY_ENABLE_MISSING.format("disable", "alert policy3"), "success": True,
+ "json_data": {"value": [{'Name': "alert policy1", "Id": 12, "Enabled": True},
+ {'Name': "alert policy2", "Id": 13, "Enabled": True}]},
+ "mparams": {"name": ["alert policy3", "alert policy2"], "enable": False}},
+ {"message": NO_CHANGES_MSG, "success": True, "check_mode": True,
+ "json_data": {"value": [{'Name': "new alert policy", "Id": 12, "Enabled": False}]},
+ "mparams": {"name": "new alert policy", "enable": False}},
+ {"message": SUCCESS_MSG.format("delete"), "success": True,
+ "json_data": {"report_list": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}],
+ "value": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}]},
+ "mparams": {"name": "new alert policy", "state": "absent"}},
+ {"message": CHANGES_MSG, "success": True, "check_mode": True,
+ "json_data": {"report_list": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}],
+ "value": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}]},
+ "mparams": {"name": "new alert policy", "state": "absent"}},
+ {"message": DEFAULT_POLICY_DELETE.format("new alert policy"), "success": True,
+ "json_data": {"report_list": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}],
+ "value": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": True}]},
+ "mparams": {"name": "new alert policy", "state": "absent"}},
+ {"message": NO_POLICY_EXIST, "success": True, "check_mode": True,
+ "json_data": {"report_list": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}],
+ "value": [{'Name': "new alert policy 1", "Id": 12, "DefaultPolicy": False}]},
+ "mparams": {"name": "new alert policy", "state": "absent"}},
+ {"message": NO_POLICY_EXIST, "success": True,
+ "json_data": {"report_list": [{'Name': "new alert policy", "Id": 12, "DefaultPolicy": False}],
+ "value": [{'Name': "new alert policy 1", "Id": 12, "DefaultPolicy": False}]},
+ "mparams": {"name": "new alert policy", "state": "absent"}},
+ ])
+ def test_ome_alert_policies_enable_delete(self, params, ome_connection_mock_for_alert_policies,
+ ome_response_mock, ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_alert_policies.get_all_items_with_pagination.return_value = params[
+ 'json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
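+    # Trap and syslog destinations shared by the action parameter payloads below.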
+ trap_ip1 = "traphost1:162"
+ trap_ip2 = "traphost2:162"
+ trap_ip3 = "traphost3:514"
+ actions = [
+ {
+ "action_name": "Trap",
+ "parameters": [
+ {
+ "name": trap_ip2,
+ "value": "True"
+ }
+ ]
+ },
+ {
+ "action_name": "Mobile",
+ "parameters": []
+ },
+ {
+ "action_name": "Email",
+ "parameters": [
+ {
+ "name": "to",
+ "value": "email2@address.x"
+ },
+ {
+ "name": "from",
+ "value": "emailr@address.y"
+ },
+ {
+ "name": "subject",
+ "value": "test subject"
+ },
+ {
+ "name": "message",
+ "value": "test message"
+ }
+ ]
+ },
+ {
+ "action_name": "SMS",
+ "parameters": [
+ {
+ "name": "to",
+ "value": "1234567890"
+ }
+ ]
+ }
+ ]
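+    # Module arguments reused by the create and update scenarios below.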
+ create_input = {
+ "actions": actions,
+ "date_and_time": {
+ "date_from": (datetime.now() + timedelta(days=2)).strftime("%Y-%m-%d"),
+ "date_to": (datetime.now() + timedelta(days=3)).strftime("%Y-%m-%d"),
+ "days": [
+ "sunday",
+ "monday"
+ ],
+ "time_from": "11:00",
+ "time_to": "12:00",
+ "time_interval": True
+ },
+ "description": "Description of Alert Policy One",
+ "device_group": [
+ "AX",
+ "Linux Servers"
+ ],
+ "enable": True,
+ "message_ids": [
+ "AMP400",
+ "CTL201",
+ "AMP401"
+ ],
+ "name": [
+ "Alert Policy One"
+ ],
+ "severity": [
+ "unknown"
+ ],
+ "state": "present"
+ }
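+    # An existing policy as returned by OME, used to drive the update path.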
+ get_alert_policy = [{
+ "Id": 24792,
+ "Name": "Alert Policy One",
+ "Description": "CREATIOn of Alert Policy One",
+ "Enabled": True,
+ "DefaultPolicy": False,
+ "Editable": True,
+ "Visible": True,
+ "PolicyData": {
+ "Catalogs": [],
+ "Severities": [
+ 1
+ ],
+ "MessageIds": [
+ "'AMP401'",
+ "'AMP400'",
+ "'CTL201'"
+ ],
+ "Devices": [],
+ "DeviceTypes": [],
+ "Groups": [
+ 1011,
+ 1033
+ ],
+ "Schedule": {
+ "StartTime": "2023-10-09 00:00:00.000",
+ "EndTime": "2023-10-11 00:00:00.000",
+ "CronString": "* * * ? * mon,sun *",
+ "Interval": False
+ },
+ "Actions": [
+ {
+ "Id": 499,
+ "Name": "RemoteCommand",
+ "ParameterDetails": [
+ {
+ "Id": 0,
+ "Name": "remotecommandaction1",
+ "Value": "test",
+ "Type": "singleSelect",
+ "TypeParams": [
+ {
+ "Name": "option",
+ "Value": "test"
+ }
+ ]
+ }
+ ],
+ "TemplateId": 111
+ }
+ ],
+ "AllTargets": False,
+ "UndiscoveredTargets": []
+ },
+ "State": True,
+ "Owner": 10078
+ }]
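+    # Available alert actions keyed by name, supplied to the cases below as the existing
+    # action templates.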
+ get_all_actions = {
+ "Email": {
+ "Disabled": False,
+ "Id": 50,
+ "Parameters": {
+ "from": "admin@dell.com",
+ "message": "Event occurred for Device Name",
+ "subject": "Device Name: $name, Device IP Address: $ip, Severity: $severity",
+ "to": ""
+ },
+ "Type": {
+ "from": [],
+ "message": [],
+ "subject": [],
+ "to": []
+ }
+ },
+ "Ignore": {
+ "Disabled": False,
+ "Id": 100,
+ "Parameters": {},
+ "Type": {}
+ },
+ "Mobile": {
+ "Disabled": False,
+ "Id": 112,
+ "Parameters": {},
+ "Type": {}
+ },
+ "PowerControl": {
+ "Disabled": False,
+ "Id": 110,
+ "Parameters": {
+ "powercontrolaction": "poweroff"
+ },
+ "Type": {
+ "powercontrolaction": [
+ "powercycle",
+ "poweroff",
+ "poweron",
+ "gracefulshutdown"
+ ]
+ }
+ },
+ "RemoteCommand": {
+ "Disabled": False,
+ "Id": 111,
+ "Parameters": {
+ "remotecommandaction": "test"
+ },
+ "Type": {
+ "remotecommandaction": [
+ "test",
+ "cmd2 : XX.XX.XX.XX"
+ ]
+ }
+ },
+ "SMS": {
+ "Disabled": False,
+ "Id": 70,
+ "Parameters": {
+ "to": ""
+ },
+ "Type": {
+ "to": []
+ }
+ },
+ "Syslog": {
+ "Disabled": False,
+ "Id": 90,
+ "Parameters": {
+ trap_ip3: "true"
+ },
+ "Type": {
+ trap_ip3: [
+ "true",
+ "false"
+ ]
+ }
+ },
+ "Trap": {
+ "Disabled": False,
+ "Id": 60,
+ "Parameters": {
+ trap_ip1: "true",
+ trap_ip2: "true"
+ },
+ "Type": {
+ trap_ip1: [
+ "true",
+ "false"
+ ],
+ trap_ip2: [
+ "true",
+ "false"
+ ]
+ }
+ }
+ }
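+    # Category tree (catalog -> category -> {id: {subcategory: id}}) used by the
+    # category validation cases.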
+ get_category_data_tree = {
+ 'Application': {
+ 'Audit': {
+ 4: {
+ 'Devices': 90,
+ 'Generic': 10,
+ 'Power Configuration': 151,
+ 'Users': 35
+ }
+ },
+ 'Configuration': {
+ 5: {
+ 'Application': 85,
+ 'Device Warranty': 116,
+ 'Devices': 90,
+ 'Discovery': 36,
+ 'Generic': 10,
+ 'Users': 35
+ }
+ },
+ 'Miscellaneous': {
+ 7: {
+ 'Miscellaneous': 20
+ }
+ },
+ 'Storage': {
+ 2: {
+ 'Devices': 90
+ }
+ },
+ 'System Health': {
+ 1: {
+ 'Devices': 90,
+ 'Health Status of Managed device': 7400,
+ 'Job': 47,
+ 'Metrics': 118,
+ 'Power Configuration': 151
+ }
+ },
+ 'Updates': {
+ 3: {
+ 'Application': 85,
+ 'Firmware': 112
+ }
+ }
+ },
+ 'Dell Storage': {
+ 'Storage': {
+ 2: {
+ 'Other': 7700
+ }
+ },
+ 'System Health': {
+ 1: {
+ 'Other': 7700,
+ 'Storage': 18
+ }
+ }
+ },
+ 'Storage': {'Audit': {
+ 4: {
+ 'Interface': 101
+ }
+ }},
+ 'iDRAC': {
+ 'Audit': {
+ 4: {
+ 'Interface': 101
+ }
+ }
+ },
+ }
+
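+    # Create/update and validation-failure scenarios for state: present.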
+ @pytest.mark.parametrize("params", [
+ {"message": SUCCESS_MSG.format("create"), "success": True,
+ "mparams": create_input,
+ "get_alert_policies": [],
+ "validate_ome_data": (["AMP400", "AMP401", "CTL201"],),
+ "get_severity_payload": {"Severities": ["unknown"]},
+ "get_all_actions": get_all_actions,
+ "json_data": {"value": [{'Name': "new alert policy 1", "Id": 12, "DefaultPolicy": False}]}},
+ {"message": CHANGES_MSG, "success": True,
+ "check_mode": True,
+ "mparams": create_input,
+ "get_alert_policies": [],
+ "validate_ome_data": (["AMP400", "AMP401", "CTL201"],),
+ "get_severity_payload": {"Severities": ["unknown"]},
+ "get_all_actions": get_all_actions,
+ "json_data": {"value": [{'Name': "new alert policy 1", "Id": 12, "DefaultPolicy": False}]}},
+ {"message": SUCCESS_MSG.format("update"), "success": True,
+ "mparams": create_input,
+ "get_alert_policies": get_alert_policy,
+ "validate_ome_data": (["AMP400", "AMP401", "CTL201"],),
+ "get_category_data_tree": get_category_data_tree,
+ "get_all_actions": get_all_actions,
+ "json_data": {
+ "value": [
+ {
+
+ "Id": 1,
+ "Name": "Unknown",
+ "Description": "Unknown"
+ },
+ {
+ "Id": 2,
+ "Name": "Info",
+ "Description": "Info"
+ },
+ {
+ "Id": 4,
+ "Name": "Normal",
+ "Description": "Normal"
+ },
+ {
+ "Id": 8,
+ "Name": "Warning",
+ "Description": "Warning"
+ },
+ {
+ "Id": 16,
+ "Name": "Critical",
+ "Description": "Critical"
+ }
+ ]
+ }},
+ {"message": SUCCESS_MSG.format("update"), "success": True,
+ "mparams": {
+ "actions": [
+ {
+ "action_name": "Ignore",
+ "parameters": []
+ }
+ ],
+ "description": "Description of Alert Policy One",
+ "specific_undiscovered_devices": [
+ "host1",
+ "192.1.2.3-192.1.2.10"
+ ],
+ "enable": True,
+ "category": [
+ {
+ "catalog_category": [
+ {
+ "category_name": "Audit",
+ "sub_category_names": [
+ "Users",
+ "Generic"
+ ]
+ }
+ ],
+ "catalog_name": "Application"
+ },
+ {
+ "catalog_category": [
+ {
+ "category_name": "Storage",
+ "sub_category_names": [
+ "Other"
+ ]
+ }
+ ],
+ "catalog_name": "Dell Storage"
+ },
+ {"catalog_name": "Storage"},
+ {
+ "catalog_category": [
+ {
+ "category_name": "Audit",
+ "sub_category_names": []
+ }
+ ],
+ "catalog_name": "iDRAC"
+ }
+ ],
+ "name": [
+ "Alert Policy One"
+ ],
+ "new_name": "Alert Policy Renamed",
+ "severity": [
+ "unknown"
+ ],
+ "state": "present"
+ },
+ "get_alert_policies": get_alert_policy,
+ "validate_ome_data": (["AMP400", "AMP401", "CTL201"],),
+ "get_category_data_tree": get_category_data_tree,
+ "get_all_actions": get_all_actions,
+ "json_data": {"value": []}
+ },
+ {"message": OME_DATA_MSG.format("groups", "Name", "Linux Servers"), "success": True,
+ "mparams": {
+ "device_group": [
+ "AX",
+ "Linux Servers"
+ ],
+ "state": "present",
+ "name": "Test alert policy"
+ },
+ "get_alert_policies": get_alert_policy,
+ "json_data": {
+ "@odata.count": 102,
+ "@odata.nextLink": "/AlertPolicies",
+ "value": [{"Name": "AX", "Id": 121},
+ {"Name": "Group2", "Id": 122}]}
+ },
+ {"message": OME_DATA_MSG.format("groups", "Name", "Linux Servers"), "success": True,
+ "mparams": {
+ "device_group": [
+ "AX",
+ "Linux Servers"
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "Coverage for filter block in validate_ome_data"
+ },
+ "get_alert_policies": [{
+ "Id": 1234,
+ "Name": "Alert Policy Two",
+ "Description": "Alert Policy Two described",
+ "Enabled": True,
+ "DefaultPolicy": False,
+ "Editable": True,
+ "Visible": True,
+ "PolicyData": {
+ "Catalogs": [],
+ "Severities": [
+ 16
+ ],
+ "MessageIds": [
+ "'AMP403'",
+ "'AMP400'",
+ "'BIOS108'"
+ ],
+ "Devices": [],
+ "DeviceTypes": [],
+ "Groups": [
+ 111,
+ 133
+ ],
+ "Schedule": {
+ "StartTime": "2023-11-09 00:00:00.000",
+ "EndTime": "2023-11-11 00:00:00.000",
+ "CronString": "* * * ? * mon,sun *",
+ "Interval": False
+ },
+ "Actions": [
+ {
+ "Id": 499,
+ "Name": "RemoteCommand",
+ "ParameterDetails": [
+ {
+ "Id": 0,
+ "Name": "remotecommandaction1",
+ "Value": "test",
+ "Type": "singleSelect",
+ "TypeParams": [
+ {
+ "Name": "option",
+ "Value": "test"
+ }
+ ]
+ }
+ ],
+ "TemplateId": 111
+ }
+ ],
+ "AllTargets": False,
+ "UndiscoveredTargets": []
+ },
+ "State": True,
+ "Owner": 10078
+ }],
+ "json_data": {
+ "@odata.count": 300,
+ "value": [{"Name": "AX", "Id": 121},
+ {"Name": "Group2", "Id": 122}]}
+ },
+ {"message": INVALID_CATEGORY_MESSAGE, "success": True,
+ "mparams": {
+ "device_service_tag": [
+ "ABC1234",
+ "SVCTAG1"
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "Coverage for filter block in validate_ome_data"
+ },
+ "get_alert_policies": [],
+ "json_data": {
+ "@odata.count": 300,
+ "value": [{"DeviceServiceTag": "ABC1234", "Id": 121, "Type": 1000},
+ {"DeviceServiceTag": "SVCTAG1", "Id": 122, "Type": 1000}]}
+ },
+ {"message": INVALID_CATEGORY_MESSAGE, "success": True,
+ "mparams": {
+ "all_devices": True,
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "all devices coverage"
+ },
+ "get_alert_policies": [],
+ "json_data": {
+ "@odata.count": 300,
+ "value": [{"DeviceServiceTag": "ABC1234", "Id": 121, "Type": 1000},
+ {"DeviceServiceTag": "SVCTAG1", "Id": 122, "Type": 1000}]}
+ },
+ {"message": INVALID_CATEGORY_MESSAGE, "success": True,
+ "mparams": {
+ "any_undiscovered_devices": True,
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "all devices coverage"
+ },
+ "get_alert_policies": [],
+ "json_data": {
+ "@odata.count": 300,
+ "value": [{"DeviceServiceTag": "ABC1234", "Id": 121, "Type": 1000},
+ {"DeviceServiceTag": "SVCTAG1", "Id": 122, "Type": 1000}]}
+ },
+ {"message": INVALID_CATEGORY_MESSAGE, "success": True,
+ "mparams": {
+ "specific_undiscovered_devices": [
+ "192.1.2.3-192.1.2.10",
+ "hostforpolicy.domain.com"
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "all devices coverage"
+ },
+ "get_alert_policies": [],
+ "json_data": {
+ "@odata.count": 300,
+ "value": [{"DeviceServiceTag": "ABC1234", "Id": 121, "Type": 1000},
+ {"DeviceServiceTag": "SVCTAG1", "Id": 122, "Type": 1000}]}
+ },
+ {"message": INVALID_SCHEDULE, "success": True,
+ "mparams": {
+ "all_devices": True,
+ "message_file": "{0}/{1}".format(tempfile.gettempdir(), "myfile.csv"),
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "all devices coverage"
+ },
+ "get_alert_policies": [],
+ "create_temp_file": "MessageIds\nMSGID1",
+ "json_data": {
+ "@odata.count": 300,
+ "value": [{"MessageId": "MSGID1", "Id": 121, "Type": 1000},
+ {"MessageId": "MSGID2", "Id": 122, "Type": 1000}]}
+ },
+ {"message": INVALID_SCHEDULE, "success": True,
+ "mparams": {
+ "all_devices": True,
+ "category": [
+ {
+ "catalog_category": [
+ {
+ "category_name": "Audit",
+ "sub_category_names": [
+ "Users",
+ "Generic"
+ ]
+ }
+ ],
+ "catalog_name": "Application"
+ },
+ {
+ "catalog_category": [
+ {
+ "category_name": "Storage",
+ "sub_category_names": [
+ "Other"
+ ]
+ }
+ ],
+ "catalog_name": "Dell Storage"
+ }
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_category_data_tree coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "json_data": {
+ "value": [
+ {
+ "Name": "Application",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "Id": 4,
+ "Name": "Audit",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices description"
+ },
+ {
+ "Id": 10,
+ "Name": "Generic",
+ "Description": "Generic description"
+ },
+ {
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration description"
+ },
+ {
+ "Id": 35,
+ "Name": "Users",
+ "Description": "Users description"
+ }
+ ]
+ },
+ {
+ "Id": 7,
+ "Name": "Miscellaneous",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "Id": 20,
+ "Name": "Miscellaneous",
+ "Description": "Miscellaneous description"
+ }
+ ]
+ },
+ {
+ "Id": 2,
+ "Name": "Storage",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices description"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices description"
+ },
+ {
+ "Id": 7400,
+ "Name": "Health Status of Managed device",
+ "Description": "Health Status of Managed device description"
+ },
+ {
+ "Id": 47,
+ "Name": "Job",
+ "Description": "Job description"
+ },
+ {
+ "Id": 118,
+ "Name": "Metrics",
+ "Description": "Metrics description"
+ },
+ {
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration description"
+ }
+ ]
+ },
+ {
+ "Id": 3,
+ "Name": "Updates",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "Id": 85,
+ "Name": "Application",
+ "Description": "Application description"
+ },
+ {
+ "Id": 112,
+ "Name": "Firmware",
+ "Description": "Firmware description"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Name": "Dell Storage",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "Id": 2,
+ "Name": "Storage",
+ "CatalogName": "Dell Storage",
+ "SubCategoryDetails": [
+ {
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other description"
+ }
+ ]
+ },
+ {
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Dell Storage",
+ "SubCategoryDetails": [
+ {
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other description"
+ },
+ {
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage description"
+ }
+ ]
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {"message": INVALID_SEVERITY, "success": True,
+ "mparams": {
+ "actions": actions,
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_all_actions coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "get_schedule_payload": {"StartTime": "", "EndTime": ""},
+ "get_severity_payload": {},
+ "json_data": {
+ "value": [
+ {
+ "Name": "Email",
+ "Description": "Email",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "subject",
+ "Value": "Device Name: $name, Device IP Address: $ip, Severity: $severity",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 2,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 3,
+ "Name": "from",
+ "Value": "admin@dell.com",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ },
+ {
+ "Id": 4,
+ "Name": "message",
+ "Value": "Event occurred for Device Name",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(60)",
+ "Id": 60,
+ "Name": "Trap",
+ "Description": "Trap",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": trap_ip1,
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ },
+ {
+ "Id": 2,
+ "Name": trap_ip2,
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(90)",
+ "Id": 90,
+ "Name": "Syslog",
+ "Description": "Syslog",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": trap_ip3,
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(100)",
+ "Id": 100,
+ "Name": "Ignore",
+ "Description": "Ignore",
+ "Disabled": False,
+ "ParameterDetails": []
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(70)",
+ "Id": 70,
+ "Name": "SMS",
+ "Description": "SMS",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(110)",
+ "Id": 110,
+ "Name": "PowerControl",
+ "Description": "Power Control Action Template",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "powercontrolaction",
+ "Value": "poweroff",
+ "Type": "singleSelect",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "option",
+ "Value": "powercycle"
+ },
+ {
+ "Name": "option",
+ "Value": "poweroff"
+ },
+ {
+ "Name": "option",
+ "Value": "poweron"
+ },
+ {
+ "Name": "option",
+ "Value": "gracefulshutdown"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(111)",
+ "Id": 111,
+ "Name": "RemoteCommand",
+ "Description": "RemoteCommand",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "remotecommandaction",
+ "Value": "test",
+ "Type": "singleSelect",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "option",
+ "Value": "test"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertActionTemplate",
+ "@odata.id": "/api/AlertService/AlertActionTemplates(112)",
+ "Id": 112,
+ "Name": "Mobile",
+ "Description": "Mobile",
+ "Disabled": False,
+ "ParameterDetails": []
+ }
+ ]
+ }
+ },
+ {"message": DISABLED_ACTION.format("SMS"), "success": True,
+ "mparams": {
+ "actions": [{
+ "action_name": "SMS",
+ "parameters": [
+ {
+ "name": "to",
+ "value": "1234567890"
+ }
+ ]
+ }],
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_all_actions coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "get_schedule_payload": {"StartTime": "", "EndTime": ""},
+ "get_severity_payload": {},
+ "json_data": {
+ "value": [
+ {
+ "Id": 70,
+ "Name": "SMS",
+ "Description": "SMS",
+ "Disabled": True,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "to",
+ "Value": "",
+ "Type": "string",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "Id": 112,
+ "Name": "Mobile",
+ "Description": "Mobile",
+ "Disabled": False,
+ "ParameterDetails": []
+ }
+ ]
+ }
+ },
+ {"message": ACTION_INVALID_PARAM.format("Trap", "traphost2:162", "traphost1:162"), "success": True,
+ "mparams": {
+ "actions": [{
+ "action_name": "Trap",
+ "parameters": [
+ {
+ "name": trap_ip2,
+ "value": "True"
+ }
+ ]
+ }],
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_all_actions coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "get_schedule_payload": {"StartTime": "", "EndTime": ""},
+ "get_severity_payload": {},
+ "json_data": {
+ "value": [
+ {
+ "Id": 100,
+ "Name": "SMS",
+ "Description": "Ignore",
+ "Disabled": False,
+ "ParameterDetails": []
+ },
+ {
+ "Id": 60,
+ "Name": "Trap",
+ "Description": "Trap",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": trap_ip1,
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ }
+ ]
+ }
+ },
+ {"message": ACTION_INVALID_VALUE.format("Trap", "Truthy", "traphost1:162", "true, false"), "success": True,
+ "mparams": {
+ "actions": [{
+ "action_name": "Trap",
+ "parameters": [
+ {
+ "name": trap_ip1,
+ "value": "Truthy"
+ }
+ ]
+ }],
+ "all_devices": True,
+ "message_ids": ["AMP01", "CTL201"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "actions invalid coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Devices": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["AMP01", "CTL201"]},
+ "get_schedule_payload": {"StartTime": "2023-11-01 11:00:00.000", "EndTime": "2023-12-01 12:00:00.000"},
+ "get_severity_payload": {},
+ "json_data": {
+ "value": [
+ {
+ "Id": 60,
+ "Name": "Trap",
+ "Description": "Trap",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": trap_ip1,
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ }]
+ }
+ },
+ {"message": ACTION_DIS_EXIST.format("SNMPTrap"), "success": True,
+ "mparams": {
+ "actions": [{
+ "action_name": "SNMPTrap",
+ "parameters": [
+ {
+ "name": trap_ip1,
+ "value": "true"
+ }
+ ]
+ }],
+ "all_devices": True,
+ "message_ids": ["BIOS101", "RND123"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "No existing action coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG23", "MSG46"]},
+ "get_schedule_payload": {"StartTime": "2023-11-01 11:00:00.000", "EndTime": "2023-12-01 12:00:00.000"},
+ "get_severity_payload": {},
+ "json_data": {
+ "value": [
+ {
+ "Id": 60,
+ "Name": "Trap",
+ "Description": "Trap",
+ "Disabled": False,
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": trap_ip1,
+ "Value": "true",
+ "Type": "boolean",
+ "TemplateParameterTypeDetails": []
+ }
+ ]
+ }]
+ }
+ },
+ {"message": INVALID_TIME.format("from", "2023-20-01 11:00:00.000"), "success": True,
+ "mparams": {
+ "date_and_time": {
+ "date_from": "2023-20-01",
+ "date_to": "2023-10-02",
+ "days": [
+ "sunday",
+ "monday"
+ ],
+ "time_from": "11:00",
+ "time_to": "12:00",
+ "time_interval": True
+ },
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": INVALID_TIME.format("from", "2023-10-01 31:00:00.000"), "success": True,
+ "mparams": {
+ "date_and_time": {
+ "date_from": "2023-10-01",
+ "date_to": "2023-10-02",
+ "days": [
+ "sunday",
+ "monday"
+ ],
+ "time_from": "31:00",
+ "time_to": "12:00",
+ "time_interval": True
+ },
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": END_START_TIME.format("2023-10-01 12:00:00", "2023-10-02 11:00:00"), "success": True,
+ "mparams": {
+ "date_and_time": {
+ "date_from": "2023-10-02",
+ "date_to": "2023-10-01",
+ "days": [
+ "sunday",
+ "monday"
+ ],
+ "time_from": "11:00",
+ "time_to": "12:00",
+ "time_interval": True
+ },
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": INVALID_TIME.format("to", "2023-10-32 32:00:00.000"), "success": True,
+ "mparams": {
+ "date_and_time": {
+ "date_from": "2023-10-01",
+ "date_to": "2023-10-32",
+ "days": [
+ "sunday",
+ "monday"
+ ],
+ "time_from": "11:00",
+ "time_to": "32:00",
+ "time_interval": True
+ },
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": INVALID_TARGETS, "success": True,
+ "mparams": {
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "INVALID_TARGETS coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": INVALID_ACTIONS, "success": True,
+ "mparams": {
+ "all_devices": True,
+ "message_ids": ["MSG01", "MSG02"],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage",
+ "date_and_time": {
+ "date_from": "2023-10-01",
+ "days": [
+ "sunday",
+ "monday"
+ ],
+ "time_from": "11:00",
+ "time_to": "12:00",
+ "time_interval": True
+ },
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_or_message": {"MessageIds": ["MSG01", "MSG02"]},
+ "get_actions_payload": {},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": CATEGORY_FETCH_FAILED, "success": True,
+ "mparams": {
+ "all_devices": True,
+ "category": [
+ {
+ "catalog_category": [
+ {
+ "category_name": "Audit",
+ "sub_category_names": [
+ "Users",
+ "Generic"
+ ]
+ }
+ ],
+ "catalog_name": "Application"
+ }
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_data_tree": {},
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": SUBCAT_IN_CATEGORY.format("General", "Audit"), "success": True,
+ "mparams": {
+ "all_devices": True,
+ "category": [
+ {
+ "catalog_category": [
+ {
+ "category_name": "Audit",
+ "sub_category_names": [
+ "General",
+ "Generic"
+ ]
+ }
+ ],
+ "catalog_name": "Application"
+ }
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_data_tree": get_category_data_tree,
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": CATEGORY_IN_CATALOG.format("Audi", "Application"), "success": True,
+ "mparams": {
+ "all_devices": True,
+ "category": [
+ {
+ "catalog_category": [
+ {
+ "category_name": "Audi",
+ "sub_category_names": [
+ "General",
+ "Generic"
+ ]
+ }
+ ],
+ "catalog_name": "Application"
+ }
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_data_tree": get_category_data_tree,
+ "json_data": {
+ "value": []
+ }
+ },
+ {"message": CATALOG_DIS_EXIST.format("Alpha"), "success": True,
+ "mparams": {
+ "all_devices": True,
+ "category": [
+ {
+ "catalog_name": "Alpha"
+ }
+ ],
+ "state": "present",
+ "name": "Test alert policy",
+ "description": "get_schedule coverage"
+ },
+ "get_alert_policies": [],
+ "get_target_payload": {"Groups": [123, 124]},
+ "get_category_data_tree": get_category_data_tree,
+ "json_data": {
+ "value": []
+ }
+ }
+ ])
+ def test_ome_alert_policies_state_present(self, params, ome_connection_mock_for_alert_policies,
+ ome_response_mock, ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_alert_policies.get_all_items_with_pagination.return_value = params[
+ 'json_data']
+ ome_default_args.update(params['mparams'])
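+ # Patch only the helper functions for which this parametrized case supplies a return value.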
+ mocks = ["get_alert_policies", "validate_ome_data", "get_target_payload",
+ "get_all_actions", "get_severity_payload", "get_category_data_tree",
+ "get_schedule_payload", "get_category_or_message"]
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
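+ # Cases that exercise the message_file option write the temporary file before the module runs and remove it afterwards.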
+ if "create_temp_file" in params:
+ with open(f"{params['mparams'].get('message_file')}", 'w', encoding='utf-8') as fp:
+ fp.write(params["create_temp_file"])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ if "create_temp_file" in params:
+ fpath = f"{params['mparams'].get('message_file')}"
+ if os.path.exists(fpath):
+ os.remove(fpath)
+ assert result['msg'] == params['message']
+
+ @pytest.mark.parametrize("exc_type",
+ [SSLValidationError, ConnectionError, TypeError, ValueError, OSError, HTTPError, URLError])
+ def test_ome_alert_policies_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_connection_mock_for_alert_policies,
+ ome_response_mock):
+ json_str = to_text(json.dumps({"data": "out"}))
+ ome_default_args.update({"name": "new alert policy", "enable": True})
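+ # get_alert_policies is patched to raise each exception type; URLError should mark the result unreachable, every other type should mark it failed.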
+ if exc_type == HTTPError:
+ mocker.patch(MODULE_PATH + 'get_alert_policies', side_effect=exc_type(
+ 'https://testhost.com', 401, 'http error message', {
+ "accept-type": "application/json"},
+ StringIO(json_str)))
+ result = self._run_module(ome_default_args)
+ assert result['failed'] is True
+ elif exc_type == URLError:
+ mocker.patch(MODULE_PATH + 'get_alert_policies',
+ side_effect=exc_type("exception message"))
+ result = self._run_module(ome_default_args)
+ assert result['unreachable'] is True
+ else:
+ mocker.patch(MODULE_PATH + 'get_alert_policies',
+ side_effect=exc_type("exception message"))
+ result = self._run_module(ome_default_args)
+ assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_actions_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_actions_info.py
new file mode 100644
index 000000000..a5ebba338
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_actions_info.py
@@ -0,0 +1,93 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import json
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_alert_policies_actions_info
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from io import StringIO
+from ansible.module_utils._text import to_text
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
+
+@pytest.fixture
+def ome_alert_policies_actions_info_mock(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'ome_alert_policies_actions_info.RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeAlertPoliciesActionsInfo(FakeAnsibleModule):
+ module = ome_alert_policies_actions_info
+
+ def test_ome_alert_policies_action_info_main_success_case_all(self,
+ ome_alert_policies_actions_info_mock,
+ ome_default_args, ome_response_mock):
+ ome_response_mock.json_data = {"value": [
+ {
+ "Description": "Email",
+ "Disabled": False,
+ "Id": 50,
+ "Name": "Email",
+ "ParameterDetails": [
+ {
+ "Id": 1,
+ "Name": "subject",
+ "TemplateParameterTypeDetails": [
+ {
+ "Name": "maxLength",
+ "Value": "255"
+ }
+ ],
+ "Type": "string",
+ "Value": "Device Name: $name, Device IP Address: $ip, Severity: $severity"
+ }]}]}
+ ome_response_mock.status_code = 200
+ result = self._run_module(ome_default_args)
+ assert 'actions' in result
+
+ def test_ome_alert_policies_action_info_empty_case(self, ome_default_args,
+ ome_alert_policies_actions_info_mock,
+ ome_response_mock):
+ ome_response_mock.json_data = {"value": []}
+ ome_response_mock.status_code = 200
+ ome_response_mock.success = True
+ result = self._run_module(ome_default_args)
+ assert result['actions'] == []
+
+ @pytest.mark.parametrize("exc_type",
+ [URLError, HTTPError, SSLValidationError, ConnectionError,
+ TypeError, ValueError])
+ def test_ome_alert_policies_action_info_main_exception_handling_case(self, exc_type, ome_default_args,
+ ome_alert_policies_actions_info_mock,
+ ome_response_mock):
+ ome_response_mock.status_code = 400
+ ome_response_mock.success = False
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ ome_alert_policies_actions_info_mock.invoke_request.side_effect = exc_type('test')
+ else:
+ ome_alert_policies_actions_info_mock.invoke_request.side_effect = exc_type('https://testhost.com',
+ 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ result = self._run_module(ome_default_args)
+ if exc_type != URLError:
+ assert result['failed'] is True
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_category_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_category_info.py
new file mode 100644
index 000000000..b2ff4a7d9
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_category_info.py
@@ -0,0 +1,2670 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+from io import StringIO
+
+import pytest
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_alert_policies_category_info
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_alert_policies_category_info.'
+SUCCESS_MSG = "Successfully retrieved alert policies category information."
+
+
+@pytest.fixture
+def ome_connection_mock_for_alert_category(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeAlertCategoryInfo(FakeAnsibleModule):
+ module = ome_alert_policies_category_info
+
+ @pytest.mark.parametrize("params", [
+ {"message": SUCCESS_MSG,
+ "json_data": {
+ "@odata.context": "/api/$metadata#Collection(AlertService.AlertCategories)",
+ "@odata.count": 13,
+ "value": [
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('Application')",
+ "Name": "Application",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 4,
+ "Name": "Audit",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 10,
+ "Name": "Generic",
+ "Description": "Generic"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 35,
+ "Name": "Users",
+ "Description": "Users"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 5,
+ "Name": "Configuration",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 85,
+ "Name": "Application",
+ "Description": "Application"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 116,
+ "Name": "Device Warranty",
+ "Description": "Device Warranty"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 36,
+ "Name": "Discovery",
+ "Description": "Discovery"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 10,
+ "Name": "Generic",
+ "Description": "Generic"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 84,
+ "Name": "Groups",
+ "Description": "Groups"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 47,
+ "Name": "Job",
+ "Description": "Job"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 118,
+ "Name": "Metrics",
+ "Description": "Metrics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 20,
+ "Name": "Miscellaneous",
+ "Description": "Miscellaneous"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 93,
+ "Name": "Monitoring",
+ "Description": "Monitoring"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 31,
+ "Name": "Reports",
+ "Description": "Reports"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 9,
+ "Name": "Security",
+ "Description": "Security"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 88,
+ "Name": "Templates",
+ "Description": "Templates"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 35,
+ "Name": "Users",
+ "Description": "Users"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 7,
+ "Name": "Miscellaneous",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 20,
+ "Name": "Miscellaneous",
+ "Description": "Miscellaneous"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 2,
+ "Name": "Storage",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7400,
+ "Name": "Health Status of Managed device",
+ "Description": "Health Status of Managed device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 47,
+ "Name": "Job",
+ "Description": "Job"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 118,
+ "Name": "Metrics",
+ "Description": "Metrics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 3,
+ "Name": "Updates",
+ "CatalogName": "Application",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 85,
+ "Name": "Application",
+ "Description": "Application"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 112,
+ "Name": "Firmware",
+ "Description": "Firmware"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('Dell%20Storage')",
+ "Name": "Dell Storage",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 2,
+ "Name": "Storage",
+ "CatalogName": "Dell Storage",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Dell Storage",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('iDRAC')",
+ "Name": "iDRAC",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 4,
+ "Name": "Audit",
+ "CatalogName": "iDRAC",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 41,
+ "Name": "Auto System Reset",
+ "Description": "Auto System Reset"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 54,
+ "Name": "BIOS Management",
+ "Description": "BIOS Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 75,
+ "Name": "BIOS POST",
+ "Description": "BIOS POST"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 12,
+ "Name": "Debug",
+ "Description": "Debug"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 53,
+ "Name": "Group Manager",
+ "Description": "Group Manager"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 45,
+ "Name": "iDRAC Service Module",
+ "Description": "iDRAC Service Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 114,
+ "Name": "IP Address",
+ "Description": "IP Address"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 122,
+ "Name": "iSM PEEK Component",
+ "Description": "iSM PEEK Component"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 48,
+ "Name": "Licensing",
+ "Description": "Licensing"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 15,
+ "Name": "Management Module",
+ "Description": "Management Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 96,
+ "Name": "OS Event",
+ "Description": "OS Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 81,
+ "Name": "PCI Device",
+ "Description": "PCI Device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 28,
+ "Name": "Power Usage POW",
+ "Description": "Power Usage POW"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 120,
+ "Name": "Secure Enterprise Key Management",
+ "Description": "Secure Enterprise Key Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 52,
+ "Name": "Software Change",
+ "Description": "Software Change"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 92,
+ "Name": "Support Assist",
+ "Description": "Support Assist"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 55,
+ "Name": "UEFI Event",
+ "Description": "UEFI Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 56,
+ "Name": "User Tracking",
+ "Description": "User Tracking"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 5,
+ "Name": "Configuration",
+ "CatalogName": "iDRAC",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 49,
+ "Name": "Auto-Discovery",
+ "Description": "Auto-Discovery"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 107,
+ "Name": "Backup/Restore",
+ "Description": "Backup/Restore"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 54,
+ "Name": "BIOS Management",
+ "Description": "BIOS Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 104,
+ "Name": "BOOT Control",
+ "Description": "BOOT Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 59,
+ "Name": "Certificate Management",
+ "Description": "Certificate Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 51,
+ "Name": "Firmware Download",
+ "Description": "Firmware Download"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 53,
+ "Name": "Group Manager",
+ "Description": "Group Manager"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 98,
+ "Name": "IO Identity Optimization",
+ "Description": "IO Identity Optimization"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 105,
+ "Name": "IO Virtualization",
+ "Description": "IO Virtualization"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 114,
+ "Name": "IP Address",
+ "Description": "IP Address"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 27,
+ "Name": "Job Control",
+ "Description": "Job Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 57,
+ "Name": "Lifecycle Controller",
+ "Description": "Lifecycle Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 3,
+ "Name": "Link Status",
+ "Description": "Link Status"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 123,
+ "Name": "Liquid Cooling System",
+ "Description": "Liquid Cooling System"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 19,
+ "Name": "Log Event",
+ "Description": "Log Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 15,
+ "Name": "Management Module",
+ "Description": "Management Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 80,
+ "Name": "Memory",
+ "Description": "Memory"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 102,
+ "Name": "NIC Configuration",
+ "Description": "NIC Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 97,
+ "Name": "OS Deployment",
+ "Description": "OS Deployment"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 81,
+ "Name": "PCI Device",
+ "Description": "PCI Device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 61,
+ "Name": "Processor",
+ "Description": "Processor"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 120,
+ "Name": "Secure Enterprise Key Management",
+ "Description": "Secure Enterprise Key Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 113,
+ "Name": "Storage Controller",
+ "Description": "Storage Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 92,
+ "Name": "Support Assist",
+ "Description": "Support Assist"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 29,
+ "Name": "System Event Log",
+ "Description": "System Event Log"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 79,
+ "Name": "Test Alert",
+ "Description": "Test Alert"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 55,
+ "Name": "UEFI Event",
+ "Description": "UEFI Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 66,
+ "Name": "vFlash Event",
+ "Description": "vFlash Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7,
+ "Name": "Virtual Console",
+ "Description": "Virtual Console"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 2,
+ "Name": "Storage",
+ "CatalogName": "iDRAC",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 108,
+ "Name": "Battery Event",
+ "Description": "Battery Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 94,
+ "Name": "Physical Disk",
+ "Description": "Physical Disk"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 44,
+ "Name": "Redundancy",
+ "Description": "Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 52,
+ "Name": "Software Change",
+ "Description": "Software Change"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 119,
+ "Name": "Software Defined Storage",
+ "Description": "Software Defined Storage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 113,
+ "Name": "Storage Controller",
+ "Description": "Storage Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 82,
+ "Name": "Storage Enclosure",
+ "Description": "Storage Enclosure"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 110,
+ "Name": "Temperature",
+ "Description": "Temperature"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 46,
+ "Name": "Virtual Disk",
+ "Description": "Virtual Disk"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "iDRAC",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 67,
+ "Name": "Amperage",
+ "Description": "Amperage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 41,
+ "Name": "Auto System Reset",
+ "Description": "Auto System Reset"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 108,
+ "Name": "Battery Event",
+ "Description": "Battery Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 75,
+ "Name": "BIOS POST",
+ "Description": "BIOS POST"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 89,
+ "Name": "Cable",
+ "Description": "Cable"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 83,
+ "Name": "Fibre Channel",
+ "Description": "Fibre Channel"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 45,
+ "Name": "iDRAC Service Module",
+ "Description": "iDRAC Service Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 111,
+ "Name": "IDSDM Redundancy",
+ "Description": "IDSDM Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 105,
+ "Name": "IO Virtualization",
+ "Description": "IO Virtualization"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 3,
+ "Name": "Link Status",
+ "Description": "Link Status"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 123,
+ "Name": "Liquid Cooling System",
+ "Description": "Liquid Cooling System"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 19,
+ "Name": "Log Event",
+ "Description": "Log Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 15,
+ "Name": "Management Module",
+ "Description": "Management Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 80,
+ "Name": "Memory",
+ "Description": "Memory"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 102,
+ "Name": "NIC Configuration",
+ "Description": "NIC Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 96,
+ "Name": "OS Event",
+ "Description": "OS Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 81,
+ "Name": "PCI Device",
+ "Description": "PCI Device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 94,
+ "Name": "Physical Disk",
+ "Description": "Physical Disk"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 61,
+ "Name": "Processor",
+ "Description": "Processor"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 68,
+ "Name": "Processor Absent",
+ "Description": "Processor Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 103,
+ "Name": "PSU Absent",
+ "Description": "PSU Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 44,
+ "Name": "Redundancy",
+ "Description": "Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 120,
+ "Name": "Secure Enterprise Key Management",
+ "Description": "Secure Enterprise Key Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 92,
+ "Name": "Support Assist",
+ "Description": "Support Assist"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 29,
+ "Name": "System Event Log",
+ "Description": "System Event Log"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 13,
+ "Name": "System Performance Event",
+ "Description": "System Performance Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 110,
+ "Name": "Temperature",
+ "Description": "Temperature"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 16,
+ "Name": "Temperature Statistics",
+ "Description": "Temperature Statistics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 55,
+ "Name": "UEFI Event",
+ "Description": "UEFI Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 5,
+ "Name": "vFlash Absent",
+ "Description": "vFlash Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 66,
+ "Name": "vFlash Event",
+ "Description": "vFlash Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7,
+ "Name": "Virtual Console",
+ "Description": "Virtual Console"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 46,
+ "Name": "Virtual Disk",
+ "Description": "Virtual Disk"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 40,
+ "Name": "Voltage",
+ "Description": "Voltage"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 3,
+ "Name": "Updates",
+ "CatalogName": "iDRAC",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 51,
+ "Name": "Firmware Download",
+ "Description": "Firmware Download"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 24,
+ "Name": "Firmware Update Job",
+ "Description": "Firmware Update Job"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 53,
+ "Name": "Group Manager",
+ "Description": "Group Manager"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 27,
+ "Name": "Job Control",
+ "Description": "Job Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 52,
+ "Name": "Software Change",
+ "Description": "Software Change"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 55,
+ "Name": "UEFI Event",
+ "Description": "UEFI Event"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 6,
+ "Name": "Work Notes",
+ "CatalogName": "iDRAC",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 54,
+ "Name": "BIOS Management",
+ "Description": "BIOS Management"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('IF-MIB')",
+ "Name": "IF-MIB",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 4,
+ "Name": "Audit",
+ "CatalogName": "IF-MIB",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 101,
+ "Name": "Interface",
+ "Description": "Interface"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('Internal%20Events%20Catalog')",
+ "Name": "Internal Events Catalog",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 4,
+ "Name": "Audit",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 54,
+ "Name": "BIOS Management",
+ "Description": "BIOS Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 12,
+ "Name": "Debug",
+ "Description": "Debug"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 115,
+ "Name": "Fabric",
+ "Description": "Fabric"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 21,
+ "Name": "Feature Card",
+ "Description": "Feature Card"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 10,
+ "Name": "Generic",
+ "Description": "Generic"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 53,
+ "Name": "Group Manager",
+ "Description": "Group Manager"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 45,
+ "Name": "iDRAC Service Module",
+ "Description": "iDRAC Service Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 101,
+ "Name": "Interface",
+ "Description": "Interface"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 114,
+ "Name": "IP Address",
+ "Description": "IP Address"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 27,
+ "Name": "Job Control",
+ "Description": "Job Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 48,
+ "Name": "Licensing",
+ "Description": "Licensing"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 57,
+ "Name": "Lifecycle Controller",
+ "Description": "Lifecycle Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 32,
+ "Name": "Link",
+ "Description": "Link"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 3,
+ "Name": "Link Status",
+ "Description": "Link Status"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 19,
+ "Name": "Log Event",
+ "Description": "Log Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 15,
+ "Name": "Management Module",
+ "Description": "Management Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 80,
+ "Name": "Memory",
+ "Description": "Memory"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 77,
+ "Name": "Node",
+ "Description": "Node"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 81,
+ "Name": "PCI Device",
+ "Description": "PCI Device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 44,
+ "Name": "Redundancy",
+ "Description": "Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 95,
+ "Name": "REST",
+ "Description": "REST"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 9,
+ "Name": "Security",
+ "Description": "Security"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 14,
+ "Name": "Server",
+ "Description": "Server"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 52,
+ "Name": "Software Change",
+ "Description": "Software Change"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 92,
+ "Name": "Support Assist",
+ "Description": "Support Assist"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 110,
+ "Name": "Temperature",
+ "Description": "Temperature"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 56,
+ "Name": "User Tracking",
+ "Description": "User Tracking"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 35,
+ "Name": "Users",
+ "Description": "Users"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 50,
+ "Name": "Virtual Media",
+ "Description": "Virtual Media"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 5,
+ "Name": "Configuration",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 49,
+ "Name": "Auto-Discovery",
+ "Description": "Auto-Discovery"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 107,
+ "Name": "Backup/Restore",
+ "Description": "Backup/Restore"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 54,
+ "Name": "BIOS Management",
+ "Description": "BIOS Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 104,
+ "Name": "BOOT Control",
+ "Description": "BOOT Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 59,
+ "Name": "Certificate Management",
+ "Description": "Certificate Management"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 4,
+ "Name": "Chassis",
+ "Description": "Chassis"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 8,
+ "Name": "Common",
+ "Description": "Common"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 116,
+ "Name": "Device Warranty",
+ "Description": "Device Warranty"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 34,
+ "Name": "Diagnostics",
+ "Description": "Diagnostics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 115,
+ "Name": "Fabric",
+ "Description": "Fabric"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 70,
+ "Name": "Fabric NVFA",
+ "Description": "Fabric NVFA"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 83,
+ "Name": "Fibre Channel",
+ "Description": "Fibre Channel"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 51,
+ "Name": "Firmware Download",
+ "Description": "Firmware Download"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 53,
+ "Name": "Group Manager",
+ "Description": "Group Manager"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 84,
+ "Name": "Groups",
+ "Description": "Groups"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 86,
+ "Name": "Interface NVIF",
+ "Description": "Interface NVIF"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 98,
+ "Name": "IO Identity Optimization",
+ "Description": "IO Identity Optimization"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 114,
+ "Name": "IP Address",
+ "Description": "IP Address"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 27,
+ "Name": "Job Control",
+ "Description": "Job Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 48,
+ "Name": "Licensing",
+ "Description": "Licensing"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 57,
+ "Name": "Lifecycle Controller",
+ "Description": "Lifecycle Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 19,
+ "Name": "Log Event",
+ "Description": "Log Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 15,
+ "Name": "Management Module",
+ "Description": "Management Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 37,
+ "Name": "Network",
+ "Description": "Network"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 102,
+ "Name": "NIC Configuration",
+ "Description": "NIC Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 77,
+ "Name": "Node",
+ "Description": "Node"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 73,
+ "Name": "Node NVNO",
+ "Description": "Node NVNO"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 97,
+ "Name": "OS Deployment",
+ "Description": "OS Deployment"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 1,
+ "Name": "Part Replacement",
+ "Description": "Part Replacement"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 81,
+ "Name": "PCI Device",
+ "Description": "PCI Device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 22,
+ "Name": "Remote Service",
+ "Description": "Remote Service"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 95,
+ "Name": "REST",
+ "Description": "REST"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 63,
+ "Name": "SAS IOM",
+ "Description": "SAS IOM"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 9,
+ "Name": "Security",
+ "Description": "Security"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 30,
+ "Name": "Server Interface",
+ "Description": "Server Interface"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 6,
+ "Name": "Subscription",
+ "Description": "Subscription"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 92,
+ "Name": "Support Assist",
+ "Description": "Support Assist"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 88,
+ "Name": "Templates",
+ "Description": "Templates"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 79,
+ "Name": "Test Alert",
+ "Description": "Test Alert"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 43,
+ "Name": "Topology",
+ "Description": "Topology"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 17,
+ "Name": "Topology Graph",
+ "Description": "Topology Graph"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 55,
+ "Name": "UEFI Event",
+ "Description": "UEFI Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 33,
+ "Name": "Uplink",
+ "Description": "Uplink"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 56,
+ "Name": "User Tracking",
+ "Description": "User Tracking"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 35,
+ "Name": "Users",
+ "Description": "Users"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 66,
+ "Name": "vFlash Event",
+ "Description": "vFlash Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 74,
+ "Name": "vFlash Media",
+ "Description": "vFlash Media"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 7,
+ "Name": "Miscellaneous",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 85,
+ "Name": "Application",
+ "Description": "Application"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 2,
+ "Name": "Storage",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 108,
+ "Name": "Battery Event",
+ "Description": "Battery Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 89,
+ "Name": "Cable",
+ "Description": "Cable"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 34,
+ "Name": "Diagnostics",
+ "Description": "Diagnostics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 100,
+ "Name": "Fluid Cache",
+ "Description": "Fluid Cache"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 94,
+ "Name": "Physical Disk",
+ "Description": "Physical Disk"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 63,
+ "Name": "SAS IOM",
+ "Description": "SAS IOM"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 52,
+ "Name": "Software Change",
+ "Description": "Software Change"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 99,
+ "Name": "SSD Devices",
+ "Description": "SSD Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 18,
+ "Name": "Storage",
+ "Description": "Storage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 113,
+ "Name": "Storage Controller",
+ "Description": "Storage Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 82,
+ "Name": "Storage Enclosure",
+ "Description": "Storage Enclosure"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 110,
+ "Name": "Temperature",
+ "Description": "Temperature"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 46,
+ "Name": "Virtual Disk",
+ "Description": "Virtual Disk"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 67,
+ "Name": "Amperage",
+ "Description": "Amperage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 41,
+ "Name": "Auto System Reset",
+ "Description": "Auto System Reset"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 108,
+ "Name": "Battery Event",
+ "Description": "Battery Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 75,
+ "Name": "BIOS POST",
+ "Description": "BIOS POST"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 89,
+ "Name": "Cable",
+ "Description": "Cable"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 69,
+ "Name": "Dell Key Manager",
+ "Description": "Dell Key Manager"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 90,
+ "Name": "Devices",
+ "Description": "Devices"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 34,
+ "Name": "Diagnostics",
+ "Description": "Diagnostics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 115,
+ "Name": "Fabric",
+ "Description": "Fabric"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 70,
+ "Name": "Fabric NVFA",
+ "Description": "Fabric NVFA"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 83,
+ "Name": "Fibre Channel",
+ "Description": "Fibre Channel"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 64,
+ "Name": "FlexAddress SD",
+ "Description": "FlexAddress SD"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 62,
+ "Name": "IDSDM Absent",
+ "Description": "IDSDM Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 65,
+ "Name": "IDSDM Media",
+ "Description": "IDSDM Media"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 111,
+ "Name": "IDSDM Redundancy",
+ "Description": "IDSDM Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 3,
+ "Name": "Link Status",
+ "Description": "Link Status"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 19,
+ "Name": "Log Event",
+ "Description": "Log Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 15,
+ "Name": "Management Module",
+ "Description": "Management Module"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 80,
+ "Name": "Memory",
+ "Description": "Memory"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 118,
+ "Name": "Metrics",
+ "Description": "Metrics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 102,
+ "Name": "NIC Configuration",
+ "Description": "NIC Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 77,
+ "Name": "Node",
+ "Description": "Node"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 96,
+ "Name": "OS Event",
+ "Description": "OS Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 81,
+ "Name": "PCI Device",
+ "Description": "PCI Device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 94,
+ "Name": "Physical Disk",
+ "Description": "Physical Disk"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 61,
+ "Name": "Processor",
+ "Description": "Processor"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 68,
+ "Name": "Processor Absent",
+ "Description": "Processor Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 103,
+ "Name": "PSU Absent",
+ "Description": "PSU Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 44,
+ "Name": "Redundancy",
+ "Description": "Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 63,
+ "Name": "SAS IOM",
+ "Description": "SAS IOM"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 92,
+ "Name": "Support Assist",
+ "Description": "Support Assist"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 29,
+ "Name": "System Event Log",
+ "Description": "System Event Log"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 13,
+ "Name": "System Performance Event",
+ "Description": "System Performance Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 110,
+ "Name": "Temperature",
+ "Description": "Temperature"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 16,
+ "Name": "Temperature Statistics",
+ "Description": "Temperature Statistics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 55,
+ "Name": "UEFI Event",
+ "Description": "UEFI Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 56,
+ "Name": "User Tracking",
+ "Description": "User Tracking"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 5,
+ "Name": "vFlash Absent",
+ "Description": "vFlash Absent"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 66,
+ "Name": "vFlash Event",
+ "Description": "vFlash Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 74,
+ "Name": "vFlash Media",
+ "Description": "vFlash Media"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 40,
+ "Name": "Voltage",
+ "Description": "Voltage"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 3,
+ "Name": "Updates",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 106,
+ "Name": "Fan Event",
+ "Description": "Fan Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 51,
+ "Name": "Firmware Download",
+ "Description": "Firmware Download"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 24,
+ "Name": "Firmware Update Job",
+ "Description": "Firmware Update Job"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 27,
+ "Name": "Job Control",
+ "Description": "Job Control"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 57,
+ "Name": "Lifecycle Controller",
+ "Description": "Lifecycle Controller"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 109,
+ "Name": "RAC Event",
+ "Description": "RAC Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 52,
+ "Name": "Software Change",
+ "Description": "Software Change"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 39,
+ "Name": "Software Config",
+ "Description": "Software Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 6,
+ "Name": "Work Notes",
+ "CatalogName": "Internal Events Catalog",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 56,
+ "Name": "User Tracking",
+ "Description": "User Tracking"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('Networking')",
+ "Name": "Networking",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Networking",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('OMSA')",
+ "Name": "OMSA",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 4,
+ "Name": "Audit",
+ "CatalogName": "OMSA",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 19,
+ "Name": "Log Event",
+ "Description": "Log Event"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 5,
+ "Name": "Configuration",
+ "CatalogName": "OMSA",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 41,
+ "Name": "Auto System Reset",
+ "Description": "Auto System Reset"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 61,
+ "Name": "Processor",
+ "Description": "Processor"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "OMSA",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 67,
+ "Name": "Amperage",
+ "Description": "Amperage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 41,
+ "Name": "Auto System Reset",
+ "Description": "Auto System Reset"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 108,
+ "Name": "Battery Event",
+ "Description": "Battery Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 89,
+ "Name": "Cable",
+ "Description": "Cable"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 11,
+ "Name": "Hardware Config",
+ "Description": "Hardware Config"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 80,
+ "Name": "Memory",
+ "Description": "Memory"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 78,
+ "Name": "Power Supply",
+ "Description": "Power Supply"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 23,
+ "Name": "Power Usage",
+ "Description": "Power Usage"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 61,
+ "Name": "Processor",
+ "Description": "Processor"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 44,
+ "Name": "Redundancy",
+ "Description": "Redundancy"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 25,
+ "Name": "Security Event",
+ "Description": "Security Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 29,
+ "Name": "System Event Log",
+ "Description": "System Event Log"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 110,
+ "Name": "Temperature",
+ "Description": "Temperature"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 66,
+ "Name": "vFlash Event",
+ "Description": "vFlash Event"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 40,
+ "Name": "Voltage",
+ "Description": "Voltage"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('OpenManage%20Enterprise')",
+ "Name": "OpenManage Enterprise",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "OpenManage Enterprise",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7400,
+ "Name": "Health Status of Managed device",
+ "Description": "Health Status of Managed device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 118,
+ "Name": "Metrics",
+ "Description": "Metrics"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 71,
+ "Name": "System Info",
+ "Description": "System Info"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('OpenManage%20Essentials')",
+ "Name": "OpenManage Essentials",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "OpenManage Essentials",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7400,
+ "Name": "Health Status of Managed device",
+ "Description": "Health Status of Managed device"
+ },
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 6,
+ "Name": "Work Notes",
+ "CatalogName": "OpenManage Essentials",
+ "SubCategoryDetails": []
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('Power%20Manager')",
+ "Name": "Power Manager",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "Power Manager",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 151,
+ "Name": "Power Configuration",
+ "Description": "Power Configuration"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('RFC1215')",
+ "Name": "RFC1215",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "RFC1215",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('SNMPv2-MIB')",
+ "Name": "SNMPv2-MIB",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "SNMPv2-MIB",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ }
+ ]
+ }
+ ]
+ },
+ {
+ "@odata.type": "#AlertService.AlertCategories",
+ "@odata.id": "/api/AlertService/AlertCategories('VMWare')",
+ "Name": "VMWare",
+ "IsBuiltIn": True,
+ "CategoriesDetails": [
+ {
+ "@odata.type": "#AlertService.AlertCategory",
+ "Id": 1,
+ "Name": "System Health",
+ "CatalogName": "VMWare",
+ "SubCategoryDetails": [
+ {
+ "@odata.type": "#AlertService.AlertSubCategory",
+ "Id": 7700,
+ "Name": "Other",
+ "Description": "Other"
+ }
+ ]
+ }
+ ]
+ }
+ ]}}])
+ def test_ome_alert_policies_category_info(self, params, ome_connection_mock_for_alert_category, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert isinstance(result['categories'], list)
+ assert result['msg'] == params['message']
+ for ctr in result['categories']:
+ assert 'CategoriesDetails' in ctr
+ for k in ctr.keys():
+ assert '@odata.' not in k
+
+ @pytest.mark.parametrize("exc_type",
+ [SSLValidationError, ConnectionError, TypeError, ValueError, OSError, HTTPError, URLError])
+ def test_ome_alert_policies_category_info_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_connection_mock_for_alert_category,
+ ome_response_mock):
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type == HTTPError:
+ mocker.patch(MODULE_PATH + 'get_all_data_with_pagination', side_effect=exc_type(
+ 'https://testhost.com', 401, 'http error message', {
+ "accept-type": "application/json"},
+ StringIO(json_str)))
+ result = self._run_module(ome_default_args)
+ assert result['failed'] is True
+ elif exc_type == URLError:
+ mocker.patch(MODULE_PATH + 'get_all_data_with_pagination',
+ side_effect=exc_type("exception message"))
+ # ome_connection_mock_for_alert_category.get_all_data_with_pagination.side_effect = exc_type("exception message")
+ result = self._run_module(ome_default_args)
+ assert result['unreachable'] is True
+ else:
+ mocker.patch(MODULE_PATH + 'get_all_data_with_pagination',
+ side_effect=exc_type("exception message"))
+ result = self._run_module(ome_default_args)
+ assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_info.py
new file mode 100644
index 000000000..425bc1faa
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_info.py
@@ -0,0 +1,121 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import json
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_alert_policies_info
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from io import StringIO
+from ansible.module_utils._text import to_text
+
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+MODULE_SUCCESS_MESSAGE_ALL = "Successfully retrieved all the OME alert policies information."
+MODULE_SUCCESS_MESSAGE_SPECIFIC = "Successfully retrieved {0} OME alert policy information."
+POLICY_NAME_NOT_FOUND_OR_EMPTY = "The OME alert policy name {0} provided does not exist or empty."
+
+
+class TestOmeAlertPolicyInfo(FakeAnsibleModule):
+ """Pyest class for ome_alert_policies_info module."""
+ module = ome_alert_policies_info
+ resp_mock_value = {"@odata.context": "/api/$metadata#Collection(JobService.Job)",
+ "@odata.count": 1,
+ "value": [
+ {
+ "Id": 10006,
+ "Name": "TestAlert1",
+ "Description": "This policy is applicable to critical alerts.",
+ "State": True,
+ "Visible": True,
+ "Owner": None,
+ },
+ {
+ "Id": 10010,
+ "Name": "TestAlert2",
+ "Description": "This policy is applicable to critical alerts.",
+ "State": True,
+ "Visible": True,
+ "Owner": None,
+ }
+ ]}
+
+ @pytest.fixture
+ def ome_connection_alert_policy_info_mock(self, mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'ome_alert_policies_info.RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+ def test_all_ome_alert_policy_info_success_case(self, ome_default_args, ome_connection_alert_policy_info_mock,
+ ome_response_mock):
+ ome_response_mock.json_data = self.resp_mock_value
+ ome_response_mock.success = True
+ result = self._run_module(ome_default_args)
+ assert result['policies'][0]["Id"] == 10006
+ assert "@odata.count" not in result['policies'][0]
+ assert result['msg'] == MODULE_SUCCESS_MESSAGE_ALL
+
+ def test_policy_name_ome_alert_policy_info_success_case(self, ome_default_args, ome_connection_alert_policy_info_mock,
+ ome_response_mock):
+ policy_name = 'TestAlert2'
+ ome_default_args.update({"policy_name": policy_name})
+ ome_response_mock.json_data = self.resp_mock_value
+ ome_response_mock.success = True
+ result = self._run_module(ome_default_args)
+ assert result['policies'][0]["Id"] == 10010
+ assert "@odata.count" not in result['policies'][0]
+ assert result['msg'] == MODULE_SUCCESS_MESSAGE_SPECIFIC.format(policy_name)
+
+ def test_random_policy_name_ome_alert_policy_info(self, ome_default_args, ome_connection_alert_policy_info_mock,
+ ome_response_mock):
+ random_name = 'Random'
+ ome_default_args.update({"policy_name": random_name})
+ ome_response_mock.json_data = self.resp_mock_value
+ ome_response_mock.success = True
+ result = self._run_module(ome_default_args)
+ assert result['policies'] == []
+ assert result['msg'] == POLICY_NAME_NOT_FOUND_OR_EMPTY.format(random_name)
+
+ def test_empty_policy_name_ome_alert_policy_info(self, ome_default_args, ome_connection_alert_policy_info_mock,
+ ome_response_mock):
+ empty_name = ""
+ ome_default_args.update({"policy_name": empty_name})
+ ome_response_mock.json_data = self.resp_mock_value
+ ome_response_mock.success = True
+ result = self._run_module(ome_default_args)
+ assert result['policies'] == []
+ assert result['msg'] == POLICY_NAME_NOT_FOUND_OR_EMPTY.format(empty_name)
+
+ @pytest.mark.parametrize("exc_type", [URLError, HTTPError, SSLValidationError, ConnectionError,
+ TypeError, ValueError])
+ def test_ome_alert_policy_info_main_exception_case(self, exc_type, mocker, ome_default_args, ome_connection_alert_policy_info_mock,
+ ome_response_mock):
+ ome_response_mock.status_code = 400
+ ome_response_mock.success = False
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(
+ MODULE_PATH + 'ome_alert_policies_info.OMEAlertPolicyInfo.get_alert_policy_info',
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(
+ MODULE_PATH + 'ome_alert_policies_info.OMEAlertPolicyInfo.get_alert_policy_info',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ result = self._run_module(ome_default_args)
+ if exc_type != URLError:
+ assert result['failed'] is True
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_message_id_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_message_id_info.py
new file mode 100644
index 000000000..758bbfaaf
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_alert_policies_message_id_info.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import json
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_alert_policies_message_id_info
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from io import StringIO
+from ansible.module_utils._text import to_text
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
+
+@pytest.fixture
+def ome_alert_policies_message_id_info_mock(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'ome_alert_policies_message_id_info.RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeAlertPoliciesMessageIDInfo(FakeAnsibleModule):
+ module = ome_alert_policies_message_id_info
+
+ def test_alert_policies_message_id_info_success_case(self, ome_default_args, ome_alert_policies_message_id_info_mock, ome_response_mock):
+ ome_response_mock.json_data = {"value": [
+ {
+ "Category": "System Health",
+ "Message": "The ${0} sensor has failed, and the last recorded value by the sensor was ${1} A.",
+ "MessageId": "AMP400",
+ "Prefix": "AMP",
+ "SequenceNo": 400,
+ "Severity": "Critical",
+ "SubCategory": "Amperage"
+ }
+ ]}
+ ome_response_mock.status_code = 200
+ result = self._run_module(ome_default_args)
+ assert 'message_ids' in result
+ assert result['msg'] == "Successfully retrieved alert policies message ids information."
+
+ def test_ome_alert_policies_message_id_info_empty_case(self, ome_default_args,
+ ome_alert_policies_message_id_info_mock,
+ ome_response_mock):
+ ome_response_mock.json_data = {"value": []}
+ ome_response_mock.status_code = 200
+ ome_response_mock.success = True
+ result = self._run_module(ome_default_args)
+ assert result['message_ids'] == []
+
+ @pytest.mark.parametrize("exc_type",
+ [URLError, HTTPError, SSLValidationError, ConnectionError,
+ TypeError, ValueError])
+ def test_ome_alert_policies_message_id_info_main_exception_handling_case(self, exc_type, ome_default_args,
+ ome_alert_policies_message_id_info_mock,
+ ome_response_mock):
+ ome_response_mock.status_code = 400
+ ome_response_mock.success = False
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ ome_alert_policies_message_id_info_mock.invoke_request.side_effect = exc_type('test')
+ else:
+ ome_alert_policies_message_id_info_mock.invoke_request.side_effect = exc_type('https://testhost.com',
+ 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ result = self._run_module(ome_default_args)
+ if exc_type != URLError:
+ assert result['failed'] is True
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py
index b5bc1d947..f30a6a049 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_smtp.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.3.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -18,10 +18,9 @@ from io import StringIO
import pytest
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_alerts_smtp
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \
- AnsibleFailJSonException
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
SUCCESS_MSG = "Successfully updated the SMTP settings."
SMTP_URL = "AlertService/AlertDestinations/SMTPConfiguration"
@@ -451,7 +450,7 @@ class TestAppAlertsSMTP(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'fetch_smtp_settings',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py
index ea4551d93..4ae3922a0 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_alerts_syslog.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.3.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -45,25 +45,25 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
{"module_args": {
"syslog_servers": [
{
- "destination_address": "192.168.10.41",
+ "destination_address": "XX.XX.XX.XX",
"enabled": True,
"id": 1,
"port_number": 514
},
{
- "destination_address": "192.168.10.46",
+ "destination_address": "XY.XY.XY.XY",
"enabled": False,
"id": 2,
"port_number": 514
},
{
- "destination_address": "192.168.10.43",
+ "destination_address": "YY.YY.YY.YY",
"enabled": False,
"id": 3,
"port_number": 514
},
{
- "destination_address": "192.168.10.44",
+ "destination_address": "ZZ.ZZ.ZZ.ZZ",
"enabled": True,
"id": 4,
"port_number": 514
@@ -77,28 +77,28 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 1,
"Enabled": True,
- "DestinationAddress": "192.168.10.41",
+ "DestinationAddress": "XX.XX.XX.XX",
"PortNumber": 514
},
{
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 2,
"Enabled": False,
- "DestinationAddress": "192.168.10.46",
+ "DestinationAddress": "XY.XY.XY.XY",
"PortNumber": 0
},
{
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 3,
"Enabled": False,
- "DestinationAddress": "192.168.10.43",
+ "DestinationAddress": "YY.YY.YY.YY",
"PortNumber": 514
},
{
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 4,
"Enabled": True,
- "DestinationAddress": "192.168.10.44",
+ "DestinationAddress": "ZZ.ZZ.ZZ.ZZ",
"PortNumber": 514
}
]
@@ -106,13 +106,13 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
{"module_args": {
"syslog_servers": [
{
- "destination_address": "192.168.10.41",
+ "destination_address": "XX.XX.XX.XX",
"enabled": True,
"id": 1,
"port_number": 514
},
{
- "destination_address": "192.168.10.46",
+ "destination_address": "XY.XY.XY.XY",
"enabled": False,
"id": 2,
"port_number": 514
@@ -126,14 +126,14 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 1,
"Enabled": True,
- "DestinationAddress": "192.168.10.41",
+ "DestinationAddress": "XX.XX.XX.XX",
"PortNumber": 511
},
{
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 2,
"Enabled": True,
- "DestinationAddress": "192.168.10.46",
+ "DestinationAddress": "XY.XY.XY.XY",
"PortNumber": 514
}
]
@@ -141,13 +141,13 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
{"check_mode": True, "module_args": {
"syslog_servers": [
{
- "destination_address": "192.168.10.41",
+ "destination_address": "XX.XX.XX.XX",
"enabled": True,
"id": 1,
"port_number": 514
},
{
- "destination_address": "192.168.10.46",
+ "destination_address": "XY.XY.XY.XY",
"enabled": False,
"id": 2,
"port_number": 514
@@ -161,14 +161,14 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 1,
"Enabled": True,
- "DestinationAddress": "192.168.10.41",
+ "DestinationAddress": "XX.XX.XX.XX",
"PortNumber": 511
},
{
"@odata.type": "#AlertDestinations.SyslogConfiguration",
"Id": 2,
"Enabled": True,
- "DestinationAddress": "192.168.10.46",
+ "DestinationAddress": "XY.XY.XY.XY",
"PortNumber": 514
}
]
@@ -179,31 +179,31 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
{"module_args": {
"syslog_servers": [
{
- "destination_address": "192.168.10.41",
+ "destination_address": "XX.XX.XX.XX",
"enabled": True,
"id": 1,
"port_number": 514
},
{
- "destination_address": "192.168.10.46",
+ "destination_address": "XY.XY.XY.XY",
"enabled": False,
"id": 2,
"port_number": 514
},
{
- "destination_address": "192.168.10.43",
+ "destination_address": "YY.YY.YY.YY",
"enabled": False,
"id": 3,
"port_number": 514
},
{
- "destination_address": "192.168.10.44",
+ "destination_address": "ZZ.ZZ.ZZ.ZZ",
"enabled": True,
"id": 4,
"port_number": 514
},
{
- "destination_address": "192.168.10.44",
+ "destination_address": "ZZ.ZZ.ZZ.ZZ",
"enabled": True,
"id": 4,
"port_number": 514
@@ -241,7 +241,7 @@ class TestOmeAlertSyslog(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'validate_input',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py
index c31983bca..99c49c210 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_certificate.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.3
-# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -21,9 +21,10 @@ from io import StringIO
from ansible.module_utils._text import to_text
from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_certificate
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+EMAIL_ADDRESS = "support@dell.com"
@pytest.fixture
@@ -47,7 +48,7 @@ class TestOmeAppCSR(FakeAnsibleModule):
args = {"command": "generate_csr", "distinguished_name": "hostname.com",
"department_name": "Remote Access Group", "business_name": "Dell Inc.",
"locality": "Round Rock", "country_state": "Texas", "country": "US",
- "email": "support@dell.com"}
+ "email": EMAIL_ADDRESS, "subject_alternative_names": "XX.XX.XX.XX"}
ome_default_args.update(args)
if exc_type == URLError:
mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters',
@@ -61,7 +62,7 @@ class TestOmeAppCSR(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters',
- side_effect=exc_type('http://testhost.com', 400,
+ side_effect=exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
@@ -76,14 +77,15 @@ class TestOmeAppCSR(FakeAnsibleModule):
args = {"command": "generate_csr", "distinguished_name": "hostname.com",
"department_name": "Remote Access Group", "business_name": "Dell Inc.",
"locality": "Round Rock", "country_state": "Texas", "country": "US",
- "email": "support@dell.com"}
+ "email": EMAIL_ADDRESS, "subject_alternative_names": "XX.XX.XX.XX"}
f_module = self.get_module_mock(params=args)
result = self.module.get_resource_parameters(f_module)
assert result[0] == "POST"
assert result[1] == "ApplicationService/Actions/ApplicationService.GenerateCSR"
assert result[2] == {'DistinguishedName': 'hostname.com', 'Locality': 'Round Rock',
'DepartmentName': 'Remote Access Group', 'BusinessName': 'Dell Inc.',
- 'State': 'Texas', 'Country': 'US', 'Email': 'support@dell.com'}
+ 'State': 'Texas', 'Country': 'US', 'Email': 'support@dell.com',
+ 'San': 'XX.XX.XX.XX'}
def test_upload_csr_fail01(self, mocker, ome_default_args, ome_connection_mock_for_application_certificate,
ome_response_mock):
@@ -108,13 +110,13 @@ class TestOmeAppCSR(FakeAnsibleModule):
csr_json = {"CertificateData": "--BEGIN-REQUEST--"}
payload = {"DistinguishedName": "hostname.com", "DepartmentName": "Remote Access Group",
"BusinessName": "Dell Inc.", "Locality": "Round Rock", "State": "Texas",
- "Country": "US", "Email": "support@dell.com"}
+ "Country": "US", "Email": EMAIL_ADDRESS, "subject_alternative_names": "XX.XX.XX.XX"}
mocker.patch(MODULE_PATH + 'ome_application_certificate.get_resource_parameters',
return_value=("POST", "ApplicationService/Actions/ApplicationService.GenerateCSR", payload))
ome_default_args.update({"command": "generate_csr", "distinguished_name": "hostname.com",
"department_name": "Remote Access Group", "business_name": "Dell Inc.",
"locality": "Round Rock", "country_state": "Texas", "country": "US",
- "email": "support@dell.com"})
+ "email": EMAIL_ADDRESS, "subject_alternative_names": "XX.XX.XX.XX, YY.YY.YY.YY"})
ome_response_mock.success = True
ome_response_mock.json_data = csr_json
result = self.execute_module(ome_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py
index 3a86a3f0d..627c5e71d 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_console_preferences.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -18,11 +18,9 @@ from io import StringIO
import pytest
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ssl import SSLError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import SSLValidationError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_console_preferences
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \
- AnsibleFailJSonException
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
SUCCESS_MSG = "Successfully updated the Console Preferences settings."
SETTINGS_URL = "ApplicationService/Settings"
@@ -2233,7 +2231,7 @@ class TestOmeAppConsolePreferences(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + '_validate_params',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py
index 3938184ed..01cf4afdd 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_address.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -19,7 +19,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_address
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
@@ -351,7 +351,7 @@ class TestOmeAppNetwork(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'ome_application_network_address.validate_input',
- side_effect=exc_type('http://testhost.com', 400,
+ side_effect=exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py
index f4d32fcd3..af34a6652 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_proxy.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -21,7 +21,7 @@ from io import StringIO
from ansible.module_utils._text import to_text
from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_proxy
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
CHECK_MODE_CHANGE_FOUND_MSG = "Changes found to be applied."
@@ -128,7 +128,7 @@ class TestOmeTemplate(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'ome_application_network_proxy.get_payload',
- side_effect=exc_type('http://testhost.com', 400,
+ side_effect=exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
@@ -160,7 +160,7 @@ class TestOmeTemplate(FakeAnsibleModule):
def test_get_payload(self, ome_default_args):
new_param = {
- "ip_address": "192.168.0.2",
+ "ip_address": "YY.YY.YY.YY",
"proxy_port": 443,
"enable_proxy": True,
"proxy_username": "username",
@@ -171,18 +171,18 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_default_args.update(new_param)
f_module = self.get_module_mock(params=ome_default_args)
payload = self.module.get_payload(f_module)
- assert ome_default_args == {"ip_address": "192.168.0.2",
+ assert ome_default_args == {"ip_address": "YY.YY.YY.YY",
"proxy_port": 443,
"enable_proxy": True,
"proxy_username": "username",
"proxy_password": "password",
"enable_authentication": False,
- "hostname": "192.168.0.1",
+ "hostname": "XX.XX.XX.XX",
"username": "username",
"password": "password",
"port": 443,
"ca_path": "/path/ca_bundle"}
- assert payload == {"EnableProxy": True, "IpAddress": "192.168.0.2", "PortNumber": 443, "Username": "username",
+ assert payload == {"EnableProxy": True, "IpAddress": "YY.YY.YY.YY", "PortNumber": 443, "Username": "username",
"Password": "password", "EnableAuthentication": False}
def test_get_updated_payload_success_case(self, mocker, ome_default_args, ome_connection_mock_for_application_network_proxy,
@@ -192,7 +192,7 @@ class TestOmeTemplate(FakeAnsibleModule):
"@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0",
"PortNumber": 443, "EnableAuthentication": False, "EnableProxy": True,
"Username": "username1", "Password": "password1"}
- payload = {"EnableAuthentication": True, "IpAddress": "192.168.0.1", "PortNumber": 443, 'EnableProxy': True,
+ payload = {"EnableAuthentication": True, "IpAddress": "XX.XX.XX.XX", "PortNumber": 443, 'EnableProxy': True,
'Username': 'username2', "Password": "password2"}
f_module = self.get_module_mock(params=ome_default_args)
ome_response_mock.json_data = current_setting
@@ -212,14 +212,14 @@ class TestOmeTemplate(FakeAnsibleModule):
"@odata.id": "/api/ApplicationService/Network/ProxyConfiguration", "IpAddress": "255.0.0.0",
"PortNumber": 443, "EnableAuthentication": True, "EnableProxy": True,
"Username": "username1", "Password": "password1"}
- payload = {"EnableAuthentication": False, "IpAddress": "192.168.0.1", "PortNumber": 443, 'EnableProxy': True,
+ payload = {"EnableAuthentication": False, "IpAddress": "XX.XX.XX.XX", "PortNumber": 443, 'EnableProxy': True,
'Username': 'username2', "Password": "password2"}
f_module = self.get_module_mock(params=ome_default_args)
ome_response_mock.json_data = current_setting
mocker.patch(MODULE_PATH + "ome_application_network_proxy.validate_check_mode_for_network_proxy",
return_value=None)
setting = self.module.get_updated_payload(ome_connection_mock_for_application_network_proxy, f_module, payload)
- assert setting == {"EnableAuthentication": False, "IpAddress": "192.168.0.1", "PortNumber": 443,
+ assert setting == {"EnableAuthentication": False, "IpAddress": "XX.XX.XX.XX", "PortNumber": 443,
'EnableProxy': True}
def test_get_updated_payload_when_same_setting_failure_case1(self, mocker, ome_default_args,
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py
index 0cd91a7f5..7a4ec5354 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_settings.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.4.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -16,12 +16,11 @@ import json
import pytest
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible.module_utils.urls import SSLValidationError
from io import StringIO
from ansible.module_utils._text import to_text
-from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_settings
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
SUCCESS_MSG = "Successfully updated the session timeout settings."
NO_CHANGES = "No changes found to be applied."
@@ -375,7 +374,7 @@ class TestOmeApplicationNetworkSettings(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'fetch_session_inactivity_settings',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py
index 53e323117..b5b7de549 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_time.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -21,7 +21,7 @@ from io import StringIO
from ansible.module_utils._text import to_text
from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_time
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
@@ -72,13 +72,13 @@ class TestOmeTemplate(FakeAnsibleModule):
assert result["msg"] == "Successfully configured network time."
@pytest.mark.parametrize("param1", [{"enable_ntp": True, "time_zone": "TZ_ID_66"}])
- @pytest.mark.parametrize("param2", [{"primary_ntp_address": "192.168.0.2"},
- {"secondary_ntp_address1": "192.168.0.3"},
- {"secondary_ntp_address2": "192.168.0.4"},
- {"primary_ntp_address": "192.168.0.2", "secondary_ntp_address1": "192.168.0.3"},
- {"primary_ntp_address": "192.168.0.2", "secondary_ntp_address2": "192.168.0.4"},
- {"primary_ntp_address": "192.168.0.2", "secondary_ntp_address1": "192.168.0.3",
- "secondary_ntp_address2": "192.168.0.4"}
+ @pytest.mark.parametrize("param2", [{"primary_ntp_address": "YY.YY.YY.YY"},
+ {"secondary_ntp_address1": "XX.XX.XX.XX"},
+ {"secondary_ntp_address2": "XY.XY.XY.XY"},
+ {"primary_ntp_address": "YY.YY.YY.YY", "secondary_ntp_address1": "XX.XX.XX.XX"},
+ {"primary_ntp_address": "YY.YY.YY.YY", "secondary_ntp_address2": "XY.XY.XY.XY"},
+ {"primary_ntp_address": "YY.YY.YY.YY", "secondary_ntp_address1": "XX.XX.XX.XX",
+ "secondary_ntp_address2": "XY.XY.XY.XY"}
])
def test_ome_application_network_time_main_enable_ntp_true_success_case_01(self, mocker, ome_default_args, param1,
param2,
@@ -93,9 +93,9 @@ class TestOmeTemplate(FakeAnsibleModule):
time_data = {
"EnableNTP": True,
"JobId": None,
- "PrimaryNTPAddress": "192.168.0.2",
- "SecondaryNTPAddress1": "192.168.0.3",
- "SecondaryNTPAddress2": "192.168.0.4",
+ "PrimaryNTPAddress": "YY.YY.YY.YY",
+ "SecondaryNTPAddress1": "XX.XX.XX.XX",
+ "SecondaryNTPAddress2": "XY.XY.XY.XY",
"SystemTime": None,
"TimeSource": "10.136.112.222",
"TimeZone": "TZ_ID_66",
@@ -196,7 +196,7 @@ class TestOmeTemplate(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'ome_application_network_time.get_payload',
- side_effect=exc_type('http://testhost.com', 400,
+ side_effect=exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
@@ -210,9 +210,9 @@ class TestOmeTemplate(FakeAnsibleModule):
new_param = {
"enable_ntp": True,
"time_zone": "TimeZone",
- "primary_ntp_address": "192.168.0.2",
- "secondary_ntp_address1": "192.168.0.3",
- "secondary_ntp_address2": "192.168.0.4"
+ "primary_ntp_address": "YY.YY.YY.YY",
+ "secondary_ntp_address1": "XX.XX.XX.XX",
+ "secondary_ntp_address2": "XY.XY.XY.XY"
}
ome_default_args.update(new_param)
self.module.remove_unwanted_keys(removable_keys, ome_default_args)
@@ -268,7 +268,7 @@ class TestOmeTemplate(FakeAnsibleModule):
"secondary_ntp_address2": "10.136.112.222",
"system_time": None,
"time_zone": "TZ_ID_66",
- "hostname": "192.168.0.1",
+ "hostname": "XX.XX.XX.XX",
"username": "username",
"password": "password",
"ca_path": "/path/ca_bundle"}
@@ -464,13 +464,13 @@ class TestOmeTemplate(FakeAnsibleModule):
assert exc.value.args[0] == msg
@pytest.mark.parametrize("sub_param", [
- {"primary_ntp_address": "192.168.02.1", "secondary_ntp_address1": "192.168.02.3",
- "secondary_ntp_address2": "192.168.02.2"},
- {"secondary_ntp_address1": "192.168.02.1"},
- {"secondary_ntp_address2": "192.168.02.1"},
- {"primary_ntp_address": "192.168.02.1", "time_zone": "TZ_01"},
- {"primary_ntp_address": "192.168.02.1"},
- {"secondary_ntp_address1": "192.168.02.1", "time_zone": "TZ_01"},
+ {"primary_ntp_address": "XX.XX.XX.XX", "secondary_ntp_address1": "ZZ.ZZ.ZZ.ZZ",
+ "secondary_ntp_address2": "YY.YY.YY.YY"},
+ {"secondary_ntp_address1": "XX.XX.XX.XX"},
+ {"secondary_ntp_address2": "XX.XX.XX.XX"},
+ {"primary_ntp_address": "XX.XX.XX.XX", "time_zone": "TZ_01"},
+ {"primary_ntp_address": "XX.XX.XX.XX"},
+ {"secondary_ntp_address1": "XX.XX.XX.XX", "time_zone": "TZ_01"},
])
def test_validate_input_time_enable_false_case_01(self, ome_default_args, sub_param):
params = {"enable_ntp": False}
@@ -482,10 +482,10 @@ class TestOmeTemplate(FakeAnsibleModule):
self.module.validate_input(f_module)
assert exc.value.args[0] == msg
- @pytest.mark.parametrize("sub_param", [{"time_zone": "TZ_01"}, {"primary_ntp_address": "192.168.02.1"},
- {"secondary_ntp_address1": "192.168.02.1"},
- {"secondary_ntp_address2": "192.168.02.1"},
- {"primary_ntp_address": "192.168.02.1", "time_zone": "TZ_01"}, {}
+ @pytest.mark.parametrize("sub_param", [{"time_zone": "TZ_01"}, {"primary_ntp_address": "XX.XX.XX.XX"},
+ {"secondary_ntp_address1": "XX.XX.XX.XX"},
+ {"secondary_ntp_address2": "XX.XX.XX.XX"},
+ {"primary_ntp_address": "XX.XX.XX.XX", "time_zone": "TZ_01"}, {}
])
def test_validate_input_time_enable_true_case_04(self, ome_default_args, sub_param):
"""
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py
index d6fbc3680..d5792ce30 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_network_webserver.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.3
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,7 +20,7 @@ from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_application_network_webserver
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
@@ -133,7 +133,7 @@ class TestOmeAppNetwork(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'ome_application_network_webserver.get_updated_payload',
- side_effect=exc_type('http://testhost.com', 400,
+ side_effect=exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py
index ef945ae63..e0ba31825 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_application_security_settings.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.4.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -350,6 +350,7 @@ class TestOmeSecuritySettings(FakeAnsibleModule):
ome_default_args.update(params['module_args'])
ome_connection_mock_for_security_settings.job_tracking.return_value = \
(params.get('job_failed'), params.get('job_message'))
+ mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
result = self._run_module(
ome_default_args, check_mode=params.get(
'check_mode', False))
@@ -390,7 +391,7 @@ class TestOmeSecuritySettings(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'login_security_setting',
- side_effect=exc_type('http://testhost.com',
+ side_effect=exc_type('https://testhost.com',
400,
'http error message',
{"accept-type": "application/json"},
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py
index 0d3504b14..10841d435 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_chassis_slots.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.6.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_chassis_slots
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
DEVICE_REPEATED = "Duplicate device entry found for devices with identifiers {0}."
INVALID_SLOT_DEVICE = "Unable to rename one or more slots because either the specified device is invalid or slots " \
@@ -290,7 +290,7 @@ class TestOmeChassisSlots(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_device_slot_config',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py
index 51ff166f0..370f53246 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_baseline.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.2.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -562,7 +562,7 @@ class TestOmeConfigCompBaseline(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'compliance_operation',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py
index b038b1191..d743ed53d 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_configuration_compliance_info.py
@@ -2,8 +2,8 @@
#
# Dell OpenManage Ansible Modules
-# Version 6.1.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -12,16 +12,15 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
-import pytest
import json
-from ssl import SSLError
-from ansible_collections.dellemc.openmanage.plugins.modules import ome_configuration_compliance_info
-from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
-from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \
- AnsibleFailJSonException
from io import StringIO
+
+import pytest
from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_configuration_compliance_info
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_configuration_compliance_info.'
@@ -31,18 +30,53 @@ def ome_connection_mock_for_compliance_info(mocker, ome_response_mock):
connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
- ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []}
- ome_connection_mock_obj.get_all_items_with_pagination.return_value = {"value": []}
+ ome_connection_mock_obj.get_all_report_details.return_value = {
+ "report_list": []}
+ ome_connection_mock_obj.get_all_items_with_pagination.return_value = {
+ "value": []}
return ome_connection_mock_obj
class TestBaselineComplianceInfo(FakeAnsibleModule):
module = ome_configuration_compliance_info
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"report_list": [
+ {'Name': 'b1', 'Id': 123,
+ 'TemplateId': 23},
+ {'Name': 'b2', 'Id': 124,
+ 'TemplateId': 24}],
+ 'ComplianceAttributeGroups': [{"Device": "Compliant"}]},
+ 'report': [{'Device': 'Compliant'}],
+ 'mparams': {"baseline": "b1", "device_id": 1234}},
+ {"json_data": {"report_list": [
+ {'Name': 'b1', 'Id': 123, 'TemplateId': 23},
+ {'Name': 'b2', 'Id': 124, 'TemplateId': 24}],
+ 'value': [{'Id': 123, 'ServiceTag': 'ABCD123'},
+ {'Id': 124, 'ServiceTag': 'ABCD124'}],
+ 'ComplianceAttributeGroups': [{"Device": "Compliant"}]},
+ 'report': [{'ComplianceAttributeGroups': [{'Device': 'Compliant'}], 'Id': 123, 'ServiceTag': 'ABCD123'}],
+ 'mparams': {"baseline": "b1", "device_service_tag": 'ABCD123'}}
+ ])
+ def test_ome_configuration_compliance_info_success(self, params, ome_connection_mock_for_compliance_info, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_compliance_info.get_all_report_details.return_value = params[
+ 'json_data']
+ ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = params[
+ 'json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['compliance_info'] == params['report']
+
def test_validate_device(self, ome_connection_mock_for_compliance_info):
value_list = [{"Id": 25011, "ServiceTag": "FGHREF"}]
- report = ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = {"value": value_list}
- f_module = self.get_module_mock(params={'baseline': "baseline_one", "device_id": 25011})
+ report = ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = {
+ "value": value_list}
+ f_module = self.get_module_mock(
+ params={'baseline': "baseline_one", "device_id": 25011})
device = self.module.validate_device(f_module, report,
device_id=25011, service_tag=None, base_id=None)
service_tag = self.module.validate_device(f_module, report,
@@ -57,31 +91,53 @@ class TestBaselineComplianceInfo(FakeAnsibleModule):
def test_get_baseline_id(self, ome_connection_mock_for_compliance_info):
report_list = [{"Id": 1, "Name": "baseline_one", "TemplateId": 1}]
- ome_connection_mock_for_compliance_info.get_all_report_details.return_value = {"report_list": report_list}
+ ome_connection_mock_for_compliance_info.get_all_report_details.return_value = {
+ "report_list": report_list}
f_module = self.get_module_mock(params={'baseline': "baseline_one"})
- base_id, template_id = self.module.get_baseline_id(f_module, "baseline_one", ome_connection_mock_for_compliance_info)
+ base_id, template_id = self.module.get_baseline_id(
+ f_module, "baseline_one", ome_connection_mock_for_compliance_info)
with pytest.raises(Exception) as exc:
- self.module.get_baseline_id(f_module, "baseline_two", ome_connection_mock_for_compliance_info)
+ self.module.get_baseline_id(
+ f_module, "baseline_two", ome_connection_mock_for_compliance_info)
assert exc.value.args[0] == "Unable to complete the operation because the entered " \
"target baseline name 'baseline_two' is invalid."
assert base_id == 1
def test_compliance_report(self, ome_connection_mock_for_compliance_info, mocker, ome_response_mock):
value_list = [{"Id": 25011, "TemplateId": 1}]
- ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = {"value": value_list}
+ ome_connection_mock_for_compliance_info.get_all_items_with_pagination.return_value = {
+ "value": value_list}
mocker.patch(MODULE_PATH + "get_baseline_id", return_value=25011)
f_module = self.get_module_mock(params={'baseline': "baseline_one"})
- ome_response_mock.json_data = {"value": [{"Id": 25011, "TemplateId": 1}]}
+ ome_response_mock.json_data = {
+ "value": [{"Id": 25011, "TemplateId": 1}]}
mocker.patch(MODULE_PATH + 'get_baseline_id', return_value=(1, 1))
- report = self.module.compliance_report(f_module, ome_connection_mock_for_compliance_info)
- assert report == [{'Id': 25011, 'ComplianceAttributeGroups': None, 'TemplateId': 1}]
+ report = self.module.compliance_report(
+ f_module, ome_connection_mock_for_compliance_info)
+ assert report == [
+ {'Id': 25011, 'ComplianceAttributeGroups': None, 'TemplateId': 1}]
- def test_main_exception(self, ome_connection_mock_for_compliance_info, mocker,
+ @pytest.mark.parametrize("exc_type",
+ [SSLValidationError, ConnectionError, TypeError, ValueError, OSError, HTTPError, URLError])
+ def test_main_exception(self, exc_type, ome_connection_mock_for_compliance_info, mocker,
ome_response_mock, ome_default_args):
- ome_default_args.update({"baseline": "baseline_one", "device_id": 25011})
- response = mocker.patch(MODULE_PATH + 'compliance_report')
- ome_response_mock.status_code = 200
- ome_response_mock.success = True
- ome_response_mock.json_data = {"report": "compliance_report"}
- report = self._run_module(ome_default_args)
- assert report["changed"] is False
+ ome_default_args.update(
+ {"baseline": "baseline_one", "device_id": 25011})
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type == HTTPError:
+ mocker.patch(MODULE_PATH + 'compliance_report', side_effect=exc_type(
+ 'https://testhost.com', 401, 'http error message', {
+ "accept-type": "application/json"},
+ StringIO(json_str)))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ elif exc_type == URLError:
+ mocker.patch(MODULE_PATH + 'compliance_report',
+ side_effect=exc_type("exception message"))
+ result = self._run_module(ome_default_args)
+ assert result['unreachable'] is True
+ else:
+ mocker.patch(MODULE_PATH + 'compliance_report',
+ side_effect=exc_type("exception message"))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py
index f92a0abe5..e3f832c59 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_group.py
@@ -17,8 +17,7 @@ import json
from ssl import SSLError
from io import StringIO
from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_group
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \
- AnsibleFailJSonException
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
@@ -32,7 +31,6 @@ INVALID_IP_FORMAT = "The format {0} of the IP address provided is not supported
IP_NOT_EXISTS = "The IP addresses provided do not exist in OpenManage Enterprise."
try:
from netaddr import IPAddress, IPNetwork, IPRange
- from netaddr.core import AddrFormatError
HAS_NETADDR = True
except ImportError:
@@ -67,7 +65,7 @@ class TestOMEDeviceGroup(FakeAnsibleModule):
def test_ome_device_group_get_group_id_case02(self, ome_connection_mock_for_device_group, ome_response_mock):
f_module = self.get_module_mock(params={"group_id": 1234,
"device_ids": [25011], "device_service_tags": []})
- ome_connection_mock_for_device_group.invoke_request.side_effect = HTTPError('http://testhost.com', 400,
+ ome_connection_mock_for_device_group.invoke_request.side_effect = HTTPError('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(to_text(json.dumps(
@@ -195,7 +193,7 @@ class TestOMEDeviceGroup(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_group_id',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py
index bb41b51a3..d9bb6e82d 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_info.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -23,6 +23,7 @@ resource_detailed_inventory = {"detailed_inventory:": {"device_id": {Constants.d
Constants.device_id2: Constants.service_tag1}}}
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+HTTPS_ADDRESS = 'https://testhost.com'
class TestOmeDeviceInfo(FakeAnsibleModule):
@@ -61,13 +62,17 @@ class TestOmeDeviceInfo(FakeAnsibleModule):
validate_device_inputs_mock, ome_connection_mock,
get_device_resource_parameters_mock, ome_response_mock):
quer_param_mock = mocker.patch(MODULE_PATH + 'ome_device_info._get_query_parameters')
- quer_param_mock.return_value = {"filter": "Type eq '1000'"}
- ome_response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]}
+ quer_param_mock.return_value = {"filter": "Type eq 1000"}
+ ome_response_mock.json_data = {
+ "value": [{"device_id1": "details", "device_id2": "details"}],
+ "@odata.context": "/api/$metadata#Collection(DeviceService.Device)",
+ "@odata.count": 2,
+ }
ome_response_mock.status_code = 200
result = self._run_module(ome_default_args)
assert result['changed'] is False
assert 'device_info' in result
- assert result["device_info"] == {"value": [{"device_id1": "details", "device_id2": "details"}]}
+ assert "@odata.context" in result["device_info"]
def test_main_basic_inventory_failure_case(self, ome_default_args, module_mock, validate_device_inputs_mock,
ome_connection_mock,
@@ -108,14 +113,14 @@ class TestOmeDeviceInfo(FakeAnsibleModule):
"device_id": {Constants.device_id1: "DeviceService/Devices(Constants.device_id1)/InventoryDetails"},
"device_service_tag": {Constants.service_tag1: "DeviceService/Devices(4321)/InventoryDetails"}}}
get_device_resource_parameters_mock.return_value = detailed_inventory
- ome_connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, '', {}, None)
+ ome_connection_mock.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400, '', {}, None)
result = self._run_module(ome_default_args)
assert 'device_info' in result
def test_main_HTTPError_error_case(self, ome_default_args, module_mock, validate_device_inputs_mock,
ome_connection_mock,
get_device_resource_parameters_mock, ome_response_mock):
- ome_connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, '', {}, None)
+ ome_connection_mock.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400, '', {}, None)
ome_response_mock.json_data = {"value": [{"device_id1": "details", "device_id2": "details"}]}
ome_response_mock.status_code = 400
result = self._run_module(ome_default_args)
@@ -197,7 +202,7 @@ class TestOmeDeviceInfo(FakeAnsibleModule):
self.module._get_device_id_from_service_tags([Constants.service_tag1, "INVALID"], ome_connection_mock)
def test_get_device_id_from_service_tags_error_case(self, ome_connection_mock, ome_response_mock):
- ome_connection_mock.get_all_report_details.side_effect = HTTPError('http://testhost.com', 400, '', {}, None)
+ ome_connection_mock.get_all_report_details.side_effect = HTTPError(HTTPS_ADDRESS, 400, '', {}, None)
with pytest.raises(HTTPError) as ex:
self.module._get_device_id_from_service_tags(["INVALID"], ome_connection_mock)
@@ -224,7 +229,7 @@ class TestOmeDeviceInfo(FakeAnsibleModule):
error_msg = '400: Bad Request'
service_tag_dict = {}
non_available_tags = [Constants.service_tag2]
- ome_connection_mock.invoke_request.side_effect = HTTPError('http://testhost.com', 400, error_msg, {}, None)
+ ome_connection_mock.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400, error_msg, {}, None)
with pytest.raises(HTTPError, match=error_msg) as ex:
self.module.update_device_details_with_filtering(non_available_tags, service_tag_dict, ome_connection_mock)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py
index 23bae781c..9b92bb3c2 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_local_access_configuration.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -13,18 +13,32 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
-import pytest
-from ssl import SSLError
from io import StringIO
+from ssl import SSLError
+
+import pytest
+from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_local_access_configuration
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_local_access_configuration.'
+CONFIG_FAIL_MSG = "one of the following is required: enable_kvm_access, enable_chassis_direct_access, " \
+ "chassis_power_button, quick_sync, lcd"
+DOMAIN_FAIL_MSG = "The operation to configure the local access is supported only on " \
+ "OpenManage Enterprise Modular."
+FETCH_FAIL_MSG = "Unable to retrieve the device information."
+DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
+LAC_FAIL_MSG = "Unable to complete the operation because the local access configuration settings " \
+ "are not supported on the specified device."
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+SUCCESS_MSG = "Successfully updated the local access settings."
+HTTPS_ADDRESS = 'https://testhost.com'
+HTTP_ERROR_MSG = 'http error message'
+
@pytest.fixture
def ome_conn_mock_lac(mocker, ome_response_mock):
@@ -35,32 +49,214 @@ def ome_conn_mock_lac(mocker, ome_response_mock):
class TestOMEMDevicePower(FakeAnsibleModule):
-
module = ome_device_local_access_configuration
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceServiceTag': 'ABCD123', "Type": 1000},
+ {'PublicAddress': "YY.YY.YY.YY", 'DeviceId': 1235, "Type": 1000}],
+ "SettingType": "LocalAccessConfiguration", "EnableChassisDirect": False,
+ "EnableChassisPowerButton": False, "EnableKvmAccess": True, "EnableLcdOverridePin": False,
+ "LcdAccess": "VIEW_ONLY", "LcdCustomString": "LCD Text", "LcdLanguage": "en",
+ "LcdPresence": "Present", "LcdOverridePin": "123456",
+ "QuickSync": {"QuickSyncAccess": True, "TimeoutLimit": 10, "EnableInactivityTimeout": True,
+ "TimeoutLimitUnit": "MINUTES", "EnableReadAuthentication": True,
+ "EnableQuickSyncWifi": True, "QuickSyncHardware": "Present"}},
+ 'message': "Successfully updated the local access settings.",
+ 'mparams': {"hostname": "XX.XX.XX.XX",
+ "device_service_tag": 'ABCD123',
+ 'enable_kvm_access': True, 'enable_chassis_direct_access': False,
+ 'chassis_power_button':
+ {'enable_chassis_power_button': False, 'enable_lcd_override_pin': True,
+ 'disabled_button_lcd_override_pin': "123456"
+ },
+ 'lcd':
+ {'lcd_access': 'VIEW_AND_MODIFY',
+ 'user_defined': 'LCD Text', 'lcd_language': 'en'},
+ 'quick_sync': {'enable_quick_sync_wifi': True, 'enable_inactivity_timeout': True,
+ 'timeout_limit': 10, 'timeout_limit_unit': 'MINUTES',
+ 'enable_read_authentication': True,
+ 'quick_sync_access': 'READ_WRITE'}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "dummyhostname_shouldnotexist",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "YY.YY.YY.YY", 'DeviceId': 1235, "Type": 1000}],
+ "SettingType": "LocalAccessConfiguration", "EnableChassisDirect": False,
+ "EnableChassisPowerButton": False, "EnableKvmAccess": True, "EnableLcdOverridePin": False,
+ "LcdAccess": "VIEW_ONLY", "LcdCustomString": "LCD Text", "LcdLanguage": "en",
+ "LcdPresence": "Present", "LcdOverridePin": "123456",
+ "QuickSync": {"QuickSyncAccess": True, "TimeoutLimit": 10, "EnableInactivityTimeout": True,
+ "TimeoutLimitUnit": "MINUTES", "EnableReadAuthentication": True,
+ "EnableQuickSyncWifi": True, "QuickSyncHardware": "Present"}},
+ 'message': "Successfully updated the local access settings.",
+ 'mparams': {"hostname": "dummyhostname_shouldnotexist",
+ 'enable_kvm_access': True, 'enable_chassis_direct_access': False,
+ 'chassis_power_button':
+ {'enable_chassis_power_button': False, 'enable_lcd_override_pin': True,
+ 'disabled_button_lcd_override_pin': "123456"
+ },
+ 'lcd':
+ {'lcd_access': 'VIEW_AND_MODIFY',
+ 'user_defined': 'LCD Text', 'lcd_language': 'en'},
+ 'quick_sync': {'enable_quick_sync_wifi': True, 'enable_inactivity_timeout': True,
+ 'timeout_limit': 10, 'timeout_limit_unit': 'MINUTES',
+ 'enable_read_authentication': True,
+ 'quick_sync_access': 'READ_WRITE'}
+ }}
+ ])
+ def test_ome_devices_lac_success(self, params, ome_conn_mock_lac, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "YY.YY.YY.YY", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': DOMAIN_FAIL_MSG,
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }},
+ 'mparams': {"hostname": "XX.XX.XX.XX",
+ "device_service_tag": 'ABCD123',
+ 'enable_kvm_access': True, 'enable_chassis_direct_access': False,
+ 'chassis_power_button':
+ {'enable_chassis_power_button': False, 'enable_lcd_override_pin': True,
+ 'disabled_button_lcd_override_pin': "123456"
+ },
+ 'lcd':
+ {'lcd_access': 'VIEW_AND_MODIFY',
+ 'user_defined': 'LCD Text', 'lcd_language': 'en'},
+ 'quick_sync': {'enable_quick_sync_wifi': True, 'enable_inactivity_timeout': True,
+ 'timeout_limit': 10, 'timeout_limit_unit': 'MINUTES',
+ 'enable_read_authentication': True,
+ 'quick_sync_access': 'READ_WRITE'}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "YY.YY.YY.YY", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': LAC_FAIL_MSG,
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1004",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }},
+ 'check_domain_service': 'mocked_check_domain_service',
+ 'get_chassis_device': ('Id', 1234),
+ 'mparams': {"hostname": "XX.XX.XX.XX",
+ 'enable_kvm_access': True, 'enable_chassis_direct_access': False,
+ 'chassis_power_button':
+ {'enable_chassis_power_button': False, 'enable_lcd_override_pin': True,
+ 'disabled_button_lcd_override_pin': "123456"
+ },
+ 'lcd':
+ {'lcd_access': 'VIEW_AND_MODIFY',
+ 'user_defined': 'LCD Text', 'lcd_language': 'en'},
+ 'quick_sync': {'enable_quick_sync_wifi': True, 'enable_inactivity_timeout': True,
+ 'timeout_limit': 10, 'timeout_limit_unit': 'MINUTES',
+ 'enable_read_authentication': True,
+ 'quick_sync_access': 'READ_WRITE'}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "YY.YY.YY.YY", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Unable to complete the operation because the entered target device id '123' is invalid.",
+ 'mparams': {"hostname": "XX.XX.XX.XX", "device_id": 123,
+ 'enable_kvm_access': True, 'enable_chassis_direct_access': False,
+ 'chassis_power_button':
+ {'enable_chassis_power_button': False, 'enable_lcd_override_pin': True,
+ 'disabled_button_lcd_override_pin': "123456"
+ },
+ 'lcd':
+ {'lcd_access': 'VIEW_AND_MODIFY',
+ 'user_defined': 'LCD Text', 'lcd_language': 'en'},
+ 'quick_sync': {'enable_quick_sync_wifi': True, 'enable_inactivity_timeout': True,
+ 'timeout_limit': 10, 'timeout_limit_unit': 'MINUTES',
+ 'enable_read_authentication': True,
+ 'quick_sync_access': 'READ_WRITE'}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "YY.YY.YY.YY", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': CONFIG_FAIL_MSG,
+ 'mparams': {"hostname": "XX.XX.XX.XX", "device_id": 123}}
+ ])
+ def test_ome_devices_lac_failure(self, params, ome_conn_mock_lac, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ mocks = ["check_domain_service", 'get_chassis_device']
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
+ if 'http_error_json' in params:
+ json_str = to_text(json.dumps(params.get('http_error_json', {})))
+ ome_conn_mock_lac.invoke_request.side_effect = HTTPError(
+ HTTPS_ADDRESS, 401, HTTP_ERROR_MSG, {
+ "accept-type": "application/json"},
+ StringIO(json_str))
+ ome_default_args.update(params['mparams'])
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['msg'] == params['message']
+
def test_check_domain_service(self, ome_conn_mock_lac, ome_default_args):
f_module = self.get_module_mock()
result = self.module.check_domain_service(f_module, ome_conn_mock_lac)
assert result is None
def test_get_chassis_device(self, ome_conn_mock_lac, ome_default_args, mocker, ome_response_mock):
- mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1")
+ mocker.patch(MODULE_PATH + "get_ip_from_host",
+ return_value="X.X.X.X")
ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
- "PublicAddress": ["192.168.1.1"]},
+ "PublicAddress": ["XX.XX.XX.XX"]},
{"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE",
- "PublicAddress": ["192.168.1.2"]}]}
- param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True}
+ "PublicAddress": ["YY.YY.YY.YY"]}]}
+ param = {"device_id": 25012, "hostname": "XX.XX.XX.XX",
+ "enable_kvm_access": True}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
self.module.get_chassis_device(f_module, ome_conn_mock_lac)
assert err.value.args[0] == "Unable to retrieve the device information."
def test_get_ip_from_host(self, ome_conn_mock_lac, ome_default_args, ome_response_mock):
- result = self.module.get_ip_from_host("192.168.0.1")
- assert result == "192.168.0.1"
+ result = self.module.get_ip_from_host("XX.XX.XX.XX")
+ assert result == "XX.XX.XX.XX"
def test_get_device_details(self, ome_conn_mock_lac, ome_default_args, ome_response_mock, mocker):
- param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True}
+ param = {"device_id": 25012, "hostname": "XX.XX.XX.XX",
+ "enable_kvm_access": True}
f_module = self.get_module_mock(params=param)
ome_response_mock.status_code = 200
ome_response_mock.success = True
@@ -72,26 +268,31 @@ class TestOMEMDevicePower(FakeAnsibleModule):
self.module.get_device_details(ome_conn_mock_lac, f_module)
assert err.value.args[0] == "Unable to complete the operation because the entered target " \
"device id '25012' is invalid."
- param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True}
+ param = {"device_id": 25012, "hostname": "XX.XX.XX.XX",
+ "enable_kvm_access": True}
f_module = self.get_module_mock(params=param)
- ome_response_mock.json_data = {"value": [{"Id": 25012, "DeviceServiceTag": "GHRT2RL"}], "EnableKvmAccess": True}
- mocker.patch(MODULE_PATH + 'check_mode_validation', return_value={"EnableKvmAccess": True})
+ ome_response_mock.json_data = {"value": [
+ {"Id": 25012, "DeviceServiceTag": "GHRT2RL"}], "EnableKvmAccess": True}
+ mocker.patch(MODULE_PATH + 'check_mode_validation',
+ return_value={"EnableKvmAccess": True})
resp = self.module.get_device_details(ome_conn_mock_lac, f_module)
assert resp.json_data["EnableKvmAccess"] is True
- param = {"hostname": "192.168.1.6", "enable_kvm_access": True}
+ param = {"hostname": "XX.XX.XX.XX", "enable_kvm_access": True}
f_module = self.get_module_mock(params=param)
- mocker.patch(MODULE_PATH + 'get_chassis_device', return_value=("Id", 25011))
+ mocker.patch(MODULE_PATH + 'get_chassis_device',
+ return_value=("Id", 25011))
resp = self.module.get_device_details(ome_conn_mock_lac, f_module)
assert resp.json_data["EnableKvmAccess"] is True
def test_check_mode_validation(self, ome_conn_mock_lac, ome_default_args, ome_response_mock, mocker):
loc_data = {"EnableKvmAccess": True, "EnableChassisDirect": True, "EnableChassisPowerButton": True,
"EnableLcdOverridePin": True, "LcdAccess": True, "LcdCustomString": "LCD Text",
- "LcdLanguage": "en", "LcdOverridePin": 123456, "LcdPresence": "Present",
+ "LcdLanguage": "en", "LcdOverridePin": "123456", "LcdPresence": "Present",
"QuickSync": {"QuickSyncAccess": True, "TimeoutLimit": 10, "EnableInactivityTimeout": True,
"TimeoutLimitUnit": "MINUTES", "EnableReadAuthentication": True,
"EnableQuickSyncWifi": True, "QuickSyncHardware": "Present"}, }
- param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": True}
+ param = {"device_id": 25012, "hostname": "XX.XX.XX.XX",
+ "enable_kvm_access": True}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
self.module.check_mode_validation(f_module, loc_data)
@@ -100,7 +301,8 @@ class TestOMEMDevicePower(FakeAnsibleModule):
with pytest.raises(Exception) as err:
self.module.check_mode_validation(f_module, loc_data)
assert err.value.args[0] == "No changes found to be applied."
- param = {"device_id": 25012, "hostname": "192.168.1.6", "enable_kvm_access": False}
+ param = {"device_id": 25012, "hostname": "XX.XX.XX.XX",
+ "enable_kvm_access": False}
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
with pytest.raises(Exception) as err:
@@ -114,21 +316,30 @@ class TestOMEMDevicePower(FakeAnsibleModule):
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
def test_ome_device_power_main_exception_case(self, exc_type, mocker, ome_default_args,
ome_conn_mock_lac, ome_response_mock):
- ome_default_args.update({"device_id": 25011, "enable_kvm_access": True})
+ ome_default_args.update(
+ {"device_id": 25011, "enable_kvm_access": True})
ome_response_mock.status_code = 400
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("url open error"))
result = self._run_module(ome_default_args)
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
+ elif exc_type in [HTTPError]:
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type(HTTPS_ADDRESS, 400, HTTP_ERROR_MSG,
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ result = self._run_module(ome_default_args)
+ assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type(HTTPS_ADDRESS, 400, HTTP_ERROR_MSG,
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py
index 8133e0167..40fe1b1a2 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_location.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.3.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -13,16 +13,22 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
-import pytest
-from ssl import SSLError
from io import StringIO
+from ssl import SSLError
+
+import pytest
+from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_location
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_location.'
+PARAM_DATA_CENTER = "data center 1"
+PARAM_ROOM = "room 1"
+PARAM_AISLE = "aisle 1"
+PARAM_RACK = "rack 1"
+PARAM_LOCATION = "location 1"
@pytest.fixture
@@ -34,96 +40,227 @@ def ome_conn_mock_location(mocker, ome_response_mock):
class TestOMEMDeviceLocation(FakeAnsibleModule):
-
module = ome_device_location
def test_check_domain_service(self, ome_conn_mock_location, ome_default_args, mocker):
f_module = self.get_module_mock()
- result = self.module.check_domain_service(f_module, ome_conn_mock_location)
+ result = self.module.check_domain_service(
+ f_module, ome_conn_mock_location)
assert result is None
def test_standalone_chassis(self, ome_conn_mock_location, ome_default_args, mocker, ome_response_mock):
- mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1")
+ mocker.patch(MODULE_PATH + "get_ip_from_host",
+ return_value="X.X.X.X")
ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
- "PublicAddress": ["192.168.1.1"]},
+ "PublicAddress": ["XX.XX.XX.XX"]},
{"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE",
- "PublicAddress": ["192.168.1.2"]}]}
+ "PublicAddress": ["YY.YY.YY.YY"]}]}
- param = {"data_center": "data center 1", "rack_slot": 2, "device_id": 25012, "hostname": "192.168.1.6",
- "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", "location": "location 1"}
+ param = {"data_center": PARAM_DATA_CENTER, "rack_slot": 2, "device_id": 25012, "hostname": "XY.XY.XY.XY",
+ "room": PARAM_ROOM, "aisle": PARAM_AISLE, "rack": PARAM_RACK, "location": PARAM_LOCATION}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
self.module.standalone_chassis(f_module, ome_conn_mock_location)
assert err.value.args[0] == "Failed to fetch the device information."
def test_validate_dictionary(self, ome_conn_mock_location, ome_default_args, mocker):
- param = {"data_center": "data center 1", "rack_slot": 2,
- "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", "location": "location 1"}
+ param = {"data_center": PARAM_DATA_CENTER, "rack_slot": 2,
+ "room": PARAM_ROOM, "aisle": PARAM_AISLE, "rack": PARAM_RACK, "location": PARAM_LOCATION}
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
- loc_resp = {"DataCenter": "data center 1", "RackSlot": 2, "Room": "room 1",
- "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"}
+ loc_resp = {"DataCenter": PARAM_DATA_CENTER, "RackSlot": 2, "Room": PARAM_ROOM,
+ "Aisle": PARAM_AISLE, "RackName": PARAM_RACK, "Location": PARAM_LOCATION}
with pytest.raises(Exception) as err:
self.module.validate_dictionary(f_module, loc_resp)
- loc_resp = {"DataCenter": "data center 1", "RackSlot": 3, "Room": "room 1",
- "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"}
+ loc_resp = {"DataCenter": PARAM_DATA_CENTER, "RackSlot": 3, "Room": PARAM_ROOM,
+ "Aisle": PARAM_AISLE, "RackName": PARAM_RACK, "Location": PARAM_LOCATION}
with pytest.raises(Exception) as err:
self.module.validate_dictionary(f_module, loc_resp)
assert err.value.args[0] == "Changes found to be applied."
- loc_resp = {"DataCenter": "data center 1", "RackSlot": 2, "Room": "room 1",
- "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"}
+ loc_resp = {"DataCenter": PARAM_DATA_CENTER, "RackSlot": 2, "Room": PARAM_ROOM,
+ "Aisle": PARAM_AISLE, "RackName": PARAM_RACK, "Location": PARAM_LOCATION}
f_module.check_mode = False
with pytest.raises(Exception) as err:
self.module.validate_dictionary(f_module, loc_resp)
assert err.value.args[0] == "No changes found to be applied."
- loc_resp = {"DataCenter": "data center 1", "RackSlot": 3, "Room": "room 1",
- "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1"}
+ loc_resp = {"DataCenter": PARAM_DATA_CENTER, "RackSlot": 3, "Room": PARAM_ROOM,
+ "Aisle": PARAM_AISLE, "RackName": PARAM_RACK, "Location": PARAM_LOCATION}
result = self.module.validate_dictionary(f_module, loc_resp)
- assert result == {"DataCenter": "data center 1", "RackSlot": 2,
- "Room": "room 1", "Aisle": "aisle 1", "RackName": "rack 1",
- "Location": "location 1", "SettingType": "Location"}
+ assert result == {"DataCenter": PARAM_DATA_CENTER, "RackSlot": 2,
+ "Room": PARAM_ROOM, "Aisle": PARAM_AISLE, "RackName": PARAM_RACK,
+ "Location": PARAM_LOCATION, "SettingType": "Location"}
def test_device_validation(self, ome_conn_mock_location, ome_default_args, mocker, ome_response_mock):
mocker.patch(MODULE_PATH + "validate_dictionary",
- return_value={"DataCenter": "data center 1", "RackSlot": 2, "Room": "room 1",
- "Aisle": "aisle 1", "RackName": "rack 1", "Location": "location 1",
+ return_value={"DataCenter": PARAM_DATA_CENTER, "RackSlot": 2, "Room": PARAM_ROOM,
+ "Aisle": PARAM_AISLE, "RackName": PARAM_RACK, "Location": PARAM_LOCATION,
"SettingType": "Location"})
- param = {"data_center": "data center 1", "rack_slot": 2, "device_id": 25012,
- "room": "room 1", "aisle": "aisle 1", "rack": "rack 1", "location": "location 1"}
+ param = {"data_center": PARAM_DATA_CENTER, "rack_slot": 2, "device_id": 25012,
+ "room": PARAM_ROOM, "aisle": PARAM_AISLE, "rack": PARAM_RACK, "location": PARAM_LOCATION}
ome_default_args.update(param)
f_module = self.get_module_mock(params=param)
ome_response_mock.status_code = 200
ome_response_mock.success = True
ome_response_mock.json_data = {
- "value": [], "DataCenter": "data center 1",
- "RackSlot": 3, "Room": "room 1", "Aisle": "aisle 1", "RackName": "rack 1",
- "Location": "location 1", "SettingType": "Location", "result": {"RackSlot": 4}}
+ "value": [], "DataCenter": PARAM_DATA_CENTER,
+ "RackSlot": 3, "Room": PARAM_ROOM, "Aisle": PARAM_AISLE, "RackName": PARAM_RACK,
+ "Location": PARAM_LOCATION, "SettingType": "Location", "result": {"RackSlot": 4}}
with pytest.raises(Exception) as err:
self.module.device_validation(f_module, ome_conn_mock_location)
assert err.value.args[0] == "Unable to complete the operation because the entered target " \
"device id '25012' is invalid."
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Successfully updated the location settings.",
+ 'mparams': {"hostname": "1.2.3.4",
+ "device_id": 1234, "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ },
+ {"json_data": {"value": [
+ {'Id': 1234, 'DeviceServiceTag': 'ABCD123',
+ 'PublicAddress': "1.2.3.4", 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Successfully updated the location settings.",
+ 'mparams': {"hostname": "1.2.3.4",
+ "device_service_tag": "ABCD123", "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ },
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Successfully updated the location settings.",
+ 'mparams': {"hostname": "1.2.3.4",
+ "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ },
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "dummyhost_shouldnotexist",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Successfully updated the location settings.",
+ 'mparams': {"hostname": "dummyhost_shouldnotexist",
+ "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ }
+ ])
+ def test_ome_devices_location_success(self, params, ome_conn_mock_location, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "The device location settings operation is supported only on OpenManage Enterprise Modular systems.",
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ },
+ 'mparams': {"hostname": "1.2.3.4",
+ "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ },
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Unable to complete the operation because the location settings are not supported on the specified device.",
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1004",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ },
+ 'check_domain_service': 'mocked_check_domain_service',
+ 'standalone_chassis': ('Id', 1234),
+ 'mparams': {"hostname": "1.2.3.4",
+ "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ },
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': "Unable to complete the operation because the entered target device id '123' is invalid.",
+ 'mparams': {"hostname": "1.2.3.4", "device_id": 123,
+ "data_center": "data center",
+ "room": "room", "aisle": "aisle", "rack": "rack"}
+ },
+ ])
+ def test_ome_devices_location_failure(self, params, ome_conn_mock_location, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ mocks = ["check_domain_service", "standalone_chassis"]
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
+ if 'http_error_json' in params:
+ json_str = to_text(json.dumps(params.get('http_error_json', {})))
+ ome_conn_mock_location.invoke_request.side_effect = HTTPError(
+ 'https://testhost.com', 401, 'http error message', {
+ "accept-type": "application/json"},
+ StringIO(json_str))
+ ome_default_args.update(params['mparams'])
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['msg'] == params['message']
+
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
def test_ome_device_location_main_exception_case(self, exc_type, mocker, ome_default_args,
ome_conn_mock_location, ome_response_mock):
- ome_default_args.update({"device_id": 25011, "data_center": "data center 1",
- "room": "room 1", "aisle": "aisle 1", "rack": "rack 1",
- "rack_slot": "2", "location": "location 1"})
+ ome_default_args.update({"device_id": 25011, "data_center": PARAM_DATA_CENTER,
+ "room": PARAM_ROOM, "aisle": PARAM_AISLE, "rack": PARAM_RACK,
+ "rack_slot": "2", "location": PARAM_LOCATION})
ome_response_mock.status_code = 400
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("url open error"))
result = self._run_module(ome_default_args)
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py
index 692061430..004586393 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_mgmt_network.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.2.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -401,7 +401,7 @@ class TestOmeDeviceMgmtNetwork(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'validate_input',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py
index 0a68ac9d4..b3e258ffe 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_network_services.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -13,7 +13,6 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
-import pdb
import pytest
from ssl import SSLError
@@ -22,8 +21,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_network_services
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_network_services.'
@@ -48,7 +46,7 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
def test_check_domain_service_http(self, ome_conn_mock_network, ome_default_args, mocker):
f_module = self.get_module_mock()
err_message = {'error': {'@Message.ExtendedInfo': [{'MessageId': 'CGEN1006'}]}}
- ome_conn_mock_network.invoke_request.side_effect = HTTPError('http://testhost.com', 400,
+ ome_conn_mock_network.invoke_request.side_effect = HTTPError('https://testhost.com', 400,
json.dumps(err_message),
{"accept-type": "application/json"}, None)
mocker.patch(MODULE_PATH + 'json.loads', return_value=err_message)
@@ -58,19 +56,19 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
"OpenManage Enterprise Modular."
def test_get_chassis_device(self, ome_conn_mock_network, ome_default_args, mocker, ome_response_mock):
- mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1")
+ mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="X.X.X.X")
ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
- "PublicAddress": ["192.168.1.1"]},
+ "PublicAddress": ["XX.XX.XX.XX"]},
{"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE",
- "PublicAddress": ["192.168.1.2"]}]}
- param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}}
+ "PublicAddress": ["YY.YY.YY.YY"]}]}
+ param = {"device_id": 25012, "hostname": "Y.Y.Y.Y", "remote_racadm_settings": {"enabled": True}}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
self.module.get_chassis_device(f_module, ome_conn_mock_network)
assert err.value.args[0] == "Failed to retrieve the device information."
ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
- "PublicAddress": ["192.18.1.1"]}]}
- param = {"hostname": "192.18.1.1", "remote_racadm_settings": {"enabled": True}}
+ "PublicAddress": ["X.X.X.X"]}]}
+ param = {"hostname": "X.X.X.X", "remote_racadm_settings": {"enabled": True}}
f_module = self.get_module_mock(params=param)
key, value = self.module.get_chassis_device(f_module, ome_conn_mock_network)
assert key == "Id"
@@ -88,7 +86,7 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
"SnmpV1V2Credential": {"CommunityName": "public"}},
"SshConfiguration": {"IdleTimeout": 60, "MaxAuthRetries": 3, "MaxSessions": 1,
"PortNumber": 22, "SshEnabled": False}}
- ome_default_args.update({"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True},
+ ome_default_args.update({"device_id": 25012, "hostname": "Y.Y.Y.Y", "remote_racadm_settings": {"enabled": True},
"snmp_settings": {"enabled": True, "port_number": 161, "community_name": "public"},
"ssh_settings": {"enabled": True, "port_number": 22, "max_sessions": 1,
"max_auth_retries": 3, "idle_timeout": 60}})
@@ -96,7 +94,7 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
assert resp['msg'] == "Successfully updated the network services settings."
def test_fetch_device_details(self, ome_conn_mock_network, ome_default_args, ome_response_mock, mocker):
- param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}}
+ param = {"device_id": 25012, "hostname": "Y.Y.Y.Y", "remote_racadm_settings": {"enabled": True}}
f_module = self.get_module_mock(params=param)
ome_response_mock.status_code = 200
ome_response_mock.success = True
@@ -115,18 +113,18 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
"EnableRemoteRacadm": True, "SnmpConfiguration": {}, "SshConfiguration": {}}
resp = self.module.fetch_device_details(f_module, ome_conn_mock_network)
assert resp.json_data["SnmpConfiguration"] == {}
- param = {"hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True}}
+ param = {"hostname": "Y.Y.Y.Y", "remote_racadm_settings": {"enabled": True}}
f_module = self.get_module_mock(params=param)
mocker.patch(MODULE_PATH + "get_chassis_device", return_value=("Id", "25012"))
resp = self.module.fetch_device_details(f_module, ome_conn_mock_network)
assert resp.json_data["SnmpConfiguration"] == {}
def test_get_ip_from_host(self, ome_conn_mock_network, ome_default_args, ome_response_mock):
- result = self.module.get_ip_from_host("192.168.0.1")
- assert result == "192.168.0.1"
+ result = self.module.get_ip_from_host("ZZ.ZZ.ZZ.ZZ")
+ assert result == "ZZ.ZZ.ZZ.ZZ"
def test_check_mode_validation(self, ome_conn_mock_network, ome_default_args, ome_response_mock):
- param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": True},
+ param = {"device_id": 25012, "hostname": "Y.Y.Y.Y", "remote_racadm_settings": {"enabled": True},
"snmp_settings": {"enabled": True, "port_number": 161, "community_name": "public"},
"ssh_settings": {"enabled": True, "port_number": 22, "max_sessions": 1,
"max_auth_retries": 3, "idle_timeout": 120}}
@@ -152,7 +150,7 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
with pytest.raises(Exception) as err:
self.module.check_mode_validation(f_module, loc_data, ome_conn_mock_network)
assert err.value.args[0] == "Changes found to be applied."
- param = {"device_id": 25012, "hostname": "192.168.1.6", "remote_racadm_settings": {"enabled": False},
+ param = {"device_id": 25012, "hostname": "Y.Y.Y.Y", "remote_racadm_settings": {"enabled": False},
"snmp_settings": {"enabled": False, "port_number": 161, "community_name": "public"},
"ssh_settings": {"enabled": False, "port_number": 22, "max_sessions": 1,
"max_auth_retries": 3, "idle_timeout": 60}}
@@ -178,7 +176,7 @@ class TestOMEMDeviceNetworkService(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py
index 928c407c3..553a57369 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_power_settings.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,11 +20,22 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_power_settings
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
-from mock import MagicMock, patch, Mock
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_power_settings.'
+DEVICE_FAIL_MSG = "Unable to complete the operation because the entered target device {0} '{1}' is invalid."
+CONFIG_FAIL_MSG = "one of the following is required: power_configuration, " \
+ "redundancy_configuration, hot_spare_configuration"
+CHANGES_FOUND = "Changes found to be applied."
+NO_CHANGES_FOUND = "No changes found to be applied."
+SUCCESS_MSG = "Successfully updated the power settings."
+FETCH_FAIL_MSG = "Failed to fetch the device information."
+POWER_FAIL_MSG = "Unable to complete the operation because the power settings " \
+ "are not supported on the specified device."
+DOMAIN_FAIL_MSG = "The device location settings operation is supported only on " \
+ "OpenManage Enterprise Modular."
+
@pytest.fixture
def ome_conn_mock_power(mocker, ome_response_mock):
@@ -38,18 +49,212 @@ class TestOMEMDevicePower(FakeAnsibleModule):
module = ome_device_power_settings
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceServiceTag': 'ABCD123', "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}],
+ "EnableHotSpare": True,
+ "EnablePowerCapSettings": True,
+ "MaxPowerCap": "3424",
+ "MinPowerCap": "3291",
+ "PowerCap": "3425",
+ "PrimaryGrid": "GRID_1",
+ "RedundancyPolicy": "NO_REDUNDANCY",
+ "SettingType": "Power"},
+ 'message': SUCCESS_MSG,
+ 'mparams': {"hostname": "1.2.3.4",
+ "power_configuration": {"enable_power_cap": True, "power_cap": 3424},
+ "hot_spare_configuration": {"enable_hot_spare": False, "primary_grid": "GRID_1"},
+ "device_id": 1234,
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceServiceTag': 'ABCD123', "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}],
+ "EnableHotSpare": True,
+ "EnablePowerCapSettings": True,
+ "MaxPowerCap": "3424",
+ "MinPowerCap": "3291",
+ "PowerCap": "3425",
+ "PrimaryGrid": "GRID_1",
+ "RedundancyPolicy": "NO_REDUNDANCY",
+ "SettingType": "Power"},
+ 'message': SUCCESS_MSG,
+ 'mparams': {"hostname": "1.2.3.4",
+ "power_configuration": {"enable_power_cap": False, "power_cap": 3424},
+ "hot_spare_configuration": {"enable_hot_spare": True, "primary_grid": "GRID_1"},
+ "device_service_tag": 'ABCD123',
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}],
+ "EnableHotSpare": True,
+ "EnablePowerCapSettings": True,
+ "MaxPowerCap": "3424",
+ "MinPowerCap": "3291",
+ "PowerCap": "3425",
+ "PrimaryGrid": "GRID_1",
+ "RedundancyPolicy": "NO_REDUNDANCY",
+ "SettingType": "Power"},
+ 'message': SUCCESS_MSG,
+ 'mparams': {"hostname": "1.2.3.4",
+ "power_configuration": {"enable_power_cap": False, "power_cap": 3424},
+ "hot_spare_configuration": {"enable_hot_spare": True, "primary_grid": "GRID_1"}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "dummyhostname_shouldnotexist",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}],
+ "EnableHotSpare": True,
+ "EnablePowerCapSettings": True,
+ "MaxPowerCap": "3424",
+ "MinPowerCap": "3291",
+ "PowerCap": "3425",
+ "PrimaryGrid": "GRID_1",
+ "RedundancyPolicy": "NO_REDUNDANCY",
+ "SettingType": "Power"},
+ 'message': SUCCESS_MSG,
+ 'mparams': {"hostname": "dummyhostname_shouldnotexist",
+ "power_configuration": {"enable_power_cap": False, "power_cap": 3424},
+ "hot_spare_configuration": {"enable_hot_spare": True, "primary_grid": "GRID_1"}
+ }}
+ ])
+ def test_ome_devices_power_settings_success(self, params, ome_conn_mock_power, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': DOMAIN_FAIL_MSG,
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }},
+ 'mparams': {"hostname": "1.2.3.4",
+ "device_service_tag": 'ABCD123',
+ "power_configuration": {"enable_power_cap": True, "power_cap": 3424}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': POWER_FAIL_MSG,
+ 'check_domain_service': 'mocked_check_domain_service',
+ 'get_chassis_device': ('Id', 1234),
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1004",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }},
+ 'mparams': {"hostname": "1.2.3.4",
+ "power_configuration": {"enable_power_cap": True, "power_cap": 3424}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': POWER_FAIL_MSG,
+ 'check_domain_service': 'mocked_check_domain_service',
+ 'get_chassis_device': ('Id', 1234),
+ 'http_err_code': 404,
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1004",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }},
+ 'mparams': {"hostname": "1.2.3.4",
+ "power_configuration": {"enable_power_cap": True, "power_cap": 3424}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': DEVICE_FAIL_MSG.format('id', 123),
+ 'check_domain_service': 'mocked_check_domain_service',
+ 'get_chassis_device': ('Id', 1234),
+ 'mparams': {"hostname": "1.2.3.4", 'device_id': 123,
+ "power_configuration": {"enable_power_cap": True, "power_cap": 3424}
+ }},
+ {"json_data": {"value": [
+ {'Id': 1234, 'PublicAddress': "1.2.3.4",
+ 'DeviceId': 1234, "Type": 1000},
+ {'PublicAddress': "1.2.3.5", 'DeviceId': 1235, "Type": 1000}]},
+ 'message': CONFIG_FAIL_MSG,
+ 'mparams': {"hostname": "1.2.3.4", "device_id": 123}}
+ ])
+ def test_ome_devices_power_settings_failure(self, params, ome_conn_mock_power, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ mocks = ["check_domain_service", 'get_chassis_device']
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
+ if 'http_error_json' in params:
+ json_str = to_text(json.dumps(params.get('http_error_json', {})))
+ ome_conn_mock_power.invoke_request.side_effect = HTTPError(
+ 'https://testhost.com', params.get('http_err_code', 401), 'http error message', {
+ "accept-type": "application/json"},
+ StringIO(json_str))
+ ome_default_args.update(params['mparams'])
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['msg'] == params['message']
+
def test_check_domain_service(self, ome_conn_mock_power, ome_default_args):
f_module = self.get_module_mock()
- result = self.module.check_domain_service(f_module, ome_conn_mock_power)
+ result = self.module.check_domain_service(
+ f_module, ome_conn_mock_power)
assert result is None
def test_get_chassis_device(self, ome_conn_mock_power, ome_default_args, mocker, ome_response_mock):
- mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1")
+ mocker.patch(MODULE_PATH + "get_ip_from_host",
+ return_value="X.X.X.X")
ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
- "PublicAddress": ["192.168.1.1"]},
+ "PublicAddress": ["XX.XX.XX.XX"]},
{"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE",
- "PublicAddress": ["192.168.1.2"]}]}
- param = {"device_id": 25012, "hostname": "192.168.1.6",
+ "PublicAddress": ["YY.YY.YY.YY"]}]}
+ param = {"device_id": 25012, "hostname": "Y.Y.Y.Y",
"power_configuration": {"enable_power_cap": True, "power_cap": 3424}}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
@@ -60,7 +265,8 @@ class TestOMEMDevicePower(FakeAnsibleModule):
loc_data = {"PowerCap": "3424", "MinPowerCap": "3291", "MaxPowerCap": "3424",
"RedundancyPolicy": "NO_REDUNDANCY", "EnablePowerCapSettings": True,
"EnableHotSpare": True, "PrimaryGrid": "GRID_1", "PowerBudgetOverride": False}
- param = {"power_configuration": {"enable_power_cap": True, "power_cap": 3424}}
+ param = {"power_configuration": {
+ "enable_power_cap": True, "power_cap": 3424}}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
self.module.check_mode_validation(f_module, loc_data)
@@ -70,7 +276,8 @@ class TestOMEMDevicePower(FakeAnsibleModule):
with pytest.raises(Exception) as err:
self.module.check_mode_validation(f_module, loc_data)
assert err.value.args[0] == "Changes found to be applied."
- param = {"redundancy_configuration": {"redundancy_policy": "NO_REDUNDANCY"}}
+ param = {"redundancy_configuration": {
+ "redundancy_policy": "NO_REDUNDANCY"}}
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
with pytest.raises(Exception) as err:
@@ -78,7 +285,7 @@ class TestOMEMDevicePower(FakeAnsibleModule):
assert err.value.args[0] == "No changes found to be applied."
def test_fetch_device_details(self, ome_conn_mock_power, ome_default_args, ome_response_mock):
- param = {"device_id": 25012, "hostname": "192.168.1.6",
+ param = {"device_id": 25012, "hostname": "Y.Y.Y.Y",
"power_configuration": {"enable_power_cap": True, "power_cap": 3424}}
f_module = self.get_module_mock(params=param)
ome_response_mock.status_code = 200
@@ -93,8 +300,8 @@ class TestOMEMDevicePower(FakeAnsibleModule):
"device id '25012' is invalid."
def test_get_ip_from_host(self, ome_conn_mock_power, ome_default_args, ome_response_mock):
- result = self.module.get_ip_from_host("192.168.0.1")
- assert result == "192.168.0.1"
+ result = self.module.get_ip_from_host("ZZ.ZZ.ZZ.ZZ")
+ assert result == "ZZ.ZZ.ZZ.ZZ"
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
@@ -106,16 +313,18 @@ class TestOMEMDevicePower(FakeAnsibleModule):
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("url open error"))
result = self._run_module(ome_default_args)
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py
index 97b611cee..60b8c17cc 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_device_quick_deploy.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.0.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.3.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,9 +20,12 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_device_quick_deploy
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_device_quick_deploy.'
+ACCESS_TYPE = "application/json"
+HTTP_ADDRESS = 'https://testhost.com'
+HTTP_ERROR_MSG = 'http error message'
@pytest.fixture
@@ -42,51 +45,72 @@ class TestOMEMDevicePower(FakeAnsibleModule):
result = self.module.check_domain_service(f_module, ome_conn_mock_qd)
assert result is None
+ @pytest.mark.parametrize("exc_type", [HTTPError])
+ def test_check_domain_service_http(self, exc_type, ome_conn_mock_qd, ome_default_args):
+ f_module = self.get_module_mock()
+ json_str = to_text(json.dumps({"error": {"@Message.ExtendedInfo": [{"MessageId": "CGEN1006"}]}}))
+ if exc_type == HTTPError:
+ ome_conn_mock_qd.invoke_request.side_effect = exc_type(
+ HTTP_ADDRESS, 400, HTTP_ERROR_MSG, {"accept-type": ACCESS_TYPE},
+ StringIO(json_str)
+ )
+ with pytest.raises(Exception) as err:
+ self.module.check_domain_service(f_module, ome_conn_mock_qd)
+ assert err.value.args[0] == "The operation to configure the Quick Deploy settings is supported only " \
+ "on OpenManage Enterprise Modular."
+
def test_get_chassis_device(self, ome_conn_mock_qd, ome_default_args, mocker, ome_response_mock):
- mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="192.18.1.1")
+ mocker.patch(MODULE_PATH + "get_ip_from_host", return_value="X.X.X.X")
ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
- "PublicAddress": ["192.168.1.1"]},
+ "PublicAddress": ["ZZ.ZZ.ZZ.ZZ"]},
{"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE",
- "PublicAddress": ["192.168.1.2"]}]}
- param = {"device_id": 25012, "hostname": "192.168.1.6"}
+ "PublicAddress": ["ZX.ZX.ZX.ZX"]}]}
+ param = {"device_id": 25012, "hostname": "XY.XY.XY.XY"}
f_module = self.get_module_mock(params=param)
with pytest.raises(Exception) as err:
self.module.get_chassis_device(f_module, ome_conn_mock_qd)
assert err.value.args[0] == "Unable to retrieve the device information."
+ ome_response_mock.json_data = {"value": [{"DeviceId": 25011, "DomainRoleTypeValue": "LEAD",
+ "PublicAddress": ["ZZ.ZZ.ZZ.ZZ"]},
+ {"DeviceId": 25012, "DomainRoleTypeValue": "STANDALONE",
+ "PublicAddress": ["X.X.X.X"]}]}
+ result = self.module.get_chassis_device(f_module, ome_conn_mock_qd)
+ assert result[0] == "Id"
+ assert result[1] == 25012
def test_get_ip_from_host(self, ome_conn_mock_qd, ome_default_args, ome_response_mock):
- result = self.module.get_ip_from_host("192.168.0.1")
- assert result == "192.168.0.1"
+ result = self.module.get_ip_from_host("XX.XX.XX.XX")
+ assert result == "XX.XX.XX.XX"
def test_validate_ip_address(self, ome_conn_mock_qd, ome_response_mock, ome_default_args):
result = self.module.validate_ip_address("192.168.0.1", "IPV4")
assert result is True
- result = self.module.validate_ip_address("192.168.0.1.1", "IPV4")
+ result = self.module.validate_ip_address("XX.XX.XX.XX.1", "IPV4")
assert result is False
result = self.module.validate_ip_address("::", "IPV6")
assert result is True
def test_ip_address_field(self, ome_conn_mock_qd, ome_response_mock, ome_default_args, mocker):
param = {"device_id": 25011, "setting_type": "ServerQuickDeploy",
- "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "192.168.0.1",
+ "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "XX.XX.XX.XX",
"ipv4_gateway": "0.0.0.0.0"}, "slots": [{"vlan_id": 1}]}
fields = [("ipv4_subnet_mask", "IPV4"), ("ipv4_gateway", "IPV4"), ("ipv6_gateway", "IPV6")]
f_module = self.get_module_mock(params=param)
mocker.patch(MODULE_PATH + "validate_ip_address", return_value=False)
with pytest.raises(Exception) as err:
self.module.ip_address_field(f_module, fields, param["quick_deploy_options"], slot=False)
- assert err.value.args[0] == "Invalid '192.168.0.1' address provided for the ipv4_subnet_mask."
+ assert err.value.args[0] == "Invalid 'XX.XX.XX.XX' address provided for the ipv4_subnet_mask."
def test_get_device_details(self, ome_conn_mock_qd, ome_response_mock, ome_default_args, mocker):
- param = {"device_id": 25012, "hostname": "192.168.1.6", "setting_type": "ServerQuickDeploy",
- "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "192.168.0.1",
+ param = {"device_id": 25012, "hostname": "XY.XY.XY.XY", "setting_type": "ServerQuickDeploy",
+ "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "XX.XX.XX.XX",
"ipv4_gateway": "0.0.0.0"}, "slots": [{"vlan_id": 1}]}
f_module = self.get_module_mock(params=param)
ome_response_mock.status_code = 200
ome_response_mock.success = True
ome_response_mock.json_data = {"value": [], "SettingType": "ServerQuickDeploy",
"ProtocolTypeV4": "true", "NetworkTypeV4": "Static",
- "IpV4Gateway": "192.168.0.1", "IpV4SubnetMask": "255.255.255.0"}
+ "IpV4Gateway": "XX.XX.XX.XX", "IpV4SubnetMask": "XXX.XXX.XXX.XXX"}
mocker.patch(MODULE_PATH + 'get_chassis_device', return_value=("Id", 25011))
mocker.patch(MODULE_PATH + "check_mode_validation", return_value=({}, {}))
mocker.patch(MODULE_PATH + "job_payload_submission", return_value=12345)
@@ -99,38 +123,77 @@ class TestOMEMDevicePower(FakeAnsibleModule):
f_module = self.get_module_mock(params=param)
result = self.module.get_device_details(ome_conn_mock_qd, f_module)
assert result == (12345, None)
- param.update({"job_wait": True})
+ param.update({"job_wait": True, "job_wait_timeout": 60})
+ ome_conn_mock_qd.job_tracking.return_value = (True, "error message")
+ with pytest.raises(Exception) as err:
+ self.module.get_device_details(ome_conn_mock_qd, f_module)
+ assert err.value.args[0] == "Unable to deploy the Quick Deploy settings."
+ ome_conn_mock_qd.job_tracking.return_value = (False, "error message")
+ result = self.module.get_device_details(ome_conn_mock_qd, f_module)
+ assert result[0] == 12345
+
+ @pytest.mark.parametrize("exc_type", [HTTPError])
+ def test_get_device_details_http(self, exc_type, ome_conn_mock_qd, ome_response_mock, ome_default_args, mocker):
+ param = {"hostname": "XY.XY.XY.XY", "setting_type": "ServerQuickDeploy",
+ "quick_deploy_options": {"ipv4_enabled": False, "ipv4_subnet_mask": "XX.XX.XX.XX",
+ "ipv4_gateway": "0.0.0.0"}, "slots": [{"vlan_id": 1}]}
+ mocker.patch(MODULE_PATH + 'get_chassis_device', return_value=("Id", 25011))
+ json_str = to_text(json.dumps({"error": {"@Message.ExtendedInfo": [{"MessageId": "CGEN1004"}]}}))
+ if exc_type == HTTPError:
+ ome_conn_mock_qd.invoke_request.side_effect = exc_type(
+ HTTP_ADDRESS, 400, HTTP_ERROR_MSG, {"accept-type": ACCESS_TYPE},
+ StringIO(json_str)
+ )
+ f_module = self.get_module_mock(params=param)
+ with pytest.raises(Exception) as err:
+ self.module.get_device_details(ome_conn_mock_qd, f_module)
+ assert err.value.args[0] == "Unable to complete the operation because the Server Quick Deploy configuration " \
+ "settings are not supported on the specified device."
def test_job_payload_submission(self, ome_conn_mock_qd, ome_response_mock, ome_default_args):
ome_response_mock.status_code = 200
ome_response_mock.success = True
ome_response_mock.json_data = {"Id": 12345}
ome_conn_mock_qd.job_submission.return_value = ome_response_mock
- payload = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "255.255.255.0",
+ payload = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "XXX.XXX.XXX.XXX",
"IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static",
"PrefixLength": "1", "IpV6Gateway": "0.0.0.0"}
- slot_payload = [{"SlotId": 1, "IPV4Address": "192.168.0.2", "IPV6Address": "::", "VlanId": 1}]
+ slot_payload = [{"SlotId": 1, "IPV4Address": "YY.YY.YY.YY", "IPV6Address": "::", "VlanId": 1}]
+ resp_data = {"Slots": [
+ {"SlotId": 1, "IPV4Address": "YY.YY.YY.YY", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False},
+ {"SlotId": 2, "IPV4Address": "YY.YY.YY.YY", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False},
+ ]}
+ result = self.module.job_payload_submission(ome_conn_mock_qd, payload, slot_payload,
+ "ServerQuickDeploy", 25012, resp_data)
+ assert result == 12345
+
+ payload = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "XXX.XXX.XXX.XXX",
+ "IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static",
+ "PrefixLength": "1", "IpV6Gateway": "0.0.0.0", "rootCredential": "secret"}
+ slot_payload = [{"SlotId": 1, "IPV4Address": "YY.YY.YY.YY", "IPV6Address": "::", "VlanId": 1}]
resp_data = {"Slots": [
- {"SlotId": 1, "IPV4Address": "192.168.0.2", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False},
- {"SlotId": 1, "IPV4Address": "192.168.0.2", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False},
+ {"SlotId": 1, "SlotIPV4Address": "YY.YY.YY.YY", "IPV4Address": "YY.YY.YY.YY", "IPV6Address": "::",
+ "VlanId": 1, "SlotSelected": False, "SlotIPV6Address": "::"},
+ {"SlotId": 2, "IPV4Address": "YY.YY.YY.YY", "IPV6Address": "::", "VlanId": 1, "SlotSelected": False,
+ "SlotIPV4Address": "YY.YY.YY.YY", "SlotIPV6Address": "::"},
]}
result = self.module.job_payload_submission(ome_conn_mock_qd, payload, slot_payload,
"ServerQuickDeploy", 25012, resp_data)
assert result == 12345
def test_check_mode_validation(self, ome_conn_mock_qd, ome_response_mock, ome_default_args):
- param = {"device_id": 25012, "hostname": "192.168.1.6", "setting_type": "ServerQuickDeploy",
+ param = {"device_id": 25012, "hostname": "XY.XY.XY.XY", "setting_type": "ServerQuickDeploy",
"quick_deploy_options": {
- "ipv4_enabled": True, "ipv4_network_type": "Static", "ipv4_subnet_mask": "255.255.255.0",
+ "ipv4_enabled": True, "ipv4_network_type": "Static", "ipv4_subnet_mask": "XXX.XXX.XXX.XXX",
"ipv4_gateway": "0.0.0.0", "ipv6_enabled": True, "ipv6_network_type": "Static",
"ipv6_prefix_length": "1", "ipv6_gateway": "0.0.0.0",
- "slots": [{"slot_id": 1, "slot_ipv4_address": "192.168.0.1",
+ "slots": [{"slot_id": 1, "slot_ipv4_address": "XX.XX.XX.XX",
"slot_ipv6_address": "::", "vlan_id": "1"}]}}
f_module = self.get_module_mock(params=param)
- deploy_data = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "255.255.255.0",
+ deploy_data = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "XXX.XXX.XXX.XXX",
"IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static",
"PrefixLength": "1", "IpV6Gateway": "0.0.0.0",
- "Slots": [{"SlotId": 1, "SlotIPV4Address": "192.168.0.1", "SlotIPV6Address": "::", "VlanId": "1"}]}
+ "Slots": [{"SlotId": 1, "SlotIPV4Address": "XX.XX.XX.XX", "SlotIPV6Address": "::", "VlanId": "1"}]}
with pytest.raises(Exception) as err:
self.module.check_mode_validation(f_module, deploy_data)
assert err.value.args[0] == "No changes found to be applied."
@@ -145,6 +208,48 @@ class TestOMEMDevicePower(FakeAnsibleModule):
f_module.check_mode = False
result = self.module.check_mode_validation(f_module, deploy_data)
assert result[0]["NetworkTypeV4"] == "Static"
+ param["quick_deploy_options"].update({"password": "secret", "ipv4_enabled": False, "ipv6_enabled": False,
+ "ProtocolTypeV4": False, "ProtocolTypeV6": False})
+ deploy_data = {"ProtocolTypeV4": False, "NetworkTypeV4": "Static", "IpV4SubnetMask": "XXX.XXX.XXX.XXX",
+ "IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": False, "NetworkTypeV6": "Static",
+ "PrefixLength": "1", "IpV6Gateway": "0.0.0.0",
+ "Slots": [{"SlotId": 1, "SlotIPV4Address": "XX.XX.XX.XX", "SlotIPV6Address": "::",
+ "VlanId": "1"}]}
+ f_module = self.get_module_mock(params=param)
+ result = self.module.check_mode_validation(f_module, deploy_data)
+ assert result[0]["NetworkTypeV4"] == "Static"
+ param = {"device_id": 25012, "hostname": "XY.XY.XY.XY", "setting_type": "ServerQuickDeploy",
+ "quick_deploy_options": {
+ "ipv4_enabled": True, "ipv4_network_type": "Static", "ipv4_subnet_mask": "XXX.XXX.XXX.XXX",
+ "ipv4_gateway": "0.0.0.0", "ipv6_enabled": True, "ipv6_network_type": "Static",
+ "ipv6_prefix_length": "1", "ipv6_gateway": "0.0.0.0",
+ "slots": [{"slot_id": 1, "slot_ipv4_address": "XX.XX.XX.XX",
+ "slot_ipv6_address": "::", "vlan_id": "1"}]}}
+ f_module = self.get_module_mock(params=param)
+ deploy_data = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "XXX.XXX.XXX.XXX",
+ "IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static",
+ "PrefixLength": "1", "IpV6Gateway": "0.0.0.0",
+ "Slots": [{"SlotId": 2, "SlotIPV4Address": "XX.XX.XX.XX", "SlotIPV6Address": "::",
+ "VlanId": "1"}]}
+ with pytest.raises(Exception) as err:
+ self.module.check_mode_validation(f_module, deploy_data)
+ assert err.value.args[0] == "Unable to complete the operation because the entered slot(s) '1' does not exist."
+ param = {"device_id": 25012, "hostname": "XY.XY.XY.XY", "setting_type": "ServerQuickDeploy",
+ "quick_deploy_options": {
+ "ipv4_enabled": True, "ipv4_network_type": "Static", "ipv4_subnet_mask": "XXX.XXX.XXX.XXX",
+ "ipv4_gateway": "0.0.0.0", "ipv6_enabled": True, "ipv6_network_type": "Static",
+ "ipv6_prefix_length": "1", "ipv6_gateway": "0.0.0.0",
+ "slots": [{"slot_id": 5, "slot_ipv4_address": "XX.XX.XX.XX",
+ "slot_ipv6_address": "::", "vlan_id": ""}]}}
+ f_module = self.get_module_mock(params=param)
+ deploy_data = {"ProtocolTypeV4": True, "NetworkTypeV4": "Static", "IpV4SubnetMask": "XXX.XXX.XXX.XXX",
+ "IpV4Gateway": "0.0.0.0", "ProtocolTypeV6": True, "NetworkTypeV6": "Static",
+ "PrefixLength": "1", "IpV6Gateway": "0.0.0.0",
+ "Slots": [{"SlotId": 5, "SlotIPV4Address": "XX.XX.XX.XX",
+ "SlotIPV6Address": "::", "VlanId": ""}]}
+ with pytest.raises(Exception) as err:
+ self.module.check_mode_validation(f_module, deploy_data)
+ assert err.value.args[0] == "No changes found to be applied."
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
@@ -166,8 +271,22 @@ class TestOMEMDevicePower(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
- {"accept-type": "application/json"}, StringIO(json_str)))
+ side_effect=exc_type(HTTP_ADDRESS, 400, HTTP_ERROR_MSG,
+ {"accept-type": ACCESS_TYPE}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
assert 'msg' in result
+
+ def test_main(self, mocker, ome_default_args, ome_conn_mock_qd, ome_response_mock):
+ mocker.patch(MODULE_PATH + 'check_domain_service', return_value=None)
+ mocker.patch(MODULE_PATH + 'ip_address_field', return_value=None)
+ mocker.patch(MODULE_PATH + 'get_device_details', return_value=("JID_123456789", {"Status": "Success"}))
+ ome_default_args.update({"device_id": 25011, "setting_type": "ServerQuickDeploy", "validate_certs": False,
+ "quick_deploy_options": {"ipv4_enabled": False,
+ "slots": [{"slot_id": 1, "vlan_id": 1}]}})
+ result = self._run_module(ome_default_args)
+ assert result["msg"] == "Successfully deployed the Quick Deploy settings."
+ assert result["job_id"] == "JID_123456789"
+ mocker.patch(MODULE_PATH + 'get_device_details', return_value=("JID_135792468", None))
+ result = self._run_module(ome_default_args)
+ assert result["msg"] == "Successfully submitted the Quick Deploy job settings."
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py
index 94e76df11..23148d390 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_devices.py
@@ -3,7 +3,7 @@
#
# Dell OpenManage Ansible Modules
# Version 6.1.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -460,7 +460,7 @@ class TestOmeDevices(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_dev_ids',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py
index 79c94b5cb..ca6bfd7f9 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_diagnostics.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -20,7 +20,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_diagnostics
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_diagnostics.'
@@ -83,7 +83,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
"are not applicable for export log."
def test_extract_log_operation(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
- f_module = self.get_module_mock(params={"log_type": "application", "share_address": "192.168.0.1",
+ f_module = self.get_module_mock(params={"log_type": "application", "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"],
@@ -100,7 +100,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
result = self.module.extract_log_operation(f_module, ome_conn_mock_diagnostics)
assert result["Id"] == 16011
- f_module = self.get_module_mock(params={"log_type": "support_assist_collection", "share_address": "192.168.0.1",
+ f_module = self.get_module_mock(params={"log_type": "support_assist_collection", "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"]})
@@ -108,7 +108,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
assert result["Id"] == 16011
def test_extract_log_operation_member(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
- f_module = self.get_module_mock(params={"log_type": "application", "share_address": "192.168.0.1",
+ f_module = self.get_module_mock(params={"log_type": "application", "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"],
@@ -123,7 +123,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
def test_extract_log_operation_no_lead_chassis(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
f_module = self.get_module_mock(params={"lead_chassis_only": False, "log_type": "application",
- "share_address": "192.168.0.1",
+ "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], })
@@ -134,7 +134,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
def test_extract_log_operation_s1(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
f_module = self.get_module_mock(params={"lead_chassis_only": False, "log_type": "application",
- "share_address": "192.168.0.1",
+ "share_address": "XX.XX.XX.XX",
"share_type": "NFS",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"], })
ome_response_mock.json_data = {"value": [{"Id": 16011, "Type": 2000}]}
@@ -143,7 +143,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
assert result["Id"] == 16011
def test_main_succes_case(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
- ome_default_args.update({"log_type": "support_assist_collection", "share_address": "192.168.0.1",
+ ome_default_args.update({"log_type": "support_assist_collection", "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"],
@@ -170,7 +170,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
"share domain, and share credentials provided are correct."
def test_main_succes_case02(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
- ome_default_args.update({"log_type": "supportassist_collection", "share_address": "192.168.0.1",
+ ome_default_args.update({"log_type": "supportassist_collection", "share_address": "XX.XX.XX.XX",
"share_type": "CIFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"],
@@ -197,7 +197,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
"share domain, and share credentials provided are correct."
def test_main_succes_case03(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
- ome_default_args.update({"log_type": "application", "share_address": "192.168.0.1",
+ ome_default_args.update({"log_type": "application", "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "share_name": "iso", "mask_sensitive_info": "true",
"test_connection": True, "job_wait": True, "device_ids": [25011]})
mocker.patch(MODULE_PATH + "check_domain_service", return_value=None)
@@ -222,7 +222,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
"share domain, and share credentials provided are correct."
def test_main_succes_case04(self, ome_conn_mock_diagnostics, ome_response_mock, ome_default_args, mocker):
- ome_default_args.update({"log_type": "supportassist_collection", "share_address": "192.168.0.1",
+ ome_default_args.update({"log_type": "supportassist_collection", "share_address": "XX.XX.XX.XX",
"share_type": "CIFS", "share_name": "iso", "share_user": "username",
"share_password": "password", "share_domain": "domain",
"mask_sensitive_info": "true", "log_selectors": ["OS_LOGS"],
@@ -252,7 +252,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
def test_ome_diagnostics_main_exception_case(self, exc_type, mocker, ome_default_args,
ome_conn_mock_diagnostics, ome_response_mock):
- ome_default_args.update({"log_type": "application", "share_address": "192.168.0.1",
+ ome_default_args.update({"log_type": "application", "share_address": "XX.XX.XX.XX",
"share_type": "NFS", "mask_sensitive_info": False})
ome_response_mock.status_code = 400
ome_response_mock.success = False
@@ -267,7 +267,7 @@ class TestOMEDiagnostics(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py
index e84e7c7e2..0b5ee8290 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_discovery.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.3.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_discovery
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_discovery.'
NO_CHANGES_MSG = "No changes found to be applied."
@@ -152,8 +152,7 @@ class TestOmeDiscovery(FakeAnsibleModule):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
ome_connection_mock_for_discovery.get_all_items_with_pagination.return_value = params['pag_ret_val']
- f_module = self.get_module_mock()
- ips = self.module.get_execution_details(f_module, ome_connection_mock_for_discovery, 1)
+ ips, job_status = self.module.get_execution_details(ome_connection_mock_for_discovery, 1)
assert ips == params['ips']
@pytest.mark.parametrize("params", [{"json_data": {'JobStatusId': 2060}, 'job_wait_sec': 60, 'job_failed': False,
@@ -166,9 +165,8 @@ class TestOmeDiscovery(FakeAnsibleModule):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
- job_failed, msg = self.module.discovery_job_tracking(ome_connection_mock_for_discovery, 1,
- params['job_wait_sec'])
- assert job_failed == params['job_failed']
+ msg = self.module.discovery_job_tracking(ome_connection_mock_for_discovery, 1,
+ params['job_wait_sec'])
assert msg == params['msg']
@pytest.mark.parametrize("params", [{"discovery_json": {'DiscoveryConfigTaskParam': [{'TaskId': 12}]},
@@ -223,8 +221,7 @@ class TestOmeDiscovery(FakeAnsibleModule):
mocker.patch(MODULE_PATH + 'get_connection_profile', return_value=params['get_conn_json'])
disc_cfg_list = self.module.get_discovery_config(f_module, ome_connection_mock_for_discovery)
assert disc_cfg_list[0]['DeviceType'] == params['DeviceType']
- assert disc_cfg_list[0]['DiscoveryConfigTargets'] == params[
- 'DiscoveryConfigTargets'] # assert disc_cfg_list == params['disc_cfg_list']
+ assert disc_cfg_list[0]['DiscoveryConfigTargets'] == params['DiscoveryConfigTargets']
@pytest.mark.parametrize("params", [{"json_data": {"@odata.type": "#DiscoveryConfigService.DiscoveryJob",
"@odata.id": "/api/DiscoveryConfigService/Jobs(12617)",
@@ -243,20 +240,22 @@ class TestOmeDiscovery(FakeAnsibleModule):
assert djob == params['djob']
@pytest.mark.parametrize("params", [
- {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': False, 'job_message': DISCOVER_JOB_COMPLETE,
+ {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_message': DISCOVER_JOB_COMPLETE,
'mparams': {'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}},
- {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': True, 'job_message': JOB_TRACK_FAIL,
+ {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_message': JOB_TRACK_FAIL,
'mparams': {'job_wait': True, 'schedule': 'RunNow', 'job_wait_timeout': 1000}},
- {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_failed': True, 'job_message': DISCOVERY_SCHEDULED,
+ {"json_data": {"DiscoveryConfigGroupName": 'd1'}, 'job_message': DISCOVERY_SCHEDULED,
'mparams': {'job_wait': False, 'schedule': 'RunLater', 'job_wait_timeout': 1000}}])
def test_create_discovery(self, params, mocker, ome_connection_mock_for_discovery, ome_response_mock):
mocker.patch(MODULE_PATH + 'get_discovery_config', return_value={})
mocker.patch(MODULE_PATH + 'get_schedule', return_value={})
mocker.patch(MODULE_PATH + 'get_other_discovery_payload', return_value={})
mocker.patch(MODULE_PATH + 'get_job_data', return_value=12)
- mocker.patch(MODULE_PATH + 'get_execution_details', return_value={})
- mocker.patch(MODULE_PATH + 'get_discovery_job', return_value={})
- mocker.patch(MODULE_PATH + 'discovery_job_tracking', return_value=(params['job_failed'], params['job_message']))
+ mocker.patch(MODULE_PATH + 'get_execution_details', return_value=({"Completed": ["XX.XX.XX.XX"], "Failed": []},
+ {"JobStatusId": 2050}))
+ mocker.patch(MODULE_PATH + 'get_discovery_job', return_value={"JobStatusId": 2050})
+ mocker.patch(MODULE_PATH + 'discovery_job_tracking', return_value=(params['job_message']))
+ mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
f_module = self.get_module_mock(params=params['mparams'])
@@ -283,7 +282,7 @@ class TestOmeDiscovery(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_existing_discovery',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
@@ -305,11 +304,13 @@ class TestOmeDiscovery(FakeAnsibleModule):
mocker.patch(MODULE_PATH + 'get_other_discovery_payload', return_value={"DiscoveryConfigGroupId": 10})
mocker.patch(MODULE_PATH + 'update_modify_payload', return_value=None)
mocker.patch(MODULE_PATH + 'get_job_data', return_value=12)
- mocker.patch(MODULE_PATH + 'get_execution_details', return_value={})
- mocker.patch(MODULE_PATH + 'get_discovery_job', return_value={})
+ mocker.patch(MODULE_PATH + 'get_execution_details', return_value=({"Completed": ["XX.XX.XX.XX"], "Failed": []},
+ {"JobStatusId": 2050}))
+ mocker.patch(MODULE_PATH + 'get_discovery_job', return_value={"JobStatusId": 2050})
mocker.patch(MODULE_PATH + 'get_discovery_config', return_value={})
mocker.patch(MODULE_PATH + 'get_discovery_states', return_value={12: 15})
- mocker.patch(MODULE_PATH + 'discovery_job_tracking', return_value=(params['job_failed'], params['job_message']))
+ mocker.patch(MODULE_PATH + 'discovery_job_tracking', return_value=(params['job_message']))
+ mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
error_message = params["job_message"]
with pytest.raises(Exception) as err:
self.module.modify_discovery(f_module, ome_connection_mock_for_discovery, discov_list)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py
index c931ed82c..d69093033 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_domain_user_groups.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.0.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_domain_user_groups
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_domain_user_groups.'
NO_CHANGES_MSG = "No changes found to be applied."
@@ -74,7 +74,7 @@ class TestOMEADUser(FakeAnsibleModule):
def test_delete_directory_user(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker):
ome_response_mock.status_code = 204
msg, changed = self.module.delete_directory_user(ome_conn_mock_ad, 15011)
- assert msg == "Successfully deleted the active directory user group."
+ assert msg == "Successfully deleted the domain user group."
assert changed is True
def test_get_role(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker):
@@ -100,13 +100,15 @@ class TestOMEADUser(FakeAnsibleModule):
def test_search_directory(self, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker):
f_module = self.get_module_mock(params={"state": "present", "group_name": "Administrator",
- "domain_username": "admin@dev0", "domain_password": "password"})
+ "domain_username": "admin@dev0", "domain_password": "password",
+ "directory_type": "LDAP"})
ome_response_mock.json_data = [{"CommonName": "Administrator", "ObjectGuid": "object_id"}]
obj_id, name = self.module.search_directory(f_module, ome_conn_mock_ad, 16011)
assert obj_id == "object_id"
f_module = self.get_module_mock(params={"state": "present", "group_name": "Admin",
- "domain_username": "admin@dev0", "domain_password": "password"})
+ "domain_username": "admin@dev0", "domain_password": "password",
+ "directory_type": "AD"})
with pytest.raises(Exception) as err:
self.module.search_directory(f_module, ome_conn_mock_ad, 16011)
assert err.value.args[0] == "Unable to complete the operation because the entered " \
@@ -173,26 +175,50 @@ class TestOMEADUser(FakeAnsibleModule):
resp, msg = self.module.directory_user(f_module, ome_conn_mock_ad)
assert msg == "imported"
+ @pytest.mark.parametrize("params", [{
+ "module_args": {"state": "present", "group_name": "group1",
+ "domain_username": "admin@dev0", "domain_password": "password",
+ "directory_type": "LDAP"},
+ "directory_user": ([{"UserName": "Group1", "Id": 15011, "RoleId": "10", "Enabled": True}], 'imported'),
+ "msg": "Successfully imported the domain user group."},
+ {
+ "module_args": {"state": "absent", "group_name": "group1",
+ "domain_username": "admin@dev0", "domain_password": "password",
+ "directory_type": "LDAP"},
+ "get_directory_user": ({"UserName": "Group1", "Id": 15011, "RoleId": "10", "Enabled": True}),
+ "delete_directory_user": ("Successfully deleted the domain user group.", True),
+ "msg": "Successfully deleted the domain user group."}])
+ def test_main_success(self, params, ome_conn_mock_ad, ome_response_mock, ome_default_args, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = {"Name": "LDAP2"}
+ ome_conn_mock_ad.strip_substr_dict.return_value = params.get("directory_user", (None, 1))[0]
+ mocker.patch(MODULE_PATH + 'directory_user', return_value=params.get("directory_user", (None, 1)))
+ mocker.patch(MODULE_PATH + 'get_directory_user', return_value=params.get("get_directory_user", (None, 1)))
+ mocker.patch(MODULE_PATH + 'delete_directory_user', return_value=params.get("delete_directory_user", (None, 1)))
+ ome_default_args.update(params['module_args'])
+ result = self._run_module(ome_default_args)
+ assert result['msg'] == params['msg']
+
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
- def test_ome_domain_exception(self, exc_type, mocker, ome_default_args,
- ome_conn_mock_ad, ome_response_mock):
- ome_default_args.update({"state": "absent"})
+ def test_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_conn_mock_ad, ome_response_mock):
+ ome_default_args.update({"state": "absent", "group_name": "group1"})
ome_response_mock.status_code = 400
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
mocker.patch(MODULE_PATH + 'get_directory_user', side_effect=exc_type("url open error"))
- result = self._run_module_with_fail_json(ome_default_args)
- assert result["failed"] is True
+ result = self._run_module(ome_default_args)
+ assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(MODULE_PATH + 'get_directory_user', side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
- mocker.patch(MODULE_PATH + 'get_directory_user',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
- {"accept-type": "application/json"}, StringIO(json_str)))
+ mocker.patch(MODULE_PATH + 'get_directory_user', side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py
index 082b82934..f13a61b8c 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -228,10 +228,10 @@ class TestOmeFirmware(FakeAnsibleModule):
"PrerequisiteInfo": ""
}
],
- "DeviceIPAddress": "192.168.0.3",
+ "DeviceIPAddress": "XX.XX.XX.XX",
"DeviceId": "28628",
"DeviceModel": "PowerEdge R940",
- "DeviceName": "192.168.0.3",
+ "DeviceName": "XX.XX.XX.XX",
"DeviceServiceTag": "HC2XFL2",
"DeviceTypeId": "1000",
"DeviceTypeName": "SERVER"
@@ -315,12 +315,12 @@ class TestOmeFirmware(FakeAnsibleModule):
else:
builtin_module_name = '__builtin__'
f_module = self.get_module_mock(
- params={'dup_file': "/root1/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE", 'hostname': '192.168.0.1'})
+ params={'dup_file': "/root1/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE", 'hostname': 'XX.XX.XX.XX'})
with patch("{0}.open".format(builtin_module_name), mock_open(read_data="data")) as mock_file:
with pytest.raises(Exception) as exc:
self.module.upload_dup_file(ome_connection_firmware_mock, f_module)
assert exc.value.args[0] == "Unable to upload {0} to {1}".format('/root1/Ansible_EXE/BIOS_87V69_WN64_2.4.7.EXE',
- '192.168.0.1')
+ 'XX.XX.XX.XX')
def test_get_device_ids_success_case(self, ome_connection_firmware_mock, ome_response_mock, ome_default_args):
ome_default_args.update()
@@ -435,7 +435,8 @@ class TestOmeFirmware(FakeAnsibleModule):
def test_job_payload_for_update_case_02(self, ome_connection_firmware_mock, ome_response_mock):
"""baseline case"""
- f_module = self.get_module_mock(params={'schedule': 'RebootNow'})
+ f_module = self.get_module_mock(params={'schedule': 'RebootNow',
+ 'reboot_type': 'GracefulReboot'})
target_data = {}
baseline = {"baseline_id": 1, "repo_id": 2, "catalog_id": 3}
ome_connection_firmware_mock.get_job_type_id.return_value = ome_response_mock
@@ -450,7 +451,8 @@ class TestOmeFirmware(FakeAnsibleModule):
def test_job_payload_for_update_case_03(self, ome_connection_firmware_mock, ome_response_mock):
"""response None case"""
- f_module = self.get_module_mock(params={'schedule': 'RebootNow'})
+ f_module = self.get_module_mock(params={'schedule': 'RebootNow',
+ 'reboot_type': 'PowerCycle'})
target_data = {}
ome_connection_firmware_mock.get_job_type_id.return_value = ome_response_mock
payload = self.module.job_payload_for_update(ome_connection_firmware_mock, f_module, target_data)
@@ -547,7 +549,7 @@ class TestOmeFirmware(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'ome_firmware._validate_device_attributes',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline.py
index 8af8d6760..76d2ee0db 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.1.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -45,6 +45,7 @@ payload_out1 = {
"CatalogId": 12,
"RepositoryId": 23,
"DowngradeEnabled": True,
+ 'FilterNoRebootRequired': True,
"Is64Bit": True,
"Targets": [
{"Id": 123,
@@ -56,6 +57,7 @@ payload_out1 = {
payload_out2 = {
"Name": "baseline1",
"CatalogId": 12,
+ 'FilterNoRebootRequired': False,
"RepositoryId": 23, 'Description': None, 'DowngradeEnabled': True, 'Is64Bit': True,
"Targets": [
{"Id": 123,
@@ -361,12 +363,14 @@ class TestOmeFirmwareBaseline(FakeAnsibleModule):
"baseline_name": "baseline1",
"baseline_description": "baseline_description",
"downgrade_enabled": True,
- "is_64_bit": True}
+ "is_64_bit": True,
+ "filter_no_reboot_required": True}
payload_param2 = {"catalog_name": "cat1",
"baseline_name": "baseline1",
"baseline_description": None,
"downgrade_enabled": None,
- "is_64_bit": None}
+ "is_64_bit": None,
+ "filter_no_reboot_required": False}
@pytest.mark.parametrize("params", [{"inp": payload_param1, "out": payload_out1},
{"inp": payload_param2, "out": payload_out2}])
@@ -547,7 +551,7 @@ class TestOmeFirmwareBaseline(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_existing_baseline',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
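
The new FilterNoRebootRequired key in payload_out1/payload_out2 mirrors the filter_no_reboot_required option exercised by the updated payload_param cases. A hedged sketch of that snake_case-to-payload mapping (a hypothetical helper for illustration, not the collection's actual implementation):

    def baseline_payload(params):
        # Map selected module options (snake_case) to OME baseline payload keys (PascalCase).
        key_map = {
            "baseline_name": "Name",
            "downgrade_enabled": "DowngradeEnabled",
            "is_64_bit": "Is64Bit",
            "filter_no_reboot_required": "FilterNoRebootRequired",
        }
        # Keep explicit False values, drop only unset (None) options.
        return {ome_key: params[opt] for opt, ome_key in key_map.items() if params.get(opt) is not None}

    print(baseline_payload({"baseline_name": "baseline1", "filter_no_reboot_required": True}))
    # {'Name': 'baseline1', 'FilterNoRebootRequired': True}
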
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py
index 96672f6d6..76592ef05 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_compliance_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -22,6 +22,8 @@ from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware_
from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, \
AnsibleFailJSonException, Constants
+HTTP_ADDRESS = 'https://testhost.com'
+
@pytest.fixture
def ome_connection_mock_for_firmware_baseline_compliance_info(mocker, ome_response_mock):
@@ -60,7 +62,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
ome_connection_mock_for_firmware_baseline_compliance_info,
ome_response_mock):
ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.side_effect = HTTPError(
- 'http://testhost.com', 400, '', {}, None)
+ HTTP_ADDRESS, 400, '', {}, None)
f_module = self.get_module_mock()
with pytest.raises(HTTPError) as ex:
self.module._get_device_id_from_service_tags(["INVALID"],
@@ -100,7 +102,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
def test_get_device_ids_from_group_ids_error_case(self, ome_connection_mock_for_firmware_baseline_compliance_info,
ome_response_mock):
ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = HTTPError(
- 'http://testhost.com', 400, '', {}, None)
+ HTTP_ADDRESS, 400, '', {}, None)
f_module = self.get_module_mock()
with pytest.raises(HTTPError) as ex:
device_ids = self.module.get_device_ids_from_group_ids(f_module, ["123456"],
@@ -145,7 +147,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
def test_get_device_ids_from_group_names_error_case(self, ome_connection_mock_for_firmware_baseline_compliance_info,
ome_response_mock):
ome_connection_mock_for_firmware_baseline_compliance_info.get_all_report_details.side_effect = HTTPError(
- 'http://testhost.com', 400, '', {}, None)
+ HTTP_ADDRESS, 400, '', {}, None)
f_module = self.get_module_mock(params={"device_group_names": ["abc", "xyz"]})
with pytest.raises(HTTPError) as ex:
self.module.get_device_ids_from_group_names(f_module,
@@ -253,7 +255,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
ome_connection_mock_for_firmware_baseline_compliance_info,
ome_response_mock):
ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = HTTPError(
- 'http://testhost.com', 400, '', {}, None)
+ HTTP_ADDRESS, 400, '', {}, None)
f_module = self.get_module_mock(params={"baseline_name": "baseline_name1"})
with pytest.raises(HTTPError) as ex:
self.module.get_baseline_id_from_name(ome_connection_mock_for_firmware_baseline_compliance_info, f_module)
@@ -268,7 +270,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
'test')
else:
ome_connection_mock_for_firmware_baseline_compliance_info.get_all_items_with_pagination.side_effect = exc_type(
- 'http://testhost.com', 400, '', {}, None)
+ HTTP_ADDRESS, 400, '', {}, None)
ome_response_mock.status_code = 400
ome_response_mock.success = False
f_module = self.get_module_mock(params={"baseline_name": "baseline_name1"})
@@ -348,7 +350,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
ome_connection_mock_for_firmware_baseline_compliance_info.invoke_request.side_effect = exc_type('test')
else:
ome_connection_mock_for_firmware_baseline_compliance_info.invoke_request.side_effect = exc_type(
- 'http://testhost.com', 400, '', err_dict, None)
+ HTTP_ADDRESS, 400, '', err_dict, None)
f_module = self.get_module_mock()
with pytest.raises(exc_type):
self.module.get_baselines_report_by_device_ids(
@@ -379,7 +381,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
else:
mocker.patch(
'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baseline_id_from_name',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type(HTTP_ADDRESS, 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
f_module = self.get_module_mock(params={"baseline_name": "baseline1"})
with pytest.raises(exc_type):
@@ -527,7 +529,7 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
else:
mocker.patch(
'ansible_collections.dellemc.openmanage.plugins.modules.ome_firmware_baseline_compliance_info.get_baselines_report_by_device_ids',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type(HTTP_ADDRESS, 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert 'baseline_compliance_info' not in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py
index 6d394a1ae..7095b3b95 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_baseline_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -18,7 +18,7 @@ from ssl import SSLError
from ansible_collections.dellemc.openmanage.plugins.modules import ome_firmware_baseline_info
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from io import StringIO
from ansible.module_utils._text import to_text
@@ -111,7 +111,7 @@ class TestOmeFirmwareBaselineInfo(FakeAnsibleModule):
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
- ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type('http://testhost.com',
+ ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type('https://testhost.com',
400,
'http error message',
{
@@ -122,7 +122,7 @@ class TestOmeFirmwareBaselineInfo(FakeAnsibleModule):
assert "error_info" in result
assert result['msg'] == 'HTTP Error 400: http error message'
- ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type('http://testhost.com',
+ ome_connection_ome_firmware_baseline_info_mock.invoke_request.side_effect = exc_type('https://testhost.com',
404,
'<404 not found>',
{
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py
index c0f0a5147..07f7260ab 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_firmware_catalog.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
-# Copyright (C) 2019-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -259,11 +259,11 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(MODULE_PATH + 'check_existing_catalog', side_effect=exc_type("exception message"))
- result = self._run_module_with_fail_json(ome_default_args)
+ result = self._run_module(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_existing_catalog',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
@@ -820,11 +820,11 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(MODULE_PATH + 'validate_names', side_effect=exc_type("exception message"))
- result = self._run_module_with_fail_json(ome_default_args)
+ result = self._run_module(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'validate_names',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
@@ -862,3 +862,19 @@ class TestOmeFirmwareCatalog(FakeAnsibleModule):
ome_default_args.update({"repository_type": "HTTPS", "catalog_name": "t1", "catalog_id": 1})
result = self._run_module_with_fail_json(ome_default_args)
assert result["msg"] == "parameters are mutually exclusive: catalog_name|catalog_id"
+
+ @pytest.mark.parametrize("param", [{"hostname": "invalid-host-abcd"}])
+ def test_ome_catalog_invalid_hostname_case1(self, ome_default_args, param):
+ # To verify invalid IP or hostname in module_utils/ome
+ ome_default_args.update({"hostname": param['hostname'], "catalog_name": "catalog1", "repository_type": "HTTPS", "ca_path": ""})
+ result = self._run_module(ome_default_args)
+ assert result["unreachable"] is True
+ assert "error" in result['msg']
+
+ @pytest.mark.parametrize("param", [{"hostname": "ABCD:ABCD:ABCD:EF12:3456:7890"}])
+ def _test_ome_catalog_invalid_hostname_case2(self, ome_default_args, param):
+ # To verify invalid IP or hostname in module_utils/ome
+ ome_default_args.update({"hostname": param['hostname'], "catalog_name": "catalog1", "repository_type": "HTTPS", "ca_path": ""})
+ result = self._run_module(ome_default_args)
+ assert "does not appear to be an IPv4 or IPv6 address" in result['msg']
+ assert param['hostname'] in result['msg']
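
The two tests appended to this file probe hostname validation in module_utils/ome; note that the second one is left underscore-prefixed, so pytest will not collect it. A generic, standalone sketch of the same parametrize pattern, independent of the collection's fixtures (the helper below is a stand-in, not the module_utils code):

    import ipaddress
    import pytest

    def looks_like_ip(hostname):
        # Stand-in for the address check performed by the real connection layer.
        try:
            ipaddress.ip_address(hostname)
            return True
        except ValueError:
            return False

    @pytest.mark.parametrize("hostname, expected",
                             [("192.0.2.10", True),
                              ("ABCD:ABCD:ABCD:EF12:3456:7890", False),   # too few IPv6 groups
                              ("invalid-host-abcd", False)])
    def test_looks_like_ip(hostname, expected):
        assert looks_like_ip(hostname) is expected
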
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py
index 6aede9323..224f8388a 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_groups.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.5.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2021-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_groups
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MULTIPLE_GROUPS_MSG = "Provide only one unique device group when state is present."
NONEXIST_GROUP_ID = "A device group with the provided ID does not exist."
@@ -117,6 +117,7 @@ class TestOmeGroups(FakeAnsibleModule):
ome_connection_mock_for_groups.strip_substr_dict.return_value = params.get('created_group', {})
mocker.patch(MODULE_PATH + 'get_ome_group_by_id', return_value=params.get('created_group', {}))
mocker.patch(MODULE_PATH + 'create_parent', return_value=params['created_group'].get('ParentId'))
+ mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
ome_default_args.update(params['mparams'])
result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
assert result['msg'] == (params['message']).format(op='create')
@@ -151,6 +152,7 @@ class TestOmeGroups(FakeAnsibleModule):
ome_connection_mock_for_groups.strip_substr_dict.return_value = params.get('created_group', {})
mocker.patch(MODULE_PATH + 'get_ome_group_by_id', return_value=params.get('created_group', {}))
mocker.patch(MODULE_PATH + 'create_parent', return_value=params['created_group'].get('ParentId'))
+ mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
# mocker.patch(MODULE_PATH + 'is_parent_in_subtree', return_value=False)
ome_default_args.update(params['mparams'])
result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
@@ -267,7 +269,7 @@ class TestOmeGroups(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_valid_groups',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
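
Both group create/modify cases above gain a mocker.patch of time.sleep, which suggests the module waits or retries after creating a group; patching sleep keeps the unit tests fast. A small standalone illustration of that trick using pytest-mock's mocker fixture, as in the tests above (the polled helper is hypothetical, not the ome_groups code):

    import time

    def create_then_poll(create, poll, attempts=3, delay=5):
        # Hypothetical helper: create a resource, then poll until it becomes visible.
        created = create()
        for _ in range(attempts):
            if poll(created):
                return created
            time.sleep(delay)
        raise TimeoutError("resource never became visible")

    def test_create_then_poll_without_real_delays(mocker):
        mocker.patch("time.sleep", return_value=None)   # same trick as in test_ome_groups
        polls = iter([False, True])                     # first poll misses, second succeeds
        result = create_then_poll(lambda: {"Id": 1}, lambda group: next(polls))
        assert result == {"Id": 1}
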
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py
index 93c18d22e..d7a6d8b84 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_identity_pool.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -14,7 +14,7 @@ __metaclass__ = type
import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import ome_identity_pool
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ssl import SSLError
@@ -58,7 +58,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"ip_range": "10.33.0.1-10.33.0.255",
"primary_dns_server": "10.8.8.8",
"secondary_dns_server": "8.8.8.8",
- "subnet_mask": "255.255.255.0"
+ "subnet_mask": "XXX.XXX.XXX.XXX"
},
"starting_mac_address": "60:60:60:60:60:00"
},
@@ -100,7 +100,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'ome_identity_pool.pool_create_modify',
- side_effect=exc_type('http://testhost.com', 400,
+ side_effect=exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str)))
@@ -267,7 +267,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"identity_count": 75,
"starting_mac_address": "aabb.ccdd.7070"
},
- "hostname": "192.168.0.1",
+ "hostname": "XX.XX.XX.XX",
"iscsi_settings": {
"identity_count": 30,
"initiator_config": {
@@ -278,7 +278,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"ip_range": "10.33.0.1-10.33.0.255",
"primary_dns_server": "10.8.8.8",
"secondary_dns_server": "8.8.8.8",
- "subnet_mask": "255.255.255.0"
+ "subnet_mask": "XXX.XXX.XXX.XXX"
},
"starting_mac_address": "60:60:60:60:60:00"
},
@@ -311,7 +311,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
},
"InitiatorIpPoolSettings": {
"IpRange": "10.33.0.1-10.33.0.255",
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1",
"PrimaryDnsServer": "10.8.8.8",
"SecondaryDnsServer": "8.8.8.8"
@@ -339,7 +339,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
assert payload["IscsiSettings"]["Mac"] == {"IdentityCount": 30, "StartingMacAddress": "YGBgYGAA"}
assert payload["IscsiSettings"]["InitiatorIpPoolSettings"] == {
"IpRange": "10.33.0.1-10.33.0.255",
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1",
"PrimaryDnsServer": "10.8.8.8",
"SecondaryDnsServer": "8.8.8.8"
@@ -364,7 +364,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"ip_range": "20.33.0.1-20.33.0.255",
"primary_dns_server": "10.8.8.8",
"secondary_dns_server": "8.8.8.8",
- "subnet_mask": "255.255.255.0"
+ "subnet_mask": "XXX.XXX.XXX.XXX"
},
"starting_mac_address": "10:10:10:10:10:00"
}
@@ -379,7 +379,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
assert payload["IscsiSettings"]["Mac"] == {"IdentityCount": 30, "StartingMacAddress": "EBAQEBAA"}
assert payload["IscsiSettings"]["InitiatorIpPoolSettings"] == {
"IpRange": "20.33.0.1-20.33.0.255",
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1",
"PrimaryDnsServer": "10.8.8.8",
"SecondaryDnsServer": "8.8.8.8"
@@ -1040,7 +1040,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
},
"InitiatorIpPoolSettings": {
"IpRange": "10.33.0.1-10.33.0.255",
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1",
"PrimaryDnsServer": "10.8.8.8",
"SecondaryDnsServer": "8.8.8.8"
@@ -1185,7 +1185,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
self.module.validate_modify_create_payload(modify_payload, f_module, action)
payload_iscsi3 = {
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1",
"PrimaryDnsServer": "10.8.8.8",
"SecondaryDnsServer": "8.8.8.8"
@@ -1300,7 +1300,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"ip_range": "10.33.0.1-10.33.0.255",
"primary_dns_server": "10.8.8.8",
"secondary_dns_server": "8.8.8.8",
- "subnet_mask": "255.255.255.0"
+ "subnet_mask": "XXX.XXX.XXX.XXX"
},
"starting_mac_address": "60:60:60:60:60:00"
}
@@ -1317,7 +1317,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
},
"InitiatorIpPoolSettings": {
"IpRange": "10.33.0.1-10.33.0.255",
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1",
"PrimaryDnsServer": "10.8.8.8",
"SecondaryDnsServer": "8.8.8.8"
@@ -1331,7 +1331,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"initiator_ip_pool_settings": {
"gateway": "192.168.4.1",
"ip_range": "10.33.0.1-10.33.0.255",
- "subnet_mask": "255.255.255.0"
+ "subnet_mask": "XXX.XXX.XXX.XXX"
}
}
self.module.update_iscsi_specific_settings(payload, settings_params, setting_type)
@@ -1340,7 +1340,7 @@ class TestOMeIdentityPool(FakeAnsibleModule):
"IscsiSettings": {
"InitiatorIpPoolSettings": {
"IpRange": "10.33.0.1-10.33.0.255",
- "SubnetMask": "255.255.255.0",
+ "SubnetMask": "XXX.XXX.XXX.XXX",
"Gateway": "192.168.4.1"
}
}}
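
Several assertions in this file expect StartingMacAddress values such as "YGBgYGAA" for 60:60:60:60:60:00 and "EBAQEBAA" for 10:10:10:10:10:00, i.e. the OME payload carries the MAC base64-encoded. A short sketch of that conversion (illustrative; the collection's own helper may differ in detail):

    import base64

    def mac_to_ome_payload(mac):
        # "aa:bb:cc:dd:ee:ff", "aabb.ccdd.eeff" or "aa-bb-cc-dd-ee-ff" -> base64 of the raw 6 bytes
        raw = bytes.fromhex(mac.replace(":", "").replace(".", "").replace("-", ""))
        return base64.b64encode(raw).decode()

    assert mac_to_ome_payload("60:60:60:60:60:00") == "YGBgYGAA"
    assert mac_to_ome_payload("10:10:10:10:10:00") == "EBAQEBAA"
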
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py
index 34de35d11..d73e119b6 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_job_info.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.3
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,7 +15,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import ome_job_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
@@ -66,6 +66,19 @@ class TestOmeJobInfo(FakeAnsibleModule):
assert result['changed'] is False
assert 'job_info' in result
+ def test_get_execution_history_and_last_execution_detail_of_a_job(self, ome_default_args,
+ ome_connection_job_info_mock,
+ ome_response_mock):
+ ome_default_args.update({"job_id": 1, "fetch_execution_history": True})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = {"value": [{"job_id": 1}]}
+ ome_response_mock.status_code = 200
+ result = self._run_module(ome_default_args)
+ assert result['changed'] is False
+ assert 'job_info' in result
+ assert 'LastExecutionDetail' in result['job_info']
+ assert 'ExecutionHistories' in result['job_info']
+
def test_job_info_success_case03(self, ome_default_args, ome_connection_job_info_mock,
ome_response_mock):
ome_default_args.update({"system_query_options": {"filter": "abc"}})
@@ -96,9 +109,9 @@ class TestOmeJobInfo(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'ome_job_info._get_query_parameters',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
- if not exc_type == URLError:
+ if exc_type != URLError:
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
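
The new test added above asserts that, when fetch_execution_history is set, the returned job_info carries LastExecutionDetail and ExecutionHistories keys on top of the base job record. A minimal sketch of that enrichment step (hypothetical helper shape, not the module's code):

    def enrich_job_info(job, history, last_detail):
        # Attach execution history to the base job record, as the test expects.
        info = dict(job)
        info["ExecutionHistories"] = history
        info["LastExecutionDetail"] = last_detail
        return info

    job_info = enrich_job_info({"Id": 1, "JobName": "update"},
                               history=[{"Progress": "100"}],
                               last_detail={"Status": "Completed"})
    assert "LastExecutionDetail" in job_info and "ExecutionHistories" in job_info
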
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py
index 44ceef4d2..196c0fd32 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_port_breakout.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.0.0
-# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -17,8 +17,7 @@ import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import ome_network_port_breakout
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \
- AnsibleFailJSonException
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from io import StringIO
from ansible.module_utils._text import to_text
@@ -210,7 +209,7 @@ class TestOMEPortBreakout(FakeAnsibleModule):
if exc_type not in [HTTPError, SSLValidationError]:
ome_connection_breakout_mock.invoke_request.side_effect = exc_type('test')
else:
- ome_connection_breakout_mock.invoke_request.side_effect = exc_type('http://testhost.com', 400,
+ ome_connection_breakout_mock.invoke_request.side_effect = exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str))
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py
index e7b7a05c6..0420e3a25 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -19,7 +19,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_network_vlan
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_network_vlan.'
@@ -202,7 +202,7 @@ class TestOmeNetworkVlan(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_existing_vlan',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py
index 084fcd85c..6cbabe928 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_network_vlan_info.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.3
-# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -15,13 +15,15 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import ome_network_vlan_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
from ansible.module_utils._text import to_text
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+ACCESS_TYPE = "application/json"
+HTTP_ADDRESS = 'https://testhost.com'
response = {
'@odata.context': '/api/$metadata#Collection(NetworkConfigurationService.Network)',
@@ -168,7 +170,7 @@ class TestOmeNetworkVlanInfo(FakeAnsibleModule):
assert result["unreachable"] is True
elif exc_type == HTTPError:
ome_connection_network_vlan_info_mock.invoke_request.side_effect = exc_type(
- 'http://testhost.com', 400, '<400 bad request>', {"accept-type": "application/json"},
+ HTTP_ADDRESS, 400, '<400 bad request>', {"accept-type": ACCESS_TYPE},
StringIO(json_str))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
@@ -176,7 +178,7 @@ class TestOmeNetworkVlanInfo(FakeAnsibleModule):
assert 'error_info' in result
ome_connection_network_vlan_info_mock.invoke_request.side_effect = exc_type(
- 'http://testhost.com', 404, '<404 not found>', {"accept-type": "application/json"}, StringIO(json_str))
+ HTTP_ADDRESS, 404, '<404 not found>', {"accept-type": ACCESS_TYPE}, StringIO(json_str))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
assert 'msg' in result
@@ -188,8 +190,8 @@ class TestOmeNetworkVlanInfo(FakeAnsibleModule):
assert 'msg' in result
else:
mocker.patch(MODULE_PATH + 'ome_network_vlan_info.get_network_type_and_qos_type_information',
- side_effect=exc_type('http://testhost.com', 404, 'http error message',
- {"accept-type": "application/json"}, StringIO(json_str)))
+ side_effect=exc_type(HTTP_ADDRESS, 404, 'http error message',
+ {"accept-type": ACCESS_TYPE}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py
index 707e495c2..0f23a3e11 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_powerstate.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -422,11 +422,11 @@ class TestOmePowerstate(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'ome_powerstate.spawn_update_job',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
mocker.patch(
MODULE_PATH + 'ome_powerstate.get_device_resource',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert 'power_state' not in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py
index 91f7fc1b5..a1afe7635 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -278,7 +278,7 @@ class TestOmeProfile(FakeAnsibleModule):
"res": "Profile with the name 'profile' not found."},
{"mparams": {"command": "modify", "name": "profile", "new_name": "modified profile",
"description": "new description",
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1",
@@ -298,7 +298,7 @@ class TestOmeProfile(FakeAnsibleModule):
"json_data": 0, "res": "No changes found to be applied."},
{"mparams": {"command": "modify", "name": "profile", "new_name": "modified profile",
"description": "new description",
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso", "iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1",
"IsIgnored": True}]}}, "success": True,
@@ -363,7 +363,7 @@ class TestOmeProfile(FakeAnsibleModule):
"json_data": [234, 123],
"res": "The target device is invalid for the given profile."},
{"mparams": {"command": "assign", "name": "profile", "device_id": 234,
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -371,14 +371,14 @@ class TestOmeProfile(FakeAnsibleModule):
"prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"}, "json_data": [23, 123],
"res": "Successfully applied the assign operation."},
{"mparams": {"command": "assign", "name": "profile", "device_service_tag": "ABCDEFG",
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
"success": True, "prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"},
"json_data": [23, 123], "res": "Successfully applied the assign operation."},
{"mparams": {"command": "assign", "name": "profile", "device_id": 234,
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -387,7 +387,7 @@ class TestOmeProfile(FakeAnsibleModule):
"json_data": [23, 123],
"res": "The profile is assigned to the target 234."},
{"mparams": {"command": "assign", "name": "profile", "device_id": 234,
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -397,7 +397,7 @@ class TestOmeProfile(FakeAnsibleModule):
"res": "The profile is assigned to a different target. Use the migrate command or unassign the profile and "
"then proceed with assigning the profile to the target."},
{"mparams": {"command": "assign", "name": "profile", "device_service_tag": "STG1234",
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -406,7 +406,7 @@ class TestOmeProfile(FakeAnsibleModule):
"json_data": [23, 123],
"res": "The profile is assigned to the target STG1234."},
{"mparams": {"command": "assign", "name": "profile", "device_id": 123,
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -415,7 +415,7 @@ class TestOmeProfile(FakeAnsibleModule):
"json_data": [23, 123],
"res": "Target invalid."},
{"mparams": {"command": "assign", "name": "profile", "device_id": 234,
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -423,7 +423,7 @@ class TestOmeProfile(FakeAnsibleModule):
"prof": {"Id": 123, "ProfileState": 0}, "target": {"Id": 234, "Name": "mytarget"}, "json_data": [23, 123],
"res": CHANGES_MSG},
{"mparams": {"command": "assign", "name": "profile", "device_id": 234,
- "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "192.168.0.1",
+ "boot_to_network_iso": {"boot_to_network": True, "share_type": "NFS", "share_ip": "XX.XX.XX.XX",
"iso_path": "path/to/my_iso.iso",
"iso_timeout": 8},
"attributes": {"Attributes": [{"Id": 4506, "Value": "server attr 1", "IsIgnored": True}]}},
@@ -540,7 +540,7 @@ class TestOmeProfile(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'profile_operation',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile_info.py
new file mode 100644
index 000000000..22175439b
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_profile_info.py
@@ -0,0 +1,1279 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+from io import StringIO
+from ssl import SSLError
+
+import pytest
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_profile_info
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+
+SUCCESS_MSG = "Successfully retrieved the profile information."
+NO_PROFILES_MSG = "No profiles were found."
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_profile_info.'
+
+
+@pytest.fixture
+def ome_connection_mock_for_profile_info(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeProfileInfo(FakeAnsibleModule):
+ module = ome_profile_info
+
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [{'Id': 1234, 'Name': "ABCTAG1", "Type": 1000}],
+ "AttributeGroups": [
+ {
+ "GroupNameId": 9,
+ "DisplayName": "iDRAC",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 32688,
+ "DisplayName": "Active Directory",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 7587,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2342,
+ "DisplayName": "ActiveDirectory 1 Active Directory RAC Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32851,
+ "DisplayName": "IPv4 Information",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8133,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2199,
+ "DisplayName": "IPv4 1 IPv4 DHCP Enable",
+ "Description": None,
+ "Value": "Enabled",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 7974,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2198,
+ "DisplayName": "IPv4 1 IPv4 Enable",
+ "Description": None,
+ "Value": "Enabled",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32852,
+ "DisplayName": "IPv4 Static Information",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 7916,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2400,
+ "DisplayName": "IPv4Static 1 Gateway",
+ "Description": None,
+ "Value": "XX.XX.XX.XX",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 8245,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2399,
+ "DisplayName": "IPv4Static 1 IPv4 Address",
+ "Description": None,
+ "Value": "XX.XX.XX.XX20",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 7724,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2403,
+ "DisplayName": "IPv4Static 1 Net Mask",
+ "Description": None,
+ "Value": "XXX.XXX.XXX.XXX",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32855,
+ "DisplayName": "IPv6 Information",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8186,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2207,
+ "DisplayName": "IPv6 1 IPV6 Auto Config",
+ "Description": None,
+ "Value": "Enabled",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 7973,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2205,
+ "DisplayName": "IPv6 1 IPV6 Enable",
+ "Description": None,
+ "Value": "Disabled",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32856,
+ "DisplayName": "IPv6 Static Information",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8244,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2405,
+ "DisplayName": "IPv6Static 1 IPv6 Address 1",
+ "Description": None,
+ "Value": "::",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 7917,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2404,
+ "DisplayName": "IPv6Static 1 IPv6 Gateway",
+ "Description": None,
+ "Value": "::",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 7687,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2408,
+ "DisplayName": "IPv6Static 1 IPV6 Link Local Prefix Length",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32930,
+ "DisplayName": "NIC Information",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8111,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2193,
+ "DisplayName": "NIC 1 DNS RAC Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 7189,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2194,
+ "DisplayName": "NIC 1 Enable VLAN",
+ "Description": None,
+ "Value": "Disabled",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 7166,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2197,
+ "DisplayName": "NIC 1 VLAN ID",
+ "Description": None,
+ "Value": "1",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32934,
+ "DisplayName": "NIC Static Information",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8116,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2396,
+ "DisplayName": "NICStatic 1 DNS Domain Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 4,
+ "DisplayName": "NIC",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 66,
+ "DisplayName": "NIC.Integrated.1-1-1",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 32761,
+ "DisplayName": "FCoE Target 01",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6723,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4769,
+ "DisplayName": "Boot LUN",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6735,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5083,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6722,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4734,
+ "DisplayName": "Virtual LAN ID",
+ "Description": None,
+ "Value": "1",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6721,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4641,
+ "DisplayName": "World Wide Port Name Target",
+ "Description": None,
+ "Value": "00:00:00:00:00:00:00:00",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32762,
+ "DisplayName": "FCoE Target 02",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6733,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5113,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32763,
+ "DisplayName": "FCoE Target 03",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6732,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5122,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32764,
+ "DisplayName": "FCoE Target 04",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6734,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5082,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32870,
+ "DisplayName": "iSCSI General Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6730,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4768,
+ "DisplayName": "CHAP Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 6729,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4767,
+ "DisplayName": "CHAP Mutual Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32871,
+ "DisplayName": "iSCSI Initiator Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6713,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4601,
+ "DisplayName": "CHAP ID",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6712,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4681,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32867,
+ "DisplayName": "iSCSI Target 01",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6720,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4802,
+ "DisplayName": "Boot LUN",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6719,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4920,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6718,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4609,
+ "DisplayName": "IP Address",
+ "Description": None,
+ "Value": "0.0.0.0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6717,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4537,
+ "DisplayName": "iSCSI Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6716,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4698,
+ "DisplayName": "TCP Port",
+ "Description": None,
+ "Value": "3260",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 67,
+ "DisplayName": "NIC.Integrated.1-2-1",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 32761,
+ "DisplayName": "FCoE Target 01",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6788,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4769,
+ "DisplayName": "Boot LUN",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6801,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5083,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6787,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4734,
+ "DisplayName": "Virtual LAN ID",
+ "Description": None,
+ "Value": "1",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6786,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4641,
+ "DisplayName": "World Wide Port Name Target",
+ "Description": None,
+ "Value": "00:00:00:00:00:00:00:00",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32762,
+ "DisplayName": "FCoE Target 02",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6799,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5113,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32763,
+ "DisplayName": "FCoE Target 03",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6798,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5122,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32764,
+ "DisplayName": "FCoE Target 04",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6800,
+ "CustomId": 0,
+ "AttributeEditInfoId": 5082,
+ "DisplayName": "Boot Order",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32870,
+ "DisplayName": "iSCSI General Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6796,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4768,
+ "DisplayName": "CHAP Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 6795,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4767,
+ "DisplayName": "CHAP Mutual Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32871,
+ "DisplayName": "iSCSI Initiator Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6778,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4601,
+ "DisplayName": "CHAP ID",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6777,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4681,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32867,
+ "DisplayName": "iSCSI Target 01",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6785,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4802,
+ "DisplayName": "Boot LUN",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6784,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4920,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6783,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4609,
+ "DisplayName": "IP Address",
+ "Description": None,
+ "Value": "0.0.0.0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6782,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4537,
+ "DisplayName": "iSCSI Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6781,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4698,
+ "DisplayName": "TCP Port",
+ "Description": None,
+ "Value": "3260",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 65,
+ "DisplayName": "NIC.Integrated.1-3-1",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 32870,
+ "DisplayName": "iSCSI General Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6677,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4768,
+ "DisplayName": "CHAP Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 6676,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4767,
+ "DisplayName": "CHAP Mutual Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32871,
+ "DisplayName": "iSCSI Initiator Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6664,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4601,
+ "DisplayName": "CHAP ID",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6663,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4681,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32867,
+ "DisplayName": "iSCSI Target 01",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6671,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4802,
+ "DisplayName": "Boot LUN",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6670,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4920,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6669,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4609,
+ "DisplayName": "IP Address",
+ "Description": None,
+ "Value": "0.0.0.0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6668,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4537,
+ "DisplayName": "iSCSI Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6667,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4698,
+ "DisplayName": "TCP Port",
+ "Description": None,
+ "Value": "3260",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 68,
+ "DisplayName": "NIC.Integrated.1-4-1",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 32870,
+ "DisplayName": "iSCSI General Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6852,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4768,
+ "DisplayName": "CHAP Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 6851,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4767,
+ "DisplayName": "CHAP Mutual Authentication",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32871,
+ "DisplayName": "iSCSI Initiator Parameters",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6838,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4601,
+ "DisplayName": "CHAP ID",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6837,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4681,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ },
+ {
+ "GroupNameId": 32867,
+ "DisplayName": "iSCSI Target 01",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 6846,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4802,
+ "DisplayName": "Boot LUN",
+ "Description": None,
+ "Value": "0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6845,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4920,
+ "DisplayName": "CHAP Secret",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6844,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4609,
+ "DisplayName": "IP Address",
+ "Description": None,
+ "Value": "0.0.0.0",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6843,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4537,
+ "DisplayName": "iSCSI Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 6842,
+ "CustomId": 0,
+ "AttributeEditInfoId": 4698,
+ "DisplayName": "TCP Port",
+ "Description": None,
+ "Value": "3260",
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 5,
+ "DisplayName": "System",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 33016,
+ "DisplayName": "Server Operating System",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8513,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2497,
+ "DisplayName": "ServerOS 1 Server Host Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ }
+ ]
+ },
+ {
+ "GroupNameId": 33019,
+ "DisplayName": "Server Topology",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 8593,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2248,
+ "DisplayName": "ServerTopology 1 Aisle Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 8551,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2247,
+ "DisplayName": "ServerTopology 1 Data Center Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ },
+ {
+ "AttributeId": 8371,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2249,
+ "DisplayName": "ServerTopology 1 Rack Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 8370,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2250,
+ "DisplayName": "ServerTopology 1 Rack Slot",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 3
+ },
+ {
+ "AttributeId": 8346,
+ "CustomId": 0,
+ "AttributeEditInfoId": 2500,
+ "DisplayName": "ServerTopology 1 Room Name",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": True,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 2
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ }]
+ },
+ 'message': SUCCESS_MSG, "success": True, 'case': "template with id",
+ 'mparams': {"template_id": 1234}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "temp1", "Type": 1000}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "template with name",
+ 'mparams': {"template_name": "temp1"}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "temp2", "Type": 1000}]},
+ 'message': "Template with name 'temp1' not found.", "success": True, 'case': "template with name",
+ 'mparams': {"template_name": "temp1"}},
+ {"json_data": {'Id': 1234, 'Name': "temp1", "Type": 1000},
+ 'message': SUCCESS_MSG, "success": True, 'case': "profile with id",
+ 'mparams': {"profile_id": 1234}},
+ {"json_data": {"value": [{'Id': 1235, 'ProfileName': "prof0", "Type": 1000},
+ {'Id': 1234, 'ProfileName': "prof1", "Type": 1000}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "profile with name",
+ 'mparams': {"profile_name": "prof1"}},
+ {"json_data": {"value": [{'Id': 1235, 'ProfileName': "prof0", "Type": 1000},
+ {'Id': 1234, 'ProfileName': "prof1", "Type": 1000}]},
+ 'message': "Profiles with profile_name prof2 not found.", "success": True, 'case': "profile with name",
+ 'mparams': {"profile_name": "prof2"}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "prof1", "Type": 1000}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "template with name",
+ 'mparams': {"system_query_options": {"filter": "ProfileName eq 'prof2'"}}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "prof1", "Type": 1000}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "template with name",
+ 'mparams': {}},
+ ])
+ def test_ome_profile_info_success(self, params, ome_connection_mock_for_profile_info, ome_response_mock,
+ ome_default_args, module_mock):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_profile_info.get_all_items_with_pagination.return_value = params['json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
+ @pytest.mark.parametrize("exc_type",
+ [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
+ def test_ome_profile_info_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_connection_mock_for_profile_info, ome_response_mock):
+ ome_default_args.update({"template_id": 1234})
+ ome_response_mock.status_code = 400
+ ome_response_mock.success = False
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ if exc_type == URLError:
+ mocker.patch(MODULE_PATH + 'get_template_details', side_effect=exc_type("url open error"))
+ result = self._run_module(ome_default_args)
+ assert result["unreachable"] is True
+ elif exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + 'get_template_details', side_effect=exc_type("exception message"))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ else:
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py
index d83725d25..34ebb99a8 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profile_info.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -20,7 +20,7 @@ from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ansible.module_utils._text import to_text
from ansible_collections.dellemc.openmanage.plugins.modules import ome_server_interface_profile_info
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_server_interface_profile_info.'
@@ -52,8 +52,10 @@ class TestOMEMSIP(FakeAnsibleModule):
assert err.value.args[0] == "Unable to complete the operation because the entered target " \
"device id(s) '25011' are invalid."
f_module = self.get_module_mock(params={"device_id": [25012]})
- ome_response_mock.json_data = {"Id": "HKRF20", "ServerServiceTag": "HKRF20", "value": [{"Network": []}]}
- ome_conn_mock_sip.json_data = [{"Id": "HKRF20", "ServerServiceTag": "HKRF20"}]
+ ome_response_mock.json_data = {
+ "Id": "HKRF20", "ServerServiceTag": "HKRF20", "value": [{"Network": []}]}
+ ome_conn_mock_sip.json_data = [
+ {"Id": "HKRF20", "ServerServiceTag": "HKRF20"}]
ome_conn_mock_sip.strip_substr_dict.return_value = {"Id": "HKRF20", "ServerServiceTag": "HKRF20",
"Networks": [{"Id": 10001}]}
result = self.module.get_sip_info(f_module, ome_conn_mock_sip)
@@ -64,31 +66,127 @@ class TestOMEMSIP(FakeAnsibleModule):
with pytest.raises(Exception) as err:
self._run_module(ome_default_args)
assert err.value.args[0]['msg'] == "one of the following is required: device_id, device_service_tag."
- ome_default_args.update({"device_id": [25011], "validate_certs": False})
+ ome_default_args.update(
+ {"device_id": [25011], "validate_certs": False})
mocker.patch(MODULE_PATH + 'check_domain_service')
- mocker.patch(MODULE_PATH + 'get_sip_info', return_value={"server_profiles": [{"Id": 25011}]})
+ mocker.patch(MODULE_PATH + 'get_sip_info',
+ return_value={"server_profiles": [{"Id": 25011}]})
result = self._run_module(ome_default_args)
assert result["msg"] == "Successfully retrieved the server interface profile information."
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"report_list": [{"Id": 25012, "DeviceServiceTag": "HKRF20"}],
+ "value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ 'message': "Unable to complete the operation because the server profile(s) for HKRF20 do not exist in the Fabric Manager.",
+ "check_domain_service": True,
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CDEV5008",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ },
+ 'mparams': {"device_service_tag": ['HKRF20']}
+ },
+ {"json_data": {"report_list": [{"Id": 25012, "DeviceServiceTag": "HKRF20"}],
+ "value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ 'message': "Unable to complete the operation because the server profile(s) for 25012 do not exist in the Fabric Manager.",
+ "check_domain_service": True,
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CDEV5008",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]}
+ },
+ 'mparams': {"device_id": [25012]}
+ },
+ {"json_data": {"report_list": [{"Id": 25012, "DeviceServiceTag": "HKRF20"}],
+ "value": [
+ {'Id': 1234, 'PublicAddress': "XX.XX.XX.XX", 'DeviceId': 1234, "Type": 1000}]},
+ 'message': "The information retrieval operation of server interface profile is supported only on OpenManage Enterprise Modular.",
+ 'http_error_json': {
+ "error": {
+ "code": "Base.1.0.GeneralError",
+ "message": "A general error has occurred. See ExtendedInfo for more information.",
+ "@Message.ExtendedInfo": [
+ {
+ "MessageId": "CGEN1006",
+ "RelatedProperties": [],
+ "Message": "Unable to process the request because an error occurred.",
+ "MessageArgs": [],
+ "Severity": "Critical",
+ "Resolution": "Retry the operation. If the issue persists, contact your system administrator."
+ }
+ ]
+ }
+ },
+ 'mparams': {"device_id": [25012]}
+ }
+ ])
+ def test_ome_sip_info_failure(self, params, ome_conn_mock_sip, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_conn_mock_sip.get_all_report_details.return_value = params[
+ 'json_data']
+ mocks = ["check_domain_service"]
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
+ if 'http_error_json' in params:
+ json_str = to_text(json.dumps(params.get('http_error_json', {})))
+ ome_conn_mock_sip.invoke_request.side_effect = HTTPError(
+ 'https://testhost.com', 401, 'http error message', {
+ "accept-type": "application/json"},
+ StringIO(json_str))
+ ome_default_args.update(params['mparams'])
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['msg'] == params['message']
+
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
def test_ome_sip_power_main_exception_case(self, exc_type, mocker, ome_default_args,
ome_conn_mock_sip, ome_response_mock):
- ome_default_args.update({"device_id": [25011], "validate_certs": False})
+ ome_default_args.update(
+ {"device_id": [25011], "validate_certs": False})
ome_response_mock.status_code = 400
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("url open error"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("url open error"))
result = self._run_module(ome_default_args)
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
- mocker.patch(MODULE_PATH + 'check_domain_service', side_effect=exc_type("exception message"))
+ mocker.patch(MODULE_PATH + 'check_domain_service',
+ side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'check_domain_service',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py
index dcb1688a0..1231f4404 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_server_interface_profiles.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
-# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -74,14 +74,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan"],
+ "names": ["testvlan"],
"state": "present"},
"team": False,
"untagged_network": 3},
@@ -132,14 +132,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan"],
+ "names": ["testvlan"],
"state": "present"},
"team": False,
"untagged_network": 10},
@@ -218,14 +218,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan", "VLAN 1"],
+ "names": ["testvlan", "VLAN 1"],
"state": "present"},
"team": False,
"untagged_network": 3},
@@ -259,14 +259,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan"],
+ "names": ["testvlan"],
"state": "present"},
"team": False,
"untagged_network": 3},
@@ -303,14 +303,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"job_wait": False, "device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan"],
+ "names": ["testvlan"],
"state": "present"},
"team": False,
"untagged_network": 3},
@@ -358,14 +358,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan"],
+ "names": ["testvlan"],
"state": "present"},
"team": False,
"untagged_network": 3},
@@ -413,14 +413,14 @@ class TestOmeSIPs(FakeAnsibleModule):
],
"NicBonded": False
}},
- "vlan_map": {"jagvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
+ "vlan_map": {"testvlan": 10155, "VLAN 1": 11569, "range120-125": 12350, "range130-135": 12352, "two": 14679,
"three": 14681},
"natives": {143: 10155, 1: 11569, 2: 14679, 3: 14681, 0: 0},
'mparams': {"device_service_tag": ["ABC1234"],
"nic_configuration": [{
"nic_identifier": "NIC.Mezzanine.1A-1-1",
"tagged_networks": {
- "names": ["jagvlan"],
+ "names": ["testvlan"],
"state": "present"},
"team": False,
"untagged_network": 3},
@@ -503,7 +503,7 @@ class TestOmeSIPs(FakeAnsibleModule):
"Networks": [
{
"Id": 10155,
- "Name": "jagvlan",
+ "Name": "testvlan",
"Description": None,
"VlanMaximum": 143,
"VlanMinimum": 143,
@@ -529,7 +529,7 @@ class TestOmeSIPs(FakeAnsibleModule):
"Networks": [
{
"Id": 10155,
- "Name": "jagvlan",
+ "Name": "testvlan",
"Description": None,
"VlanMaximum": 143,
"VlanMinimum": 143,
@@ -594,7 +594,7 @@ class TestOmeSIPs(FakeAnsibleModule):
[{"json_data": {"@odata.context": "/api/$metadata#Collection(NetworkConfigurationService.Network)",
"@odata.count": 6,
"value": [{"Id": 10155,
- "Name": "jagvlan",
+ "Name": "testvlan",
"VlanMaximum": 143,
"VlanMinimum": 143,
"Type": 1,
@@ -630,7 +630,7 @@ class TestOmeSIPs(FakeAnsibleModule):
"VlanMinimum": 3,
"Type": 3,
}]},
- "vlan_map": {"jagvlan": 10155,
+ "vlan_map": {"testvlan": 10155,
"VLAN 1": 11569,
"range120-125": 12350,
"range130-135": 12352,
@@ -689,7 +689,7 @@ class TestOmeSIPs(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_valid_service_tags',
- side_effect=exc_type('http://testhost.com',
+ side_effect=exc_type('https://testhost.com',
400,
'http error message',
{"accept-type": "application/json"},
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py
index 5d275f197..4f27b8081 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 3.6.0
-# Copyright (C) 2020-2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -164,7 +164,7 @@ class TestOmeSmartFabric(FakeAnsibleModule):
else:
for status_code, msg in {501: SYSTEM_NOT_SUPPORTED_ERROR_MSG, 400: 'http error message'}.items():
mocker.patch(MODULE_PATH + 'ome_smart_fabric.fabric_actions',
- side_effect=exc_type('http://testhost.com', status_code, msg,
+ side_effect=exc_type('https://testhost.com', status_code, msg,
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_info.py
new file mode 100644
index 000000000..afe071acd
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_info.py
@@ -0,0 +1,324 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.1.0
+# Copyright (C) 2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import json
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_smart_fabric_info
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from io import StringIO
+from ssl import SSLError
+from ansible.module_utils._text import to_text
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+
+
+@pytest.fixture
+def ome_connection_smart_fabric_info_mock(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(
+ MODULE_PATH + 'ome_smart_fabric_info.RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOMESmartFabricInfo(FakeAnsibleModule):
+ module = ome_smart_fabric_info
+
+ smart_fabric_details_dict = [{"Description": "Fabric f1",
+ "FabricDesignMapping": [
+ {
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "NODEID1"
+ },
+ {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "NODEID2"
+ }],
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "LifeCycleStatus": [
+ {
+ "Activity": "Create",
+ "Status": "2060"
+ }
+ ],
+ "Uplinks": [
+ {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "MediaType": "Ethernet",
+ "Name": "u1",
+ "NativeVLAN": 1}],
+ "Switches": [
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": True}],
+ "Servers": [
+ {
+ "ChassisServiceTag": "6H5S6Z2",
+ "ConnectionState": True,
+ "ConnectionStateReason": 101}],
+
+ "Multicast": [
+ {
+ "FloodRestrict": True,
+ "IgmpVersion": "3",
+ "MldVersion": "2"
+ }
+ ],
+ "FabricDesign": [
+ {
+ "FabricDesignNode": [
+ {
+ "ChassisName": "Chassis-X",
+ "NodeName": "Switch-B",
+ "Slot": "Slot-A2",
+ "Type": "WeaverSwitch"
+ },
+ {
+ "ChassisName": "Chassis-X",
+ "NodeName": "Switch-A",
+ "Slot": "Slot-A1",
+ "Type": "WeaverSwitch"
+ }
+ ],
+ "Name": "2xMX9116n_Fabric_Switching_Engines_in_same_chassis",
+ }
+ ],
+ "Name": "f2",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ }}]
+
+ @pytest.mark.parametrize("params", [{"json_data": {"Multicast": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Multicast",
+ "Id": "123hg"}}, "json_data_two": {"Multicast": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Multicast"}},
+ "json_data_three": {"Id": 123},
+ "output_one": {'Multicast': {'Id': "123hg"}}, "output_two": {}, "output_three": {"Id": 123}}])
+ def test_clean_data(self, params):
+ result_one = self.module.clean_data(params.get("json_data"))
+ result_two = self.module.clean_data(params.get("json_data_two"))
+ result_three = self.module.clean_data(params.get("json_data_three"))
+ assert result_one == params.get("output_one")
+ assert result_two == params.get("output_two")
+ assert result_three == params.get("output_three")
+
+ @pytest.mark.parametrize("params", [{"json_data": {
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "Name": "f1",
+ "Description": "Fabric f1",
+ "Switches@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Switches",
+ "Servers@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Servers",
+ "FabricDesign": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/FabricDesign"
+ },
+ "ValidationErrors@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/ValidationErrors",
+ "Uplinks@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Uplinks",
+ "Topology": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Topology"
+ },
+ "ISLLinks@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/ISLLinks",
+ "Multicast": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Multicast"
+ }
+ }}])
+ def test_fetch_smart_fabric_link_details(self, params, ome_connection_mock):
+ f_module = self.get_module_mock()
+ result = self.module.fetch_smart_fabric_link_details(
+ f_module, ome_connection_mock, params.get('json_data'))
+ assert result is not None
+
+ @pytest.mark.parametrize("params", [{"json_data": {
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "Name": "f1",
+ "Description": "Fabric f1",
+ "Switches@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Switches",
+ "Servers@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Servers",
+ "FabricDesign": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/FabricDesign"
+ },
+ "ValidationErrors@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/ValidationErrors",
+ "Uplinks@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Uplinks",
+ "Topology": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Topology"
+ },
+ "ISLLinks@odata.navigationLink": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/ISLLinks",
+ "Multicast": {
+ "@odata.id": "/api/NetworkService/Fabrics('61c20a59-9ed5-4ae5-b850-5e5acf42d2f2')/Multicast"
+ }
+ }}])
+ def test_fetch_smart_fabric_link_details_HTTPError_error_case(self, params, ome_default_args, mocker, ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric information."
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.fetch_smart_fabric_link_details(
+ f_module, ome_connection_mock, params.get('json_data'))
+ assert exc.value.args[0] == error_msg
+
+ def test_ome_smart_fabric_info_main_success_case_all(self, ome_default_args, ome_connection_smart_fabric_info_mock,
+ ome_response_mock):
+ ome_response_mock.status_code = 200
+ result = self._run_module(ome_default_args)
+ assert 'smart_fabric_info' in result
+ assert result['msg'] == "Successfully retrieved the smart fabric information."
+
+ def test_ome_smart_fabric_main_success_case_fabric_id(self, mocker, ome_default_args, ome_connection_smart_fabric_info_mock,
+ ome_response_mock):
+ ome_default_args.update({"fabric_id": "1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = {"value": [{"fabric_id": "1"}]}
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'ome_smart_fabric_info.strip_smart_fabric_info',
+ return_value=self.smart_fabric_details_dict)
+ result = self._run_module(ome_default_args)
+ assert 'smart_fabric_info' in result
+ assert result['msg'] == "Successfully retrieved the smart fabric information."
+
+ @pytest.mark.parametrize("params", [{"fabric_name": "f1",
+ "json_data": {"value": [{"Description": "Fabric f1",
+ "FabricDesignMapping": [
+ {
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "NODEID1"
+ },
+ {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "NODEID2"
+ }],
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "LifeCycleStatus": [
+ {
+ "Activity": "Create",
+ "Status": "2060"
+ }
+ ],
+ "Name": "f1",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ }}]
+ }}])
+ def test_ome_smart_fabric_main_success_case_fabric_name(self, mocker, params, ome_default_args, ome_connection_smart_fabric_info_mock,
+ ome_response_mock):
+ ome_default_args.update({"fabric_name": params["fabric_name"]})
+ ome_response_mock.success = True
+ ome_response_mock.status_code = 200
+ ome_response_mock.json_data = params["json_data"]
+ mocker.patch(
+ MODULE_PATH + 'ome_smart_fabric_info.strip_smart_fabric_info',
+ return_value=self.smart_fabric_details_dict)
+ result = self._run_module(ome_default_args)
+ assert 'smart_fabric_info' in result
+ assert result['msg'] == "Successfully retrieved the smart fabric information."
+
+ @pytest.mark.parametrize("params", [{"fabric_name": "f1",
+ "json_data": {"value": [{"Description": "Fabric f1",
+ "FabricDesignMapping": [
+ {
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "NODEID1"
+ },
+ {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "NODEID2"
+ }],
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "LifeCycleStatus": [
+ {
+ "Activity": "Create",
+ "Status": "2060"
+ }
+ ],
+ "Name": "f2",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ }}]
+ }}])
+ def test_ome_smart_fabric_main_failure_case_fabric_name(self, params, ome_default_args, ome_connection_smart_fabric_info_mock,
+ ome_response_mock):
+ ome_default_args.update({"fabric_name": params["fabric_name"]})
+ ome_response_mock.success = True
+ ome_response_mock.status_code = 200
+ ome_response_mock.json_data = params["json_data"]
+ result = self._run_module(ome_default_args)
+ assert result['msg'] == 'Unable to retrieve smart fabric information with fabric name {0}.'.format(
+ params["fabric_name"])
+
+ def test_ome_smart_fabric_main_failure_case(self, ome_default_args, ome_connection_smart_fabric_info_mock,
+ ome_response_mock):
+ ome_response_mock.success = True
+ ome_response_mock.status_code = 200
+ ome_response_mock.json_data = {}
+ result = self._run_module(ome_default_args)
+ assert 'smart_fabric_info' not in result
+ assert result['msg'] == "Unable to retrieve smart fabric information."
+
+ @pytest.mark.parametrize("params", [{"fabric_id": "f1"}])
+ def test_get_smart_fabric_details_via_id_HTTPError_error_case(self, params, ome_default_args, mocker, ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric information with fabric ID {0}.".format(
+ params.get('fabric_id'))
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.get_smart_fabric_details_via_id(
+ f_module, ome_connection_mock, params.get('fabric_id'))
+ assert exc.value.args[0] == error_msg
+
+ @pytest.mark.parametrize("exc_type",
+ [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
+ def test_ome_smart_fabric_info_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_connection_smart_fabric_info_mock,
+ ome_response_mock):
+ ome_response_mock.status_code = 404
+ ome_response_mock.success = False
+ fabric_name_dict = {"fabric_name": "f1"}
+ ome_default_args.update(fabric_name_dict)
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ ome_connection_smart_fabric_info_mock.invoke_request.side_effect = exc_type(
+ 'test')
+ else:
+ ome_connection_smart_fabric_info_mock.invoke_request.side_effect = exc_type('https://testhost.com', 400,
+ 'http error message',
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ if not exc_type == URLError:
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ else:
+ result = self._run_module(ome_default_args)
+ assert 'smart_fabric_info' not in result
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py
index 6670499e9..7d62223aa 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -378,7 +378,7 @@ class TestOmeSmartFabricUplink(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'get_item_id',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink_info.py
new file mode 100644
index 000000000..18fbe8816
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_smart_fabric_uplink_info.py
@@ -0,0 +1,1155 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+from io import StringIO
+from ssl import SSLError
+
+import pytest
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_smart_fabric_uplink_info
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_smart_fabric_uplink_info.'
+
+
+@pytest.fixture
+def ome_connection_mock_for_smart_fabric_uplink_info(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeSmartFabricUplinkInfo(FakeAnsibleModule):
+ module = ome_smart_fabric_uplink_info
+
+ uplink_info = [{
+ "Description": "",
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "MediaType": "Ethernet",
+ "Name": "u1",
+ "NativeVLAN": 1,
+ "Networks": [{
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "Description": "null",
+ "Id": 10155,
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d",
+ "Name": "testvlan",
+ "Type": 1,
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143
+ }],
+ "Ports": [{
+ "AdminStatus": "Enabled",
+ "BlinkStatus": "OFF",
+ "ConfiguredSpeed": "0",
+ "CurrentSpeed": "0",
+ "Description": "",
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "MaxSpeed": "0",
+ "MediaType": "Ethernet",
+ "Name": "",
+ "NodeServiceTag": "SVCTAG1",
+ "OpticsType": "NotPresent",
+ "PortNumber": "ethernet1/1/35",
+ "Role": "Uplink",
+ "Status": "Down",
+ "Type": "PhysicalEthernet"
+ }, {
+ "AdminStatus": "Enabled",
+ "BlinkStatus": "OFF",
+ "ConfiguredSpeed": "0",
+ "CurrentSpeed": "0",
+ "Description": "",
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "MaxSpeed": "0",
+ "MediaType": "Ethernet",
+ "Name": "",
+ "NodeServiceTag": "SVCTAG1",
+ "OpticsType": "NotPresent",
+ "PortNumber": "ethernet1/1/35",
+ "Role": "Uplink",
+ "Status": "Down",
+ "Type": "PhysicalEthernet"
+ }],
+ "Summary": {
+ "NetworkCount": 1,
+ "PortCount": 2
+ },
+ "UfdEnable": "Disabled"
+ }]
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "json_data": {"value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {"PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]
+ }]
+ },
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"}])
+ def test_uplink_details_from_fabric_id(self, params, ome_connection_mock_for_smart_fabric_uplink_info, ome_response_mock):
+ ome_response_mock.success = params["success"]
+ ome_response_mock.json_data = params["json_data"]
+ f_module = self.get_module_mock(params=params.get("fabric_id"))
+ resp = self.module.get_uplink_details_from_fabric_id(f_module, ome_connection_mock_for_smart_fabric_uplink_info,
+ params.get("fabric_id"))
+ assert resp[0]["Id"] == params["uplink_id"]
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "json_data": {"value": [{
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "Name": "f1",
+ "Description": "Fabric f1",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ },
+ "LifeCycleStatus": [{
+ "Activity": "Create",
+ "Status": "2060"
+ }],
+ "FabricDesignMapping": [{
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "SVCTAG1"
+ }, {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "SVCTAG1"
+ }],
+ "Actions": "null",
+ }]},
+ "fabric_name": "f1",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ }]
+ )
+ def test_get_fabric_name_details(self, params, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_response_mock.success = params["success"]
+ ome_response_mock.json_data = params["json_data"]
+ f_module = self.get_module_mock(params=params.get("fabric_name"))
+ fabric_id = self.module.get_fabric_id_from_name(f_module, ome_connection_mock_for_smart_fabric_uplink_info,
+ params.get("fabric_name"))
+ assert fabric_id == params["fabric_id"]
+
+ @pytest.mark.parametrize("params", [{"inp": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2", "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"},
+ "success": True,
+ "json_data": {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {
+ "PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]},
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ }]
+ )
+ def test_get_uplink_details(self, params, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_response_mock.success = params["success"]
+ ome_response_mock.json_data = params["json_data"]
+ f_module = self.get_module_mock(params=params.get("inp", {}))
+ resp = self.module.get_uplink_details(f_module, ome_connection_mock_for_smart_fabric_uplink_info,
+ params.get("fabric_id"), params.get("uplink_id"))
+ assert resp[0]["Id"] == params["uplink_id"]
+
+ @pytest.mark.parametrize("params", [{"inp": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"},
+ "success": True,
+ "json_data": {
+ "value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {
+ "PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]
+ }]},
+ "uplink_name": "u1",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"
+ }]
+ )
+ def test_get_uplink_name_details(self, params, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_response_mock.success = params["success"]
+ ome_response_mock.json_data = params["json_data"]
+ f_module = self.get_module_mock(params=params.get("inp", {}))
+ uplink_id = self.module.get_uplink_id_from_name(f_module, ome_connection_mock_for_smart_fabric_uplink_info,
+ params.get("uplink_name"), params.get("fabric_id"))
+ assert uplink_id == params["uplink_id"]
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "mparams": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f"},
+ "msg": "Successfully retrieved the fabric uplink information.",
+ "get_uplink_details_from_fabric_id": {"value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {
+ "PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]
+ }]}
+ }, {"success": False,
+ "mparams": {"fabric_id": "f1"},
+ "msg": "Unable to retrieve smart fabric uplink information.",
+ "get_uplink_details_from_fabric_id": {}},
+ ]
+ )
+ def test_main_case_success_all(self, params, ome_connection_mock_for_smart_fabric_uplink_info, ome_default_args, ome_response_mock,
+ mocker):
+ mocker.patch(MODULE_PATH + 'get_uplink_details_from_fabric_id',
+ return_value=params.get("get_uplink_details_from_fabric_id"))
+ mocker.patch(MODULE_PATH + 'strip_uplink_info',
+ return_value=params.get("get_uplink_details_from_fabric_id"))
+ ome_response_mock.success = True
+ ome_response_mock.json_data = params.get("strip_uplink_info")
+ ome_default_args.update(params.get('mparams'))
+ result = self._run_module(ome_default_args)
+ assert result["msg"] == 'Successfully retrieved the fabric uplink information.'
+
+ def test_ome_smart_fabric_main_success_case_fabric_id(self, mocker, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"fabric_id": "1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = {"value": [{"fabric_id": "1"}]}
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'strip_uplink_info',
+ return_value=self.uplink_info)
+ result = self._run_module(ome_default_args)
+ assert 'uplink_info' in result
+ assert result['msg'] == "Successfully retrieved the fabric uplink information."
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "json_data": {"value": [{
+ "Id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "Name": "f1",
+ "Description": "Fabric f1",
+ "OverrideLLDPConfiguration": "Disabled",
+ "ScaleVLANProfile": "Enabled",
+ "Summary": {
+ "NodeCount": 2,
+ "ServerCount": 1,
+ "UplinkCount": 1
+ },
+ "LifeCycleStatus": [{
+ "Activity": "Create",
+ "Status": "2060"
+ }],
+ "FabricDesignMapping": [{
+ "DesignNode": "Switch-A",
+ "PhysicalNode": "SVCTAG1"
+ }, {
+ "DesignNode": "Switch-B",
+ "PhysicalNode": "SVCTAG1"
+ }],
+ "Actions": "null",
+ }]},
+ "fabric_name": "f1",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ }]
+ )
+ def test_ome_smart_fabric_main_success_case_fabric_name(self, params, mocker, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"fabric_name": "f1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = params["json_data"]
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'strip_uplink_info',
+ return_value=self.uplink_info)
+ result = self._run_module(ome_default_args)
+ assert 'uplink_info' in result
+ assert result['msg'] == "Successfully retrieved the fabric uplink information."
+
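+ # Passing uplink_id without fabric_id or fabric_name should fail with a validation message.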
+ @pytest.mark.parametrize("params", [{"inp": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2", "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"},
+ "success": True,
+ "json_data": {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {
+ "PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]},
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ }]
+ )
+ def test_ome_smart_fabric_main_failure_case_uplink_id(self, params, mocker, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"uplink_id": "u1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = params["json_data"]
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'strip_uplink_info',
+ return_value=self.uplink_info)
+ result = self._run_module(ome_default_args)
+ assert result['msg'] == "fabric_id or fabric_name is required along with uplink_id."
+
+ @pytest.mark.parametrize("params", [{"inp": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2", "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"},
+ "success": True,
+ "json_data": {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {
+ "PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]},
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ }]
+ )
+ def test_ome_smart_fabric_main_success_case_uplink_id(self, params, mocker, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"fabric_id": "f1", "uplink_id": "u1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = params["json_data"]
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'strip_uplink_info',
+ return_value=self.uplink_info)
+ result = self._run_module(ome_default_args)
+ assert 'uplink_info' in result
+ assert result['msg'] == "Successfully retrieved the fabric uplink information."
+
+ @pytest.mark.parametrize("params", [{"inp": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2", "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"},
+ "success": True,
+ "json_data": {
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {
+ "PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]},
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2"
+ }]
+ )
+ def test_ome_smart_fabric_main_failure_case_uplink_name(self, params, mocker, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"uplink_name": "u1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = params["json_data"]
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'strip_uplink_info',
+ return_value=self.uplink_info)
+ result = self._run_module(ome_default_args)
+ assert result['msg'] == "fabric_id or fabric_name is required along with uplink_name."
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "json_data": {"value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {"PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]
+ }]
+ },
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"}])
+ def test_ome_smart_fabric_main_success_case_uplink_name(self, params, mocker, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"fabric_id": "f1", "uplink_name": "u1"})
+ ome_response_mock.success = True
+ ome_response_mock.json_data = params.get("json_data")
+ ome_response_mock.status_code = 200
+ mocker.patch(
+ MODULE_PATH + 'strip_uplink_info',
+ return_value=self.uplink_info)
+ result = self._run_module(ome_default_args)
+ assert 'uplink_info' in result
+ assert result['msg'] == "Successfully retrieved the fabric uplink information."
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "json_data": {"value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {"PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": ""
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null"
+ }]
+ }],
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null"
+ }],
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": ""
+ }]
+ },
+ 'message': "Successfully retrieved the fabric uplink information.",
+ 'mparams': {"fabric_id": "f1",
+ "uplink_id": "u1"}
+ }, {"success": True,
+ "json_data": {"value": [{
+ "Uplinks@odata.navigationLink": "/odata/UpLink/1ad54420/b145/49a1/9779/21a579ef6f2d",
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {"PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": ""
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null"
+ }]
+ }],
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null"
+ }],
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": ""
+ }]
+ },
+ 'message': "Successfully retrieved the fabric uplink information.",
+ 'mparams': {}
+ }])
+ def test_ome_smart_fabric_exit_json(self, params, ome_default_args, ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert 'uplink_info' in result
+ assert result['msg'] == params['message']
+
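+ # get_all_uplink_details is expected to return an empty list for this mocked response.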
+ @pytest.mark.parametrize("params", [{"success": True,
+ "json_data": {"value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {"PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]
+ }]
+ },
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "uplink_id": "1ad54420-b145-49a1-9779-21a579ef6f2d"}])
+ def test_get_all_uplink_details(self, params, ome_connection_mock_for_smart_fabric_uplink_info, ome_response_mock):
+ ome_response_mock.success = params["success"]
+ ome_response_mock.json_data = params["json_data"]
+ f_module = self.get_module_mock()
+ resp = self.module.get_all_uplink_details(
+ f_module, ome_connection_mock_for_smart_fabric_uplink_info)
+ assert resp == []
+
+ @pytest.mark.parametrize("params", [{"success": True,
+ "inp": {"fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "uplink_name": "1ad54420-b145-49a1-9779-21a579ef6f2d"},
+ "json_data": {"value": [{
+ "Id": "1ad54420-b145-49a1-9779-21a579ef6f2d",
+ "Name": "u1",
+ "Description": "",
+ "MediaType": "Ethernet",
+ "NativeVLAN": 1,
+ "Summary": {"PortCount": 2,
+ "NetworkCount": 1
+ },
+ "UfdEnable": "Disabled",
+ "Ports@odata.count": 2,
+ "Ports": [{
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }, {
+ "Id": "SVCTAG1:ethernet1/1/35",
+ "Name": "",
+ "Description": "",
+ "Type": "PhysicalEthernet",
+ "MediaType": "Ethernet",
+ "NodeServiceTag": "SVCTAG1",
+ "PortNumber": "ethernet1/1/35",
+ "Status": "Down",
+ "AdminStatus": "Enabled",
+ "CurrentSpeed": "0",
+ "MaxSpeed": "0",
+ "ConfiguredSpeed": "0",
+ "OpticsType": "NotPresent",
+ "BlinkStatus": "OFF",
+ "Role": "Uplink"
+ }],
+ "Networks@odata.count": 1,
+ "Networks": [{
+ "Id": 10155,
+ "Name": "testvlan",
+ "Description": "null",
+ "VlanMaximum": 143,
+ "VlanMinimum": 143,
+ "Type": 1,
+ "CreatedBy": "system",
+ "CreationTime": "2018-09-25 14:46:12.374",
+ "UpdatedBy": "root",
+ "UpdatedTime": "2019-06-27 15:06:22.836",
+ "InternalRefNWUUId": "f15a36b6-e3d3-46b2-9e7d-bf9cd66e180d"
+ }]
+ }]
+ },
+ "fabric_id": "61c20a59-9ed5-4ae5-b850-5e5acf42d2f2",
+ "uplink_name": "1ad54420-b145-49a1-9779-21a579ef6f2d"}])
+ def test_get_uplink_name_failure_case(self, params, mocker, ome_connection_mock_for_smart_fabric_uplink_info, ome_response_mock, ome_default_args):
+ ome_default_args.update(params.get("inp"))
+ ome_response_mock.success = params["success"]
+ ome_response_mock.json_data = params["json_data"]
+ f_module = self.get_module_mock(params=params.get("inp"))
+ mocker.patch(
+ MODULE_PATH + 'get_uplink_id_from_name',
+ return_value="")
+ uplink_id = self.module.get_uplink_id_from_name(ome_default_args)
+ assert uplink_id == ""
+
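+ # The HTTPError cases below verify that each helper raises with the expected error message when invoke_request fails.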
+ @pytest.mark.parametrize("params", [{"uplink_name": "f1", "fabric_id": "u1"}])
+ def test_get_uplink_id_from_name_HTTPError_error_case(self, params, ome_default_args, mocker,
+ ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric uplink information."
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.get_uplink_id_from_name(f_module, ome_connection_mock, params.get("uplink_name"),
+ params.get('fabric_id'))
+ assert exc.value.args[0] == error_msg
+
+ @pytest.mark.parametrize("params", [{"fabric_name": "f1"}])
+ def test_get_all_uplink_details_HTTPError_error_case(self, params, ome_default_args, mocker,
+ ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric uplink information."
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.get_all_uplink_details(f_module, ome_connection_mock)
+ assert exc.value.args[0] == error_msg
+
+ @pytest.mark.parametrize("params", [{"fabric_name": "f1"}])
+ def test_get_fabric_id_from_name_HTTPError_error_case(self, params, ome_default_args, mocker,
+ ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric uplink information."
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.get_fabric_id_from_name(
+ f_module, ome_connection_mock, params.get('fabric_name'))
+ assert exc.value.args[0] == error_msg
+
+ @pytest.mark.parametrize("params", [{"fabric_id": "f1", "uplink_id": "u1"}])
+ def test_get_uplink_details_HTTPError_error_case(self, params, ome_default_args, mocker,
+ ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric uplink information with uplink ID {0}.".format(
+ params.get('uplink_id'))
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.get_uplink_details(f_module, ome_connection_mock, params.get(
+ 'fabric_id'), params.get('uplink_id'))
+ assert exc.value.args[0] == error_msg
+
+ @pytest.mark.parametrize("params", [{"fabric_id": "f1"}])
+ def test_get_uplink_details_from_fabric_id_HTTPError_error_case(self, params, ome_default_args, mocker,
+ ome_connection_mock):
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ error_msg = "Unable to retrieve smart fabric uplink information with fabric ID {0}.".format(
+ params.get('fabric_id'))
+ ome_connection_mock.invoke_request.side_effect = HTTPError('https://testdell.com', 404,
+ error_msg,
+ {"accept-type": "application/json"},
+ StringIO(json_str))
+ f_module = self.get_module_mock()
+ with pytest.raises(Exception) as exc:
+ self.module.get_uplink_details_from_fabric_id(
+ f_module, ome_connection_mock, params.get('fabric_id'))
+ assert exc.value.args[0] == error_msg
+
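+ # Exercises the module's top-level exception handling: URLError marks the host unreachable, other exceptions fail the module.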
+ @pytest.mark.parametrize("exc_type",
+ [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
+ def test_ome_smart_fabric_uplink_info_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_connection_mock_for_smart_fabric_uplink_info,
+ ome_response_mock):
+ ome_default_args.update({"fabric_id": "f1"})
+ ome_response_mock.status_code = 400
+ ome_response_mock.success = False
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ if exc_type == URLError:
+ mocker.patch(MODULE_PATH + 'get_uplink_details_from_fabric_id',
+ side_effect=exc_type("url open error"))
+ result = self._run_module(ome_default_args)
+ assert result["unreachable"] is True
+ elif exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + 'get_uplink_details_from_fabric_id',
+ side_effect=exc_type("exception message"))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ else:
+ mocker.patch(MODULE_PATH + 'get_uplink_details_from_fabric_id',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py
index 27c84ffab..35b6f7b44 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.2.0
-# Copyright (C) 2019-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2019-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -31,7 +31,8 @@ def ome_connection_mock_for_template(mocker, ome_response_mock):
connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
- ome_connection_mock_obj.get_all_report_details.return_value = {"report_list": []}
+ ome_connection_mock_obj.get_all_report_details.return_value = {
+ "report_list": []}
return ome_connection_mock_obj
@@ -51,8 +52,10 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_connection_mock_for_template.get_all_report_details.return_value = {
"report_list": [{"Id": Constants.device_id1,
"DeviceServiceTag": Constants.service_tag1}]}
- f_module = self.get_module_mock({'device_id': [], 'device_service_tag': [Constants.service_tag1]})
- data = self.module.get_device_ids(f_module, ome_connection_mock_for_template)
+ f_module = self.get_module_mock(
+ {'device_id': [], 'device_service_tag': [Constants.service_tag1]})
+ data = self.module.get_device_ids(
+ f_module, ome_connection_mock_for_template)
assert data == [Constants.device_id1]
def test_get_device_ids_failure_case01(self, ome_connection_mock_for_template, ome_response_mock, ome_default_args):
@@ -60,7 +63,8 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock.success = False
f_module = self.get_module_mock(params={'device_id': ["#@!1"]})
with pytest.raises(Exception) as exc:
- self.module.get_device_ids(f_module, ome_connection_mock_for_template)
+ self.module.get_device_ids(
+ f_module, ome_connection_mock_for_template)
assert exc.value.args[0] == "Unable to complete the operation because the entered target device id(s) " \
"'{0}' are invalid.".format("#@!1")
@@ -205,9 +209,11 @@ class TestOmeTemplate(FakeAnsibleModule):
{"Id": Constants.device_id2,
"DeviceServiceTag": "tag2"}
]}
- f_module = self.get_module_mock(params={'device_id': [Constants.device_id2], 'device_service_tag': ["abcd"]})
+ f_module = self.get_module_mock(
+ params={'device_id': [Constants.device_id2], 'device_service_tag': ["abcd"]})
with pytest.raises(Exception) as exc:
- self.module.get_device_ids(f_module, ome_connection_mock_for_template)
+ self.module.get_device_ids(
+ f_module, ome_connection_mock_for_template)
assert exc.value.args[0] == "Unable to complete the operation because the entered target service tag(s) " \
"'{0}' are invalid.".format('abcd')
@@ -217,9 +223,11 @@ class TestOmeTemplate(FakeAnsibleModule):
"report_list": [{"Id": Constants.device_id1,
"DeviceServiceTag": Constants.service_tag1}
], "resp_obj": ome_response_mock}
- f_module = self.get_module_mock(params={'device_service_tag': [Constants.service_tag1], 'device_id': []})
+ f_module = self.get_module_mock(
+ params={'device_service_tag': [Constants.service_tag1], 'device_id': []})
with pytest.raises(Exception) as exc:
- device_ids = self.module.get_device_ids(f_module, ome_connection_mock_for_template)
+ device_ids = self.module.get_device_ids(
+ f_module, ome_connection_mock_for_template)
assert exc.value.args[0] == "Failed to fetch the device ids."
def test_get_view_id_success_case(self, ome_connection_mock_for_template, ome_response_mock):
@@ -237,7 +245,8 @@ class TestOmeTemplate(FakeAnsibleModule):
"SourceDeviceId": 2224}])
def test_get_create_payload(self, param, ome_response_mock, ome_connection_mock_for_template):
f_module = self.get_module_mock(params=param)
- data = self.module.get_create_payload(f_module, ome_connection_mock_for_template, 2224, 4)
+ data = self.module.get_create_payload(
+ f_module, ome_connection_mock_for_template, 2224, 4)
assert data['Fqdds'] == "All"
def test_get_template_by_id_success_case(self, ome_response_mock):
@@ -249,7 +258,8 @@ class TestOmeTemplate(FakeAnsibleModule):
assert data
def test_get_template_by_name_success_case(self, ome_response_mock, ome_connection_mock_for_template):
- ome_response_mock.json_data = {'value': [{"Name": "test Sample Template import1", "Id": 24}]}
+ ome_response_mock.json_data = {
+ 'value': [{"Name": "test Sample Template import1", "Id": 24}]}
ome_response_mock.status_code = 200
ome_response_mock.success = True
f_module = self.get_module_mock()
@@ -259,20 +269,24 @@ class TestOmeTemplate(FakeAnsibleModule):
assert data["Id"] == 24
def test_get_group_devices_all(self, ome_response_mock, ome_connection_mock_for_template):
- ome_response_mock.json_data = {'value': [{"Name": "Device1", "Id": 24}]}
+ ome_response_mock.json_data = {
+ 'value': [{"Name": "Device1", "Id": 24}]}
ome_response_mock.status_code = 200
ome_response_mock.success = True
f_module = self.get_module_mock()
- data = self.module.get_group_devices_all(ome_connection_mock_for_template, "uri")
+ data = self.module.get_group_devices_all(
+ ome_connection_mock_for_template, "uri")
assert data == [{"Name": "Device1", "Id": 24}]
def _test_get_template_by_name_fail_case(self, ome_response_mock):
- ome_response_mock.json_data = {'value': [{"Name": "template by name for template name", "Id": 12}]}
+ ome_response_mock.json_data = {
+ 'value': [{"Name": "template by name for template name", "Id": 12}]}
ome_response_mock.status_code = 500
ome_response_mock.success = False
f_module = self.get_module_mock()
with pytest.raises(Exception) as exc:
- self.module.get_template_by_name("template by name for template name", f_module, ome_response_mock)
+ self.module.get_template_by_name(
+ "template by name for template name", f_module, ome_response_mock)
assert exc.value.args[0] == "Unable to complete the operation because the" \
" requested template with name {0} is not present." \
.format("template by name for template name")
@@ -305,7 +319,8 @@ class TestOmeTemplate(FakeAnsibleModule):
return_value=["Deployment"])
mocker.patch(MODULE_PATH + 'get_create_payload',
return_value=params["mid"])
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
assert data == params["out"]
modify_payload = {"command": "modify", "device_id": [25007], "template_id": 1234,
@@ -334,68 +349,90 @@ class TestOmeTemplate(FakeAnsibleModule):
return_value={})
mocker.patch(MODULE_PATH + 'get_modify_payload',
return_value={})
- mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"})
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ return_value={"Id": 1234, "Name": "templ1"})
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
assert data == ('TemplateService/Templates(1234)', {}, 'PUT')
def test__get_resource_parameters_delete_success_case(self, mocker, ome_response_mock,
ome_connection_mock_for_template):
- f_module = self.get_module_mock({"command": "delete", "template_id": 1234})
- mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"})
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
+ f_module = self.get_module_mock(
+ {"command": "delete", "template_id": 1234})
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ return_value={"Id": 1234, "Name": "templ1"})
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
assert data == ('TemplateService/Templates(1234)', {}, 'DELETE')
def test__get_resource_parameters_export_success_case(self, mocker, ome_response_mock,
ome_connection_mock_for_template):
- f_module = self.get_module_mock({"command": "export", "template_id": 1234})
- mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"})
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
- assert data == ('TemplateService/Actions/TemplateService.Export', {'TemplateId': 1234}, 'POST')
+ f_module = self.get_module_mock(
+ {"command": "export", "template_id": 1234})
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ return_value={"Id": 1234, "Name": "templ1"})
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
+ assert data == (
+ 'TemplateService/Actions/TemplateService.Export', {'TemplateId': 1234}, 'POST')
def test__get_resource_parameters_deploy_success_case(self, mocker, ome_response_mock,
ome_connection_mock_for_template):
- f_module = self.get_module_mock({"command": "deploy", "template_id": 1234})
+ f_module = self.get_module_mock(
+ {"command": "deploy", "template_id": 1234})
mocker.patch(MODULE_PATH + 'get_device_ids',
return_value=[Constants.device_id1])
mocker.patch(MODULE_PATH + 'get_deploy_payload',
return_value={"deploy_payload": "value"})
- mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"})
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
- assert data == ('TemplateService/Actions/TemplateService.Deploy', {"deploy_payload": "value"}, 'POST')
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ return_value={"Id": 1234, "Name": "templ1"})
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
+ assert data == ('TemplateService/Actions/TemplateService.Deploy',
+ {"deploy_payload": "value"}, 'POST')
def test__get_resource_parameters_clone_success_case(self, mocker, ome_response_mock,
ome_connection_mock_for_template):
- f_module = self.get_module_mock({"command": "clone", "template_id": 1234, "template_view_type": 2})
+ f_module = self.get_module_mock(
+ {"command": "clone", "template_id": 1234, "template_view_type": 2})
mocker.patch(MODULE_PATH + 'get_view_id',
return_value=2)
mocker.patch(MODULE_PATH + 'get_clone_payload',
return_value={"clone_payload": "value"})
- mocker.patch(MODULE_PATH + 'get_template_details', return_value={"Id": 1234, "Name": "templ1"})
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
- assert data == ('TemplateService/Actions/TemplateService.Clone', {"clone_payload": "value"}, 'POST')
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ return_value={"Id": 1234, "Name": "templ1"})
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
+ assert data == ('TemplateService/Actions/TemplateService.Clone',
+ {"clone_payload": "value"}, 'POST')
def test__get_resource_parameters_import_success_case(self, mocker, ome_response_mock,
ome_connection_mock_for_template):
- f_module = self.get_module_mock({"command": "import", "template_id": 1234, "template_view_type": 2})
+ f_module = self.get_module_mock(
+ {"command": "import", "template_id": 1234, "template_view_type": 2})
mocker.patch(MODULE_PATH + 'get_view_id',
return_value=2)
mocker.patch(MODULE_PATH + 'get_import_payload',
return_value={"import_payload": "value"})
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
- assert data == ('TemplateService/Actions/TemplateService.Import', {"import_payload": "value"}, 'POST')
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
+ assert data == ('TemplateService/Actions/TemplateService.Import',
+ {"import_payload": "value"}, 'POST')
@pytest.mark.parametrize("params", [{"inp": {"command": "modify"}, "mid": inter_payload, "out": payload_out}])
def test__get_resource_parameters_modify_template_none_failure_case(self, mocker, ome_response_mock,
ome_connection_mock_for_template, params):
f_module = self.get_module_mock(params=params["inp"])
with pytest.raises(Exception) as exc:
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
assert exc.value.args[0] == "Enter a valid template_name or template_id"
@pytest.mark.parametrize("params",
[{"success": True, "json_data": {"value": [{"Name": "template_name", "Id": 123}]},
"id": 123, "gtype": True},
- {"success": True, "json_data": {}, "id": 0, "gtype": False},
+ {"success": True, "json_data": {},
+ "id": 0, "gtype": False},
{"success": False, "json_data": {"value": [{"Name": "template_name", "Id": 123}]},
"id": 0, "gtype": False},
{"success": True, "json_data": {"value": [{"Name": "template_name1", "Id": 123}]},
@@ -404,13 +441,15 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock):
ome_response_mock.success = params["success"]
ome_response_mock.json_data = params["json_data"]
- id = self.module.get_type_id_valid(ome_connection_mock_for_template, params["id"])
+ id = self.module.get_type_id_valid(
+ ome_connection_mock_for_template, params["id"])
assert id == params["gtype"]
@pytest.mark.parametrize("params",
[{"success": True, "json_data": {"value": [{"Description": "Deployment", "Id": 2}]},
"view": "Deployment", "gtype": 2},
- {"success": True, "json_data": {}, "view": "Compliance", "gtype": 1},
+ {"success": True, "json_data": {},
+ "view": "Compliance", "gtype": 1},
{"success": False, "json_data": {"value": [{"Description": "template_name", "Id": 1}]},
"view": "Deployment", "gtype": 2},
{"success": True, "json_data": {"value": [{"Description": "template_name1", "Id": 2}]},
@@ -419,12 +458,14 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock):
ome_response_mock.success = params["success"]
ome_response_mock.json_data = params["json_data"]
- id = self.module.get_view_id(ome_connection_mock_for_template, params["view"])
+ id = self.module.get_view_id(
+ ome_connection_mock_for_template, params["view"])
assert id == params["gtype"]
@pytest.mark.parametrize("param",
[{"pin": {"NetworkBootIsoModel": {"ShareDetail": {"Password": "share_password"}}}},
- {"pin": {"NetworkBootIsoModel": {"ShareDetail": {"Password1": "share_password"}}}},
+ {"pin": {"NetworkBootIsoModel": {
+ "ShareDetail": {"Password1": "share_password"}}}},
{"pin": {"NetworkBootIsoModel": {"ShareDetail": [{"Password1": "share_password"}]}}}])
def test_password_no_log(self, param):
attributes = param["pin"]
@@ -432,13 +473,15 @@ class TestOmeTemplate(FakeAnsibleModule):
def test__get_resource_parameters_create_failure_case_02(self, mocker, ome_response_mock,
ome_connection_mock_for_template):
- f_module = self.get_module_mock({"command": "create", "template_name": "name"})
+ f_module = self.get_module_mock(
+ {"command": "create", "template_name": "name"})
mocker.patch(MODULE_PATH + 'get_device_ids',
return_value=[Constants.device_id1, Constants.device_id2])
mocker.patch(MODULE_PATH + 'get_template_by_name',
return_value=("template", 1234))
with pytest.raises(Exception) as exc:
- data = self.module._get_resource_parameters(f_module, ome_connection_mock_for_template)
+ data = self.module._get_resource_parameters(
+ f_module, ome_connection_mock_for_template)
assert exc.value.args[0] == "Create template requires only one reference device"
def test_main_template_success_case2(self, ome_default_args, mocker, module_mock, ome_connection_mock_for_template,
@@ -453,17 +496,22 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock.success = True
mocker.patch(MODULE_PATH + '_get_resource_parameters',
return_value=(TEMPLATE_RESOURCE, "template_payload", "POST"))
+ mocker.patch(MODULE_PATH + 'time.sleep', return_value=None)
result = self._run_module(ome_default_args)
assert result['changed'] is True
- assert result['msg'] == "Successfully created a template with ID {0}".format(ome_response_mock.json_data)
+ assert result['msg'] == "Successfully created a template with ID {0}".format(
+ ome_response_mock.json_data)
def test_get_import_payload_success_case_01(self, ome_connection_mock_for_template):
- f_module = self.get_module_mock(params={"attributes": {"Name": "template1", "Content": "Content"}})
- self.module.get_import_payload(f_module, ome_connection_mock_for_template, 2)
+ f_module = self.get_module_mock(
+ params={"attributes": {"Name": "template1", "Content": "Content"}})
+ self.module.get_import_payload(
+ f_module, ome_connection_mock_for_template, 2)
def test_get_deploy_payload_success_case_01(self):
module_params = {"attributes": {"Name": "template1"}}
- self.module.get_deploy_payload(module_params, [Constants.device_id1], 1234)
+ self.module.get_deploy_payload(
+ module_params, [Constants.device_id1], 1234)
@pytest.mark.parametrize("param",
[{"mparams": {"attributes": {"Name": "template1"}}, "name": "template0",
@@ -473,7 +521,8 @@ class TestOmeTemplate(FakeAnsibleModule):
def test_get_clone_payload_success_case_01(self, param, ome_connection_mock_for_template):
f_module = self.get_module_mock(param["mparams"])
module_params = param["mparams"]
- payload = self.module.get_clone_payload(f_module, ome_connection_mock_for_template, param['template_id'], 2)
+ payload = self.module.get_clone_payload(
+ f_module, ome_connection_mock_for_template, param['template_id'], 2)
assert payload == param['clone_payload']
@pytest.mark.parametrize("param",
@@ -511,8 +560,10 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock.json_data = {
"value": [{'Id': 1, "Name": "mygroup3"}, {'Id': 2, "Name": "mygroup2"}, {'Id': 3, "Name": "mygroup"}]}
ome_response_mock.status_code = 200
- mocker.patch(MODULE_PATH + 'get_group_devices_all', return_value=[{'Id': 1}, {'Id': 2}, {'Id': 3}])
- dev_list = self.module.get_group_details(ome_connection_mock_for_template, f_module)
+ mocker.patch(MODULE_PATH + 'get_group_devices_all',
+ return_value=[{'Id': 1}, {'Id': 2}, {'Id': 3}])
+ dev_list = self.module.get_group_details(
+ ome_connection_mock_for_template, f_module)
assert dev_list == param["dev_list"]
@pytest.mark.parametrize("param", [
@@ -526,8 +577,10 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock.json_data = {
"value": [{'Id': 1, "Name": "mygroup3"}, {'Id': 2, "Name": "mygroup2"}, {'Id': 3, "Name": "mygroup"}]}
ome_response_mock.status_code = 200
- mocker.patch(MODULE_PATH + 'get_group_devices_all', return_value=[{'Id': 1}, {'Id': 2}, {'Id': 3}])
- dev_list = self.module.get_group_details(ome_connection_mock_for_template, f_module)
+ mocker.patch(MODULE_PATH + 'get_group_devices_all',
+ return_value=[{'Id': 1}, {'Id': 2}, {'Id': 3}])
+ dev_list = self.module.get_group_details(
+ ome_connection_mock_for_template, f_module)
assert dev_list == param["dev_list"]
@pytest.mark.parametrize("params", [
@@ -567,35 +620,150 @@ class TestOmeTemplate(FakeAnsibleModule):
ome_response_mock):
ome_response_mock.success = params.get("success", True)
ome_response_mock.json_data = params["json_data"]
- mocker.patch(MODULE_PATH + 'get_template_by_name', return_value=params.get('get_template_by_name'))
- mocker.patch(MODULE_PATH + 'attributes_check', return_value=params.get('attributes_check', 0))
- f_module = self.get_module_mock(params=params["mparams"], check_mode=params.get('check_mode', False))
+ mocker.patch(MODULE_PATH + 'get_template_by_name',
+ return_value=params.get('get_template_by_name'))
+ mocker.patch(MODULE_PATH + 'attributes_check',
+ return_value=params.get('attributes_check', 0))
+ f_module = self.get_module_mock(
+ params=params["mparams"], check_mode=params.get('check_mode', False))
error_message = params["res"]
with pytest.raises(Exception) as err:
- self.module.get_modify_payload(f_module, ome_connection_mock_for_template, params.get('template'))
+ self.module.get_modify_payload(
+ f_module, ome_connection_mock_for_template, params.get('template'))
assert err.value.args[0] == error_message
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Running"}}, True),
+ 'message': "Template operation is in progress. Task excited after 'job_wait_timeout'.",
+ 'mparams': {"command": "deploy", "template_id": 123, "device_id": 1234}
+ },
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Running"}}, True),
+ 'message': "Changes found to be applied.",
+ 'mparams': {"command": "deploy", "template_id": 123, "device_id": 1234},
+ "check_mode": True
+ },
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'TemplateId': 1, 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'TemplateId': 12, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Running"}}, True),
+ "get_device_ids": [123, 1234],
+ 'message': "The device(s) '123' have been assigned the template(s) '1' respectively. Please unassign the profiles from the devices.",
+ 'mparams': {"command": "deploy", "template_id": 123, "device_id": 1234}
+ },
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'TemplateId': 123, 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'TemplateId': 12, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Running"}}, True),
+ "get_device_ids": [123],
+ 'message': "No changes found to be applied.",
+ 'mparams': {"command": "deploy", "template_id": 123, "device_id": 1234}
+ },
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'TemplateId': 123, 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'TemplateId': 12, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Running"}}, True),
+ "get_device_ids": [123],
+ 'message': "No changes found to be applied.",
+ 'mparams': {"command": "delete", "template_id": 12, "device_id": 1234}
+ }
+ ])
+ def test_ome_template_success(self, params, ome_connection_mock_for_template, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_template.get_all_report_details.return_value = params[
+ 'json_data']
+ ome_default_args.update(params['mparams'])
+ mocks = ["job_tracking", "get_device_ids"]
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
+ result = self._run_module(
+ ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Complete"}}, True),
+ 'message': "Failed to deploy template.",
+ 'mparams': {"command": "deploy", "template_id": 123, "device_id": 1234}
+ },
+ {"json_data": {"value": [
+ {'Id': 123, 'TargetId': 123, 'ProfileState': 1,
+ 'DeviceId': 1234, "Type": 1000},
+ {'Id': 234, 'TargetId': 235, 'ProfileState': 1, 'DeviceId': 1235, "Type": 1000}],
+ "report_list": [{'Id': 1234, 'PublicAddress': "XX.XX.XX.XX",
+ 'DeviceId': 1234, "Type": 1000}]},
+ "job_tracking": (True, "msg", {'LastRunStatus': {"Name": "Complete"}}, True),
+ "get_device_ids": [],
+ 'message': "There are no devices provided for deploy operation",
+ 'mparams': {"command": "deploy", "template_id": 123, "device_id": 1234}
+ }
+ ])
+ def test_ome_template_fail_json(self, params, ome_connection_mock_for_template, ome_response_mock,
+ ome_default_args, module_mock, mocker):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_template.get_all_report_details.return_value = params[
+ 'json_data']
+ ome_default_args.update(params['mparams'])
+ mocks = ["job_tracking", "get_device_ids"]
+ for m in mocks:
+ if m in params:
+ mocker.patch(MODULE_PATH + m, return_value=params.get(m, {}))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['msg'] == params['message']
+
@pytest.mark.parametrize("exc_type",
[IOError, ValueError, TypeError, ConnectionError,
HTTPError, URLError, SSLError])
def test_main_template_exception_case(self, exc_type, mocker, ome_default_args,
ome_connection_mock_for_template, ome_response_mock):
- ome_default_args.update({"command": "export", "template_name": "t1", 'attributes': {'Attributes': "myattr1"}})
+ ome_default_args.update(
+ {"command": "export", "template_name": "t1", 'attributes': {'Attributes': "myattr1"}})
ome_response_mock.status_code = 400
ome_response_mock.success = False
json_str = to_text(json.dumps({"info": "error_details"}))
if exc_type == URLError:
mocker.patch(MODULE_PATH + 'password_no_log')
- mocker.patch(MODULE_PATH + '_get_resource_parameters', side_effect=exc_type("url open error"))
+ mocker.patch(MODULE_PATH + '_get_resource_parameters',
+ side_effect=exc_type("url open error"))
result = self._run_module(ome_default_args)
assert result["unreachable"] is True
elif exc_type not in [HTTPError, SSLValidationError]:
- mocker.patch(MODULE_PATH + '_get_resource_parameters', side_effect=exc_type("exception message"))
+ mocker.patch(MODULE_PATH + '_get_resource_parameters',
+ side_effect=exc_type("exception message"))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + '_get_resource_parameters',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py
index 0e6cbca4f..425e6f299 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_identity_pool.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.1.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,7 +15,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import ome_template_identity_pool
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from ssl import SSLError
@@ -85,7 +85,7 @@ class TestOMETemplateIdentityPool(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'get_identity_id',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str))
)
result = self._run_module_with_fail_json(ome_default_args)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py
index 8f8bb3285..f59520e55 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_info.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.3
-# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -88,7 +88,7 @@ class TestOmeTemplateInfo(FakeAnsibleModule):
if exc_type not in [HTTPError, SSLValidationError]:
ome_connection_template_info_mock.invoke_request.side_effect = exc_type('test')
else:
- ome_connection_template_info_mock.invoke_request.side_effect = exc_type('http://testhost.com', 400,
+ ome_connection_template_info_mock.invoke_request.side_effect = exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str))
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py
index c182b2b94..0ec0759f3 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -341,7 +341,7 @@ class TestOmeTemplateNetworkVlan(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'validate_vlans',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan_info.py
new file mode 100644
index 000000000..dfb718f0a
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_template_network_vlan_info.py
@@ -0,0 +1,346 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 7.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import json
+from io import StringIO
+from ssl import SSLError
+
+import pytest
+from ansible.module_utils._text import to_text
+from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from ansible_collections.dellemc.openmanage.plugins.modules import ome_template_network_vlan_info
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+
+SUCCESS_MSG = "Successfully retrieved the template network VLAN information."
+NO_TEMPLATES_MSG = "No templates with network info were found."
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.ome_template_network_vlan_info.'
+
+
+@pytest.fixture
+def ome_connection_mock_for_vlaninfo(mocker, ome_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'RestOME')
+ ome_connection_mock_obj = connection_class_mock.return_value.__enter__.return_value
+ ome_connection_mock_obj.invoke_request.return_value = ome_response_mock
+ return ome_connection_mock_obj
+
+
+class TestOmeTemplateVlanInfo(FakeAnsibleModule):
+ module = ome_template_network_vlan_info
+
+ @pytest.mark.parametrize("params", [
+ {"json_data": {"value": [{'Id': 1234, 'Name': "ABCTAG1", "Type": 1000}],
+ "AttributeGroups": [
+ {
+ "GroupNameId": 1001,
+ "DisplayName": "NICModel",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 3,
+ "DisplayName": "NIC in Mezzanine 1B",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 1,
+ "DisplayName": "Port ",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 1,
+ "DisplayName": "Partition ",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 0,
+ "CustomId": 32,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan Tagged",
+ "Description": None,
+ "Value": "25367, 32656, 32658, 26898",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 32,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan UnTagged",
+ "Description": None,
+ "Value": "21474",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 32,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "NIC Bonding Enabled",
+ "Description": None,
+ "Value": "False",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 2,
+ "DisplayName": "Port ",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 1,
+ "DisplayName": "Partition ",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 0,
+ "CustomId": 31,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan Tagged",
+ "Description": None,
+ "Value": None,
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 31,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan UnTagged",
+ "Description": None,
+ "Value": "32658",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 31,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "NIC Bonding Enabled",
+ "Description": None,
+ "Value": "true",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 1,
+ "DisplayName": "NIC in Mezzanine 1A",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 1,
+ "DisplayName": "Port ",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 1,
+ "DisplayName": "Partition ",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 0,
+ "CustomId": 30,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan Tagged",
+ "Description": None,
+ "Value": "32656, 32658",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 30,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan UnTagged",
+ "Description": None,
+ "Value": "25367",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 30,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "NIC Bonding Enabled",
+ "Description": None,
+ "Value": "true",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 2,
+ "DisplayName": "Port ",
+ "SubAttributeGroups": [
+ {
+ "GroupNameId": 1,
+ "DisplayName": "Partition ",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 0,
+ "CustomId": 29,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan Tagged",
+ "Description": None,
+ "Value": "21474",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 29,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Vlan UnTagged",
+ "Description": None,
+ "Value": "32656",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ },
+ {
+ "AttributeId": 0,
+ "CustomId": 29,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "NIC Bonding Enabled",
+ "Description": None,
+ "Value": "False",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ }
+ ]
+ }
+ ],
+ "Attributes": []
+ }
+ ],
+ "Attributes": []
+ }
+ ],
+ "Attributes": []
+ },
+ {
+ "GroupNameId": 1005,
+ "DisplayName": "NicBondingTechnology",
+ "SubAttributeGroups": [],
+ "Attributes": [
+ {
+ "AttributeId": 0,
+ "CustomId": 0,
+ "AttributeEditInfoId": 0,
+ "DisplayName": "Nic Bonding Technology",
+ "Description": None,
+ "Value": "LACP",
+ "IsReadOnly": False,
+ "IsIgnored": False,
+ "IsSecure": False,
+ "IsLinkedToSecure": False,
+ "TargetSpecificTypeId": 0
+ }
+ ]
+ }]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "template with id",
+ 'mparams': {"template_id": 1234}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "temp1", "ViewTypeId": 1}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "template with name",
+ 'mparams': {"template_name": "temp1"}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "temp2", "ViewTypeId": 2}]},
+ 'message': "Template with name 'temp1' not found.", "success": True, 'case': "template not found",
+ 'mparams': {"template_name": "temp1"}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "temp2", "ViewTypeId": 3}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "all templates case",
+ 'mparams': {}},
+ {"json_data": {"value": [{'Id': 1234, 'Name': "temp2", "ViewTypeId": 4}]},
+ 'message': SUCCESS_MSG, "success": True, 'case': "invalid templates case",
+ 'mparams': {}}
+ ])
+ def test_ome_template_network_vlan_info_success(self, params, ome_connection_mock_for_vlaninfo, ome_response_mock,
+ ome_default_args, module_mock):
+ ome_response_mock.success = params.get("success", True)
+ ome_response_mock.json_data = params['json_data']
+ ome_connection_mock_for_vlaninfo.get_all_items_with_pagination.return_value = params['json_data']
+ ome_default_args.update(params['mparams'])
+ result = self._run_module(ome_default_args, check_mode=params.get('check_mode', False))
+ assert result['msg'] == params['message']
+
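+ # Exercises the module's top-level error handling: URLError is reported as
+ # unreachable, while the remaining exception types cause the module to fail.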
+ @pytest.mark.parametrize("exc_type",
+ [IOError, ValueError, SSLError, TypeError, ConnectionError, HTTPError, URLError])
+ def test_ome_template_network_vlan_info_main_exception_failure_case(self, exc_type, mocker, ome_default_args,
+ ome_connection_mock_for_vlaninfo,
+ ome_response_mock):
+ ome_default_args.update({"template_id": 1234})
+ ome_response_mock.status_code = 400
+ ome_response_mock.success = False
+ json_str = to_text(json.dumps({"info": "error_details"}))
+ if exc_type == URLError:
+ mocker.patch(MODULE_PATH + 'get_template_details', side_effect=exc_type("url open error"))
+ result = self._run_module(ome_default_args)
+ assert result["unreachable"] is True
+ elif exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + 'get_template_details', side_effect=exc_type("exception message"))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ else:
+ mocker.patch(MODULE_PATH + 'get_template_details',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
+ {"accept-type": "application/json"}, StringIO(json_str)))
+ result = self._run_module_with_fail_json(ome_default_args)
+ assert result['failed'] is True
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py
index ac3c18145..623b65535 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.0.0
-# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -17,8 +17,7 @@ import pytest
from ansible_collections.dellemc.openmanage.plugins.modules import ome_user
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants, \
- AnsibleFailJSonException
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from io import StringIO
from ansible.module_utils._text import to_text
@@ -171,7 +170,7 @@ class TestOmeUser(FakeAnsibleModule):
else:
mocker.patch(
MODULE_PATH + 'ome_user._get_resource_parameters',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(ome_default_args)
assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py
index 6d48cc183..c640c89e0 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_ome_user_info.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.1
-# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -87,7 +87,7 @@ class TestOmeUserInfo(FakeAnsibleModule):
if exc_type not in [HTTPError, SSLValidationError]:
ome_connection_user_info_mock.invoke_request.side_effect = exc_type('test')
else:
- ome_connection_user_info_mock.invoke_request.side_effect = exc_type('http://testhost.com', 400,
+ ome_connection_user_info_mock.invoke_request.side_effect = exc_type('https://testhost.com', 400,
'http error message',
{"accept-type": "application/json"},
StringIO(json_str))
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
index 075406a75..9a77be0c4 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_event_subscription.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 4.1.0
-# Copyright (C) 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2021-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -25,6 +25,7 @@ SUBSCRIPTION_UNABLE_ADD = "Unable to add a subscription."
SUBSCRIPTION_ADDED = "Successfully added the subscription."
DESTINATION_MISMATCH = "No changes found to be applied."
EVENT_TYPE_INVALID = "value of event_type must be one of: Alert, MetricReport, got: Metricreport"
+PARAM_DESTINATION = "https://XX.XX.XX.XX:8188"
@pytest.fixture
@@ -38,8 +39,8 @@ def redfish_connection_mock(mocker, redfish_response_mock):
class TestRedfishSubscription(FakeAnsibleModule):
module = redfish_event_subscription
- @pytest.mark.parametrize("val", [{"destination": "https://192.168.1.100:8188"},
- {"destination": "https://192.168.1.100:8189"}])
+ @pytest.mark.parametrize("val", [{"destination": PARAM_DESTINATION},
+ {"destination": "https://XX.XX.XX.XX:8189"}])
def test_function_get_subscription_success(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
@@ -53,7 +54,7 @@ class TestRedfishSubscription(FakeAnsibleModule):
"Context": "RedfishEvent",
"DeliveryRetryPolicy": "RetryForever",
"Description": "Event Subscription Details",
- "Destination": "https://192.168.1.100:8189",
+ "Destination": "https://XX.XX.XX.XX:8189",
"EventFormatType": "Event",
"EventTypes": [
"Alert"
@@ -82,7 +83,7 @@ class TestRedfishSubscription(FakeAnsibleModule):
"Context": "RedfishEvent",
"DeliveryRetryPolicy": "RetryForever",
"Description": "Event Subscription Details",
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"EventTypes": [
"MetricReport"
@@ -130,9 +131,9 @@ class TestRedfishSubscription(FakeAnsibleModule):
assert result["Destination"] == val["destination"]
@pytest.mark.parametrize("val", [
- {"destination": "https://192.168.1.100:8188", "event_type": "MetricReport",
+ {"destination": PARAM_DESTINATION, "event_type": "MetricReport",
"event_format_type": "MetricReport"},
- {"destination": "https://192.168.1.100:8188", "event_type": "Alert", "event_format_type": "Event"}])
+ {"destination": PARAM_DESTINATION, "event_type": "Alert", "event_format_type": "Event"}])
def test_function_create_subscription(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
@@ -157,9 +158,9 @@ class TestRedfishSubscription(FakeAnsibleModule):
assert result.json_data["EventTypes"] == [val["event_type"]]
@pytest.mark.parametrize("val", [
- {"destination": "https://100.96.80.1:161", "event_type": "MetricReport",
+ {"destination": "https://XX.XX.XX.XX:161", "event_type": "MetricReport",
"event_format_type": "MetricReport"},
- {"destination": "https://100.96.80.1:161", "event_type": "Alert", "event_format_type": "Event"}])
+ {"destination": "https://XX.XX.XX.XX:161", "event_type": "Alert", "event_format_type": "Event"}])
def test_function_get_subscription_details(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
@@ -202,9 +203,9 @@ class TestRedfishSubscription(FakeAnsibleModule):
assert result["EventTypes"] == [val["event_type"]]
@pytest.mark.parametrize("val", [
- {"destination": "https://100.96.80.1:161", "event_type": "MetricReport",
+ {"destination": "https://XX.XX.XX.XX:161", "event_type": "MetricReport",
"event_format_type": "MetricReport"},
- {"destination": "https://100.96.80.1:161", "event_type": "Alert", "event_format_type": "Event"}])
+ {"destination": "https://XX.XX.XX.XX:161", "event_type": "Alert", "event_format_type": "Event"}])
def test_function_get_subscription_details_None(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
@@ -245,8 +246,8 @@ class TestRedfishSubscription(FakeAnsibleModule):
assert result is None
@pytest.mark.parametrize("val", [
- {"destination": "https://100.96.80.1:161"},
- {"destination": "https://100.96.80.1:161"}])
+ {"destination": "https://XX.XX.XX.XX:161"},
+ {"destination": "https://XX.XX.XX.XX:161"}])
def test_function_delete_subscription(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args, val):
redfish_default_args.update({"state": "absent"})
@@ -284,7 +285,8 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_validation_input_params(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
- redfish_default_args.update({"destination": "http://192.168.1.100:8188"})
+ http_str = "http"
+ redfish_default_args.update({"destination": http_str + "://XX.XX.XX.XX:8188"})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
with pytest.raises(Exception) as err:
@@ -294,7 +296,7 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_absent_does_not_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
@@ -307,13 +309,13 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_absent_does_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
@@ -331,13 +333,13 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_absent_does_exist_error(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "absent"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
@@ -354,12 +356,12 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_present_does_not_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
@@ -380,12 +382,12 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_present_does_not_exist_error(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
@@ -406,12 +408,12 @@ class TestRedfishSubscription(FakeAnsibleModule):
redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "Metricreport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
@@ -433,13 +435,13 @@ class TestRedfishSubscription(FakeAnsibleModule):
def test_module_present_does_exist(self, mocker, redfish_connection_mock, redfish_response_mock,
redfish_default_args):
redfish_default_args.update({"state": "present"})
- redfish_default_args.update({"destination": "https://192.168.1.100:8188"})
+ redfish_default_args.update({"destination": PARAM_DESTINATION})
redfish_default_args.update({"event_type": "MetricReport"})
redfish_default_args.update({"event_format_type": "MetricReport"})
json_data = {
"Id": "c6ff37fc-8204-11eb-b08f-2cea7ff7fe80",
- "Destination": "https://192.168.1.100:8188",
+ "Destination": PARAM_DESTINATION,
"EventFormatType": "MetricReport",
"Context": "RedfishEvent",
"Protocol": "Redfish",
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py
index dac24df41..88e3c5ed0 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.5.0
-# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2020-2023 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -17,7 +17,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import redfish_firmware
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from mock import MagicMock
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
@@ -26,7 +26,10 @@ from ansible.module_utils._text import to_text
from mock import patch, mock_open
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
-JOB_URI = "/redfish/v1/JobService/Jobs/{job_id}"
+JOB_URI = "JobService/Jobs/{job_id}"
+FIRMWARE_DATA = "multipart/form-data"
+HTTPS_IMAGE_URI = "https://home/firmware_repo/component.exe"
+HTTPS_ADDRESS_DELL = "https://dell.com"
@pytest.fixture
@@ -109,39 +112,41 @@ class TestRedfishFirmware(FakeAnsibleModule):
def test_main_redfish_firmware_success_case(self, redfish_firmware_connection_mock, redfish_default_args, mocker,
redfish_response_mock):
- redfish_default_args.update({"image_uri": "/home/firmware_repo/component.exe"})
+ redfish_default_args.update({"image_uri": "/home/firmware_repo/component.exe", "job_wait": False})
redfish_firmware_connection_mock.headers.get("Location").return_value = "https://multipart/form-data"
- redfish_firmware_connection_mock.headers.get("Location").split().return_value = "multipart/form-data"
+ redfish_firmware_connection_mock.headers.get("Location").split().return_value = FIRMWARE_DATA
mocker.patch(MODULE_PATH + 'redfish_firmware.firmware_update',
return_value=redfish_response_mock)
- redfish_response_mock.json_data = {"image_uri": "http://home/firmware_repo/component.exe"}
+ redfish_response_mock.json_data = {"image_uri": HTTPS_IMAGE_URI}
redfish_response_mock.status_code = 201
redfish_response_mock.success = True
result = self._run_module(redfish_default_args)
- assert result == {'changed': True,
- 'msg': 'Successfully submitted the firmware update task.',
- 'task': {'id': redfish_response_mock.headers.get().split().__getitem__(),
- 'uri': JOB_URI.format(job_id=redfish_response_mock.headers.get().split().__getitem__())}}
+ assert result['changed'] is True
+ assert result['msg'] == 'Successfully submitted the firmware update task.'
+ assert result['task']['id'] == redfish_response_mock.headers.get().split().__getitem__()
+ assert result['task']['uri'] == JOB_URI.format(job_id=redfish_response_mock.headers.get().split().__getitem__())
@pytest.mark.parametrize("exc_type",
[URLError, HTTPError, SSLValidationError, ConnectionError, TypeError, ValueError])
def test_main_redfish_firmware_exception_handling_case(self, exc_type, mocker, redfish_default_args,
redfish_firmware_connection_mock,
redfish_response_mock):
- redfish_default_args.update({"image_uri": "/home/firmware_repo/component.exe"})
+ redfish_default_args.update({"image_uri": "/home/firmware_repo/component.exe", "job_wait_timeout": 0})
redfish_response_mock.json_data = {"value": [{"image_uri": "/home/firmware_repo/component.exe"}]}
redfish_response_mock.status_code = 400
redfish_response_mock.success = False
json_str = to_text(json.dumps({"data": "out"}))
-
if exc_type not in [HTTPError, SSLValidationError]:
mocker.patch(MODULE_PATH + 'redfish_firmware.firmware_update',
side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + 'redfish_firmware.firmware_update',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type('https://testhost.com', 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
- result = self._run_module_with_fail_json(redfish_default_args)
+ if exc_type == HTTPError:
+ result = self._run_module(redfish_default_args)
+ else:
+ result = self._run_module_with_fail_json(redfish_default_args)
assert 'task' not in result
assert 'msg' in result
assert result['failed'] is True
@@ -150,7 +155,7 @@ class TestRedfishFirmware(FakeAnsibleModule):
def test_get_update_service_target_success_case(self, redfish_default_args, redfish_firmware_connection_mock,
redfish_response_mock):
- redfish_default_args.update({"transfer_protocol": "HTTP"})
+ redfish_default_args.update({"transfer_protocol": "HTTP", "job_wait_timeout": 0})
f_module = self.get_module_mock(params=redfish_default_args)
redfish_response_mock.status_code = 200
redfish_response_mock.success = True
@@ -162,17 +167,17 @@ class TestRedfishFirmware(FakeAnsibleModule):
}
},
"transfer_protocol": "HTTP",
- "HttpPushUri": "http://dell.com",
+ "HttpPushUri": HTTPS_ADDRESS_DELL,
"FirmwareInventory": {
"@odata.id": "2134"
}
}
result = self.module._get_update_service_target(redfish_firmware_connection_mock, f_module)
- assert result == ('2134', 'http://dell.com', '')
+ assert result == ('2134', HTTPS_ADDRESS_DELL, '')
def test_get_update_service_target_uri_none_case(self, redfish_default_args, redfish_firmware_connection_mock,
redfish_response_mock):
- redfish_default_args.update({"transfer_protocol": "HTTP"})
+ redfish_default_args.update({"transfer_protocol": "HTTP", "job_wait_timeout": 0})
f_module = self.get_module_mock(params=redfish_default_args)
redfish_response_mock.status_code = 200
redfish_response_mock.success = True
@@ -195,7 +200,7 @@ class TestRedfishFirmware(FakeAnsibleModule):
def test_get_update_service_target_failed_case(self, redfish_default_args, redfish_firmware_connection_mock,
redfish_response_mock):
- redfish_default_args.update({"transfer_protocol": "HTTP"})
+ redfish_default_args.update({"transfer_protocol": "HTTP", "job_wait_timeout": 0})
f_module = self.get_module_mock(params=redfish_default_args)
redfish_response_mock.status_code = 200
redfish_response_mock.success = True
@@ -206,7 +211,7 @@ class TestRedfishFirmware(FakeAnsibleModule):
}
},
"transfer_protocol": "HTTP",
- "HttpPushUri": "http://dell.com",
+ "HttpPushUri": HTTPS_ADDRESS_DELL,
"FirmwareInventory": {
"@odata.id": "2134"
}
@@ -218,13 +223,13 @@ class TestRedfishFirmware(FakeAnsibleModule):
def test_firmware_update_success_case01(self, redfish_default_args, redfish_firmware_connection_mock,
redfish_response_mock, mocker):
mocker.patch(MODULE_PATH + 'redfish_firmware._get_update_service_target',
- return_value=('2134', 'http://dell.com', 'redfish'))
- redfish_default_args.update({"image_uri": "http://home/firmware_repo/component.exe",
- "transfer_protocol": "HTTP"})
+ return_value=('2134', HTTPS_ADDRESS_DELL, 'redfish'))
+ redfish_default_args.update({"image_uri": HTTPS_IMAGE_URI,
+ "transfer_protocol": "HTTP", "timeout": 0, "job_wait_timeout": 0})
f_module = self.get_module_mock(params=redfish_default_args)
redfish_response_mock.status_code = 200
redfish_response_mock.success = True
- redfish_response_mock.json_data = {"image_uri": "http://home/firmware_repo/component.exe",
+ redfish_response_mock.json_data = {"image_uri": HTTPS_IMAGE_URI,
"transfer_protocol": "HTTP"}
result = self.module.firmware_update(redfish_firmware_connection_mock, f_module)
assert result == redfish_response_mock
@@ -232,15 +237,15 @@ class TestRedfishFirmware(FakeAnsibleModule):
def test_firmware_update_success_case02(self, redfish_default_args, redfish_firmware_connection_mock,
redfish_response_mock, mocker):
mocker.patch(MODULE_PATH + "redfish_firmware._get_update_service_target",
- return_value=('2134', 'nhttp://dell.com', 'multipart/form-data'))
+ return_value=('2134', HTTPS_ADDRESS_DELL, 'multipart/form-data'))
mocker.patch("ansible_collections.dellemc.openmanage.plugins.modules.redfish_firmware._encode_form_data",
- return_value=({"file": (3, "nhttp://dell.com", "multipart/form-data")}, "multipart/form-data"))
- redfish_default_args.update({"image_uri": "nhttp://home/firmware_repo/component.exe",
- "transfer_protocol": "HTTP"})
+ return_value=({"file": (3, HTTPS_ADDRESS_DELL, FIRMWARE_DATA)}, FIRMWARE_DATA))
+ redfish_default_args.update({"image_uri": HTTPS_IMAGE_URI,
+ "transfer_protocol": "HTTP", "timeout": 0, "job_wait_timeout": 0})
f_module = self.get_module_mock(params=redfish_default_args)
redfish_response_mock.status_code = 200
redfish_response_mock.success = True
- redfish_response_mock.json_data = {"image_uri": "nhttp://home/firmware_repo/component.exe",
+ redfish_response_mock.json_data = {"image_uri": HTTPS_IMAGE_URI,
"transfer_protocol": "HTTP"}
if sys.version_info.major == 3:
builtin_module_name = 'builtins'
@@ -250,18 +255,22 @@ class TestRedfishFirmware(FakeAnsibleModule):
result = self.module.firmware_update(redfish_firmware_connection_mock, f_module)
assert result == redfish_response_mock
- def test_firmware_update_success_case03(self, redfish_default_args, redfish_firmware_connection_mock,
+ @pytest.mark.parametrize("params", [{"ip": "192.161.1.1:443"}, {"ip": "192.161.1.1"},
+ {"ip": "82f5:d985:a2d5:f0c3:5392:cc52:27d1:4da6"},
+ {"ip": "[82f5:d985:a2d5:f0c3:5392:cc52:27d1:4da6]"},
+ {"ip": "[82f5:d985:a2d5:f0c3:5392:cc52:27d1:4da6]:443"}])
+ def test_firmware_update_success_case03(self, params, redfish_default_args, redfish_firmware_connection_mock,
redfish_response_mock, mocker):
mocker.patch(MODULE_PATH + "redfish_firmware._get_update_service_target",
- return_value=('2134', 'nhttp://dell.com', 'multipart/form-data'))
+ return_value=('2134', HTTPS_ADDRESS_DELL, 'multipart/form-data'))
mocker.patch(MODULE_PATH + "redfish_firmware._encode_form_data",
- return_value=({"file": (3, "nhttp://dell.com", "multipart/form-data")}, "multipart/form-data"))
- redfish_default_args.update({"image_uri": "nhttp://home/firmware_repo/component.exe",
- "transfer_protocol": "HTTP"})
+ return_value=({"file": (3, HTTPS_ADDRESS_DELL, FIRMWARE_DATA)}, FIRMWARE_DATA))
+ redfish_default_args.update({"baseuri": params["ip"], "image_uri": HTTPS_IMAGE_URI,
+ "transfer_protocol": "HTTP", "timeout": 0, "job_wait_timeout": 0})
f_module = self.get_module_mock(params=redfish_default_args)
redfish_response_mock.status_code = 201
redfish_response_mock.success = True
- redfish_response_mock.json_data = {"image_uri": "nhttp://home/firmware_repo/component.exe",
+ redfish_response_mock.json_data = {"image_uri": HTTPS_IMAGE_URI,
"transfer_protocol": "HTTP"}
if sys.version_info.major == 3:
builtin_module_name = 'builtins'
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware_rollback.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware_rollback.py
new file mode 100644
index 000000000..68171c0b0
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_firmware_rollback.py
@@ -0,0 +1,299 @@
+# -*- coding: utf-8 -*-
+
+#
+# Dell OpenManage Ansible Modules
+# Version 8.2.0
+# Copyright (C) 2023 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+#
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+import json
+from ansible_collections.dellemc.openmanage.plugins.modules import redfish_firmware_rollback
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
+from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
+from ansible.module_utils.urls import ConnectionError, SSLValidationError
+from io import StringIO
+from mock import MagicMock
+from ansible.module_utils._text import to_text
+
+MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+ACCESS_TYPE = "application/json"
+HTTP_ERROR_MSG = 'http error message'
+HTTPS_ADDRESS = 'https://testhost.com'
+
+
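+# Fixture: patches the Redfish client used by redfish_firmware_rollback so that
+# invoke_request() returns the shared redfish_response_mock.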
+@pytest.fixture
+def redfish_connection_mock(mocker, redfish_response_mock):
+ connection_class_mock = mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.Redfish')
+ redfish_connection_obj = connection_class_mock.return_value.__enter__.return_value
+ redfish_connection_obj.invoke_request.return_value = redfish_response_mock
+ return redfish_connection_obj
+
+
+class TestRedfishFirmware(FakeAnsibleModule):
+
+ module = redfish_firmware_rollback
+
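+ # Drives wait_for_redfish_idrac_reset() with URLError, HTTPError (401 and 400) and
+ # TypeError side effects on invoke_request and checks the returned result tuple.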
+ @pytest.mark.parametrize("exc_type", [URLError, HTTPError, TypeError])
+ def test_wait_for_redfish_idrac_reset_http(self, exc_type, redfish_connection_mock, redfish_response_mock,
+ redfish_default_args, mocker):
+ redfish_default_args.update({"name": "BIOS", "reboot": True, "reboot_timeout": 900})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.time.sleep', return_value=None)
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.Redfish', return_value=MagicMock())
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.require_session', return_value=(1, "secret token"))
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type == HTTPError:
+ redfish_connection_mock.invoke_request.side_effect = exc_type(
+ HTTPS_ADDRESS, 401, HTTP_ERROR_MSG, {"accept-type": ACCESS_TYPE},
+ StringIO(json_str)
+ )
+ result = self.module.wait_for_redfish_idrac_reset(f_module, redfish_connection_mock, 5)
+ assert result[0] is False
+ assert result[1] is True
+ assert result[2] == "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
+ redfish_connection_mock.invoke_request.side_effect = exc_type(
+ HTTPS_ADDRESS, 400, HTTP_ERROR_MSG, {"accept-type": ACCESS_TYPE},
+ StringIO(json_str)
+ )
+ result = self.module.wait_for_redfish_idrac_reset(f_module, redfish_connection_mock, 5)
+ assert result[0] is True
+ assert result[1] is True
+ assert result[2] == "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
+ elif exc_type == URLError:
+ redfish_connection_mock.invoke_request.side_effect = exc_type("exception message")
+ result = self.module.wait_for_redfish_idrac_reset(f_module, redfish_connection_mock, 5)
+ assert result[0] is True
+ assert result[1] is True
+ assert result[2] == "iDRAC reset is in progress. Until the iDRAC is reset, the changes would not apply."
+ else:
+ redfish_connection_mock.invoke_request.side_effect = exc_type("exception message")
+ result = self.module.wait_for_redfish_idrac_reset(f_module, redfish_connection_mock, 5)
+ assert result[0] is True
+ assert result[1] is True
+
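+ # Happy path: with no side effects the helper reports a successful iDRAC reset.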
+ def test_wait_for_redfish_idrac_reset(self, redfish_connection_mock, redfish_response_mock,
+ redfish_default_args, mocker):
+ redfish_default_args.update({"name": "BIOS", "reboot": True, "reboot_timeout": 900})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.time.sleep', return_value=None)
+ result = self.module.wait_for_redfish_idrac_reset(f_module, redfish_connection_mock, 900)
+ assert result[0] is False
+ assert result[1] is False
+ assert result[2] == "iDRAC has been reset successfully."
+
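+ # Walks rollback_firmware() through reboot failure, job-wait timeout, reset failure
+ # and the successful rollback paths, with and without an automatic reboot.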
+ def test_rollback_firmware(self, redfish_connection_mock, redfish_response_mock, redfish_default_args, mocker):
+ redfish_default_args.update({"name": "BIOS", "reboot": True, "reboot_timeout": 900})
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.simple_update", return_value=["JID_12345678"])
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_reboot_job",
+ return_value=({"Id": "JID_123456789"}, True, ""))
+ job_resp_mock = MagicMock()
+ job_resp_mock.json_data = {"JobState": "RebootFailed"}
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_job_complete",
+ return_value=(job_resp_mock, ""))
+ f_module = self.get_module_mock(params=redfish_default_args)
+ preview_uri = ["/redfish/v1/Previous1.1"]
+ reboot_uri = ["/redfish/v1/Previous.life_cycle.1.1"]
+ update_uri = "/redfish/v1/SimpleUpdate"
+ with pytest.raises(Exception) as ex:
+ self.module.rollback_firmware(redfish_connection_mock, f_module, preview_uri, reboot_uri, update_uri)
+ assert ex.value.args[0] == "Failed to reboot the server."
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_job_complete",
+ return_value=(job_resp_mock, "Failed message."))
+ with pytest.raises(Exception) as ex:
+ self.module.rollback_firmware(redfish_connection_mock, f_module, preview_uri, reboot_uri, update_uri)
+ assert ex.value.args[0] == "Task excited after waiting for 900 seconds. " \
+ "Check console for firmware rollback status."
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_reboot_job",
+ return_value=({}, False, "Reset operation is failed."))
+ with pytest.raises(Exception) as ex:
+ self.module.rollback_firmware(redfish_connection_mock, f_module, preview_uri, reboot_uri, update_uri)
+ assert ex.value.args[0] == "Reset operation is failed."
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.get_job_status",
+ return_value=({"JobState": "Completed"}, False))
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Completed", "Id": "JID_123456789"}, True, ""))
+ job_resp_mock.json_data = {"JobState": "RebootCompleted"}
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_job_complete",
+ return_value=(job_resp_mock, ""))
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.simple_update", return_value=["JID_12345678"])
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.wait_for_redfish_idrac_reset",
+ return_value=(False, True, ""))
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.get_job_status",
+ return_value=([{"JobState": "Completed"}], 0))
+ result = self.module.rollback_firmware(redfish_connection_mock, f_module, preview_uri, reboot_uri, update_uri)
+ assert result[0] == [{'JobState': 'Completed'}, {'JobState': 'Completed'}]
+ assert result[1] == 0
+
+ redfish_default_args.update({"name": "BIOS", "reboot": False, "reboot_timeout": 900})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.get_job_status",
+ return_value=([{"JobState": "Scheduled"}], 0))
+ result = self.module.rollback_firmware(redfish_connection_mock, f_module, preview_uri, [], update_uri)
+ assert result[0] == [{"JobState": "Scheduled"}]
+ assert result[1] == 0
+
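+ # Covers main(): completed, failed and scheduled rollback jobs, an empty job status,
+ # and rejection of a negative reboot_timeout value.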
+ def test_main(self, redfish_connection_mock, redfish_response_mock, redfish_default_args, mocker):
+ redfish_default_args.update({"reboot": True, "name": "BIOS"})
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.get_rollback_preview_target",
+ return_value=(["Previous/URI/1"], [], "/redfish/SimpleUpdate"))
+ job_status = {"ActualRunningStartTime": "2023-08-07T05:09:08", "ActualRunningStopTime": "2023-08-07T05:12:41",
+ "CompletionTime": "2023-08-07T05:12:41", "Description": "Job Instance", "EndTime": "TIME_NA",
+ "Id": "JID_914026562845", "JobState": "Completed", "JobType": "FirmwareUpdate",
+ "Message": "Job completed successfully.", "MessageArgs": [], "MessageId": "PR19",
+ "Name": "Firmware Rollback: Network", "PercentComplete": 100, "StartTime": "2023-08-07T05:04:16",
+ "TargetSettingsURI": None}
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.rollback_firmware", return_value=(job_status, 0, False))
+ result = self._run_module(redfish_default_args)
+ assert result["msg"] == "Successfully completed the job for firmware rollback."
+ assert result["job_status"]["JobState"] == "Completed"
+ job_status.update({"JobState": "Failed"})
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.rollback_firmware", return_value=(job_status, 1, False))
+ result = self._run_module(redfish_default_args)
+ assert result["msg"] == "The job for firmware rollback has been completed with error(s)."
+ assert result["job_status"]["JobState"] == "Failed"
+ redfish_default_args.update({"reboot": False, "name": "BIOS"})
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.rollback_firmware", return_value=(job_status, 1, False))
+ result = self._run_module(redfish_default_args)
+ assert result["msg"] == "The job for firmware rollback has been scheduled with error(s)."
+ assert result["job_status"]["JobState"] == "Failed"
+ job_status.update({"JobState": "Scheduled"})
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.rollback_firmware", return_value=(job_status, 0, False))
+ result = self._run_module(redfish_default_args)
+ assert result["msg"] == "Successfully scheduled the job for firmware rollback."
+ assert result["job_status"]["JobState"] == "Scheduled"
+ job_status = {}
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.rollback_firmware", return_value=(job_status, 0, False))
+ result = self._run_module(redfish_default_args)
+ assert result["msg"] == "Failed to complete the job for firmware rollback."
+ redfish_default_args.update({"reboot": True, "name": "BIOS", "reboot_timeout": -1})
+ result = self._run_module_with_fail_json(redfish_default_args)
+ assert result["msg"] == "The parameter reboot_timeout value cannot be negative or zero."
+ redfish_default_args.update({"reboot": False, "name": "BIOS", "reboot_timeout": 900})
+ job_status.update({"JobState": "Completed"})
+ mocker.patch(MODULE_PATH + "redfish_firmware_rollback.rollback_firmware", return_value=(job_status, 0, True))
+ result = self._run_module(redfish_default_args)
+ assert result["msg"] == "Successfully completed the job for firmware rollback."
+
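+ # Checks get_rollback_preview_target(): missing SimpleUpdate target, no matching
+ # previous firmware (normal and check mode), and a matching BIOS rollback entry.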
+ def test_get_rollback_preview_target(self, redfish_connection_mock, redfish_response_mock, redfish_default_args):
+ redfish_default_args.update({"username": "user", "password": "pwd", "baseuri": "XX.XX.XX.XX",
+ "name": "BIOS", "reboot_timeout": 3600})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ redfish_response_mock.json_data = {"Actions": {"#UpdateService.SimpleUpdate": {}}}
+ with pytest.raises(Exception) as ex:
+ self.module.get_rollback_preview_target(redfish_connection_mock, f_module)
+ assert ex.value.args[0] == "The target firmware version does not support the firmware rollback."
+ redfish_response_mock.json_data = {
+ "Actions": {"#UpdateService.SimpleUpdate": {"target": "/redfish/v1/SimpleUpdate"}},
+ "FirmwareInventory": {"@odata.id": "/redfish/v1/FirmwareInventory"},
+ "Members": [
+ {"@odata.id": "uri/1", "Id": "Previous.1", "Name": "QLogic.1", "Version": "1.2"},
+ {"@odata.id": "uri/2", "Id": "Previous.2", "Name": "QLogic.2", "Version": "1.2"},
+ {"@odata.id": "uri/3", "Id": "Previous.3", "Name": "QLogic.3", "Version": "1.2"},
+ {"@odata.id": "uri/4", "Id": "Previous.4", "Name": "QLogic.4", "Version": "1.2"}]
+ }
+ with pytest.raises(Exception) as ex:
+ self.module.get_rollback_preview_target(redfish_connection_mock, f_module)
+ assert ex.value.args[0] == "No changes found to be applied."
+ f_module.check_mode = True
+ with pytest.raises(Exception) as ex:
+ self.module.get_rollback_preview_target(redfish_connection_mock, f_module)
+ assert ex.value.args[0] == "No changes found to be applied."
+ redfish_response_mock.json_data["Members"] = [
+ {"@odata.id": "uri/1", "Id": "Previous.1", "Name": "QLogic.1", "Version": "1.2"},
+ {"@odata.id": "uri/2", "Id": "Previous.2", "Name": "QLogic.2", "Version": "1.2"},
+ {"@odata.id": "uri/3", "Id": "Previous.3", "Name": "QLogic.3", "Version": "1.2"},
+ {"@odata.id": "uri/4", "Id": "Previous.4", "Name": "BIOS", "Version": "1.2"}
+ ]
+ with pytest.raises(Exception) as ex:
+ self.module.get_rollback_preview_target(redfish_connection_mock, f_module)
+ assert ex.value.args[0] == "Changes found to be applied."
+ f_module.check_mode = False
+ result = self.module.get_rollback_preview_target(redfish_connection_mock, f_module)
+ assert result[0] == ["uri/4"]
+ assert result[2] == "/redfish/v1/SimpleUpdate"
+
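+ # Validates get_job_status() for completed and failed firmware rollback jobs and for
+ # a job-wait timeout, which should raise the rollback status message.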
+ def test_get_job_status(self, redfish_connection_mock, redfish_response_mock, redfish_default_args, mocker):
+ redfish_default_args.update({"username": "user", "password": "pwd", "baseuri": "XX.XX.XX.XX", "Name": "BIOS",
+ "reboot_timeout": 900})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ redfish_response_mock.json_data = {"JobState": "Completed", "JobType": "FirmwareUpdate",
+ "Name": "Firmware Rollback: Network", "PercentComplete": 100}
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.wait_for_redfish_job_complete',
+ return_value=(redfish_response_mock, ""))
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.strip_substr_dict',
+ return_value={"JobState": "Completed", "JobType": "FirmwareUpdate",
+ "Name": "Firmware Rollback: Network", "PercentComplete": 100})
+ result = self.module.get_job_status(redfish_connection_mock, f_module, ["JID_123456789"], job_wait=True)
+ assert result[0] == [{'JobState': 'Completed', 'JobType': 'FirmwareUpdate',
+ 'Name': 'Firmware Rollback: Network', 'PercentComplete': 100}]
+ assert result[1] == 0
+ redfish_response_mock.json_data = {"JobState": "Failed", "JobType": "FirmwareUpdate",
+ "Name": "Firmware Rollback: Network", "PercentComplete": 100}
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.wait_for_redfish_job_complete',
+ return_value=(redfish_response_mock, ""))
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.strip_substr_dict',
+ return_value={"JobState": "Failed", "JobType": "FirmwareUpdate",
+ "Name": "Firmware Rollback: Network", "PercentComplete": 100})
+ result = self.module.get_job_status(redfish_connection_mock, f_module, ["JID_123456789"], job_wait=True)
+ assert result[0] == [{'JobState': 'Failed', 'JobType': 'FirmwareUpdate',
+ 'Name': 'Firmware Rollback: Network', 'PercentComplete': 100}]
+ assert result[1] == 1
+
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.wait_for_redfish_job_complete',
+ return_value=(redfish_response_mock, "some error message"))
+ with pytest.raises(Exception) as ex:
+ self.module.get_job_status(redfish_connection_mock, f_module, ["JID_123456789"], job_wait=True)
+ assert ex.value.args[0] == "Task excited after waiting for 900 seconds. Check console for " \
+ "firmware rollback status."
+
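+ # simple_update() should return the job ID parsed from the Location response header.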
+ def test_simple_update(self, redfish_connection_mock, redfish_response_mock, redfish_default_args, mocker):
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.time.sleep', return_value=None)
+ preview_uri, update_uri = ["/uri/1"], ["/uri/SimpleUpdate"]
+ redfish_response_mock.headers = {"Location": "/job/JID_123456789"}
+ result = self.module.simple_update(redfish_connection_mock, preview_uri, update_uri)
+ assert result == ["JID_123456789"]
+
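+ # require_session() should return the session Id and the X-Auth-Token header value.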
+ def test_require_session(self, redfish_connection_mock, redfish_response_mock, redfish_default_args):
+ redfish_default_args.update({"username": "user", "password": "pwd", "baseuri": "XX.XX.XX.XX", "Name": "BIOS"})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"Id": 1}
+ redfish_response_mock.headers = {"X-Auth-Token": "token_key"}
+ result = self.module.require_session(redfish_connection_mock, f_module)
+ assert result[0] == 1
+ assert result[1] == "token_key"
+
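+ # Top-level exception handling: URLError marks the host unreachable, all other
+ # parametrized exceptions cause the module to fail with a message.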
+ @pytest.mark.parametrize("exc_type", [RuntimeError, URLError, SSLValidationError, ConnectionError, KeyError,
+ ImportError, ValueError, TypeError, IOError, AssertionError, OSError])
+ def test_main_rollback_exception_handling_case(self, exc_type, mocker, redfish_default_args,
+ redfish_connection_mock, redfish_response_mock):
+ redfish_default_args.update({"name": "BIOS"})
+ redfish_response_mock.status_code = 400
+ redfish_response_mock.success = False
+ json_str = to_text(json.dumps({"data": "out"}))
+ if exc_type not in [HTTPError, SSLValidationError]:
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.get_rollback_preview_target',
+ side_effect=exc_type('test'))
+ else:
+ mocker.patch(MODULE_PATH + 'redfish_firmware_rollback.get_rollback_preview_target',
+ side_effect=exc_type(HTTPS_ADDRESS, 400, HTTP_ERROR_MSG,
+ {"accept-type": ACCESS_TYPE}, StringIO(json_str)))
+ if exc_type == HTTPError:
+ result = self._run_module(redfish_default_args)
+ assert result['failed'] is True
+ elif exc_type == URLError:
+ result = self._run_module(redfish_default_args)
+ assert result['unreachable'] is True
+ else:
+ result = self._run_module_with_fail_json(redfish_default_args)
+ assert result['failed'] is True
+ if exc_type == HTTPError:
+ assert 'error_info' in result
+ assert 'msg' in result
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py
index 1477015a1..9c838febc 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_powerstate.py
@@ -1,9 +1,9 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 2.1.3
-# Copyright (C) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
+# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
@@ -15,7 +15,7 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import redfish_powerstate
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
@@ -24,6 +24,7 @@ from ansible.module_utils._text import to_text
tarrget_error_msg = "The target device does not support the system reset" \
" feature using Redfish API."
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+HTTPS_ADDRESS = 'https://testhost.com'
@pytest.fixture
@@ -247,7 +248,7 @@ class TestRedfishPowerstate(FakeAnsibleModule):
"""failuere case when system does not supports and throws http error not found"""
f_module = self.get_module_mock()
redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/"
- redfish_connection_mock_for_powerstate.invoke_request.side_effect = HTTPError('http://testhost.com', 404,
+ redfish_connection_mock_for_powerstate.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 404,
json.dumps(tarrget_error_msg), {},
None)
with pytest.raises(Exception) as exc:
@@ -258,7 +259,7 @@ class TestRedfishPowerstate(FakeAnsibleModule):
"""failure case when system does not supports and throws http error 400 bad request"""
f_module = self.get_module_mock()
redfish_connection_mock_for_powerstate.root_uri = "/redfish/v1/"
- redfish_connection_mock_for_powerstate.invoke_request.side_effect = HTTPError('http://testhost.com', 400,
+ redfish_connection_mock_for_powerstate.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400,
tarrget_error_msg,
{}, None)
with pytest.raises(Exception, match=tarrget_error_msg) as exc:
@@ -468,7 +469,7 @@ class TestRedfishPowerstate(FakeAnsibleModule):
assert result['failed'] is True
else:
mocker.patch(MODULE_PATH + 'redfish_powerstate.run_change_power_state',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type(HTTPS_ADDRESS, 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
result = self._run_module_with_fail_json(redfish_default_args)
assert result['failed'] is True
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py
index 55fb3535f..40160edf5 100644
--- a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/test_redfish_storage_volume.py
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-
#
-# Dell EMC OpenManage Ansible Modules
-# Version 5.3.0
+# Dell OpenManage Ansible Modules
+# Version 7.0.0
# Copyright (C) 2020-2022 Dell Inc. or its subsidiaries. All Rights Reserved.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
@@ -15,13 +15,14 @@ __metaclass__ = type
import pytest
import json
from ansible_collections.dellemc.openmanage.plugins.modules import redfish_storage_volume
-from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule, Constants
+from ansible_collections.dellemc.openmanage.tests.unit.plugins.modules.common import FakeAnsibleModule
from ansible.module_utils.six.moves.urllib.error import URLError, HTTPError
from ansible.module_utils.urls import ConnectionError, SSLValidationError
from io import StringIO
from ansible.module_utils._text import to_text
MODULE_PATH = 'ansible_collections.dellemc.openmanage.plugins.modules.'
+HTTPS_ADDRESS = 'https://testhost.com'
@pytest.fixture
@@ -51,24 +52,30 @@ class TestStorageVolume(FakeAnsibleModule):
"encryption_types": "NativeDriveEncryption",
"encrypted": False,
"volume_id": "volume_id", "oem": {"Dell": "DellAttributes"},
- "initialize_type": "Slow"
+ "initialize_type": "Slow",
+ "reboot_server": True
}]
@pytest.mark.parametrize("param", arg_list1)
def test_redfish_storage_volume_main_success_case_01(self, mocker, redfish_default_args, module_mock,
- redfish_connection_mock_for_storage_volume, param):
+ redfish_connection_mock_for_storage_volume, param,
+ storage_volume_base_uri):
mocker.patch(MODULE_PATH + 'redfish_storage_volume.validate_inputs')
mocker.patch(MODULE_PATH + 'redfish_storage_volume.fetch_storage_resource')
mocker.patch(MODULE_PATH + 'redfish_storage_volume.configure_raid_operation',
return_value={"msg": "Successfully submitted volume task.",
"task_uri": "task_uri",
"task_id": 1234})
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_apply_time_supported_and_reboot_required',
+ return_value=True)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_reboot')
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_job_tracking_required',
+ return_value=False)
redfish_default_args.update(param)
result = self._run_module(redfish_default_args)
assert result["changed"] is True
assert result['msg'] == "Successfully submitted volume task."
assert result["task"]["id"] == 1234
- assert result["task"]["uri"] == "task_uri"
arg_list2 = [
{"state": "absent"},
@@ -99,17 +106,21 @@ class TestStorageVolume(FakeAnsibleModule):
side_effect=exc_type('test'))
else:
mocker.patch(MODULE_PATH + 'redfish_storage_volume.configure_raid_operation',
- side_effect=exc_type('http://testhost.com', 400, 'http error message',
+ side_effect=exc_type(HTTPS_ADDRESS, 400, 'http error message',
{"accept-type": "application/json"}, StringIO(json_str)))
- result = self._run_module_with_fail_json(redfish_default_args)
+ result = self._run_module(redfish_default_args)
assert 'task' not in result
assert 'msg' in result
- assert result['failed'] is True
+ if exc_type != URLError:
+ assert result['failed'] is True
+ else:
+ assert result['unreachable'] is True
if exc_type == HTTPError:
assert 'error_info' in result
msg1 = "Either state or command should be provided to further actions."
msg2 = "When state is present, either controller_id or volume_id must be specified to perform further actions."
+ msg3 = "Either state or command should be provided to further actions."
@pytest.mark.parametrize("input",
[{"param": {"xyz": 123}, "msg": msg1}, {"param": {"state": "present"}, "msg": msg2}])
@@ -119,6 +130,13 @@ class TestStorageVolume(FakeAnsibleModule):
self.module.validate_inputs(f_module)
assert exc.value.args[0] == input["msg"]
+ @pytest.mark.parametrize("input",
+ [{"param": {"state": "present", "controller_id": "abc"}, "msg": msg3}])
+ def test_validate_inputs_skip_case(self, input):
+ f_module = self.get_module_mock(params=input["param"])
+ val = self.module.validate_inputs(f_module)
+ assert not val
+
def test_get_success_message_case_01(self):
action = "create"
message = self.module.get_success_message(action, "JobService/Jobs/JID_1234")
@@ -131,7 +149,7 @@ class TestStorageVolume(FakeAnsibleModule):
message = self.module.get_success_message(action, None)
assert message["msg"] == "Successfully submitted {0} volume task.".format(action)
- @pytest.mark.parametrize("input", [{"state": "present"}, {"state": "absent"}, {"command": "initialize"}])
+ @pytest.mark.parametrize("input", [{"state": "present"}, {"state": "absent"}, {"command": "initialize"}, {"command": None}])
def test_configure_raid_operation(self, input, redfish_connection_mock_for_storage_volume, mocker):
f_module = self.get_module_mock(params=input)
mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_volume_create_modify',
@@ -195,6 +213,7 @@ class TestStorageVolume(FakeAnsibleModule):
redfish_response_mock, storage_volume_base_uri):
redfish_response_mock.success = True
f_module = self.get_module_mock(params={"volume_id": "volume_id"})
+ f_module.check_mode = False
message = {"msg": "Successfully submitted delete volume task.", "task_uri": "JobService/Jobs",
"task_id": "JID_456"}
mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
@@ -210,6 +229,33 @@ class TestStorageVolume(FakeAnsibleModule):
self.module.perform_volume_deletion(f_module, redfish_connection_mock_for_storage_volume)
assert exc.value.args[0] == "'volume_id' option is a required property for deleting a volume."
+ def test_perform_volume_deletion_check_mode_case(self, mocker, redfish_connection_mock_for_storage_volume,
+ redfish_response_mock, storage_volume_base_uri):
+ redfish_response_mock.success = True
+ f_module = self.get_module_mock(params={"volume_id": "volume_id"})
+ f_module.check_mode = True
+ message = {"msg": "Changes found to be applied.", "task_uri": "JobService/Jobs"}
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action',
+ return_value=redfish_response_mock)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_volume_deletion(f_module, redfish_connection_mock_for_storage_volume)
+ assert exc.value.args[0] == "Changes found to be applied."
+
+ def test_perform_volume_deletion_check_mode_failure_case(self, mocker, redfish_connection_mock_for_storage_volume,
+ redfish_response_mock, storage_volume_base_uri):
+ redfish_response_mock.code = 404
+ redfish_response_mock.success = False
+ f_module = self.get_module_mock(params={"volume_id": "volume_id"})
+ f_module.check_mode = True
+ message = {"msg": "No changes found to be applied.", "task_uri": "JobService/Jobs"}
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action',
+ return_value=redfish_response_mock)
+ with pytest.raises(Exception) as exc:
+ self.module.perform_volume_deletion(f_module, redfish_connection_mock_for_storage_volume)
+ assert exc.value.args[0] == "No changes found to be applied."
+
def test_perform_volume_create_modify_success_case_01(self, mocker, storage_volume_base_uri,
redfish_connection_mock_for_storage_volume):
f_module = self.get_module_mock(params={"volume_id": "volume_id", "controller_id": "controller_id"})
@@ -238,6 +284,21 @@ class TestStorageVolume(FakeAnsibleModule):
assert message["msg"] == "Successfully submitted modify volume task."
assert message["task_id"] == "JID_123"
+ def test_perform_volume_create_modify_success_case_03(self, mocker, storage_volume_base_uri,
+ redfish_connection_mock_for_storage_volume,
+ redfish_response_mock):
+ f_module = self.get_module_mock(params={"volume_id": "volume_id"})
+ message = {"msg": "Successfully submitted modify volume task.", "task_uri": "JobService/Jobs",
+ "task_id": "JID_123"}
+ redfish_response_mock.success = False
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_volume_id_exists', return_value=redfish_response_mock)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.volume_payload', return_value={"payload": "value"})
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.perform_storage_volume_action', return_value=message)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.check_mode_validation', return_value=None)
+ message = self.module.perform_volume_create_modify(f_module, redfish_connection_mock_for_storage_volume)
+ assert message["msg"] == "Successfully submitted modify volume task."
+ assert message["task_id"] == "JID_123"
+
def test_perform_volume_create_modify_failure_case_01(self, mocker, storage_volume_base_uri,
redfish_connection_mock_for_storage_volume,
redfish_response_mock):
@@ -264,7 +325,7 @@ class TestStorageVolume(FakeAnsibleModule):
def test_perform_storage_volume_action_exception_case(self, redfish_response_mock,
redfish_connection_mock_for_storage_volume):
redfish_response_mock.headers.update({"Location": "JobService/Jobs/JID_123"})
- redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com', 400,
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400,
'', {}, None)
with pytest.raises(HTTPError) as ex:
self.module.perform_storage_volume_action("POST", "uri", redfish_connection_mock_for_storage_volume,
@@ -341,7 +402,7 @@ class TestStorageVolume(FakeAnsibleModule):
redfish_connection_mock_for_storage_volume,
redfish_response_mock):
f_module = self.get_module_mock(params={"controller_id": "1234"})
- redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com',
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS,
404,
"Specified Controller 123 does"
" not exist in the System.",
@@ -359,7 +420,7 @@ class TestStorageVolume(FakeAnsibleModule):
redfish_response_mock):
f_module = self.get_module_mock(params={"controller_id": "1234"})
msg = "http error"
- redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com', 400,
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400,
msg, {}, None)
with pytest.raises(Exception, match=msg) as exc:
self.module.check_specified_identifier_exists_in_the_system(f_module,
@@ -389,7 +450,7 @@ class TestStorageVolume(FakeAnsibleModule):
f_module = self.get_module_mock(params={"controller_id": "RAID.Mezzanine.1C-1",
"drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"]})
val = self.module.check_physical_disk_exists(f_module, drive)
- assert val is True
+ assert val
def test_check_physical_disk_exists_success_case_02(self):
drive = [
@@ -400,7 +461,7 @@ class TestStorageVolume(FakeAnsibleModule):
]
f_module = self.get_module_mock(params={"controller_id": "RAID.Mezzanine.1C-1", "drives": []})
val = self.module.check_physical_disk_exists(f_module, drive)
- assert val is True
+ assert val
def test_check_physical_disk_exists_error_case_01(self):
drive = [
@@ -431,9 +492,10 @@ class TestStorageVolume(FakeAnsibleModule):
"block_size_bytes": 512,
"encryption_types": "NativeDriveEncryption",
"encrypted": True,
- "volume_type": "NonRedundant",
+ "raid_type": "RAID0",
"name": "VD1",
"optimum_io_size_bytes": 65536,
+ "apply_time": "Immediate",
"oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD",
"DiskCachePolicy": "Disabled",
"LockStatus": "Unlocked",
@@ -446,7 +508,7 @@ class TestStorageVolume(FakeAnsibleModule):
payload = self.module.volume_payload(f_module)
assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/Storage/" \
"Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
- assert payload["VolumeType"] == "NonRedundant"
+ assert payload["RAIDType"] == "RAID0"
assert payload["Name"] == "VD1"
assert payload["BlockSizeBytes"] == 512
assert payload["CapacityBytes"] == 299439751168
@@ -454,15 +516,16 @@ class TestStorageVolume(FakeAnsibleModule):
assert payload["Encrypted"] is True
assert payload["EncryptionTypes"] == ["NativeDriveEncryption"]
assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead"
+ assert payload["@Redfish.OperationApplyTime"] == "Immediate"
def test_volume_payload_case_02(self):
param = {"block_size_bytes": 512,
- "volume_type": "NonRedundant",
+ "raid_type": "RAID0",
"name": "VD1",
"optimum_io_size_bytes": 65536}
f_module = self.get_module_mock(params=param)
payload = self.module.volume_payload(f_module)
- assert payload["VolumeType"] == "NonRedundant"
+ assert payload["RAIDType"] == "RAID0"
assert payload["Name"] == "VD1"
assert payload["BlockSizeBytes"] == 512
assert payload["OptimumIOSizeBytes"] == 65536
@@ -475,7 +538,7 @@ class TestStorageVolume(FakeAnsibleModule):
"block_size_bytes": 512,
"encryption_types": "NativeDriveEncryption",
"encrypted": False,
- "volume_type": "NonRedundant",
+ "raid_type": "RAID0",
"name": "VD1",
"optimum_io_size_bytes": 65536,
"oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD",
@@ -490,7 +553,7 @@ class TestStorageVolume(FakeAnsibleModule):
payload = self.module.volume_payload(f_module)
assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/" \
"Storage/Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
- assert payload["VolumeType"] == "NonRedundant"
+ assert payload["RAIDType"] == "RAID0"
assert payload["Name"] == "VD1"
assert payload["BlockSizeBytes"] == 512
assert payload["CapacityBytes"] == 299439751168
@@ -499,6 +562,109 @@ class TestStorageVolume(FakeAnsibleModule):
assert payload["EncryptionTypes"] == ["NativeDriveEncryption"]
assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead"
+ def test_volume_payload_case_04(self, storage_volume_base_uri):
+ param = {
+ "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"],
+ "capacity_bytes": 299439751168,
+ "block_size_bytes": 512,
+ "encryption_types": "NativeDriveEncryption",
+ "encrypted": True,
+ "volume_type": "NonRedundant",
+ "name": "VD1",
+ "optimum_io_size_bytes": 65536,
+ "oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD",
+ "DiskCachePolicy": "Disabled",
+ "LockStatus": "Unlocked",
+ "MediaType": "HardDiskDrive",
+ "ReadCachePolicy": "NoReadAhead",
+ "SpanDepth": 1,
+ "SpanLength": 2,
+ "WriteCachePolicy": "WriteThrough"}}}}
+ f_module = self.get_module_mock(params=param)
+ payload = self.module.volume_payload(f_module)
+ assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/Storage/" \
+ "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
+ assert payload["RAIDType"] == "RAID0"
+ assert payload["Name"] == "VD1"
+ assert payload["BlockSizeBytes"] == 512
+ assert payload["CapacityBytes"] == 299439751168
+ assert payload["OptimumIOSizeBytes"] == 65536
+ assert payload["Encrypted"] is True
+ assert payload["EncryptionTypes"] == ["NativeDriveEncryption"]
+ assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead"
+
+ def test_volume_payload_case_05(self, storage_volume_base_uri):
+ param = {
+ "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-2:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-3:RAID.Mezzanine.1C-1"],
+ "capacity_bytes": 299439751168,
+ "block_size_bytes": 512,
+ "encryption_types": "NativeDriveEncryption",
+ "encrypted": True,
+ "raid_type": "RAID6",
+ "name": "VD1",
+ "optimum_io_size_bytes": 65536,
+ "oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD",
+ "DiskCachePolicy": "Disabled",
+ "LockStatus": "Unlocked",
+ "MediaType": "HardDiskDrive",
+ "ReadCachePolicy": "NoReadAhead",
+ "SpanDepth": 1,
+ "SpanLength": 2,
+ "WriteCachePolicy": "WriteThrough"}}}}
+ f_module = self.get_module_mock(params=param)
+ payload = self.module.volume_payload(f_module)
+ assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/Storage/" \
+ "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
+ assert payload["RAIDType"] == "RAID6"
+ assert payload["Name"] == "VD1"
+ assert payload["BlockSizeBytes"] == 512
+ assert payload["CapacityBytes"] == 299439751168
+ assert payload["OptimumIOSizeBytes"] == 65536
+ assert payload["Encrypted"] is True
+ assert payload["EncryptionTypes"] == ["NativeDriveEncryption"]
+ assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead"
+
+ def test_volume_payload_case_06(self, storage_volume_base_uri):
+ param = {
+ "drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-2:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-3:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-4:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-5:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-6:RAID.Mezzanine.1C-1",
+ "Disk.Bay.0:Enclosure.Internal.0-7:RAID.Mezzanine.1C-1"],
+ "capacity_bytes": 299439751168,
+ "block_size_bytes": 512,
+ "encryption_types": "NativeDriveEncryption",
+ "encrypted": True,
+ "raid_type": "RAID60",
+ "name": "VD1",
+ "optimum_io_size_bytes": 65536,
+ "oem": {"Dell": {"DellVirtualDisk": {"BusProtocol": "SAS", "Cachecade": "NonCachecadeVD",
+ "DiskCachePolicy": "Disabled",
+ "LockStatus": "Unlocked",
+ "MediaType": "HardDiskDrive",
+ "ReadCachePolicy": "NoReadAhead",
+ "SpanDepth": 1,
+ "SpanLength": 2,
+ "WriteCachePolicy": "WriteThrough"}}}}
+ f_module = self.get_module_mock(params=param)
+ payload = self.module.volume_payload(f_module)
+ assert payload["Drives"][0]["@odata.id"] == "/redfish/v1/Systems/System.Embedded.1/Storage/" \
+ "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Mezzanine.1C-1"
+ assert payload["RAIDType"] == "RAID60"
+ assert payload["Name"] == "VD1"
+ assert payload["BlockSizeBytes"] == 512
+ assert payload["CapacityBytes"] == 299439751168
+ assert payload["OptimumIOSizeBytes"] == 65536
+ assert payload["Encrypted"] is True
+ assert payload["EncryptionTypes"] == ["NativeDriveEncryption"]
+ assert payload["Dell"]["DellVirtualDisk"]["ReadCachePolicy"] == "NoReadAhead"
+
def test_fetch_storage_resource_success_case_01(self, redfish_connection_mock_for_storage_volume,
redfish_response_mock):
f_module = self.get_module_mock()
@@ -551,7 +717,7 @@ class TestStorageVolume(FakeAnsibleModule):
f_module = self.get_module_mock()
msg = "Target out-of-band controller does not support storage feature using Redfish API."
redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/"
- redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com', 404,
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 404,
json.dumps(msg), {}, None)
with pytest.raises(Exception) as exc:
self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume)
@@ -561,7 +727,7 @@ class TestStorageVolume(FakeAnsibleModule):
f_module = self.get_module_mock()
msg = "http error"
redfish_connection_mock_for_storage_volume.root_uri = "/redfish/v1/"
- redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError('http://testhost.com', 400,
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400,
msg, {}, None)
with pytest.raises(Exception, match=msg) as exc:
self.module.fetch_storage_resource(f_module, redfish_connection_mock_for_storage_volume)
@@ -579,7 +745,7 @@ class TestStorageVolume(FakeAnsibleModule):
redfish_response_mock, storage_volume_base_uri):
param = {"drives": ["Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"],
"capacity_bytes": 214748364800, "block_size_bytes": 512, "encryption_types": "NativeDriveEncryption",
- "encrypted": False, "volume_type": "NonRedundant", "optimum_io_size_bytes": 65536}
+ "encrypted": False, "raid_type": "RAID0", "optimum_io_size_bytes": 65536}
f_module = self.get_module_mock(params=param)
f_module.check_mode = True
with pytest.raises(Exception) as exc:
@@ -598,7 +764,7 @@ class TestStorageVolume(FakeAnsibleModule):
"Members": [{"@odata.id": "/redfish/v1/Systems/System.Embedded.1/Storage/"
"RAID.Integrated.1-1/Volumes/Disk.Virtual.0:RAID.Integrated.1-1"}],
"Name": "VD0", "BlockSizeBytes": 512, "CapacityBytes": 214748364800, "Encrypted": False,
- "EncryptionTypes": ["NativeDriveEncryption"], "OptimumIOSizeBytes": 65536, "VolumeType": "NonRedundant",
+ "EncryptionTypes": ["NativeDriveEncryption"], "OptimumIOSizeBytes": 65536, "RAIDType": "RAID0",
"Links": {"Drives": [{"@odata.id": "Drives/Disk.Bay.0:Enclosure.Internal.0-0:RAID.Integrated.1-1"}]}}
param.update({"name": "VD0"})
f_module = self.get_module_mock(params=param)
@@ -608,3 +774,358 @@ class TestStorageVolume(FakeAnsibleModule):
f_module, redfish_connection_mock_for_storage_volume, "create",
"/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/")
assert exc.value.args[0] == "No changes found to be applied."
+
+ def test_check_mode_validation_01(self, redfish_connection_mock_for_storage_volume,
+ redfish_response_mock, storage_volume_base_uri):
+ param1 = {"volume_id": None, 'name': None}
+ f_module = self.get_module_mock(params=param1)
+ f_module.check_mode = False
+ result = self.module.check_mode_validation(f_module,
+ redfish_connection_mock_for_storage_volume,
+ "",
+ "/redfish/v1/Systems/System.Embedded.1/Storage/RAID.Integrated.1-1/Volumes/")
+ assert not result
+
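+ # check_raid_type_supported: supported raid_type/volume_type values pass
+ # silently, an unsupported value raises an error, and HTTP errors propagate.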
+ def test_check_raid_type_supported_success_case01(self, mocker, redfish_response_mock, storage_volume_base_uri,
+ redfish_connection_mock_for_storage_volume):
+ param = {"raid_type": "RAID0", "controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {'StorageControllers': [{'SupportedRAIDTypes': ['RAID0', 'RAID6', 'RAID60']}]}
+ self.module.check_raid_type_supported(f_module,
+ redfish_connection_mock_for_storage_volume)
+
+ def test_check_raid_type_supported_success_case02(self, mocker, redfish_response_mock, storage_volume_base_uri,
+ redfish_connection_mock_for_storage_volume):
+ param = {"volume_type": "NonRedundant", "controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {'StorageControllers': [{'SupportedRAIDTypes': ['RAID0', 'RAID6', 'RAID60']}]}
+ self.module.check_raid_type_supported(f_module,
+ redfish_connection_mock_for_storage_volume)
+
+ def test_check_raid_type_supported_success_case03(self, mocker, redfish_response_mock, storage_volume_base_uri,
+ redfish_connection_mock_for_storage_volume):
+ param = {"raid_type": "RAID6", "controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {'StorageControllers': [{'SupportedRAIDTypes': ['RAID0', 'RAID6', 'RAID60']}]}
+ self.module.check_raid_type_supported(f_module,
+ redfish_connection_mock_for_storage_volume)
+
+ def test_check_raid_type_supported_success_case04(self, mocker, redfish_response_mock, storage_volume_base_uri,
+ redfish_connection_mock_for_storage_volume):
+ param = {"raid_type": "RAID60", "controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {'StorageControllers': [{'SupportedRAIDTypes': ['RAID0', 'RAID6', 'RAID60']}]}
+ self.module.check_raid_type_supported(f_module,
+ redfish_connection_mock_for_storage_volume)
+
+ def test_check_raid_type_supported_failure_case(self, mocker, redfish_response_mock, storage_volume_base_uri,
+ redfish_connection_mock_for_storage_volume):
+ param = {"raid_type": "RAID9", "controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {'StorageControllers': [{'SupportedRAIDTypes': ['RAID0', 'RAID6', 'RAID60']}]}
+ with pytest.raises(Exception) as exc:
+ self.module.check_raid_type_supported(f_module,
+ redfish_connection_mock_for_storage_volume)
+ assert exc.value.args[0] == "RAID Type RAID9 is not supported."
+
+ def test_check_raid_type_supported_exception_case(self, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"volume_type": "NonRedundant", "controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400,
+ '', {}, None)
+ with pytest.raises(HTTPError) as ex:
+ self.module.check_raid_type_supported(f_module, redfish_connection_mock_for_storage_volume)
+
+ def test_get_apply_time_success_case_01(self, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"controller_id": "controller_id", "apply_time": "Immediate"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"@Redfish.OperationApplyTimeSupport": {"SupportedValues": ["Immediate"]}}
+ self.module.get_apply_time(f_module,
+ redfish_connection_mock_for_storage_volume,
+ controller_id="controller_id")
+
+ def test_get_apply_time_success_case_02(self, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"controller_id": "controller_id"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"@Redfish.OperationApplyTimeSupport": {"SupportedValues": ["Immediate"]}}
+ self.module.get_apply_time(f_module,
+ redfish_connection_mock_for_storage_volume,
+ controller_id="controller_id")
+
+ def test_get_apply_time_supported_failure_case(self, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"controller_id": "controller_id", "apply_time": "Immediate"}
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ redfish_response_mock.json_data = {"@Redfish.OperationApplyTimeSupport": {"SupportedValues": ["OnReset"]}}
+ with pytest.raises(Exception) as exc:
+ self.module.get_apply_time(f_module,
+ redfish_connection_mock_for_storage_volume,
+ controller_id="controller_id")
+ assert exc.value.args[0] == "Apply time Immediate \
+is not supported. The supported values are ['OnReset']. Enter the valid values and retry the operation."
+
+ def test_get_apply_time_supported_exception_case(self, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"controller_id": "controller_id", "apply_time": "Immediate"}
+ f_module = self.get_module_mock(params=param)
+ redfish_connection_mock_for_storage_volume.invoke_request.side_effect = HTTPError(HTTPS_ADDRESS, 400,
+ '', {}, None)
+ with pytest.raises(HTTPError) as ex:
+ self.module.get_apply_time(f_module, redfish_connection_mock_for_storage_volume,
+ controller_id="controller_id")
+
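+ # check_apply_time_supported_and_reboot_required: a reboot is flagged when
+ # the apply time is OnReset and reboot_server is set; with an Immediate
+ # apply time and reboot_server disabled, no reboot is required.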
+ def test_check_apply_time_supported_and_reboot_required_success_case01(self, mocker,
+ redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"reboot_server": True}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.get_apply_time',
+ return_value="OnReset")
+ apply_time = self.module.get_apply_time(f_module, redfish_connection_mock_for_storage_volume)
+ val = self.module.check_apply_time_supported_and_reboot_required(f_module,
+ redfish_connection_mock_for_storage_volume,
+ controller_id="controller_id")
+ assert val
+
+ def test_check_apply_time_supported_and_reboot_required_success_case02(self, mocker,
+ redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"reboot_server": False}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.get_apply_time',
+ return_value="Immediate")
+ apply_time = self.module.get_apply_time(f_module, redfish_connection_mock_for_storage_volume)
+ val = self.module.check_apply_time_supported_and_reboot_required(f_module,
+ redfish_connection_mock_for_storage_volume,
+ controller_id="controller_id")
+ assert not val
+
+ def test_check_job_tracking_required_success_case01(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"job_wait": True}
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.get_apply_time',
+ return_value="OnReset")
+ f_module = self.get_module_mock(params=param)
+ redfish_response_mock.success = True
+ val = self.module.check_job_tracking_required(f_module,
+ redfish_connection_mock_for_storage_volume,
+ reboot_required=False,
+ controller_id="controller_id")
+ assert not val
+
+ def test_check_job_tracking_required_success_case02(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"job_wait": True}
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.get_apply_time',
+ return_value="Immediate")
+ f_module = self.get_module_mock(params=param)
+ val = self.module.check_job_tracking_required(f_module,
+ redfish_connection_mock_for_storage_volume,
+ reboot_required=True,
+ controller_id="controller_id")
+ assert val
+
+ def test_check_job_tracking_required_success_case03(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri):
+ param = {"job_wait": False}
+ mocker.patch(MODULE_PATH + 'redfish_storage_volume.get_apply_time',
+ return_value="Immediate")
+ f_module = self.get_module_mock(params=param)
+ val = self.module.check_job_tracking_required(f_module,
+ redfish_connection_mock_for_storage_volume,
+ reboot_required=True,
+ controller_id=None)
+ assert not val
+
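+ # perform_reboot / perform_force_reboot: a job that never completes raises
+ # a timeout error, a completed job returns without output, and a failed
+ # reboot job triggers the force-reboot path when force_reboot is set.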
+ def test_perform_reboot_timeout_case(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": False}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Completed", "Id": "JID_123456789"}, True, ""))
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=("", "The job is not complete after 2 seconds."))
+ with pytest.raises(Exception) as exc:
+ self.module.perform_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert exc.value.args[0] == "The job is not complete after 2 seconds."
+
+ def test_perform_reboot_success_case01(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": False}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Completed", "Id": "JID_123456789"}, True, ""))
+ redfish_response_mock.json_data = {"JobState": "Completed"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is completed."))
+ val = self.module.perform_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert not val
+
+ def test_perform_reboot_success_case02(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": True}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Failed", "Id": "JID_123456789"}, True, ""))
+ redfish_response_mock.json_data = {"JobState": "Failed"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is failed."))
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.perform_force_reboot",
+ return_value=True)
+ val = self.module.perform_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert not val
+
+ def test_perform_reboot_without_output_case(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": False}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=("", False, ""))
+
+ val = self.module.perform_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert not val
+
+ def test_perform_force_reboot_timeout_case(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": False}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Completed", "Id": "JID_123456789"}, True, ""))
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=("", "The job is not complete after 2 seconds."))
+ with pytest.raises(Exception) as exc:
+ self.module.perform_force_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert exc.value.args[0] == "The job is not complete after 2 seconds."
+
+ def test_perform_force_reboot_success_case01(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": False}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Completed", "Id": "JID_123456789"}, True, ""))
+ redfish_response_mock.json_data = {"JobState": "Completed"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is completed."))
+ val = self.module.perform_force_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert not val
+
+ def test_perform_force_reboot_failure_case(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ param = {"force_reboot": True}
+ f_module = self.get_module_mock(params=param)
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=({"JobState": "Completed", "Id": "JID_123456789"}, True, ""))
+ redfish_response_mock.json_data = {"JobState": "Failed"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is completed."))
+ with pytest.raises(Exception) as exc:
+ self.module.perform_force_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert exc.value.args[0] == "Failed to reboot the server."
+
+ def test_perform_force_reboot_without_output_case(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ f_module = self.get_module_mock()
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_redfish_reboot_job",
+ return_value=("", False, ""))
+ val = self.module.perform_force_reboot(f_module, redfish_connection_mock_for_storage_volume)
+ assert not val
+
+ def test_track_job_success_case01(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ job_id = "JID_123456789"
+ job_url = "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"
+ f_module = self.get_module_mock()
+ redfish_response_mock.json_data = {"JobState": "Scheduled"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is scheduled."))
+ with pytest.raises(Exception) as exc:
+ self.module.track_job(f_module, redfish_connection_mock_for_storage_volume, job_id, job_url)
+ assert exc.value.args[0] == "The job is successfully submitted."
+
+ def test_track_job_success_case02(self, mocker,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ job_id = "JID_123456789"
+ job_url = "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"
+ f_module = self.get_module_mock()
+ redfish_response_mock = {}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job has no response."))
+ with pytest.raises(Exception) as exc:
+ self.module.track_job(f_module, redfish_connection_mock_for_storage_volume, job_id, job_url)
+ assert exc.value.args[0] == "The job has no response."
+
+ def test_track_job_success_case03(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ job_id = "JID_123456789"
+ job_url = "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"
+ f_module = self.get_module_mock()
+ redfish_response_mock.json_data = {"JobState": "Failed"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is failed."))
+ with pytest.raises(Exception) as exc:
+ self.module.track_job(f_module, redfish_connection_mock_for_storage_volume, job_id, job_url)
+ assert exc.value.args[0] == "Unable to complete the task initiated for creating the storage volume."
+
+ def test_track_job_success_case04(self, mocker, redfish_response_mock,
+ redfish_connection_mock_for_storage_volume,
+ storage_volume_base_uri,
+ redfish_default_args):
+ job_id = "JID_123456789"
+ job_url = "/redfish/v1/Managers/iDRAC.Embedded.1/JID_123456789"
+ f_module = self.get_module_mock()
+ redfish_response_mock.json_data = {"JobState": "Success"}
+ mocker.patch(MODULE_PATH + "redfish_storage_volume.wait_for_job_completion",
+ return_value=(redfish_response_mock, "The job is failed."))
+ with pytest.raises(Exception) as exc:
+ self.module.track_job(f_module, redfish_connection_mock_for_storage_volume, job_id, job_url)
+ assert exc.value.args[0] == "The job is successfully completed."
+
+ def test_validate_negative_job_time_out(self, redfish_default_args):
+ redfish_default_args.update({"job_wait": True, "job_wait_timeout": -5})
+ f_module = self.get_module_mock(params=redfish_default_args)
+ with pytest.raises(Exception) as ex:
+ self.module.validate_negative_job_time_out(f_module)
+ assert ex.value.args[0] == "The parameter job_wait_timeout value cannot be negative or zero."
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/utils.py b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/utils.py
new file mode 100644
index 000000000..bd264f6b3
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/plugins/modules/utils.py
@@ -0,0 +1,49 @@
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import unittest
+import tempfile
+from unittest.mock import patch
+from ansible.module_utils import basic
+from ansible.module_utils.common.text.converters import to_bytes
+
+
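+# Serialize the supplied parameters into ANSIBLE_MODULE_ARGS so that
+# AnsibleModule reads them as if they were passed in by the Ansible runner.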
+def set_module_args(args):
+ args['_ansible_remote_tmp'] = tempfile.gettempdir()
+ args['_ansible_keep_remote_files'] = False
+
+ args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
+ basic._ANSIBLE_ARGS = to_bytes(args)
+
+
+class AnsibleExitJson(Exception):
+ pass
+
+
+class AnsibleFailJson(Exception):
+ pass
+
+
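+# Stand-ins for AnsibleModule.exit_json/fail_json that raise exceptions
+# instead of exiting the process, so tests can capture the module result.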
+def exit_json(*args, **kwargs):
+ if "changed" not in kwargs:
+ kwargs["changed"] = False
+ raise AnsibleExitJson(kwargs)
+
+
+def fail_json(*args, **kwargs):
+ kwargs['failed'] = True
+ raise AnsibleFailJson(kwargs)
+
+
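+# Base class for module unit tests: patches exit_json/fail_json and
+# time.sleep for the duration of each test and clears the module arguments.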
+class ModuleTestCase(unittest.TestCase):
+
+ def setUp(self):
+ self.mock_module = patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)
+ self.mock_module.start()
+ self.mock_sleep = patch('time.sleep')
+ self.mock_sleep.start()
+ set_module_args({})
+ self.addCleanup(self.mock_module.stop)
+ self.addCleanup(self.mock_sleep.stop)
diff --git a/ansible_collections/dellemc/openmanage/tests/unit/requirements.txt b/ansible_collections/dellemc/openmanage/tests/unit/requirements.txt
new file mode 100644
index 000000000..324a0eebc
--- /dev/null
+++ b/ansible_collections/dellemc/openmanage/tests/unit/requirements.txt
@@ -0,0 +1,9 @@
+omsdk
+pytest
+pytest-xdist==2.5.0
+mock
+pytest-mock
+pytest-cov
+# pytest-ansible==2.0.1
+coverage
+netaddr>=0.7.19
diff --git a/ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml
deleted file mode 100644
index 2b94f4ccf..000000000
--- a/ansible_collections/dellemc/os10/.github/workflows/ansible-test.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-name: CI
-on:
-- pull_request
-
-jobs:
- sanity:
- name: Sanity (${{ matrix.ansible }})
- strategy:
- matrix:
- ansible:
- - stable-2.10
- - devel
- runs-on: ubuntu-latest
- steps:
-
- - name: Check out code
- uses: actions/checkout@v1
- with:
- path: ansible_collections/dellemc/os10
-
- - name: Set up Python 3.6
- uses: actions/setup-python@v1
- with:
- python-version: 3.6
-
- - name: Install ansible-base (${{ matrix.ansible }})
- run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
-
- - name: Install ansible_collections.ansible.netcommon
- run: ansible-galaxy collection install ansible.netcommon -p ../../
-
- - name: Run sanity tests
- run: ansible-test sanity --docker -v --color --python 3.6
diff --git a/ansible_collections/dellemc/os10/.gitignore b/ansible_collections/dellemc/os10/.gitignore
deleted file mode 100644
index c6fc14ad0..000000000
--- a/ansible_collections/dellemc/os10/.gitignore
+++ /dev/null
@@ -1,387 +0,0 @@
-
-# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-
-### dotenv ###
-.env
-
-### Emacs ###
-# -*- mode: gitignore; -*-
-*~
-\#*\#
-/.emacs.desktop
-/.emacs.desktop.lock
-*.elc
-auto-save-list
-tramp
-.\#*
-
-# Org-mode
-.org-id-locations
-*_archive
-
-# flymake-mode
-*_flymake.*
-
-# eshell files
-/eshell/history
-/eshell/lastdir
-
-# elpa packages
-/elpa/
-
-# reftex files
-*.rel
-
-# AUCTeX auto folder
-/auto/
-
-# cask packages
-.cask/
-dist/
-
-# Flycheck
-flycheck_*.el
-
-# server auth directory
-/server/
-
-# projectiles files
-.projectile
-
-# directory configuration
-.dir-locals.el
-
-# network security
-/network-security.data
-
-
-### Git ###
-# Created by git for backups. To disable backups in Git:
-# $ git config --global mergetool.keepBackup false
-*.orig
-
-# Created by git when using merge tools for conflicts
-*.BACKUP.*
-*.BASE.*
-*.LOCAL.*
-*.REMOTE.*
-*_BACKUP_*.txt
-*_BASE_*.txt
-*_LOCAL_*.txt
-*_REMOTE_*.txt
-
-#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!#
-
-### Linux ###
-
-# temporary files which can be created if a process still has a handle open of a deleted file
-.fuse_hidden*
-
-# KDE directory preferences
-.directory
-
-# Linux trash folder which might appear on any partition or disk
-.Trash-*
-
-# .nfs files are created when an open file is removed but is still being accessed
-.nfs*
-
-### PyCharm+all ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### PyCharm+all Patch ###
-# Ignores the whole .idea folder and all .iml files
-# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
-
-.idea/
-
-# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
-
-*.iml
-modules.xml
-.idea/misc.xml
-*.ipr
-
-# Sonarlint plugin
-.idea/sonarlint
-
-### pydev ###
-.pydevproject
-
-### Python ###
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-.pytest_cache/
-
-# Translations
-*.mo
-*.pot
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# pyenv
-.python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# Mr Developer
-.mr.developer.cfg
-.project
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-### Vim ###
-# Swap
-[._]*.s[a-v][a-z]
-[._]*.sw[a-p]
-[._]s[a-rt-v][a-z]
-[._]ss[a-gi-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-Sessionx.vim
-
-# Temporary
-.netrwhist
-# Auto-generated tag files
-tags
-# Persistent undo
-[._]*.un~
-
-### WebStorm ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-
-# Generated files
-
-# Sensitive or high-churn files
-
-# Gradle
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-
-# Mongo Explorer plugin
-
-# File-based project format
-
-# IntelliJ
-
-# mpeltonen/sbt-idea plugin
-
-# JIRA plugin
-
-# Cursive Clojure plugin
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-
-# Editor-based Rest Client
-
-# Android studio 3.1+ serialized cache file
-
-### WebStorm Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator/
-
-### Windows ###
-# Windows thumbnail cache files
-Thumbs.db
-Thumbs.db:encryptable
-ehthumbs.db
-ehthumbs_vista.db
-
-# Dump file
-*.stackdump
-
-# Folder config file
-[Dd]esktop.ini
-
-# Recycle Bin used on file shares
-$RECYCLE.BIN/
-
-# Windows Installer files
-*.cab
-*.msi
-*.msix
-*.msm
-*.msp
-
-# Windows shortcuts
-*.lnk
-
-# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
diff --git a/ansible_collections/dellemc/os10/FILES.json b/ansible_collections/dellemc/os10/FILES.json
deleted file mode 100644
index d695485f4..000000000
--- a/ansible_collections/dellemc/os10/FILES.json
+++ /dev/null
@@ -1,8951 +0,0 @@
-{
- "files": [
- {
- "name": ".",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dee254ee9bf55d8338407f70bb76c3880022002a91e1a804274a4cc733b454de",
- "format": 1
- },
- {
- "name": "tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/target-prefixes.network",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bd6264633dfbb82b3b2abd95db6a4d24ce7c834747e37628ce9453d6b9d642a9",
- "format": 1
- },
- {
- "name": "tests/integration/targets",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tests/uplink_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ad4c2834dd2c4c1d2bb9027b0d478a33f790f9e62e58bb63a247963acbd6dad",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1464bb853617218ab9d0cc4f4e9cf80806e79d88944d34f1c4552b5188880ba5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0e26f625d487441753393154f4cd2f714697dc87592f824e4ef3c5e13da1013",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5c5d89caa1a4b04bfd0a1259601946bb23eb8a92c2a9669b0f3ca9569f2d139",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e4c03a49f371e8720181c1fa5ef9022129fa1699fef19edb21bf835d08f5d94e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "03a4a548eb2a047077b132d28616752341b92b8e196890b4fb72f4e1bb22a287",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_uplink_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tests/lldp_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7dcd2f26d1ed6ea1e001dd47cc618e79cd5f0a8fa6b3e36a4d7b72bcd3b85515",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "038c67cb186cb91c026e8c25d136473e00c866947ac6853296a1d49e2850f82e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bf26c47803f887e09c07bf229230916348fced9ebdcd207e0d0a02e5a6c18a16",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "27430a9f600b854e9cc8117c8144f7a85b90e19b2d29bed6842a5540250022e1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ae1a076a2505f9bcde827427c97e684c12933712e89b2c24de74f7e731147a9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lldp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tests/vlt_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac384b363d9d08fa48c87835782dcc96fef3069e56cdbabec327e4f6eed2aa96",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77602a7e28874df30ea877773c60758a42707cdf1b83907beaac668219dd24f4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f24a87e3c9d4519e41dece5bb9b6ead5c5c05996517a1cdbe81665fd4ab2d5da",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2f597f1a6b29c4dd82db6fc25e93a661c9caf161cc985c2c4cf9793317fba2d4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "12d2266c315abb0949d991954df3a2069b04c8ebe600b5794f1e90323c319291",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "79be2281e710189652a5f6a81289a73b20d971498d7fd1fdf9b699415336765f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlt_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tests/ecmp_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f8869617be68b7ff69e659cd1d386f6ecd08b779ae05e41e9ac9648ce03388d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "936cdc70b6bdc97f5f5a3da9c54c2511e5476dae19633dad1230597817a6aaba",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "870430b4d011fb3b33683b31414dd43c25ab506e75dd8699888328c7b45ca47b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47cd3660628331fe5d34423506c9f99c85774ee9c11e4c4386ee84a4099b5cf2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c5b0d42531d4e746162b097cbf4470738bc35be6081ed6cea196537aee66600d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ecmp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tests/system_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates/system_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "36d342ffd6d3f9f2b13f830e5f408dff15fe03da2cfdd3cdf340772e9feb7d53",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "399caf267b75f67d2c4b7d09144b1e540d5892ba25fe2f3ed4d8c52f6e5abca2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1461afa655d59defdc17d72a7babef798af905ddc4c1f374bf33ef898cac7978",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "33c84177089c1e5e5b429b2a021508d1967297115f8c41a46bdfb756af09be9f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e25465fa854c72b6fa28bfd685b745696562ac31f1e41dad623228feefb39d23",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_system_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6fe2efa2aa815cd8e3c5ed5bac0a0bf3e52eecc692477321ff8fa00b6fd6a49b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c0f818940efa77c7436a40f0ac4b817dbdd823f56ce122c996aacf1ecf107b73",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/toplevel.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20edcd141fa02b787ffaf488fd5ccbbb32f5f136e0cda2421e20544914d41cfa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4753a3e2722dc38e608f5bbc92755855d4b6e94e0d1fa3f4c9dcf41d1e4d2d7f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64f0bd4b1e4debdda48f1b2231aa87e66183114bd534b02d601d215bcea6011c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a4f0825966fb1a889b2a1115592859be5d1a00bc5b40fd3891fcb798162ebd8b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/sublevel.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49bbe36eae4a02c056f03644d5b684d5999d808cd3847d6b4b9c73ed9d5dfc78",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5be8981c069e5d699a7b11f1b6a002882dc6dadae778f2d594f19ec61bab64d0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tasks/cli.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "046c1f590ecbc9ebc6891a7a266dd61ebc350fee85d1d5e1d4eed2ec013f095f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_config/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tests/lag_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dd124b87cd1413b1d35a8ade7f7d9e8243db707998f13560931fb02c5354623e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc48f14b613ede66352421b7b1c0a7a2eeef0f41c174c3de4e3e55f2b02808de",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5f7d115aa8f99c563a03357e21947e12a1040233554d57655bbc9cd7046035c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c3fad9175702557fd1f2ad02c973b55ec61c313dfdf19a4684b383aeefa5f092",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e73c08ff1c788caf128298f7483d6796194d2bed998af5849ba1920c05d1b5b5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9ab3e363710d250c9fd7ee5da92bfad54e0712ac687a83a2a71d21fea615190",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_lag_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tests/logging_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8963a603439108496f3ecfcd6eaffeaf0dd8c2c1571176acb93670bb058596a8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cd1a483a4fe6caca31a171b7b5cdd8a137469c7589ac1f6da10e9844c380c619",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a56287580c306b275be51d53e9bee0d46d348e0d6b6897bed9347be5ff2396e5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4453291b55d2727d39fa686c78b7219de9715e5628740856d87ca5bf566f87c5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6457fe1c0aa31d88098e5501597a708139f0fe656caad2f4f528782809798e4c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e1f5a117dd578a05d487d512f0bf7d79b4ad9d89c6c01fdf6ed633b9ce330126",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be824897c97b322e7e95483a7a976f30144bb5c0dd92d784763ccacc7e8e8ef5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_logging_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests/cli",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests/cli/output.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9404404f4797bca49ffca7a124c7c2d05a261b93a4e889bc519963321f747d7e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests/cli/contains.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c64d46fd01e6afdb612effe95fd2fbdb9d169fdd8ffa61bfc8f887b24f6a1c1c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests/cli/timeout.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba6c7a58f08497d49fa11e30c572763d6a4c64fbda0f4b5c7970462ef6c173ad",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests/cli/bad_operator.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57c45cd1008cda243635c2512d803fd40aff6eab6d48d0f3669d939a6aa1237d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tests/cli/invalid.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3c9ffb27c38e0c69998113c11d06b69534f184ab355b252c100089f998d8c5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tasks/cli.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ee0b5ac71e5a1585eb464148138380f40e591e6c8365462bc4d54ee40296927",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_command/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tests/acl_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70f29afff510ae18a5cd6b97ecdf75668764347523c15da6c68566ec45d3c6ec",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "64b4b3d366d4c93405bc376c8876f64e904ac0f4ee50b8252607b44d64d26d29",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "94291d22d4b2739c0a3f6722cc00c56a67851411d7f7b720901cbd11227e204d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8259bf1f41818cbd1b9d6a9ed594b9477507ddaf24a19b215bcaa95020b94339",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "66730263cd92a99847b1a2c442f018ef44cfd0f54bf32ac7eb5c3212b54a292a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45e939f49ae0073d331b3c194aee32057f2dffc9dd87c005e9893a7306c01043",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "271e90fed1f6a0d182d068ef9f1380b2954f24a562478dfcd51395421e01030f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a76bb998f3b124077f9a276cf43d4a84d9b3e53b470b871cfc9cfced9587ce90",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0890b6de50982637140d3c5ebfd589418877b7ec2365d2e3032536ff05fadac4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_acl_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tests/interface_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b89b57c196502b4db8b96eded2c15962b8262605c0b576567f9aefa2100ac59a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89a8788a11b9a1731059404bc4bffb93a9dfc9548597e93bbde4f6a17e8346ca",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b26d1cdacc6e8890a737c338365787b599fd3020b07ea78c07c2beb860818aa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "34507f8d30ed8d6627ac1bda49fe4498ebcbfc9853b7f88566945b0f81ea03a4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc05af4226f3fb99f4fa76d99fd984bfad995ce4fbbd6a9e15e428271208ef7b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25bb95a1293563e7ed1f8cf1897431c31dc20560ad1c4cbc27337db4207c7d3a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b03f32aa29954fb19a74f90669d8a77780dcb5e21353f80fd54512d01ffe0f9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b0c8cf327b741e2c8ebaa8794db305463aa1e2bd1e703688c00270782c099443",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e8d5c87d1efce5c559048284e61b672513a60c885e81fba37781a4a9317988b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_interface_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tests/aaa_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b9776c13eeec76aea6d785ac8302f71bc0a6a51f3d485fcc04ef89f58fc7830",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb6d5f9175d1291be9481a1b15573d5d58496c4c6e9d3ed11cf7aee1fd0c61f4",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0e18d086b11fe776c0fb3214d8c44b86029cc4754dfdf6d44cf5cafa65579680",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f3ee9a76bfcea7eabefde1ebc62545190df51ae77e021897a30d362d4777183c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5890a7ed5a38cb9a39822c7ce2508043c0a3ab22652f5a2aec83ac79432a4b0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f40f95d65fc3baee4fa76364b08cebcb8b3d1f8e922fe760b47d5253cea488ab",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc047a96be30108030f3f11669ef7d444f8992c1f7314ec659be6ca7127cc8cf",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_aaa_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tests/bgp_vrf.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4cdf73caceefe7198e3aa9d515cb1d782579a383a31a67ddc2639d4e24c5a897",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3bd502404820f470e59cb02843ca6f4c5dd70d474bdf99ae1f54e26b3aa26f1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3990c102131993c5bfcd3375aa0663aaa673e1e08386a12a77a7bd4a9efcf12f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba02d699d915ea12b9060d4aa1cffb6744ace4245ce9cf3da34fd3b68924f730",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ab9c96a6bc824e55300b18f849cf38a3af5d64ed782dd7468af4a43d1eea2e8f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ed045f05985fe4a03cfc5d0101eb55a7b9a865841f5b5dc14f81ff2d0aacf11",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "14c77c86c67145581da1148d524a2e417857aed04df4f40ccb2ace9360a755d3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6713d9ad92f4d351dcb45f4125ad87f6fade7928da824dccfb3bdd906da6061b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8565083e10e769d00d8b324972a505d423dabb85fa95bae0580dbc00ef395dd6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_bgp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tests/snmp_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5cc4372241eb2df4b14f5d4acca951eddce534e4506727c8a923b8da03b89b7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5083cc81b4bc1e3d57a9d40ce2b7627f183dc713f5da287509c9edc5afc3948b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "217b588ffda030b76fe4d034748ae9520c5599ab10f4a2408e7694263bf0e517",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "031484751006ce7f54eb80cdf76f10aa77caea142ab8584e8f9401cb94dded23",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2d39f9d971d23178c3aea8415b8e3758dc280d0fd6e6c2954d8b76f1d11975bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30cf1b99dcce1631f708f93b338a5a87ca52a040755529200b99df871f5494e3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_snmp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tests/vrrp_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "41fd286bf325335e0558c55d926c230fb6c88ca184396f7fbce94ac01a645700",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f9f81b4822104265fba8ddf82705413e4d358f7d3f05316851db6cde31a3129",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "810cc6fb8ef4487a767c4e3761b85a6790bab055883c8bbc96219386cfd1b181",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b57bbbda1567a16770400417eea96f1a330c5d84892ed629923b5abbcfaf37bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba784953137edd01cb1221d4090234fe9bb7877150397c4d963ed241e5495534",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9fab029b4805d461a60368e84bbcfde5aada1b7e5f9b225c7801b07759620e6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "77e81f8cee030414317096006a3fbdf931b560bb9680d496ae9292e54ad1c09d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vrrp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tests/flow_monitor_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "21c67da734056f54f0f661686d32cb70ef1e40462a23c5d1bfda041fa443b47d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32d91017598fe87187773edc5ec054b3ce252f98772f26a5e438f823c1501c69",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1637733203ace9d52bb3e84a800f97f804744559ddac33d40c4324b997826f9f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea0eedfb075fca42a7026b653a9ffee006039de86b0d689efe16aca7f55a90ae",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b44b9d483d108da304de5deba1b3bddf898ea1acf1a53368cf163dbc21044333",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "40e22288d08ed92b3282c053ead73aab881e396a6eefa37f3bb520623107f9f7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks_old",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks_old/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a1ff989bbc9d899940da328d57ca6a361aae2c0beb1a31eef3e52cb7fc9da0e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tests/users_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/templates/users_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "05d4cb24262b361ccf5503e3a851e01ea8e9e749ca4ef9b1f85098c183638ba1",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "17fea757e088e8509ec4c02316c74462fef45874ed6f7f40bf342d8dc78dbed8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "856c30f658de79f4fd5fa7c97ee33d75572ccb106dc05e546bedb2c9d23fc95d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "22f70a2838c06e331fef2663423ed2d7151dc162903da79d5a031d8daf90c71b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_users_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tests/xstp_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6ed3afc848f77dc5bf4645082e764f64e2a46c468e8375905d7df2550f49a4bb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a086d274c7307960594d74bd1343db52aaf5efc732e6df268960f31a8b8ef9a9",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3222eeb3fd8518341ef5ee5f7a487c0788bbe854c92141874826c7327a5f0d25",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "513806605be25208e46714482778b8b95e0d87224ecdef69485f92e997ef6a26",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f58eb90b9bde477bacb06bde02825150a5db4f1b5155570e99a1f5142eaf9308",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c97f697071e92385e9e6830329f2550eae4e5069bdacf55b9c52b3312de803d",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e31a4ea0f664df37e4f6d70fbf845df27f397bfb45383704d6d10ec40419287",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "453c623503a121c01efc3150a9cf85839e79d940433987b336aa22441d36f64f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a95acc0962e1c997759874f98f3e8f0c61da997449a74a596d1b64e491563ce",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8e5f3ea47b47d7c2f3f621420cf8fdb3461965ce6387ea4c08a4278a6df42a2c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc10529635b27eb3fbb63341bf5d9b601e5077d27fc1a7d9df04c22a49433d6e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_xstp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tests/vlan_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da2cf3e3d67e2d2ee660a478d4de2abb3075d346f83746773e6e644e1894c5d5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f0d54565e30202e99fab5271a25445fd575c5bcf9fa2189adcad456560456f5",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c7d018be27caad0ba12b873bea941393124eb882c779a4c2df166739d58fcd59",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc8439541f2b0a111b2261b4dc098c14fc4154534d6d3392bf3f69e39f03a837",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5843c89738aba44d1ffe89785b1500e0096638583adab6e66191cd4cb6a278c",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e02270afe3b8c27f33e289029e685b17565928ddfc38273bf7af2200c23baf36",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45e8e08ac1d4dbc419ef2fdff4a2117cb3ee6a1a83e81cca79b99a86657c011b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_vlan_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tests/route_map_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e26d9301cc00a368b8ef0ab68fd65bd0eb3876b3c435747772d50f46c21abc14",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b2503f674bd07f01c1530098917205037b6a07e87a02acaa14a7b629b36fc102",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f328017cb2eea83066a992fe86379f12acee5906a14d7f0ff163345d1e5db6c6",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3445b062cd7971c48ab9945e5de43d27b78a324797aabdc541a20b72238e0415",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2cb7300c38b7720edfed6beb987fb5545c33923b5886749e72c5413936038207",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7d9f489833fc3c45b9925e75b8d5a1fed9ca67d0a6515bb97e43b5ce6115ffed",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_route_map_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tests/prefix_list_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b6614abd2f498fbfaaaab25c150c300d9611ae766db1a97e7538fa54f7b6219",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a22f6694e88cfb27b23029f8a4afb63449526eaeae292463832df6c7d5efca2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "278fb587b324a50694295730162e5c811622a28af0391f23497d41efe5b657cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7b175087cd7b576713915177cd1c3c28d9ca679f30af7e19cd2965693a486a3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a22f6694e88cfb27b23029f8a4afb63449526eaeae292463832df6c7d5efca2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0ecb58e50c5fce555b18c49d94fe4fd3dcbd462b0336ec836c66c517b6d10c29",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_prefix_list_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/tests/cli",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/tests/cli/facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ca0c676c3247ba63fd1a736444d479bbae1132e72e25cb0914510a3febdd03b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/tasks/cli.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9ee0b5ac71e5a1585eb464148138380f40e591e6c8365462bc4d54ee40296927",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_facts/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tests/qos_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates/qos_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25606b78a5f8a7b79e8def63ae12eaff46d736320d09ca635d57def0aea7a1f3",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5bfbcb597c7477c68ee508dca873550971e8545cf57afc6120a68d9a521aa0d8",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f60945c13a7edc8db88fa6f152f97094d3ee6c310566cc15895304903b91497a",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dfce41c5684933653d6216ca71bc1ff414126188e973d475d551750c2f89c24f",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96ec3510b727a3c486a9e29f08ae120bd90bd70f55a5cacb44796ef8eea49bfa",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_qos_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb3b3006684a5349b51efc867c830bb493ca88402b1f02809f3d43464b359ceb",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tests/ntp_basic.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1fe912833efabcbf0d06e33c1e13524b0548199f483e274fc1025774826c4ebe",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0fc716243126e26a45456b49a81aac41405fe6ba2bca6c739b35b497a255bc09",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c45ca707d76cccebe4198a70c9426cd42847390cae92757aed4fd165f8570f8e",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "928ff4f33699b229a59276ceb1f24e5d22446835eb969b58cffe61c009035fc7",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de2ae09d153f14f3ee2691e64e441b045e246119169f67b60324b432b6157681",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5083cc81b4bc1e3d57a9d40ce2b7627f183dc713f5da287509c9edc5afc3948b",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "778ca7bb8974f2697e4591c0ee0d99e00cc7bba1c7e07a3b54aaffd67ab219ef",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/vars/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0b8378c4f0cb839eb2fc3456cfdf667b1a0856245c9850aee1d2f44a7c8c9d60",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/tests.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4275ab95132034757270704223dfbce8daf66193e54463bf8cb449b360b1bce2",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a2434838959246b514827d405f0f307faff45751efe16078efd3cf605076be88",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83aebf2af67b8ad3ab3c60895a86cf0da0dc36ec688aa90bf8734f09bc418919",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/testcase",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a59bc1b0d37c317d649aec90436db08b5ba30ead32560628dfe4edd9b5aa42a0",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb7a3f4d7763885ffbc2acdc6ea4fb539ed9980961d8164d68198ae3eb9213bc",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ccb3d55f2ffa2f46231b7d698d6fe3be5c1adf9d3d7e987e5f748c4edff655cd",
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/integration/targets/os10_ntp_role/defaults/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "format": 1
- },
- {
- "name": "tests/unit",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/modules/network",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/os10_module.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9349583625868b561568aa6b370e5da954e6567baae5206d649fe005b97497b",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/test_os10_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d5adc3e2b16b21d21d73cb8a9cdd0e28eaed0715414ab5d7ffd2cccad3e7bb89",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/test_os10_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6c0a64846f3954cea9a644f416bc08b0bf3fde6b097fbf2286aa4c2e4fa65519",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/test_os10_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "246875d5c4a286409f2bb523be2f987b7898ef4509fd42a0f11ed32a13274f17",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_system__display-xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f22f3fe42e53f39403ee015ef68ce551201e6ef6c1a1ae4bc963982c5be7d855",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_version__display-xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "258f08e34a8dc36e7d5a6c2457526f563172ee3a0ae2f3dd89b93d48d114a883",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_running-config",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e18498e855071e4d4a55d27f71e02cdfc2aee209a9fb1161c0fc260bae9d119c",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6dd7743eda61e2a7a4b5baea468a5bd9093790b50004f1d2977db910f65065c",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_version",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06298058714a4f3e00626004aac888b8cee0e93fc322f78b63902c2ee3daba6e",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/os10_config_config.cfg",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8cfd0377665f893639236d5f87e031a944966b24439fb243962d1c1565069e59",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c4aa2574cf8851166f5d42f53a3c9c4ce4c1c1d42f459d8e4c6f3aa5e8302493",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/show_interface__display-xml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b18e0c951436423f17735b55c4e9db9348402ffd8ec6e384fd929efe88d7386b",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/fixtures/os10_config_src.cfg",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "55739f1fb3c63abe984ffcc2c663d1c66d4c5ad90a05d48ec2bdb2b8da1b8da9",
- "format": 1
- },
- {
- "name": "tests/unit/modules/network/os10/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "tests/sanity",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "tests/sanity/requirements.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c8a4ac4bfdef88e75d6e748e35a42fb4915947dfa2b7dd788626fd829600e014",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.11.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb3e67ff678384b46656f54d2d1dac67880f21df825e084da1412582577a6924",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.10.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a36f6c078712863fd489b1811be21c6b23c3f901ad34ad259a93d08985f698a2",
- "format": 1
- },
- {
- "name": "tests/sanity/ignore-2.9.txt",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8ca93887811a83a2df421775191c0b50594520168a8ab9459a43a58b2160a01d",
- "format": 1
- },
- {
- "name": ".gitignore",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720",
- "format": 1
- },
- {
- "name": "roles",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "563a9cadb1c7ce0ecb1f62f033c3959342805be145dc20f6b1bf81c23b2ed412",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a8d9410432844f8dc85022738544ba585f5913e870a5e01942d96e0e340ed99d",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4b39e5f6d223be657b598c57f645780fc7702ffe172edcc6d54acda11a4897cf",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/templates/os10_vrf.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "efdc1703a845b53bf89516f5c00fdda272125c03826be93f04cf28ced504703e",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48333a7f4910ad871d4166baf30acc13b4ca8272df8e755cc8ed8d230859ae5e",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "20318378249dcdecd863ac6dbf6e9c974e9405508ef0044fb60a9df87bdfcd6f",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25b33f208a9b255533b417a2789bc76b7e861b2660d99766a4990c2df201c30a",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43a4548f4d185e37899418fbd9cb45007f7abd47d9e6c6bd767d5f528e0b7eb2",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_vrf/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrf/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d45c4bfac82a5c80b7b0084b90a3a9c8df9209e02c335987292be4bed9cbd071",
- "format": 1
- },
- {
- "name": "roles/os10_qos",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47508b2209d8933ede8aa881d3e42507baf52088fdcf1682dff4cb3dbacd2409",
- "format": 1
- },
- {
- "name": "roles/os10_qos/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/tests/inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f",
- "format": 1
- },
- {
- "name": "roles/os10_qos/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6d86ce4aedcadcb72a47078a74734369917f32c3c27c1b6b5e997174fb618bd3",
- "format": 1
- },
- {
- "name": "roles/os10_qos/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4012c691ed8545d2820f04721035e599d78a0521706aff7fa1d1808627d28d54",
- "format": 1
- },
- {
- "name": "roles/os10_qos/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/templates/os10_qos.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "04eb355caed6674616fb61d6bd5326a721937c54bdf77a52a90ce0bf69e5b0b5",
- "format": 1
- },
- {
- "name": "roles/os10_qos/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1af7dfa2b058753cbddd43826c703f09173f4eaa9ba04f7cb4eadcfc5ae0970f",
- "format": 1
- },
- {
- "name": "roles/os10_qos/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25451af055cbfa149bbcfb9e931bf6cb33cf3014a2a94e5d7c52cd19f8ba822a",
- "format": 1
- },
- {
- "name": "roles/os10_qos/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "769984874a273a9eb32760bb64b4cb5da59f4fbe482f24dff3d9f4cb3a6f9a93",
- "format": 1
- },
- {
- "name": "roles/os10_qos/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f8741408dd745230550168b627a77abfbf367803b5a0e73edf56a337e2a11477",
- "format": 1
- },
- {
- "name": "roles/os10_qos/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_qos/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_qos/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba51d4193d632320fda2caf2c12cd37c99dd3c2d2b7eb20b802e65caa9c87efa",
- "format": 1
- },
- {
- "name": "roles/os10_dns",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d7a878dd74478a8e5a90b8f365f0d158ba08b1044984d6ad9d375314cb25f08",
- "format": 1
- },
- {
- "name": "roles/os10_dns/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "94f9aebb9791d2731255777fbefe23c60fb22149d6cb6943a4f6929baf4d9689",
- "format": 1
- },
- {
- "name": "roles/os10_dns/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_dns/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "43934ca8dcf90dbae738eee82b489df8e54d4a897f4f0e3aa7b89512cac2be6f",
- "format": 1
- },
- {
- "name": "roles/os10_dns/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/templates/os10_dns.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4888c3203fcf4d25339f7d1776d13cc143c516e655af0df26e5d139839d3bb09",
- "format": 1
- },
- {
- "name": "roles/os10_dns/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5adc81357a3738e7a94b9e4a6ecd2c784219131ec203bb1bcd305fb472a03ff",
- "format": 1
- },
- {
- "name": "roles/os10_dns/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da15b3c7b3ca860fa29c2b18580dad7256546e7fff3d4f60a213050ef38efbd0",
- "format": 1
- },
- {
- "name": "roles/os10_dns/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "04ee6ff105792050e1812070d4cef75c1e18612e5eab0c7847f1c249598efcc0",
- "format": 1
- },
- {
- "name": "roles/os10_dns/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "864243055c727f0f5935c1f13bb6f44783890d6762922ea90a88e9eb3ad4a384",
- "format": 1
- },
- {
- "name": "roles/os10_dns/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_dns/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_dns/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e713093e28077901633582e5023d6f6336be1f394f0b07bdfd484d4539077fdf",
- "format": 1
- },
- {
- "name": "roles/os10_interface",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aeddd44f2c7f6d17211606d02555416c6bb3f3319bbff45ea634c665097715fa",
- "format": 1
- },
- {
- "name": "roles/os10_interface/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_interface/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "13faa3d363b73b65da07280e2d21c0087f8eca07c15491fdacd09c38f2616321",
- "format": 1
- },
- {
- "name": "roles/os10_interface/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e8c70ae112f0b6eedd6f7df1ada479e1a35ba8d1b584270dfd2bde6e207fe38",
- "format": 1
- },
- {
- "name": "roles/os10_interface/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/templates/os10_interface.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0a9feed1029f79dadfa6eb1cc9bcbb39c57bc97190dc82ad61e74ef14ab598ae",
- "format": 1
- },
- {
- "name": "roles/os10_interface/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7ac54c9ccb6409b6e8ffd41ffb96db8384a1ff03addf9e25a34e3565fb1fc6d",
- "format": 1
- },
- {
- "name": "roles/os10_interface/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ecfaae6853318db03899ad122d13731cdb4058ee4e77bb802d64ac0c6ea20824",
- "format": 1
- },
- {
- "name": "roles/os10_interface/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e5b8ea5bb8a09389a1eadc1eca701b5e9b0f073f5d748b6f28b4cf1bcf0869c",
- "format": 1
- },
- {
- "name": "roles/os10_interface/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1db1c2b5ac629c9ea3cc9f1c0fc8d56fa230cc1c8a85a7beb975f01a210aee54",
- "format": 1
- },
- {
- "name": "roles/os10_interface/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_interface/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_interface/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "75d2a975b644cd7fef6108087c834dd4c144810a9945876f9b979d87fcdb3639",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b53872392d389fff12d93f0f89a85893c3a4dff81b7a29cc40072ad487734183",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c5efe4259fe1e8fa9a662aca40abf418b0f48eeeaedf7c59eddd06974f5b1179",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e6e5429b918bcd489a5a07c21f480989d5eb72304eba5f524552b0a899b45fde",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/templates/os10_prefix_list.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7166b7aa4a5eadf5d7036b25ae7e6718b109f909cb768acb963ef6c5346a46bc",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "683ac64d2b9636f2e1469e8d738b88749d6896649b136c62d3c6321512c5a887",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "476f80dc41f83236cc4af3af174451d89d4588cc42ecc1a4e2a3d5b43f63bb3b",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea5b0afaf9945f59836bedcc4263108995086fd6962443f29658bd8245996c7f",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e9f0a7fc55900b2dd1b724ef42637c401bab31748414f4db0c99c5bc73a1274e",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/meta/.galaxy_install_info",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fff6a8db965bb26187afecc326a56281f74bcde1d5759bedf9874c72aba696fc",
- "format": 1
- },
- {
- "name": "roles/os10_prefix_list/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a592727b6802b1e47956477d630a6d0f02ac3f38e9ff42e67ff4b00ce553abca",
- "format": 1
- },
- {
- "name": "roles/os10_system",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6f9fa3aebc2738e4c3f5f3a49fedfa92575553a0bf93a101a76967bc63954bb",
- "format": 1
- },
- {
- "name": "roles/os10_system/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_system/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c174145b3348883c81c119c7e941ad4da952b0d02001f04b80a942108939867",
- "format": 1
- },
- {
- "name": "roles/os10_system/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "520ed65df1e5f3526bba7d6021312a02df0c259019f0667209d142c2cf43ce67",
- "format": 1
- },
- {
- "name": "roles/os10_system/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/templates/os10_system.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "57e1e32fffef8663700e790e71277ad0e3b8e263f10d21965bcff7115e56898d",
- "format": 1
- },
- {
- "name": "roles/os10_system/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "238dcdae07f217c1e64583262f8fe18a6de757c5959172b49784e4183ac44516",
- "format": 1
- },
- {
- "name": "roles/os10_system/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dff73f186ab1079802c73fef50e8e7d533fcf7aa8517cebb39425d6e2b8a18e4",
- "format": 1
- },
- {
- "name": "roles/os10_system/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6e1b14922fe9bf4d9ac307185d8dff8eac7d98d2e33c6237a2b08558ced95061",
- "format": 1
- },
- {
- "name": "roles/os10_system/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "894b1ec35a60fea55d969c548964de6bb89a50cffbb1ac42cbed481768c4f032",
- "format": 1
- },
- {
- "name": "roles/os10_system/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_system/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_system/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e43a6cce922b7e77005b4331d27fe3cd205617abe34dcbeb34aaf7f0066e1c89",
- "format": 1
- },
- {
- "name": "roles/os10_template",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_template/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d311ae7470c42f2f31a7d688121e3ba8b240afe5fa917d4ab2b4fe99338055e",
- "format": 1
- },
- {
- "name": "roles/os10_template/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_template/tests/group_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_template/tests/group_vars/all",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b1e93949330f1a344c787df97038cfa6d122285f204fb3a1b6385603c2d248c",
- "format": 1
- },
- {
- "name": "roles/os10_template/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1c21302855000db5fd5e9f1573fdc03b97a7cbe3392efe3e0013da373dce3596",
- "format": 1
- },
- {
- "name": "roles/os10_template/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b134978967ac2f8ef1951c1246c196caaba06421ec57bccadb8498514e06c26",
- "format": 1
- },
- {
- "name": "roles/os10_template/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0e703d610802949e54787230fc461f04ad2a8af5e9e65533c6f53d8ffa19c345",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_ip_bgp_summary.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f8b1427d59f64f9f23c9ef60b44e3adc2ad5cdb6af65d169ee3542e3df7a226",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_port-channel_summary.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4be2ecbcaeaf7c87edc4dc1c317b50bc90a11e42f3b06178bd58cf8370f15205",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "495588aea4ab674ccef67c158e0688d212af552ae2be73c3133a88cbe138ba1f",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_lldp_neighbors.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48f9d390a818cb19307f186e715ca09ca0c477666c45e4f95bdc9f00c32d01d7",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_ip_vrf.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bb3661256bf365afd209f1b3cfe63acff96ed605870795452166f0d50359dd3e",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_vlan.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a7b08a9326f7dd8d9a0512cae918f1903a44a55f24e2cf62b8478316fbbc8254",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fb8d12baa8d65787ec6bfe263d8cd67b2b7f8c65332ad49d0ada87e4e7ee1792",
- "format": 1
- },
- {
- "name": "roles/os10_template/templates/os10_show_ip_interface_brief.template",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "09c504a2eb0e35fe79e85c2d5b9b417b1c5545583a122bd353563fcac6631fc8",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "943d291b756e2ac2a075a2004a2ddb8caa148f704a632ef75634818e179a7fc6",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_lldp_neighbors.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "65b5436254aaf10a5d77b354cc322a5405cc4c44d7a16fa39aa670b0fabe6afa",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_ip_bgp_summary.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b51a689d0b09d992ad8b492a754d5e7d6af449f9e801c241738362b211438753",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1cfbc4e32774b6e1303a25e1a3a56b9eee8bc54daa50bc511d12af3c54985b11",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_ip_vrf.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "72b260949bcfc2535794d50deec940eb04f38f8209a7d965f779d81e582c3ade",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_vlan.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9118d65024264927e657487606ac92a4eaac1a29d14286886f40194053d6d486",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/textfsm.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a76b1f05772eafab2d95e4bc644a314d97a6a727d7ccb85cce45372081e3c98f",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_ip_interface_brief.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3138ce2648d4507a62425dbb76d035083c5bb5310799e619f394b3cc19ff7676",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a25dd113f8bef549d87f0369acf0b80f1dd9dbcb396b1300237f90ba6dcb27fb",
- "format": 1
- },
- {
- "name": "roles/os10_template/tasks/show_port-channel_summary.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6fc9a394046acaec5d9819cb11311d1d39b11c699313e734748fc802a108b4e4",
- "format": 1
- },
- {
- "name": "roles/os10_template/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_template/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_template/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be70f3f8008d9eb8f8f6012880e576ab29edd59e1c92698f93109d88d2ecff93",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d07249dc4347118c841ed0740b1c8eea0351d593f943fdb1922306173842f7e",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2754f0e1d88508b9378acc6c770239fe810d7149765ccde298e9ec2d15359f2e",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8fd54e1550ea6b41b6272e12f8584779ce043ac0e6a4ef2ee44de2136855bbb6",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/host_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/host_vars/site1-spine2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/host_vars/site2-spine1",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/host_vars/site2-spine2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tests/host_vars/site1-spine1",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ed1347840622bc1fd1304558157877cfe22064f0e7b697390f14eebf729e002",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9a710654050c8ec0ff24355c999cc7d6b472d30059eafacba139853c5e97b6d",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_fabric_summary/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "72f2a96b9a15847e7e948bae69d8318983d42a7e1e14718484d5f060042206cd",
- "format": 1
- },
- {
- "name": "roles/os10_bfd",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9155aa75d45da527a150711f44a5d46195a07761fdc2326da9ce389c4f6bac6",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9209c2609e24006f7a95935b3a9c342d776dbe31f537a5f445b9f39c5a7ba8b0",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bae4876627be0da4ae45c11f4e3d14686c98c15ad6ff7c65c95f22a93cbc3998",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/templates/os10_bfd.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8686cc925b9c907512f582c0af0a5e5daff0ea1a9f3818d7f210c2cc3d8b3bc0",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "814fd791e0988bf3397ef69f8f8bd4bf97910fa390f6e9697c2bd8b5cd326457",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37924af5558f6ca24c1399f212fd0c7de703cbbbe0814cb253cebf09ec03b43c",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dbac54437ce53f90811f0472a8e9f242e17c5cab85fcad5faf818b5e3dc61f8e",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9893b6a378c9bab4d39407e7b9ef878ae7d25ae68134bbc6f4156ed49c682d2c",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_bfd/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bfd/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7f535c91de0a7152f3e8b34e50696e9c5c17b3e0f303e5a0500a7dcf1ad07f1b",
- "format": 1
- },
- {
- "name": "roles/os10_bgp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c0330969b4488def090626fd3ca345b0d5fd7d13024b8f7bc5d099a4d61bf5f",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11381f54e8ad097f39125218c22a2289cb177f2633a1fd4fe60d877d14075d5a",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "829f2f19e06879ef781c9f42fbdac798699b92b0aa4f1fa03c20230707ae63ea",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/templates/os10_bgp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "83f1f39f35731290e0c16f9fb4b5f40effc06a6c7148b08d7a18041baa3bc96e",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2ce824753ae5e51d5096c4d0330d02c7ba7ee2a4d1e9950557457ede4d51ea79",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "554908747a3a38554d8d546ac6df4d952dff072db5e5a6887a0e51d0fe92ffee",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2410d1e75d200951a1a323149e6a4b95cfc6fe198a3f068a20ca9049c353bb0",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "18e3284fe021e3ccd315d1ee37fb1e50fff6db3dfb39600d22dd13da0339cad3",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_bgp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_bgp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b2e840c2981c38c49f6c8c729312b412b66b6ca2622bc94e38540d446ccc977",
- "format": 1
- },
- {
- "name": "roles/os10_vlan",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba6599c1a2ac34d0361638276f081621d8b31823c3fa4d01fe952a469c3a5692",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "90d75c30cf368c94b15f0e67f5dd42bf3be7079727ce68799344fd7fc8f09fed",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a7e6522987551969174daba5973aef288aa6ed91b59855a3406bf2e31ffd6632",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/templates/os10_vlan.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "69a1923fe2d15d97b33d28f5c9dbbe230fc3cf3af741cd98a3ca40410f49c6db",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f6464d917fb54d60e7c03c4f437e12795452246458aa27ea7f674afcdf744c2c",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e2317f1dad320074b0d71bd2416faefc5007ba40dcd99c196ee79e23cd5ae6c",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e05831587de755162befd688ae927a8b92504f7d7c4e5cde6ce67cf35f7f980",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe8c107015a54e155ce06dace6919d89837d025d667cee3dc293ac602b7e9b46",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_vlan/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlan/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8970684379fb0f71b0752e70e3b09791dde1e20117403ff98a06333fce205b3a",
- "format": 1
- },
- {
- "name": "roles/os10_lag",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "38b04b2c00265af0b32b43f8e0057565e818e088401545600b1476ab6f090fb8",
- "format": 1
- },
- {
- "name": "roles/os10_lag/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_lag/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fdd9b082fe66bcf825e22a7a2bdb09b5ee000bfca5631a68f97f34bf0dc5e5a9",
- "format": 1
- },
- {
- "name": "roles/os10_lag/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42e8ff164312c486ba66a307d8d43c11940ab66a73fda890a316dad2fb49edf9",
- "format": 1
- },
- {
- "name": "roles/os10_lag/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/templates/os10_lag.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "80d8472ab536a0f16455a593f63ebda55772ee6c9c55ba0f0086f90b79627949",
- "format": 1
- },
- {
- "name": "roles/os10_lag/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "06492dafca5b9e2d876862aa59bed0855b2a42016f1cf0199846e78d308c90d0",
- "format": 1
- },
- {
- "name": "roles/os10_lag/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6081a52e02ebdba4b6d50bcf7b939ed072437e0cdbdbf26dbe806e824a03a79",
- "format": 1
- },
- {
- "name": "roles/os10_lag/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8727a9d0a9b2dfc2123c386c937fa24f137c6f0bad31a0278df0af3eae4b9351",
- "format": 1
- },
- {
- "name": "roles/os10_lag/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f80595af9a680584548d1c63a9ec37db0e5d9dc9330824a77640170772ce43c",
- "format": 1
- },
- {
- "name": "roles/os10_lag/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_lag/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lag/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "596b282692b0c8b54557287c0cac0a30077a34ee1a5f3443cf4e84397750945a",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48f5c4db66fc2f0b993ee1f4fbd40997a402a2dc43760becb694bee7af60e02e",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "dc8b26d113e94f68344b0a73022ec1b6642bee0fdabbd049400860da31d9b36d",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f5263eac7a34b93e945d4a7703a9fe87f41654453f3b2929583c254c3565967b",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/templates/os10_ecmp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e8acfc826d6090844b4928084c3710b27fac5075987d2a7647a1bc8210794e3",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ad97aa8409e0c4cf2cc23d2e313e208abf6b21e5eadb23d64f424f64b8307ff0",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "763d31c712a86816eff6be981731088c42da2a5b0852f3f1fe57da56b57d7dc8",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5dac42611a9fa57fa6a0a83cd60637d602055c4ae194635d550fb76f7ebacbb2",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c0321e026416b7fd9c2f88159362e8d1f2b65c41beedb3f168fb92ca45f1b2d0",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ecmp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e9c053d2bf170109e3fa13aea5ffb03a9db7441584555c8e2c1459027838dc3",
- "format": 1
- },
- {
- "name": "roles/os10_snmp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b1ebf6b632207048f02fb33090cd122da7b91f64a0dda9e9ca8d28303dc972c",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e5d59a283ccc11966a31af2db9d3183d83b78266a28e26b15f976b04c66c3a38",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d56b04e4466345436a0cc5a245f7f275ff94faf68fcdad220014a6f92aaffb63",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/templates/os10_snmp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "562362465cd1503d084678d505e51c99c039399087f9f5933c9fb1dff5b87a0b",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d2ec9577c90e062a77e6462626fbfb81baa3305f5f8626c7361bb8216b02f47c",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9c920ee69d360a4c5a573e78c544024df542232ccea968027262ad7158c641ba",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c5eb25123570c203b04de08fc2f50a67e1f447b0f91de636915d22948e194732",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "49b9ff32e4872ec4027678a862f12bd9f6a78180ba6ad2b5255e60c9521d5beb",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_snmp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_snmp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6f3be0e02ccdca4f08555041789a3a607c9fbfefcca1de808ff929d9388bdf10",
- "format": 1
- },
- {
- "name": "roles/os10_aaa",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7874fbb6486c3ce0f7e13cef3426f5a9941b0017674cc085cef979d511bb31ff",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "66751106dbf88e567300e2b0c835f27fdc1015a2f9da04bb1f22a0c86f4aa41e",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ff5f1b96c5b4ec4a3f9e611de4e239780e7f88b93293fca09ad027129957b22c",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/templates/os10_aaa.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "88e02f764a50d9d5131f21745d8b272717d8036f24b5aba516cee99d60b8138d",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "65d59ffb82e5ebc61e867465058e9d6ae1669f9650498d5d27723dd3b4ae6a62",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e673f73970ee3ae369f22daa57f0a98574c0068cb8e6c3523dfe633e8be4dd8",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "89cbb0d6f26de9476b2a98ac433755f227dbd7cf1d4458b26f402faafcbb3044",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "873deeb7c99414c615310861f37813e789beb42962a6967c74256cf3a33f15d0",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_aaa/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_aaa/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0370bc97b76c060488b25ad1265ffcad188037ceac6825a22554d9a65cafbb6a",
- "format": 1
- },
- {
- "name": "roles/os10_logging",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "67358b37eda847a70eca21ede16c1f078989a6ef26bdcb167828592620c06a01",
- "format": 1
- },
- {
- "name": "roles/os10_logging/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "59b2a3a4d5a95dd0991c773d92efb07e68bdd61da98b63dff97cc50ee751dd6b",
- "format": 1
- },
- {
- "name": "roles/os10_logging/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4959bb39ef5381cb61c508070e175d9a29649865be616e2557a35591d422a578",
- "format": 1
- },
- {
- "name": "roles/os10_logging/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9600c0f56aab9908ce0299c93e83503a959564caaf3479c0f28c08323061f511",
- "format": 1
- },
- {
- "name": "roles/os10_logging/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/templates/os10_logging.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a49077c07327ca5ce6fe16c4c507b807089af3503aeceb980c7e35ba3e18a18",
- "format": 1
- },
- {
- "name": "roles/os10_logging/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cf4eba7564bf3cf20b2a11f28b0829615f2a1b8853ef5f10432e5e9b50b22fe0",
- "format": 1
- },
- {
- "name": "roles/os10_logging/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b685be74eefc0378442ad4dcd4be13eb1d1549f360ef93d05e2dcd642e6a7fa1",
- "format": 1
- },
- {
- "name": "roles/os10_logging/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ac88e43708cc564df91c1e8a2ef9f0ee476f891e9a9999cef9896e92bccf3827",
- "format": 1
- },
- {
- "name": "roles/os10_logging/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5dc2d0ca8bead76638828a9b3b224a4263c4e5d0391990ac876a00d6da638ec2",
- "format": 1
- },
- {
- "name": "roles/os10_logging/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_logging/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_logging/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "935018e6dd62d4f57fc7e8be1320db76ef26bbb3ab5008bc861c44e50703dc41",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b7b814ce1af3563e014db3e85d1395bc767682a69f33a6504009cd604d65af5",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/tests/inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "16f179b8972b382463445d13f20888fcb5f5980e8b75fea8510d358e7d31355b",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/tests/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/templates/leaf1.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8eb520d7a9795fe75d13de190e17201d77da82d4f04674e185dd7dfd66ff0d8",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8f7b3839a275bb1dda6cc658b7818f060276dbb1297d7ea5927fd33201ef64f",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "24b3fb3075b12ef503f3c3df760955b01f8fd64d175ca9435c4cff4fc542421d",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "652608725cb8231e6e56806713e1cd1613fba1240f53702b9bddf31a1aaf1155",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ee2c451634d02a007d669a1ffaafc9715f4013fcf34efb55c391f1aa76d83b41",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_copy_config/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "17a4f025b9e7eda4c229c8b319b2f5e342d00d2314b53fae442b4773ba83f844",
- "format": 1
- },
- {
- "name": "roles/os10_uplink",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "af218f5633d9ad8970bc83ea4d0112996ea810232a74d35813d31a378d183357",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "174f8113fac66d0c5cb0ea967a222087bb87cbdd53d157ba82224194ed89ebf7",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "606b5622997bdf5ac5a149016a56a7d09d2e1a06ffac37650e683898958e186b",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/templates/os10_uplink.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6a5ef1d9820fe2a91e4bd4b4ae33b436f9e920a5c2354cc38c1c7514cdce7e7f",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c18562de4e1361b781112f1d25b7085081988ed1c4ef81d4bfc3f7e0de97879",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "70bb18eb36b126dd6d0e08692b8ac60c823da65b388684241d263e966a521adf",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "28fa2ba1a0d18a45d80a2fe56bef42892b653a49c1a9e273a32c66db4b259b10",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a08ced67e2d18f14c3f98a7861e7c1e90676cb98403ff9a194b18841b288632",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_uplink/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_uplink/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e48425fe0a4c069ee2bcd1b326515d3d883156c8c6427ec3acc9145cd194aee0",
- "format": 1
- },
- {
- "name": "roles/os10_acl",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4902c3d89e393cc4678bbe45f3201578a785441d2e57b71798a4b9169a1035ec",
- "format": 1
- },
- {
- "name": "roles/os10_acl/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_acl/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15d359c83967d4c8065ec202d71f08aa9cfb9cb2ea7f8515ab938298cc7176ea",
- "format": 1
- },
- {
- "name": "roles/os10_acl/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d1499e88988365d216eea4383d3771a7da6c7808491aedd366a64a2c26b5bd2",
- "format": 1
- },
- {
- "name": "roles/os10_acl/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/templates/os10_acl.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "82f512798eafa8b85f27f3caeded8da7e66d80c228d9694c9fcbdbb2056b4e87",
- "format": 1
- },
- {
- "name": "roles/os10_acl/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "32411f8c331dd03641c3843c4e633b9a186917210b3512149a6d67d8bf0ca3ee",
- "format": 1
- },
- {
- "name": "roles/os10_acl/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7aa31675d890ccb8f2cba267762146a856aa260fa76793226ad6d1018d767706",
- "format": 1
- },
- {
- "name": "roles/os10_acl/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "62b435c90d0cd1f418a44a14ea7ba9a0e3d6cce0237c404855a09899abbdde78",
- "format": 1
- },
- {
- "name": "roles/os10_acl/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "600f3b1f85b778b3a28616390bf94bb4cdb87fbf6bd7372680313c6465205450",
- "format": 1
- },
- {
- "name": "roles/os10_acl/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_acl/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_acl/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "21cc5b51e05746b675b5a608ae85f7995013476dec3e92c2db809665a4aa5441",
- "format": 1
- },
- {
- "name": "roles/os10_route_map",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6818269013ecdd151788b3dd06bba9bc06123b904efc39f89c9738c0315e14c2",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e94645bfd865f3f6ea64b8444a2a6123d65d6ffe7822eff765531ddf59fe5385",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/tests/inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8f11741f11a0f817750f2fcf95e5be2131344ee420991fedc314cea9a1e8a044",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/templates/os10_route_map.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "44398b74953c02cc8b8d52f41c6a6d8a00ef3aa5e3a6a4f0944f3eb489570678",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d5202f1d5be26d843f185ad282b2a9c79219f29c3e25bb9142e1c3b6aab7c0d3",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9b5223df0608509782741a0b1b1d620489bfee39a22271879a33be226d563e75",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc5a3a60138702f492afe0615ad779ada4907a0d291ca6f9cd0db99727d5a835",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "feecce89b8551e332335a6ab8f776b01bbc9a28a1646402ea4df83a78a2e2986",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_route_map/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_route_map/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d27725f7db1d7be5e903da8556375be380be2e292991c95d938dcf4de4dc15fb",
- "format": 1
- },
- {
- "name": "roles/os10_vlt",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7edcaa0e40c1497e1cec3310108fd578cab638f4d170b06e1c7c94f279e85943",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d0a27dd63de43769b735d5b49bb2c38ff6ceee6044829802eedb26e876c6764d",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f7c495a659acb9a65c70f99751b8c8a93fff3634bab273a9f5fe4a173372884b",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/templates/os10_vlt.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "25e140862387419273a5c85f3c2c28ef342b13d22554027854e6694ab7f676db",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c826942d39bbbe07afa7ce5ec6b25dd1bee3c45973217a9a8f914c61bc15dd1d",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0c99502dbf1431a5fa3b9f6c33bd479eca09dc7c718747b50745904118afff4b",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7084e3f87a0be344172701e0fa030f9739ebb915b6c29e6782044f64b7742348",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "637efe3f2b6ae54f972d9d684751267071fef192150794156b1cf900eb9dd4ba",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_vlt/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vlt/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c086a03ccf6da3e70cb9405ba5fe964a211bcbfc2ebd711ec88fa1de71d76339",
- "format": 1
- },
- {
- "name": "roles/os10_lldp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a7b3ecc8fb44b6654f82f849ca81d35f554aae9c9d1a668d795b0b697bd1181",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "67910b8f7cdd4a708e661f8e9a91ef97535ee5bd2361770a356b87a2a441af36",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "84a67763842eb34ce0679295153beed4257949d0a4a2b3591eea13e1b640be41",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/templates/os10_lldp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1dd15229ab1883772f6bd6e65a9fa564d88b54d3bfd979c937b11ef9e3b22190",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c93bc619c8c0fecf9be10e8738df37455d98ce6ea8f99fb5b4866be24075f5b0",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0555b321eefc8953cf1654c41798524e402ad24bfe4e55a3ae5f93a6cca95889",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a914d650108a7d69cfba2c389021498b44a53e9c963e0d4e2e4f8863f5477a3c",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c931d17e150116110599ed116b9e981dcdeaa25c836b8efa7f45f084b770fecb",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_lldp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_lldp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "096051fa88a0f1150e45bee7378a41b58710ee89aec049e7368282325db779b1",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a7753705b354037b6b52c76d086328a60cf18b120f54d509e01a9c7ea4f2431",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "37bcf358ae65745173b3e91dd31f63b92e3180bd680546149c50a24ccfefdfb9",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0e0c9f92417513f3c21cb535c11e39a91156a37b676e9ae02639b54f5bcb9d3b",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/templates/os10_vrrp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ea569e3efbe5591cee54852a1c4e93bdaa54e9affd4126a2c37dcdd228584e48",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f89da38e4c4c6dc6f4fdc65d71e0c326b53e53ab28a342e6d0c4838ecb5a0e14",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1ab26d1ff9cb52daa1da7d36ab72fbd9031c0cee28a4a9f2cd402e3ac61ff9f3",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c31a893d2de4391bab63960327186e0030eb3b50d0d454a202b9ad32bf8c1605",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7c3d38e722d0113e681e5bf451fdbf787b772038dab654419eab455874e6490",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vrrp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bc6092e9bba6056bce1edf31b3b1d8ddd50562b4595a05b4714364a289fc3f54",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de7a336064a9cc726f30e55aa0c8f93426a7a30976ec9f8993268ad433c815d1",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/group_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/group_vars/all",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9783ff2c833041285b826cdd8818ff86e2dd4a036936057c2b786f90c5d94cfd",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "495543a435f037536ce83135c1c057b29dad18496dcec544d3d0d575412f8e57",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "42f12cce1d2096bd6704d0bded92f0f424f20f6f8fb6bfd43f512ea167a3d535",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/host_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/host_vars/site1-spine2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/host_vars/site2-spine1",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/host_vars/site2-spine2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tests/host_vars/site1-spine1",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "be466b34cc9022a3ac02ae77d8b801efc3609a7cb04f1883a4f3ff4f4469fb1f",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tasks/wiring_validation.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30189cda8538ba54cce476840c54dbd7b65bae20b81fbe38b5cdd490d267121a",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tasks/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c93acda8436ce1b6b8ae69b6e0856172c1d4442c95ad26510f6b091511463030",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tasks/mtu_validation.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e88ffb1bdbc1d5111fd4dbcaef9d05122d130f3d92686bc1476cfe8b92d6ed88",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tasks/bgp_validation.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3f231237fccf6b555cb5d603fa5ecb9a2520046052102546d0a49745c322516f",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/tasks/vlt_validation.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fc2caadd4b8c67806553d9a09767879e2705cb87b14561821761170b04aae114",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_network_validation/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "15dd45f3c6b3ef0b9a18068409f2fa06e7658ffa36fc536c61272f60cee19e6a",
- "format": 1
- },
- {
- "name": "roles/os10_xstp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1aa9f3f4225408e0224d825d03529cbf75a5aa95cdb9e2d48fff649122a4d488",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "11dc4d3a7f27c9457cb1926ffb4b6941a5698b00693270702b83007f9e6be475",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cbb0ce314389f1a1c62bd36eed69284476da8bdc35ca9f166e524da51a81bab1",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/templates/os10_xstp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "91106569ca0f82c69b3987666f2c0988e065ba8c72e6619f1eea38024d7bb459",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d104c9a45496afdc6fc6b1b6d0598bc7ba3fa9490bbbb0729aea0721ad6d9ed1",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93faab9ab389ce95078149d446f7393e2f51b15ae26e4bc9e375b18e8f785ae3",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de9efdcd059596d2c0022a19a60bda4e148339b8903195327c22898b690851bf",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3bdd9427e1adf15da19c43299ee9e0a80cb614aa975bda7d26c45d87f3853bfc",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_xstp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_xstp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a5ac22166270a0ff74c299d9fc58db9a5775da1c050b2750e21db38e7688f573",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9fc5caa466c30af8e20e94c890166472ad45e241e5a6c5e5cd8fb8e91789ab5",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/tests/inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6acee5a1485b2936d50918c4eeeaf19e935b16a71eb313966613d5f7474af1d6",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/tests/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d5cc592c390f2fdb1b4d2522bd9d3ea1ef44c70f3c93b8f21dcccda6dacf8886",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3e87e5811ae377b12e4ab55e05748fd9d633d451d50ddb27b944d35b3391030f",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fae119a8a8b829c96158bb18b129995bbd81bb1f4ce2816a56ebab6e5046dd7b",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f0fa971f5e8f6afc6354c45a243da50223bc28a3bd23544da6029d0ccf379b0",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f4a1807cbb9bfb1bf7d5236ead8df960f56b308a96c7d427bf9c751f2d59189d",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_image_upgrade/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "600ddfdb6ea8958552d6f3c4883ff14e03a03c5baa74fed28c824c69853a630a",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3215dc7ae719682ff7edbfe9ee0dfcabf348cd9a47d59303abb23fa15ac48db",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "93ecaaa721e06f9dfe6714065578aab69faf978ec3d0a052a65dd42bbe45375d",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f488468fc00c1ebbcd411bccfb79c4c3f969cb4798f545ff04bed1ff9f060295",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/templates/os10_vxlan.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b8c620c88dad78855c1d75470657add6b6945ae5033e7e365f6ca61eabf85cfe",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7b18d0a5d4f4eb08afb13a90bef216e62f14084a7c15c6fd57b2571d2202c11c",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "86f729a523490d39e8de987c937683b579d9fc5e2c02aad682fd5144d8da243f",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2b05d46988f0fe03752627f491afbfaf43beb317b498dbfcc186fd51cf9f116b",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "04323331ccbbbd6f0ed18972b31c11feef04c2fc82f73c276cf49b4c52bba17d",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_vxlan/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "08bd15c1af2e5b93adb0c287cd8dbfb89a99c104330532596905cefd633378a7",
- "format": 1
- },
- {
- "name": "roles/os10_users",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdd33c3a3d4970f86670be7d55ae6e36afe1940c2bede9f7a3c3d64f16fb84ff",
- "format": 1
- },
- {
- "name": "roles/os10_users/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_users/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3cad1a913d91373fb343a45090749edccff3e614d29fc20b2be5dd62f1416839",
- "format": 1
- },
- {
- "name": "roles/os10_users/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45cb5340cf0daeaa88ce3ef96d6b2f729a9587614b4f2da6d018c01c7007ed05",
- "format": 1
- },
- {
- "name": "roles/os10_users/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/templates/os10_users.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2a49fd0f145f2397a5320999f4fd2653d123355bd09b7f7f9fc7b08491530d7f",
- "format": 1
- },
- {
- "name": "roles/os10_users/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "54960fc84fea4a3ca7b680da71f0caf5673ed6cbfe2848b6896e1e09d80daf78",
- "format": 1
- },
- {
- "name": "roles/os10_users/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5143d119661a1b293a965db175a3e495924ba56ae668293c393f79803987a9ad",
- "format": 1
- },
- {
- "name": "roles/os10_users/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d7ab0972fa0d373e24e87fd2a37bf2d364a35d608c968a8ba3855775c96f7025",
- "format": 1
- },
- {
- "name": "roles/os10_users/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e757c050d438aa41fd3489ca030b2505af98aa7fa58efcb2af15af9e49f86679",
- "format": 1
- },
- {
- "name": "roles/os10_users/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_users/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_users/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "51e8bbd65c9e3c0392c9cb365413149869fae597d2ed0d2d1f12b87e39f6ed91",
- "format": 1
- },
- {
- "name": "roles/os10_ntp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2cbcf9c9aa3c627804600ca4c6c09b75ab55111fac01ecbc3b108690f0f9d8e",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/tests/inventory",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/tests/test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a8f0e26fd497d4af28b1f97246262a24e8e890e0b37c6e5045a7e59a75b1027",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bca5f97127de4c4fc5386d8a971e8ce5942b6e1e615eb074a85f84ae3922e2fa",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/templates/os10_ntp.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7b01e6bab24ab07001d0041aec39b960adb7f5ae3380576cbdb4d8c5ead17615",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/templates/os10_ntp_vrf.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "02bacaed5bb5d10ec65bf0ea092aa908d390c2aed8f33965f3ba5aa534fae551",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b6968045572c03408c0ae667b3e3d4bd618d7e44ef3569dc6afbb2f64c8aed3b",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5e30177d599fcae1cd2a5ae7dbe5edb7ae61b54fe8abc8a4a291f7cb2e99cb04",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7e8c0b69d2ba07f1e7036c37c35e9450cb1a1713f5bf06e67ae55049a47f498e",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba28cab4f617eea38d6a9572d6600b035bf3b0aa6a9ea1f16b89c3bca0bd5fef",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_ntp/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_ntp/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "119e95d884e66b8699ce7cfb245ef9a295d0ad74f611e91bde10b8de1bf6b6c6",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30e96ae0283581b1dc3af2c83de6d1f3ef986c352b0f1aa30b3bcd84f3e0e82f",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/tests/main.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f9521ae72a98e24c998e5ca8b57b37b6387fb46ea98bce475c300f7a998f5713",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e587cf6a0634eb27ebaa0d2b09186c2ab9c218b8691d5343d11a2aff7cb4e806",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f522718931e7b47c3e6d7f02a4c31981aa93f71729415a5a9226b098211fd167",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/templates/os10_flow_monitor.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ce701f3f7435a76af19d25fb2c0aef0629baea31f100fca4ffb2d715fa330cdc",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "da543209c815eaf6a292b7e3ba6b251d12218c7ac85973eeb5f347fc23f38d82",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c69bd67ade82f83cb462931176842e1c15d45d254ed2867937539fde9240056",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5c4fb0eb22d434434842ef2e9f7a66cd480485a13ddedb7cbe4736776ee2660d",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d75f9ff91119f5b2c00a015c32634aef88d63f56446b8a49611baa94950a7109",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_flow_monitor/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "262699aa3cc3f5ca73c44807c385bafe6ace198d295ae0f4d8174c8432d27a30",
- "format": 1
- },
- {
- "name": "roles/os10_raguard",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6083750b9289d4ab598aef8b07e4e2460b7c5c1c17e15196fa1389a8346e33a8",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/tests",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/tests/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2af66e1e4a6ac23b147f9221e9c52e0c1b0756cd9d0acd33ebc6273e94cdcd3f",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/tests/main.os10.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4249ee5e0837a3608674e0ddf5fe786819ff406046561c36d37faf53835a3c15",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/tests/test.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "897b50f07fd83fdc9d4af3d1f95736df7de6587ecee7eb99656e896f11e57673",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/templates",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/templates/os10_raguard.j2",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "45b2a163a3a6f4ed91a04eaf1eaf163744dc521e68bcc652f5b9204c8680ccff",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/vars/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3d6921abe30008182bc51398fb297b2e3513e74192f97e5425270f274ee10a72",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/handlers",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/handlers/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5f107b00d4f91ef0488f02bad76bcabf32e37d36ac7dc4b3e13eacd93d7a2ddf",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/tasks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/tasks/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5076cf62b3808abcd72b0591f6dd79a0edfa2e24506c3c24a54ebeac0b52fc46",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/defaults",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/defaults/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba3c9242ffb63fbdee8e94263256c4607963147bc30edb716dbbd19760e3a000",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "roles/os10_raguard/meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "roles/os10_raguard/meta/main.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a865bdbfcce0522277b74b9312d4264b3ed3b692d22a6bffd740a8c8cc021fa4",
- "format": 1
- },
- {
- "name": "docs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "docs/os10_bgp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3c0330969b4488def090626fd3ca345b0d5fd7d13024b8f7bc5d099a4d61bf5f",
- "format": 1
- },
- {
- "name": "docs/os10_image_upgrade.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c9fc5caa466c30af8e20e94c890166472ad45e241e5a6c5e5cd8fb8e91789ab5",
- "format": 1
- },
- {
- "name": "docs/os10_vrrp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8a7753705b354037b6b52c76d086328a60cf18b120f54d509e01a9c7ea4f2431",
- "format": 1
- },
- {
- "name": "docs/os10_qos.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "47508b2209d8933ede8aa881d3e42507baf52088fdcf1682dff4cb3dbacd2409",
- "format": 1
- },
- {
- "name": "docs/os10_logging.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "67358b37eda847a70eca21ede16c1f078989a6ef26bdcb167828592620c06a01",
- "format": 1
- },
- {
- "name": "docs/os10_vxlan.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d3215dc7ae719682ff7edbfe9ee0dfcabf348cd9a47d59303abb23fa15ac48db",
- "format": 1
- },
- {
- "name": "docs/os10_uplink.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "af218f5633d9ad8970bc83ea4d0112996ea810232a74d35813d31a378d183357",
- "format": 1
- },
- {
- "name": "docs/roles.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9f91d993dbc32b3691c9e2f79d51d67eda1493d0f0cea841a3cc2816fe1ca724",
- "format": 1
- },
- {
- "name": "docs/os10_vlan.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "ba6599c1a2ac34d0361638276f081621d8b31823c3fa4d01fe952a469c3a5692",
- "format": 1
- },
- {
- "name": "docs/os10_prefix_list.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b53872392d389fff12d93f0f89a85893c3a4dff81b7a29cc40072ad487734183",
- "format": 1
- },
- {
- "name": "docs/os10_system.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c6f9fa3aebc2738e4c3f5f3a49fedfa92575553a0bf93a101a76967bc63954bb",
- "format": 1
- },
- {
- "name": "docs/os10_interface.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "aeddd44f2c7f6d17211606d02555416c6bb3f3319bbff45ea634c665097715fa",
- "format": 1
- },
- {
- "name": "docs/os10_ntp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "c2cbcf9c9aa3c627804600ca4c6c09b75ab55111fac01ecbc3b108690f0f9d8e",
- "format": 1
- },
- {
- "name": "docs/os10_ecmp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "48f5c4db66fc2f0b993ee1f4fbd40997a402a2dc43760becb694bee7af60e02e",
- "format": 1
- },
- {
- "name": "docs/os10_bfd.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d9155aa75d45da527a150711f44a5d46195a07761fdc2326da9ce389c4f6bac6",
- "format": 1
- },
- {
- "name": "docs/os10_fabric_summary.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d07249dc4347118c841ed0740b1c8eea0351d593f943fdb1922306173842f7e",
- "format": 1
- },
- {
- "name": "docs/os10_vlt.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7edcaa0e40c1497e1cec3310108fd578cab638f4d170b06e1c7c94f279e85943",
- "format": 1
- },
- {
- "name": "docs/os10_flow_monitor.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "30e96ae0283581b1dc3af2c83de6d1f3ef986c352b0f1aa30b3bcd84f3e0e82f",
- "format": 1
- },
- {
- "name": "docs/os10_vrf.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "563a9cadb1c7ce0ecb1f62f033c3959342805be145dc20f6b1bf81c23b2ed412",
- "format": 1
- },
- {
- "name": "docs/os10_xstp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1aa9f3f4225408e0224d825d03529cbf75a5aa95cdb9e2d48fff649122a4d488",
- "format": 1
- },
- {
- "name": "docs/os10_template.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9d311ae7470c42f2f31a7d688121e3ba8b240afe5fa917d4ab2b4fe99338055e",
- "format": 1
- },
- {
- "name": "docs/os10_lag.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "38b04b2c00265af0b32b43f8e0057565e818e088401545600b1476ab6f090fb8",
- "format": 1
- },
- {
- "name": "docs/os10_dns.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1d7a878dd74478a8e5a90b8f365f0d158ba08b1044984d6ad9d375314cb25f08",
- "format": 1
- },
- {
- "name": "docs/dellemc.os10.os10_config_module.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6267acb8f9cfb15edcc55f99507216be10e2e434a03c3427f4a5e1aad2b522c",
- "format": 1
- },
- {
- "name": "docs/os10_snmp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8b1ebf6b632207048f02fb33090cd122da7b91f64a0dda9e9ca8d28303dc972c",
- "format": 1
- },
- {
- "name": "docs/os10_acl.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4902c3d89e393cc4678bbe45f3201578a785441d2e57b71798a4b9169a1035ec",
- "format": 1
- },
- {
- "name": "docs/os10_users.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "bdd33c3a3d4970f86670be7d55ae6e36afe1940c2bede9f7a3c3d64f16fb84ff",
- "format": 1
- },
- {
- "name": "docs/os10_network_validation.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "de7a336064a9cc726f30e55aa0c8f93426a7a30976ec9f8993268ad433c815d1",
- "format": 1
- },
- {
- "name": "docs/os10_copy_config.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3b7b814ce1af3563e014db3e85d1395bc767682a69f33a6504009cd604d65af5",
- "format": 1
- },
- {
- "name": "docs/os10_route_map.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6818269013ecdd151788b3dd06bba9bc06123b904efc39f89c9738c0315e14c2",
- "format": 1
- },
- {
- "name": "docs/dellemc.os10.os10_command_module.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "29c5d8db29a5ff5c8008dc598adca8e4c9d242fddfd9c94ac20bf1c166174dea",
- "format": 1
- },
- {
- "name": "docs/os10_aaa.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7874fbb6486c3ce0f7e13cef3426f5a9941b0017674cc085cef979d511bb31ff",
- "format": 1
- },
- {
- "name": "docs/dellemc.os10.os10_facts_module.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8c5627d7d019b5a96cd19c3b494538b54c8571a30678dbe29b1baa53040326bb",
- "format": 1
- },
- {
- "name": "docs/os10_lldp.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "3a7b3ecc8fb44b6654f82f849ca81d35f554aae9c9d1a668d795b0b697bd1181",
- "format": 1
- },
- {
- "name": "docs/os10_raguard.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "6083750b9289d4ab598aef8b07e4e2460b7c5c1c17e15196fa1389a8346e33a8",
- "format": 1
- },
- {
- "name": "changelogs",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "changelogs/CHANGELOG.rst",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "96780e8d019276e8fa9c7d937c88a841028c21adb67a02e0e0b239f26c870ace",
- "format": 1
- },
- {
- "name": "changelogs/config.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "b5dbc7f223c6b79feee444ffa052b620913771b52077a69bbe78a93c002ccf67",
- "format": 1
- },
- {
- "name": "changelogs/changelog.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "573c974da3352bb7318029687993489295ce119ca08138a99d7c1add75f698f4",
- "format": 1
- },
- {
- "name": ".github",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".github/workflows",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": ".github/workflows/ansible-test.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "372cc6f489143cc505aa1b52c38cd04bc4b6487c3ce5bf613e4b3d4e32a053fc",
- "format": 1
- },
- {
- "name": "plugins",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/doc_fragments",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/doc_fragments/os10.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9cd210947b85bbb3c3270a239d73c3e6fe6a5f0e16bc77326eb179a9486593e8",
- "format": 1
- },
- {
- "name": "plugins/action",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/action/os10.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8d13cba843ee5563bec94849b9255cab43db98ee3369f553bc360ec3d9070513",
- "format": 1
- },
- {
- "name": "plugins/action/textfsm_parser.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8e229a5a0eac4493c04425e825ab50945d7eda4a6bee56a3485dea640f7d209b",
- "format": 1
- },
- {
- "name": "plugins/cliconf",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/cliconf/os10.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2fde5ba9d659d487322a300ecb3a433a5a522c4ec2a5383be1c6a661797ea033",
- "format": 1
- },
- {
- "name": "plugins/terminal",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/terminal/os10.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9633af38e10311856b00de1aee3be7c95c986fb25e6d20cd7827eaa2614d4974",
- "format": 1
- },
- {
- "name": "plugins/modules",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/modules/vlt_validate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a31a1b1341a6cfb695af05233b5d29ef1cef8cc35c945e24b716a7d1445101a2",
- "format": 1
- },
- {
- "name": "plugins/modules/base_xml_to_dict.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "8ce7d403e2dc3e0090faed6853efcf0c3a7de4ff866d140e51839cf54b80efcf",
- "format": 1
- },
- {
- "name": "plugins/modules/wiring_validate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "60306438476482dbf7df288c76a2c5a61a478c4f8e0b99ee2b6218f775153648",
- "format": 1
- },
- {
- "name": "plugins/modules/os10_command.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "7ee1bda2f8719396ced20d4fae9d3c15536ed0467190260e8870f256926c0454",
- "format": 1
- },
- {
- "name": "plugins/modules/os10_facts.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5cd81545aafbd4dc8eb8c08f6067c64059385f14521fad36000b755ff54c9089",
- "format": 1
- },
- {
- "name": "plugins/modules/mtu_validate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2e8d0d7bceb4b16c36793c20304cb828274c34e82dcbc00eea80feeabb16fc73",
- "format": 1
- },
- {
- "name": "plugins/modules/show_system_network_summary.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "eb4c713221503a9a726f74a8d2b54b248b345f554bbe271ef513a32c38b4f56a",
- "format": 1
- },
- {
- "name": "plugins/modules/bgp_validate.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "cae91228b6226f4c6fa5e00b9dba14942075250061f8011ee144a207bb9bddd4",
- "format": 1
- },
- {
- "name": "plugins/modules/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "plugins/modules/os10_config.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0693b1edece924d6c8d055acc94e6cc5cb65fcd8b9ea96175fab3257fbbc9c99",
- "format": 1
- },
- {
- "name": "plugins/module_utils",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/network",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "plugins/module_utils/network/os10.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a0827b94850fda014c103f0c9cd3e2e80bab71bce2dfdef7c2224d62f2a1d60d",
- "format": 1
- },
- {
- "name": "plugins/module_utils/network/base_network_show.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "0f0291c93d3829d2e1c78e1bca681691332e48ca6fb0fabcf1ae35ad378ea89e",
- "format": 1
- },
- {
- "name": "plugins/module_utils/network/__init__.py",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "format": 1
- },
- {
- "name": "playbooks",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/group_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/group_vars/all",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "602987ea665844bebae77f58df9682a1e015edb5d18b7ea9259a8209dd08994f",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/group_vars/spine.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "9410c561579c34b35fc5239aab55350e3472d93aa0d0cef5dd4438b4b5ced91d",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/datacenter.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "2627623980960381195863f4fee460eec8d778be76a096b54857a06e97df23bd",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8d7d7db2043af3d64bc829dc8fdb9b6c59826ccf0d75c1de55efc48260c89bb",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1dd91b341f01a3a4f397ef739ca40c87c824d07a39038d8ecf293bed2a2d9b8c",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "50e9c43b204e412be5cba64f520bbb8bc1d6badfd418729149992494b78cb2ad",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f749957a827032a0c6077a83d1f709af736596e68ee9e6e33c5ce2abd928db91",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "d8696e7b5d612c1f16cc7a0bdbed960a6258baaade83ff7e6cfa55f2fc100e49",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars/spine2.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "f795b650bb0a1b6360bef9a63be4ff553cf0ca4496ccfc5d2783247853ee218a",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars/spine1.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "86b4b5ae09553480001d5c459bbbc0c8f188029dfb5f88afdf3e3be6c7466304",
- "format": 1
- },
- {
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "acb008b56cdc437f69f9f95c4cfee68999e573fa6238f91d28597725f7f6f5be",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/datacenter.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "01512d9b68a28d1f437b669edeaa3139209fc605f137b80d9b70c38eda29bf8d",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/README.md",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a4171c46c9d8296384d9f2d944195e4bea20be0eb9d84c759490bf4dfe40ce3c",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/vxlan_evpn_topology.png",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1eb1121f60229ce9d7ffde2625a6d10b3d5bae314f888172bef932a8f1a1713a",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/inventory.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4d7aa907659dffc2df8f9788f353f04081c1853ff959378c4facc618ac1f686c",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "1e581cc10cd5569e8e188acbdee765b32a143f3c517540cbe314c1f99092f8d5",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e86083141c836c8ea7bc9643cd7f47c5e7241fff2288efa9bac68f24b9d9a53f",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars/spine2.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "4cb8ed1a4f629eac44ae9577abcf284b918d531b958338fd04a2a42f40488339",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a1193c59b99f6954c1fe0098d2c31191802bf4ee697d1b3b4c0e240a45e42d9d",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars/spine1.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "a6516ef2547d0aaced10ec03e730bebe67f2432f43797d4c2037dbc912544b96",
- "format": 1
- },
- {
- "name": "playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "fe94c0a6f1b5657c1a24373f117a6d71300dfdd2b3b2fcc513df934318e1ff4a",
- "format": 1
- },
- {
- "name": "LICENSE",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "format": 1
- },
- {
- "name": "meta",
- "ftype": "dir",
- "chksum_type": null,
- "chksum_sha256": null,
- "format": 1
- },
- {
- "name": "meta/runtime.yml",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "e89e29376e2d83040beda97f813eadab01df7697a7e9594ca89537d54f95214c",
- "format": 1
- }
- ],
- "format": 1
-} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/LICENSE b/ansible_collections/dellemc/os10/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/MANIFEST.json b/ansible_collections/dellemc/os10/MANIFEST.json
deleted file mode 100644
index 6bed5d7ad..000000000
--- a/ansible_collections/dellemc/os10/MANIFEST.json
+++ /dev/null
@@ -1,38 +0,0 @@
-{
- "collection_info": {
- "namespace": "dellemc",
- "name": "os10",
- "version": "1.1.1",
- "authors": [
- "Parameswaran Krishnamurthy <Parameswaran_Krishna@Dell.com>",
- "Senthil Ganesan Ganesan <Senthil_Kumar_Ganesa@Dell.com>",
- "Shreeja R <Shreeja_R@Dell.com>"
- ],
- "readme": "README.md",
- "tags": [
- "dell",
- "dellemc",
- "os10",
- "emc",
- "networking"
- ],
- "description": "Ansible Network Collection for Dell EMC SmartFabric OS10",
- "license": [],
- "license_file": "LICENSE",
- "dependencies": {
- "ansible.netcommon": ">=1.0.0"
- },
- "repository": "https://github.com/ansible-collections/dellemc.os10",
- "documentation": "https://github.com/ansible-collections/dellemc.os10/tree/master/docs",
- "homepage": "https://github.com/ansible-collections/dellemc.os10",
- "issues": "https://github.com/ansible-collections/dellemc.os10/issues"
- },
- "file_manifest_file": {
- "name": "FILES.json",
- "ftype": "file",
- "chksum_type": "sha256",
- "chksum_sha256": "afa56fa2200f773fcafd6a3cd52df1affca39c600c356d3bccb09fcc1b92bdfd",
- "format": 1
- },
- "format": 1
-} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/README.md b/ansible_collections/dellemc/os10/README.md
deleted file mode 100644
index 942dda9c5..000000000
--- a/ansible_collections/dellemc/os10/README.md
+++ /dev/null
@@ -1,93 +0,0 @@
-# Ansible Network Collection for Dell EMC SmartFabric OS10
-
-### Collection contents
-This collection includes Ansible modules, plugins and roles needed to provision and manage Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. Sample playbooks and documentation are also included to show how the collection can be used.
-
-### Collection core modules
-
-Name | Description
---- | ---
-[os10_command](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/dellemc.os10.os10_command_module.rst)| Run commands on devices running OS10
-[os10_config](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/dellemc.os10.os10_config_module.rst)| Manage configuration on devices running OS10
-[os10_facts](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/dellemc.os10.os10_facts_module.rst)| Collect facts from devices running OS10
-
-### Collection roles
-These roles facilitate the provisioning and administration of devices running SmartFabric OS10. There are over 30 roles available that provide comprehensive coverage of most OS10 resources, including *os10_interface*, *os10_acl*, *os10_bgp*, and *os10_vxlan*. The documentation for each role is located at [OS10 roles](https://github.com/ansible-collections/dellemc.os10/blob/master/docs/roles.rst).
-
-### Sample use case playbooks
-This collection includes these sample playbooks that illustrate end-to-end use cases.
-
-- [CLOS fabric](https://github.com/ansible-collections/dellemc.os10/blob/master/playbooks/clos_fabric_ebgp/README.md) — Example playbook to build a Layer 3 leaf-spine fabric
-
-- [VXLAN EVPN](https://github.com/ansible-collections/dellemc.os10/blob/master/playbooks/vxlan_evpn/README.md) — Example playbook to build a scale-out leaf-spine topology using VxLAN, BGP EVPN, and symmetric-IRB
-
-## Installation
-Use this command to install the latest version of the OS10 collection from Ansible Galaxy.
-
- ansible-galaxy collection install dellemc.os10
-
-To install a specific version, specify a version range identifier. For example, the following command installs the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0:
-
- ansible-galaxy collection install 'dellemc.os10:>=1.0.0,<2.0.0'
-
-## Version compatibility
-* Ansible version 2.9 or later.
-* Python 3.5 or higher, or Python 2.7
-
-### Using in Ansible version 2.9
-> **NOTE**: This step is not required for Ansible version 2.10 or later.
-
-To use this collection with Ansible version 2.9, set the following environment variable when running the playbook.
-
- ANSIBLE_NETWORK_GROUP_MODULES=os10
-
-It can be set permanently in *ansible.cfg* with variable *network_group_modules* under the *defaults* section.
-
-**ansible.cfg**
-
- [defaults]
- network_group_modules=os10
-
-> **NOTE**: For Ansible versions lower than 2.9, use the legacy [dellos10 modules](https://ansible-dellos-docs.readthedocs.io/en/latest/modules.html#os10-modules) and [dellos roles](https://ansible-dellos-docs.readthedocs.io/en/latest/roles.html).
-
-## Sample playbook
-
-**playbook.yaml**
-
- - hosts: os10_switches
- connection: network_cli
- collections:
- - dellemc.os10
- roles:
- - os10_vlan
-
-**host_vars/os10_sw1.yaml**
-
- hostname: os10_sw1
- # Parameters for connection type network_cli
- ansible_ssh_user: xxxx
- ansible_ssh_pass: xxxx
- ansible_network_os: dellemc.os10.os10
-
- # Create vlan100 and delete vlan888
- os10_vlan:
- vlan 100:
- description: "Blue"
- state: present
- vlan 888:
- state: absent
-
-**inventory.yaml**
-
- [os10_sw1]
- os10_sw1 ansible_host=100.104.28.119
-
- [os10_sw2]
- os10_sw2 ansible_host=100.104.28.120
-
- [os10_switches:children]
- os10_sw1
- os10_sw2
-
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
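As a complement to the role-based sample in the removed README above, here is a minimal, hedged sketch of driving the collection's core modules (os10_facts, os10_config) directly. It reuses the os10_switches group, network_cli connection, and collections keyword from that README; the gather_subset value and the hostname line are illustrative only, not taken from the collection's documentation.

    - hosts: os10_switches
      connection: network_cli
      gather_facts: false
      collections:
        - dellemc.os10
      tasks:
        # Collect device facts with the os10_facts core module
        - name: Gather OS10 facts
          os10_facts:
            gather_subset: all

        # Push a small change with the os10_config core module
        - name: Set the hostname (illustrative value)
          os10_config:
            lines:
              - hostname leaf1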
diff --git a/ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst
deleted file mode 100644
index 367cb5856..000000000
--- a/ansible_collections/dellemc/os10/changelogs/CHANGELOG.rst
+++ /dev/null
@@ -1,116 +0,0 @@
-======================================================================
-Ansible Network Collection for Dell EMC SmartFabric OS10 Release Notes
-======================================================================
-
-.. contents:: Topics
-
-
-v1.1.1
-======
-
-Minor Changes
--------------
-
-- Adding support for Ansible version 2.9 (https://github.com/ansible-collections/dellemc.os10/pull/58)
-
-v1.1.0
-======
-
-Major Changes
--------------
-
-- os10_bgp - Enhanced router bgp keyword support so that keywords already supported for the default VRF are also supported for non-default VRFs, and added keywords that apply to both default and non-default VRFs
-- os10_snmp role - Added support for snmp V3 features in community, group, host, engineID
-
-Minor Changes
--------------
-
-- Enhanced os10_bgp role to support internal BGP redistribution under address-family for V4 and V6
-- Enhanced os10_bgp role to support maximum-prefix configuration under BGP peer and peer-group.
-- os10_ntp role - Added support for vrf and sha1 and sha2-256 authentication-key types
-- os10_snmp role - Added support for source-interface and vrf
-- os10_template - add template for show spanning tree compatibility mode
-- os10_template - add template for show vlt error disabled ports
-- os10_uplink role - Added support for downstream disable-links and auto-recover
-
-Breaking Changes / Porting Guide
---------------------------------
-
-- os10_bgp - Changed the "subnet" key from dictionary format to list format under the "listen" key, to support multiple neighbor prefixes for the listen command
-- os10_bgp - Changed the "vrf" key from dictionary format to list format to support multiple VRFs in router BGP, and renamed the "vrf" key to "vrfs" (see the sketch after this list)
-
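For anyone porting playbooks across these breaking changes, the following rough before/after sketch illustrates the vrf-to-vrfs change, assuming the role's top-level os10_bgp variable; the name attribute is an illustrative assumption and is not taken from the role documentation. The listen/subnet change follows the same dictionary-to-list pattern.

    # Before 1.1.0: a single VRF expressed as a dictionary under "vrf"
    os10_bgp:
      vrf:
        name: red          # illustrative attribute

    # From 1.1.0: multiple VRFs expressed as a list under the renamed "vrfs" key
    os10_bgp:
      vrfs:
        - name: red        # illustrative attribute
        - name: blue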
-Bugfixes
---------
-
-- Fixed issue in using interface range in os10_vlan members. (https://github.com/ansible-collections/dellemc.os10/issues/53)
-
-v1.0.2
-======
-
-Bugfixes
---------
-
-- Fix issue in using ip_and_mask along with members in os10_vlan role (https://github.com/ansible-collections/dellemc.os10/issues/42)
-- Fix issue in using list of strings for `commands` argument for `os10_command` module (https://github.com/ansible-collections/dellemc.os10/issues/43)
-- Fixed os10_vlan role idempotency issue with description and members (https://github.com/ansible-collections/dellemc.os10/issues/46)
-
-v1.0.1
-======
-
-Release Summary
----------------
-
-Added changelog.
-
-v1.0.0
-======
-
-Major Changes
--------------
-
-- New role os10_aaa - Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server.
-- New role os10_acl - Facilitates the configuration of Access Control lists.
-- New role os10_bfd - Facilitates the configuration of BFD global attributes.
-- New role os10_bgp - Facilitates the configuration of border gateway protocol (BGP) attributes.
-- New role os10_copy_config - This role pushes the backup running configuration into an OS10 device.
-- New role os10_dns - Facilitates the configuration of domain name service (DNS).
-- New role os10_ecmp - Facilitates the configuration of equal cost multi-path (ECMP) for IPv4.
-- New role os10_fabric_summary Facilitates retrieval of show system information from all the OS10 switches in the fabric.
-- New role os10_flow_monitor Facilitates the configuration of ACL flow-based monitoring attributes.
-- New role os10_image_upgrade Facilitates installation of OS10 software images.
-- New role os10_interface Facilitates the configuration of interface attributes.
-- New role os10_lag Facilitates the configuration of link aggregation group (LAG) attributes.
-- New role os10_lldp Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level.
-- New role os10_logging Facilitates the configuration of global logging attributes and logging servers.
-- New role os10_network_validation Facilitates validation of wiring connection, BGP neighbors, MTU between neighbors and VLT pair.
-- New role os10_ntp Facilitates the configuration of network time protocol (NTP) attributes.
-- New role os10_prefix_list Facilitates the configuration of IP prefix-list.
-- New role os10_qos Facilitates the configuration of quality of service attributes including policy-map and class-map.
-- New role os10_raguard Facilitates the configuration of IPv6 RA Guard attributes.
-- New role os10_route_map Facilitates the configuration of route-map attributes.
-- New role os10_snmp Facilitates the configuration of global SNMP attributes.
-- New role os10_system Facilitates the configuration of hostname and hashing algorithm.
-- New role os10_template The role takes the raw string input from the CLI of OS10 device, and returns a structured text in the form of a Python dictionary.
-- New role os10_uplink Facilitates the configuration of uplink attributes like uplink-state group.
-- New role os10_users Facilitates the configuration of global system user attributes.
-- New role os10_vlan Facilitates the configuration of virtual LAN (VLAN) attributes.
-- New role os10_vlt Facilitates the configuration of virtual link trunking (VLT).
-- New role os10_vrf Facilitates the configuration of virtual routing and forwarding (VRF).
-- New role os10_vrrp Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes.
-- New role os10_vxlan Facilitates the configuration of virtual extensible LAN (VXLAN) attributes.
-- New role os10_xstp Facilitates the configuration of xSTP attributes.
-
-New Plugins
------------
-
-Cliconf
-~~~~~~~
-
-- os10 - Use OS10 cliconf to run commands on Dell EMC PowerSwitch devices.
-
-New Modules
------------
-
-- os10_command - Run commands on devices running Dell EMC SmartFabric OS10.
-- os10_config - Manage configuration on devices running OS10.
-- os10_facts - Collect facts from devices running OS10.
diff --git a/ansible_collections/dellemc/os10/changelogs/changelog.yaml b/ansible_collections/dellemc/os10/changelogs/changelog.yaml
deleted file mode 100644
index 334a1e5f1..000000000
--- a/ansible_collections/dellemc/os10/changelogs/changelog.yaml
+++ /dev/null
@@ -1,124 +0,0 @@
-ancestor: null
-releases:
- 1.0.0:
- changes:
- major_changes:
- - New role os10_aaa - Facilitates the configuration of Authentication Authorization
- and Accounting (AAA), TACACS and RADIUS server.
- - New role os10_acl - Facilitates the configuration of Access Control lists.
- - New role os10_bfd - Facilitates the configuration of BFD global attributes.
- - New role os10_bgp - Facilitates the configuration of border gateway protocol
- (BGP) attributes.
- - New role os10_copy_config - This role pushes the backup running configuration
- into an OS10 device.
- - New role os10_dns - Facilitates the configuration of domain name service (DNS).
- - New role os10_ecmp - Facilitates the configuration of equal cost multi-path
- (ECMP) for IPv4.
- - New role os10_fabric_summary Facilitates to get show system information of
- all the OS10 switches in the fabric.
- - New role os10_flow_monitor Facilitates the configuration of ACL flow-based
- monitoring attributes.
- - New role os10_image_upgrade Facilitates installation of OS10 software images.
- - New role os10_interface Facilitates the configuration of interface attributes.
- - New role os10_lag Facilitates the configuration of link aggregation group
- (LAG) attributes.
- - New role os10_lldp Facilitates the configuration of link layer discovery protocol
- (LLDP) attributes at global and interface level.
- - New role os10_logging Facilitates the configuration of global logging attributes
- and logging servers.
- - New role os10_network_validation Facilitates validation of wiring connection,
- BGP neighbors, MTU between neighbors and VLT pair.
- - New role os10_ntp Facilitates the configuration of network time protocol (NTP)
- attributes.
- - New role os10_prefix_list Facilitates the configuration of IP prefix-list.
- - New role os10_qos Facilitates the configuration of quality of service attributes
- including policy-map and class-map.
- - New role os10_raguard Facilitates the configuration of IPv6 RA Guard attributes.
- - New role os10_route_map Facilitates the configuration of route-map attributes.
- - New role os10_snmp Facilitates the configuration of global SNMP attributes.
- - New role os10_system Facilitates the configuration of hostname and hashing
- algorithm.
- - New role os10_template The role takes the raw string input from the CLI of
- OS10 device, and returns a structured text in the form of a Python dictionary.
- - New role os10_uplink Facilitates the configuration of uplink attributes like
- uplink-state group.
- - New role os10_users Facilitates the configuration of global system user attributes.
- - New role os10_vlan Facilitates the configuration of virtual LAN (VLAN) attributes.
- - New role os10_vlt Facilitates the configuration of virtual link trunking (VLT).
- - New role os10_vrf Facilitates the configuration of virtual routing and forwarding
- (VRF).
- - New role os10_vrrp Facilitates the configuration of virtual router redundancy
- protocol (VRRP) attributes.
- - New role os10_vxlan Facilitates the configuration of virtual extensible LAN
- (VXLAN) attributes.
- - New role os10_xstp Facilitates the configuration of xSTP attributes.
- modules:
- - description: Run commands on devices running Dell EMC SmartFabric OS10.
- name: os10_command
- namespace: ''
- - description: Manage configuration on devices running OS10.
- name: os10_config
- namespace: ''
- - description: Collect facts from devices running OS10.
- name: os10_facts
- namespace: ''
- plugins:
- cliconf:
- - description: Use OS10 cliconf to run commands on Dell EMC PowerSwitch devices.
- name: os10
- namespace: null
- release_date: '2020-07-31'
- 1.0.1:
- changes:
- release_summary: Added changelog.
- fragments:
- - 1.0.1.yaml
- release_date: '2020-08-14'
- 1.0.2:
- changes:
- bugfixes:
- - Fix issue in using ip_and_mask along with members in os10_vlan role (https://github.com/ansible-collections/dellemc.os10/issues/42)
- - Fix issue in using list of strings for `commands` argument for `os10_command`
- module (https://github.com/ansible-collections/dellemc.os10/issues/43)
- - Fixed os10_vlan role idempotency issue with description and members (https://github.com/ansible-collections/dellemc.os10/issues/46)
- fragments:
- - 1.0.2.yaml
- - 46-vlan-idempotency-desc-and-members.yaml
- release_date: '2020-10-28'
- 1.1.0:
- changes:
- breaking_changes:
- - os10_bgp - Changed "subnet" key as list format instead of dictionary format
- under "listen" key to support multiple neighbor prefix for listen command
- - os10_bgp - Changed "vrf" key as list format instead of dictionary format to
- support multiple VRF in router BGP and changed the "vrf" key name to "vrfs"
- bugfixes:
- - Fixed issue in using interface range in os10_vlan members. (https://github.com/ansible-collections/dellemc.os10/issues/53)
- major_changes:
- - os10_bgp - Enhanced router bgp keyword support for non-default vrf which are
- supported for default vrf and additional keyword to support both default and
- non-default vrf
- - os10_snmp role - Added support for snmp V3 features in community, group, host,
- engineID
- minor_changes:
- - Enhanced os10_bgp role to support internal BGP redistribution under address-family
- for V4 and V6
- - Enhanced os10_bgp role to support maximum-prefix configuration under BGP peer
- and peer-group.
- - os10_ntp role - Added support for vrf and sha1 and sha2-256 authentication-key
- types
- - os10_snmp role - Added support for source-interface and vrf
- - os10_template - add template for show spanning tree compatibility mode
- - os10_template - add template for show vlt error disabled ports
- - os10_uplink role - Added support for downstream disable-links and auto-recover
- fragments:
- - 53-vlan-member-port-range.yaml
- - R9.yaml
- release_date: '2020-12-02'
- 1.1.1:
- changes:
- minor_changes:
- - Adding support for Ansible version 2.9 (https://github.com/ansible-collections/dellemc.os10/pull/58)
- fragments:
- - 58-support-for-ansible-version-2.9.yaml
- release_date: '2020-12-11'
diff --git a/ansible_collections/dellemc/os10/changelogs/config.yaml b/ansible_collections/dellemc/os10/changelogs/config.yaml
deleted file mode 100644
index 953ad20a2..000000000
--- a/ansible_collections/dellemc/os10/changelogs/config.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-changelog_filename_template: CHANGELOG.rst
-changelog_filename_version_depth: 0
-changes_file: changelog.yaml
-changes_format: combined
-keep_fragments: false
-mention_ancestor: true
-new_plugins_after_name: removed_features
-notesdir: fragments
-prelude_section_name: release_summary
-prelude_section_title: Release Summary
-flatmap: true
-sections:
-- - major_changes
- - Major Changes
-- - minor_changes
- - Minor Changes
-- - breaking_changes
- - Breaking Changes / Porting Guide
-- - deprecated_features
- - Deprecated Features
-- - removed_features
- - Removed Features (previously deprecated)
-- - security_fixes
- - Security Fixes
-- - bugfixes
- - Bugfixes
-- - known_issues
- - Known Issues
-title: Ansible Network Collection for Dell EMC SmartFabric OS10
-trivial_section_name: trivial
diff --git a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst
deleted file mode 100644
index 4bec5b231..000000000
--- a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_command_module.rst
+++ /dev/null
@@ -1,446 +0,0 @@
-.. Document meta
-
-
-.. Anchors
-
-.. _ansible_collections.dellemc.os10.os10_command_module:
-
-.. Anchors: short name for ansible.builtin
-
-.. Anchors: aliases
-
-
-
-.. Title
-
-dellemc.os10.os10_command -- Run commands on devices running Dell EMC SmartFabric OS10
-++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. Collection note
-
-.. note::
- This plugin is part of the `dellemc.os10 collection <https://galaxy.ansible.com/dellemc/os10>`_.
-
- To install it use: :code:`ansible-galaxy collection install dellemc.os10`.
-
- To use it in a playbook, specify: :code:`dellemc.os10.os10_command`.
-
-.. version_added
-
-
-.. contents::
- :local:
- :depth: 1
-
-.. Deprecated
-
-
-Synopsis
---------
-
-.. Description
-
-- Sends arbitrary commands to an OS10 device and returns the results read from the device. This module includes an argument that will cause the module to wait for a specific condition before returning or timing out if the condition is not met.
-- This module does not support running commands in configuration mode. Please use `dellemc.os10.os10_config <dellemc.os10.os10_config_module.rst>`_ to configure OS10 devices.
-
-
-.. Aliases
-
-
-.. Requirements
-
-
-.. Options
-
-Parameters
-----------
-
-.. raw:: html
-
- <table border=0 cellpadding=0 class="documentation-table">
- <tr>
- <th colspan="2">Parameter</th>
- <th>Choices/<font color="blue">Defaults</font></th>
- <th width="100%">Comments</th>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-commands"></div>
- <b>commands</b>
- <a class="ansibleOptionLink" href="#parameter-commands" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> / <span style="color: red">required</span> </div>
- </td>
- <td>
- </td>
- <td>
-                        <div>List of commands to send to the remote OS10 device over the configured provider. The resulting output from the command is returned. If the <em>wait_for</em> argument is provided, the module does not return until the condition is satisfied or the number of retries has expired.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-interval"></div>
- <b>interval</b>
- <a class="ansibleOptionLink" href="#parameter-interval" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- <b>Default:</b><br/><div style="color: blue">1</div>
- </td>
- <td>
- <div>Configures the interval in seconds to wait between retries of the command. If the command does not pass the specified conditions, the interval indicates how long to wait before trying the command again.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-match"></div>
- <b>match</b>
- <a class="ansibleOptionLink" href="#parameter-match" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>all</b>&nbsp;&larr;</div></li>
- <li>any</li>
- </ul>
- </td>
- <td>
- <div>The <em>match</em> argument is used in conjunction with the <em>wait_for</em> argument to specify the match policy. Valid values are <code>all</code> or <code>any</code>. If the value is set to <code>all</code> then all conditionals in the wait_for must be satisfied. If the value is set to <code>any</code> then only one of the values must be satisfied.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-provider"></div>
- <b>provider</b>
- <a class="ansibleOptionLink" href="#parameter-provider" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">dictionary</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>A dict object containing connection details.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/auth_pass"></div>
- <b>auth_pass</b>
- <a class="ansibleOptionLink" href="#parameter-provider/auth_pass" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the password to use if required to enter privileged mode on the remote device. If <em>authorize</em> is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_AUTH_PASS</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/authorize"></div>
- <b>authorize</b>
- <a class="ansibleOptionLink" href="#parameter-provider/authorize" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">boolean</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>no</b>&nbsp;&larr;</div></li>
- <li>yes</li>
- </ul>
- </td>
- <td>
- <div>Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_AUTHORIZE</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/host"></div>
- <b>host</b>
- <a class="ansibleOptionLink" href="#parameter-provider/host" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/password"></div>
- <b>password</b>
- <a class="ansibleOptionLink" href="#parameter-provider/password" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_PASSWORD</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/port"></div>
- <b>port</b>
- <a class="ansibleOptionLink" href="#parameter-provider/port" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the port to use when building the connection to the remote device.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/ssh_keyfile"></div>
- <b>ssh_keyfile</b>
- <a class="ansibleOptionLink" href="#parameter-provider/ssh_keyfile" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">path</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_SSH_KEYFILE</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/timeout"></div>
- <b>timeout</b>
- <a class="ansibleOptionLink" href="#parameter-provider/timeout" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/username"></div>
- <b>username</b>
- <a class="ansibleOptionLink" href="#parameter-provider/username" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_USERNAME</code> will be used instead.</div>
- </td>
- </tr>
-
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-retries"></div>
- <b>retries</b>
- <a class="ansibleOptionLink" href="#parameter-retries" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- <b>Default:</b><br/><div style="color: blue">10</div>
- </td>
- <td>
- <div>Specifies the number of retries a command should be tried before it is considered failed. The command is run on the target device every retry and evaluated against the <em>wait_for</em> conditions.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-wait_for"></div>
- <b>wait_for</b>
- <a class="ansibleOptionLink" href="#parameter-wait_for" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>
- </td>
- <td>
- <div>List of conditions to evaluate against the output of the command. The task will wait for each condition to be true before moving forward. If the conditional is not true within the configured number of <em>retries</em>, the task fails. See examples.</div>
- </td>
- </tr>
- </table>
- <br/>
-
-.. Notes
-
-Notes
------
-
-.. note::
- - For more information on using Ansible to manage Dell EMC Network devices see https://www.ansible.com/ansible-dell-networking.
-
-.. Seealso
-
-
-.. Examples
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
- tasks:
- - name: run show version on remote devices
- os10_command:
- commands: show version
-
- - name: run show version and check to see if output contains OS10
- os10_command:
- commands: show version
- wait_for: result[0] contains OS10
-
- - name: run multiple commands on remote nodes
- os10_command:
- commands:
- - show version
- - show interface
-
- - name: run multiple commands and evaluate the output
- os10_command:
- commands:
- - show version
- - show interface
- wait_for:
- - result[0] contains OS10
- - result[1] contains Ethernet
-
-
-
-
-.. Facts
-
-
-.. Return values
-
-Return Values
--------------
-Common return values are documented `here <https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#common-return-values>`_, the following are the fields unique to this module:
-
-.. raw:: html
-
- <table border=0 cellpadding=0 class="documentation-table">
- <tr>
- <th colspan="1">Key</th>
- <th>Returned</th>
- <th width="100%">Description</th>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-failed_conditions"></div>
- <b>failed_conditions</b>
- <a class="ansibleOptionLink" href="#return-failed_conditions" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>failed</td>
- <td>
- <div>The list of conditionals that have failed</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">[&#x27;...&#x27;, &#x27;...&#x27;]</div>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-stdout"></div>
- <b>stdout</b>
- <a class="ansibleOptionLink" href="#return-stdout" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>always apart from low level errors (such as action plugin)</td>
- <td>
- <div>The set of responses from the commands</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">[&#x27;...&#x27;, &#x27;...&#x27;]</div>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-stdout_lines"></div>
- <b>stdout_lines</b>
- <a class="ansibleOptionLink" href="#return-stdout_lines" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>always apart from low level errors (such as action plugin)</td>
- <td>
- <div>The value of stdout split into a list</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">[[&#x27;...&#x27;, &#x27;...&#x27;], [&#x27;...&#x27;], [&#x27;...&#x27;]]</div>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-warnings"></div>
- <b>warnings</b>
- <a class="ansibleOptionLink" href="#return-warnings" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>always</td>
- <td>
- <div>The list of warnings (if any) generated by module based on arguments</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">[&#x27;...&#x27;, &#x27;...&#x27;]</div>
- </td>
- </tr>
- </table>
- <br/><br/>
-
-.. Status (Presently only deprecated)
-
-
-.. Authors
-
-Authors
-~~~~~~~
-
-- Senthil Kumar Ganesan (@skg-net)
-
-
-
-.. Parsing errors
-
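The os10_command document removed above describes the match, retries, and interval parameters, but its examples only exercise wait_for. The illustrative task below combines them; the values are arbitrary and assume the same connection setup as the documented examples.

    - name: Wait until either conditional passes (illustrative values)
      dellemc.os10.os10_command:
        commands:
          - show version
          - show interface
        wait_for:
          - result[0] contains OS10
          - result[1] contains Ethernet
        match: any        # succeed as soon as one conditional is true
        retries: 5        # re-run the commands up to five times
        interval: 2       # wait two seconds between retries
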
diff --git a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst
deleted file mode 100644
index 737478087..000000000
--- a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_config_module.rst
+++ /dev/null
@@ -1,606 +0,0 @@
-.. Document meta
-
-
-.. Anchors
-
-.. _ansible_collections.dellemc.os10.os10_config_module:
-
-.. Anchors: short name for ansible.builtin
-
-.. Anchors: aliases
-
-
-
-.. Title
-
-dellemc.os10.os10_config -- Manage Dell EMC SmartFabric OS10 configuration sections
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. Collection note
-
-.. note::
- This plugin is part of the `dellemc.os10 collection <https://galaxy.ansible.com/dellemc/os10>`_.
-
- To install it use: :code:`ansible-galaxy collection install dellemc.os10`.
-
- To use it in a playbook, specify: :code:`dellemc.os10.os10_config`.
-
-.. version_added
-
-
-.. contents::
- :local:
- :depth: 1
-
-.. Deprecated
-
-
-Synopsis
---------
-
-.. Description
-
-- OS10 configurations use a simple block indent file syntax for segmenting configuration into sections. This module provides an implementation for working with OS10 configuration sections in a deterministic way.
-
-
-.. Aliases
-
-
-.. Requirements
-
-
-.. Options
-
-Parameters
-----------
-
-.. raw:: html
-
- <table border=0 cellpadding=0 class="documentation-table">
- <tr>
- <th colspan="2">Parameter</th>
- <th>Choices/<font color="blue">Defaults</font></th>
- <th width="100%">Comments</th>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-after"></div>
- <b>after</b>
- <a class="ansibleOptionLink" href="#parameter-after" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>
- </td>
- <td>
- <div>The ordered set of commands to append to the end of the command stack if a change needs to be made. Just like with <em>before</em> this allows the playbook designer to append a set of commands to be executed after the command set.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-backup"></div>
- <b>backup</b>
- <a class="ansibleOptionLink" href="#parameter-backup" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">boolean</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>no</b>&nbsp;&larr;</div></li>
- <li>yes</li>
- </ul>
- </td>
- <td>
- <div>This argument will cause the module to create a full backup of the current <code>running-config</code> from the remote device before any changes are made. If the <code>backup_options</code> value is not given, the backup file is written to the <code>backup</code> folder in the playbook root directory. If the directory does not exist, it is created.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-backup_options"></div>
- <b>backup_options</b>
- <a class="ansibleOptionLink" href="#parameter-backup_options" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">dictionary</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>This is a dict object containing configurable options related to backup file path. The value of this option is read only when <code>backup</code> is set to <em>yes</em>, if <code>backup</code> is set to <em>no</em> this option will be silently ignored.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-backup_options/dir_path"></div>
- <b>dir_path</b>
- <a class="ansibleOptionLink" href="#parameter-backup_options/dir_path" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">path</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>This option provides the path ending with directory name in which the backup configuration file will be stored. If the directory does not exist it will be first created and the filename is either the value of <code>filename</code> or default filename as described in <code>filename</code> options description. If the path value is not given in that case a <em>backup</em> directory will be created in the current working directory and backup configuration will be copied in <code>filename</code> within <em>backup</em> directory.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-backup_options/filename"></div>
- <b>filename</b>
- <a class="ansibleOptionLink" href="#parameter-backup_options/filename" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
-                        <div>The filename to be used to store the backup configuration. If the filename is not given, it will be generated based on the hostname, current time and date in the format &lt;hostname&gt;_config.&lt;current-date&gt;@&lt;current-time&gt;</div>
- </td>
- </tr>
-
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-before"></div>
- <b>before</b>
- <a class="ansibleOptionLink" href="#parameter-before" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>
- </td>
- <td>
- <div>The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-config"></div>
- <b>config</b>
- <a class="ansibleOptionLink" href="#parameter-config" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The <em>config</em> argument allows the implementer to pass in the configuration to use as the base config for comparison.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-lines"></div>
- <b>lines</b>
- <a class="ansibleOptionLink" href="#parameter-lines" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>
- </td>
- <td>
- <div>The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser. This argument is mutually exclusive with <em>src</em>.</div>
- <div style="font-size: small; color: darkgreen"><br/>aliases: commands</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-match"></div>
- <b>match</b>
- <a class="ansibleOptionLink" href="#parameter-match" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>line</b>&nbsp;&larr;</div></li>
- <li>strict</li>
- <li>exact</li>
- <li>none</li>
- </ul>
- </td>
- <td>
- <div>Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to <em>line</em>, commands are matched line by line. If match is set to <em>strict</em>, command lines are matched with respect to position. If match is set to <em>exact</em>, command lines must be an equal match. Finally, if match is set to <em>none</em>, the module will not attempt to compare the source configuration with the running configuration on the remote device.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-parents"></div>
- <b>parents</b>
- <a class="ansibleOptionLink" href="#parameter-parents" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>
- </td>
- <td>
- <div>The ordered set of parents that uniquely identify the section or hierarchy the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-provider"></div>
- <b>provider</b>
- <a class="ansibleOptionLink" href="#parameter-provider" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">dictionary</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>A dict object containing connection details.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/auth_pass"></div>
- <b>auth_pass</b>
- <a class="ansibleOptionLink" href="#parameter-provider/auth_pass" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the password to use if required to enter privileged mode on the remote device. If <em>authorize</em> is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_AUTH_PASS</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/authorize"></div>
- <b>authorize</b>
- <a class="ansibleOptionLink" href="#parameter-provider/authorize" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">boolean</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>no</b>&nbsp;&larr;</div></li>
- <li>yes</li>
- </ul>
- </td>
- <td>
- <div>Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_AUTHORIZE</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/host"></div>
- <b>host</b>
- <a class="ansibleOptionLink" href="#parameter-provider/host" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/password"></div>
- <b>password</b>
- <a class="ansibleOptionLink" href="#parameter-provider/password" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_PASSWORD</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/port"></div>
- <b>port</b>
- <a class="ansibleOptionLink" href="#parameter-provider/port" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the port to use when building the connection to the remote device.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/ssh_keyfile"></div>
- <b>ssh_keyfile</b>
- <a class="ansibleOptionLink" href="#parameter-provider/ssh_keyfile" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">path</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_SSH_KEYFILE</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/timeout"></div>
- <b>timeout</b>
- <a class="ansibleOptionLink" href="#parameter-provider/timeout" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/username"></div>
- <b>username</b>
- <a class="ansibleOptionLink" href="#parameter-provider/username" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_USERNAME</code> will be used instead.</div>
- </td>
- </tr>
-
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-replace"></div>
- <b>replace</b>
- <a class="ansibleOptionLink" href="#parameter-replace" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>line</b>&nbsp;&larr;</div></li>
- <li>block</li>
- </ul>
- </td>
- <td>
- <div>Instructs the module on the way to perform the configuration on the device. If the replace argument is set to <em>line</em> then the modified lines are pushed to the device in configuration mode. If the replace argument is set to <em>block</em> then the entire command block is pushed to the device in configuration mode if any line is not correct.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-save"></div>
- <b>save</b>
- <a class="ansibleOptionLink" href="#parameter-save" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">boolean</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>no</b>&nbsp;&larr;</div></li>
- <li>yes</li>
- </ul>
- </td>
- <td>
-                        <div>The <code>save</code> argument instructs the module to save the running-config to the startup-config at the conclusion of the module run. If check mode is specified, this argument is ignored.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-src"></div>
- <b>src</b>
- <a class="ansibleOptionLink" href="#parameter-src" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">path</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the source path to the file that contains the configuration or configuration template to load. The path to the source file can either be the full path on the Ansible control host or a relative path from the playbook or role root directory. This argument is mutually exclusive with <em>lines</em>.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-update"></div>
- <b>update</b>
- <a class="ansibleOptionLink" href="#parameter-update" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>merge</b>&nbsp;&larr;</div></li>
- <li>check</li>
- </ul>
- </td>
- <td>
- <div>The <em>update</em> argument controls how the configuration statements are processed on the remote device. Valid choices for the <em>update</em> argument are <em>merge</em> and <em>check</em>. When you set this argument to <em>merge</em>, the configuration changes merge with the current device running configuration. When you set this argument to <em>check</em> the configuration updates are determined but not actually configured on the remote device.</div>
- </td>
- </tr>
- </table>
- <br/>
-
-.. Notes
-
-Notes
------
-
-.. note::
- - For more information on using Ansible to manage Dell EMC Network devices see https://www.ansible.com/ansible-dell-networking.
-
-.. Seealso
-
-
-.. Examples
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
- - os10_config:
- lines: ['hostname {{ inventory_hostname }}']
-
- - os10_config:
- lines:
- - 10 permit ip host 1.1.1.1 any log
- - 20 permit ip host 2.2.2.2 any log
- - 30 permit ip host 3.3.3.3 any log
- - 40 permit ip host 4.4.4.4 any log
- - 50 permit ip host 5.5.5.5 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- match: exact
-
- - os10_config:
- lines:
- - 10 permit ip host 1.1.1.1 any log
- - 20 permit ip host 2.2.2.2 any log
- - 30 permit ip host 3.3.3.3 any log
- - 40 permit ip host 4.4.4.4 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- replace: block
-
- - os10_config:
- lines: ['hostname {{ inventory_hostname }}']
- backup: yes
- backup_options:
- filename: backup.cfg
- dir_path: /home/user
-
-
-
-
-.. Facts
-
-
-.. Return values
-
-Return Values
--------------
-Common return values are documented `here <https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#common-return-values>`_, the following are the fields unique to this module:
-
-.. raw:: html
-
- <table border=0 cellpadding=0 class="documentation-table">
- <tr>
- <th colspan="1">Key</th>
- <th>Returned</th>
- <th width="100%">Description</th>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-backup_path"></div>
- <b>backup_path</b>
- <a class="ansibleOptionLink" href="#return-backup_path" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>when backup is yes</td>
- <td>
- <div>The full path to the backup file</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">/playbooks/ansible/backup/os10_config.2016-07-16@22:28:34</div>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-commands"></div>
- <b>commands</b>
- <a class="ansibleOptionLink" href="#return-commands" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>always</td>
- <td>
- <div>The set of commands that will be pushed to the remote device</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">[&#x27;hostname foo&#x27;, &#x27;router bgp 1&#x27;, &#x27;router-id 1.1.1.1&#x27;]</div>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-saved"></div>
- <b>saved</b>
- <a class="ansibleOptionLink" href="#return-saved" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">boolean</span>
- </div>
- </td>
- <td>When not check_mode.</td>
- <td>
- <div>Returns whether the configuration is saved to the startup configuration or not.</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">True</div>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-updates"></div>
- <b>updates</b>
- <a class="ansibleOptionLink" href="#return-updates" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>always</td>
- <td>
- <div>The set of commands that will be pushed to the remote device.</div>
- <br/>
- <div style="font-size: smaller"><b>Sample:</b></div>
- <div style="font-size: smaller; color: blue; word-wrap: break-word; word-break: break-all;">[&#x27;hostname foo&#x27;, &#x27;router bgp 1&#x27;, &#x27;router-id 1.1.1.1&#x27;]</div>
- </td>
- </tr>
- </table>
- <br/><br/>
-
-.. Status (Presently only deprecated)
-
-
-.. Authors
-
-Authors
-~~~~~~~
-
-- Senthil Kumar Ganesan (@skg-net)
-
-
-
-.. Parsing errors
-
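The os10_config document removed above defines the src and update parameters but never uses them in its examples. The sketch below shows one possible combination; the file path is hypothetical, and per the parameter table, update: check only computes the updates without pushing them to the device.

    - name: Report the changes a saved configuration would introduce (illustrative)
      dellemc.os10.os10_config:
        src: configs/leaf1_base.cfg    # hypothetical path relative to the playbook root
        update: check                  # determine updates but do not apply them
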
diff --git a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst b/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst
deleted file mode 100644
index 658a3d9da..000000000
--- a/ansible_collections/dellemc/os10/docs/dellemc.os10.os10_facts_module.rst
+++ /dev/null
@@ -1,511 +0,0 @@
-.. Document meta
-
-
-.. Anchors
-
-.. _ansible_collections.dellemc.os10.os10_facts_module:
-
-.. Anchors: short name for ansible.builtin
-
-.. Anchors: aliases
-
-
-
-.. Title
-
-dellemc.os10.os10_facts -- Collect facts from devices running Dell EMC SmartFabric OS10
-+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
-.. Collection note
-
-.. note::
- This plugin is part of the `dellemc.os10 collection <https://galaxy.ansible.com/dellemc/os10>`_.
-
- To install it use: :code:`ansible-galaxy collection install dellemc.os10`.
-
- To use it in a playbook, specify: :code:`dellemc.os10.os10_facts`.
-
-.. version_added
-
-
-.. contents::
- :local:
- :depth: 1
-
-.. Deprecated
-
-
-Synopsis
---------
-
-.. Description
-
-- Collects a base set of device facts from a remote device that is running OS10. This module prepends all of the base network fact keys with ``ansible_net_<fact>``. The facts module will always collect a base set of facts from the device and can enable or disable collection of additional facts.
-
-
-.. Aliases
-
-
-.. Requirements
-
-
-.. Options
-
-Parameters
-----------
-
-.. raw:: html
-
- <table border=0 cellpadding=0 class="documentation-table">
- <tr>
- <th colspan="2">Parameter</th>
- <th>Choices/<font color="blue">Defaults</font></th>
- <th width="100%">Comments</th>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-gather_subset"></div>
- <b>gather_subset</b>
- <a class="ansibleOptionLink" href="#parameter-gather_subset" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>
- <b>Default:</b><br/><div style="color: blue">["!config"]</div>
- </td>
- <td>
- <div>When supplied, this argument will restrict the facts collected to a given subset. Possible values for this argument include all, hardware, config, and interfaces. Can specify a list of values to include a larger subset. Values can also be used with an initial <code><span class='module'>!</span></code> to specify that a specific subset should not be collected.</div>
- </td>
- </tr>
- <tr>
- <td colspan="2">
- <div class="ansibleOptionAnchor" id="parameter-provider"></div>
- <b>provider</b>
- <a class="ansibleOptionLink" href="#parameter-provider" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">dictionary</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>A dict object containing connection details.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/auth_pass"></div>
- <b>auth_pass</b>
- <a class="ansibleOptionLink" href="#parameter-provider/auth_pass" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the password to use if required to enter privileged mode on the remote device. If <em>authorize</em> is false, then this argument does nothing. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_AUTH_PASS</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/authorize"></div>
- <b>authorize</b>
- <a class="ansibleOptionLink" href="#parameter-provider/authorize" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">boolean</span>
- </div>
- </td>
- <td>
- <ul style="margin: 0; padding: 0"><b>Choices:</b>
- <li><div style="color: blue"><b>no</b>&nbsp;&larr;</div></li>
- <li>yes</li>
- </ul>
- </td>
- <td>
- <div>Instructs the module to enter privileged mode on the remote device before sending any commands. If not specified, the device will attempt to execute all commands in non-privileged mode. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_AUTHORIZE</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/host"></div>
- <b>host</b>
- <a class="ansibleOptionLink" href="#parameter-provider/host" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the DNS host name or address for connecting to the remote device over the specified transport. The value of host is used as the destination address for the transport.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/password"></div>
- <b>password</b>
- <a class="ansibleOptionLink" href="#parameter-provider/password" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Password to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_PASSWORD</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/port"></div>
- <b>port</b>
- <a class="ansibleOptionLink" href="#parameter-provider/port" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies the port to use when building the connection to the remote device.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/ssh_keyfile"></div>
- <b>ssh_keyfile</b>
- <a class="ansibleOptionLink" href="#parameter-provider/ssh_keyfile" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">path</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Path to an ssh key used to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_SSH_KEYFILE</code> will be used instead.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/timeout"></div>
- <b>timeout</b>
- <a class="ansibleOptionLink" href="#parameter-provider/timeout" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>Specifies idle timeout (in seconds) for the connection. Useful if the console freezes before continuing. For example when saving configurations.</div>
- </td>
- </tr>
- <tr>
- <td class="elbow-placeholder"></td>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="parameter-provider/username"></div>
- <b>username</b>
- <a class="ansibleOptionLink" href="#parameter-provider/username" title="Permalink to this option"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>
- </td>
- <td>
- <div>User to authenticate the SSH session to the remote device. If the value is not specified in the task, the value of environment variable <code>ANSIBLE_NET_USERNAME</code> will be used instead.</div>
- </td>
- </tr>
-
- </table>
- <br/>
-
-.. Notes
-
-Notes
------
-
-.. note::
- - For more information on using Ansible to manage Dell EMC Network devices see https://www.ansible.com/ansible-dell-networking.
-
-.. Seealso
-
-
-.. Examples
-
-Examples
---------
-
-.. code-block:: yaml+jinja
-
-
- # Collect all facts from the device
- - os10_facts:
- gather_subset: all
-
- # Collect only the config and default facts
- - os10_facts:
- gather_subset:
- - config
-
- # Do not collect hardware facts
- - os10_facts:
- gather_subset:
- - "!hardware"
-
-
-
-
-.. Facts
-
-
-.. Return values
-
-Return Values
--------------
-Common return values are documented `here <https://docs.ansible.com/ansible/latest/reference_appendices/common_return_values.html#common-return-values>`_, the following are the fields unique to this module:
-
-.. raw:: html
-
- <table border=0 cellpadding=0 class="documentation-table">
- <tr>
- <th colspan="1">Key</th>
- <th>Returned</th>
- <th width="100%">Description</th>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_all_ipv4_addresses"></div>
- <b>ansible_net_all_ipv4_addresses</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_all_ipv4_addresses" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>when interfaces is configured</td>
- <td>
- <div>All IPv4 addresses configured on the device</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_all_ipv6_addresses"></div>
- <b>ansible_net_all_ipv6_addresses</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_all_ipv6_addresses" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>when interfaces is configured</td>
- <td>
- <div>All IPv6 addresses configured on the device</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_config"></div>
- <b>ansible_net_config</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_config" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>when config is configured</td>
- <td>
- <div>The current active config from the device</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_cpu_arch"></div>
- <b>ansible_net_cpu_arch</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_cpu_arch" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>when hardware is configured</td>
- <td>
- <div>CPU Architecture of the remote device.</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_gather_subset"></div>
- <b>ansible_net_gather_subset</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_gather_subset" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">list</span>
- / <span style="color: purple">elements=string</span> </div>
- </td>
- <td>always</td>
- <td>
- <div>The list of fact subsets collected from the device</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_hostname"></div>
- <b>ansible_net_hostname</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_hostname" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>always</td>
- <td>
- <div>The configured hostname of the device</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_interfaces"></div>
- <b>ansible_net_interfaces</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_interfaces" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">dictionary</span>
- </div>
- </td>
- <td>when interfaces is configured</td>
- <td>
- <div>A hash of all interfaces running on the system</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_memfree_mb"></div>
- <b>ansible_net_memfree_mb</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_memfree_mb" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>when hardware is configured</td>
- <td>
-                        <div>The available free memory on the remote device in MB</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_memtotal_mb"></div>
- <b>ansible_net_memtotal_mb</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_memtotal_mb" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">integer</span>
- </div>
- </td>
- <td>when hardware is configured</td>
- <td>
-                        <div>The total memory on the remote device in MB</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_model"></div>
- <b>ansible_net_model</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_model" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>always</td>
- <td>
- <div>The model name returned from the device.</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_name"></div>
- <b>ansible_net_name</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_name" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>Always.</td>
- <td>
- <div>The name of the OS that is running.</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_neighbors"></div>
- <b>ansible_net_neighbors</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_neighbors" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">dictionary</span>
- </div>
- </td>
- <td>when interfaces is configured</td>
- <td>
- <div>The list of LLDP neighbors from the remote device</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_servicetag"></div>
- <b>ansible_net_servicetag</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_servicetag" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>always</td>
- <td>
- <div>The service tag number of the remote device.</div>
- <br/>
- </td>
- </tr>
- <tr>
- <td colspan="1">
- <div class="ansibleOptionAnchor" id="return-ansible_net_version"></div>
- <b>ansible_net_version</b>
- <a class="ansibleOptionLink" href="#return-ansible_net_version" title="Permalink to this return value"></a>
- <div style="font-size: small">
- <span style="color: purple">string</span>
- </div>
- </td>
- <td>always</td>
- <td>
- <div>The operating system version running on the remote device</div>
- <br/>
- </td>
- </tr>
- </table>
- <br/><br/>
-
-.. Status (Presently only deprecated)
-
-
-.. Authors
-
-Authors
-~~~~~~~
-
-- Senthil Kumar Ganesan (@skg-net)
-
-
-
-.. Parsing errors
-
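The os10_facts document removed above lists the returned fact keys but never shows them being consumed. The illustrative tasks below gather the hardware subset and then reference two of the documented keys; the surrounding playbook context is assumed.

    - dellemc.os10.os10_facts:
        gather_subset:
          - hardware

    - name: Report model and free memory (illustrative)
      debug:
        msg: "{{ ansible_net_model }} has {{ ansible_net_memfree_mb }} MB free"

ansible_net_model is returned on every run, while ansible_net_memfree_mb is only populated when the hardware subset is gathered, as noted in the return values table.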
diff --git a/ansible_collections/dellemc/os10/docs/os10_aaa.md b/ansible_collections/dellemc/os10/docs/os10_aaa.md
deleted file mode 100644
index cabee7ea9..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_aaa.md
+++ /dev/null
@@ -1,136 +0,0 @@
-AAA role
-========
-
-This role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of RADIUS and TACACS servers, and AAA. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The AAA role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_aaa keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os10 |
-| ``radius_server.retransmit`` | integer | Configures the number of retransmissions | os10 |
-| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions | os10 |
-| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os10 |
-| ``host.ip`` | string | Configures the RADIUS server host address | os10 |
-| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 |
-| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os10 |
-| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``) | os10 |
-| ``tacacs_server.timeout`` | integer | Configures the timeout for retransmissions | os10 |
-| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os10 |
-| ``host.ip`` | string | Configures the TACACS server host address | os10 |
-| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 |
-| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os10 |
-| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os10 |
-| ``aaa_accounting.accounting.accounting_type`` | dictionary | Configures accounting type | os10 |
-| ``aaa_accounting.accounting.connection_type`` | dictionary | Configures accounting connection type | os10 |
-| ``aaa_accounting.accounting.account_mode`` | dictionary | Configures accounting mode | os10 |
-| ``aaa_accounting.accounting.server_group`` | dictionary | Configures accounting server group | os10 |
-| ``aaa_accounting.accounting.state`` | string: present,absent | Configures/unconfigures accounting parameters | os10 |
-| ``aaa_authentication`` | dictionary | Configures authentication parameters (see ``aaa_authentication.*``) | os10 |
-| ``aaa_authentication.login`` | dictionary | Configures authentication login (see ``aaa_authentication.login.*``)| os10 |
-| ``aaa_authentication.login.console`` | dictionary | Configures authentication method for console login | os10 |
-| ``aaa_authentication.login.state`` | string: present,absent | Unconfigures authentication login if set to absent | os10 |
-| ``aaa_authentication.login.type`` | dictionary | Configures authentication type | os10 |
-| ``aaa_authentication.re_authenticate`` | boolean | Configures re-authenticate by enable if set to true | os10 |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
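-
-Because a key with its ``state`` set to absent negates the corresponding configuration, a previously configured RADIUS host can be removed by re-running the role with only that state changed. The fragment below is a minimal sketch of this pattern; the address, key value, and port are placeholder values, not role defaults:
-
-    os10_aaa:
-      radius_server:
-        host:
-          - ip: 10.0.0.10
-            key: 0
-            value: "radiuskey"
-            auth_port: 1812
-            state: absent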
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_aaa* role to configure AAA for RADIUS and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false. This example writes a simple playbook that only references the *os10_aaa* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_aaa:
- radius_server:
- retransmit: 5
- timeout: 10
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- tacacs_server:
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- timeout: 6
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: present
- re_authenticate: true
- aaa_accounting:
- accounting:
- - accounting_type: commands
- connection_type: console
- account_mode: start-stop
- server_group: group tacacs+
- state: present
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_aaa
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
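-To also generate the configuration commands as a .part file in the *build_dir* path, the `os10_cfg_generate` variable can be supplied at run time as an extra variable instead of being set in *host_vars*; the following invocation is only a sketch of that option:
-
-    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"
-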
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_acl.md b/ansible_collections/dellemc/os10/docs/os10_acl.md
deleted file mode 100644
index 14a1fe2a5..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_acl.md
+++ /dev/null
@@ -1,130 +0,0 @@
-ACL role
-========
-
-This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The ACL role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_acl keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os10 |
-| ``name`` | string (required) | Configures the name of the access-control list | os10 |
-| ``description`` | string | Configures the description about the access-control list | os10 |
-| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os10|
-| ``remark.number`` | integer (required) | Configures the remark sequence number | os10 |
-| ``remark.description`` | string | Configures the remark description | os10 |
-| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os10 |
-| ``entries`` | list | Configures ACL rules (see ``seqlist.*``) | os10 |
-| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os10 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os10 |
-| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os10 |
-| ``entries.source`` | string (required) | Specifies the source address to match in the packets | os10 |
-| ``entries.src_condition`` | string | Specifies the condition to filter packets from the source address; ignored if MAC | os10 |
-| ``entries.destination`` | string (required) | Specifies the destination address to match in the packets | os10 |
-| ``entries.dest_condition`` | string | Specifies the condition to filter packets to the destination address | os10 |
-| ``entries.other_options`` | string | Specifies the other options applied on packets (count, log, order, monitor, and so on) | os10 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os10 |
-| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os10 |
-| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os10 |
-| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os10 |
-| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os10 |
-| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os10 |
-| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os10 |
-| ``lineterminal`` | list | Configures the terminal to apply the ACL (see ``lineterminal.*``) | os10 |
-| ``lineterminal.state`` | string: absent,present\* | Deletes the access-class from line terminal if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
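-
-As a short illustration of the staging keys, the fragment below attaches an already defined ACL to an interface in the ingress direction; the ACL and interface names here are placeholders, not values required by the role:
-
-    os10_acl:
-      - name: ssh-in
-        type: ipv4
-        stage_ingress:
-          - name: ethernet 1/1/1
-            state: present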
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6 and assigns the access-class to the line terminals. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false. This example writes a simple playbook that only references the *os10_acl* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_acl:
- - name: ssh
- type: ipv4
- description: acl
- remark:
- - description: 1
- number: 3
- state: absent
- entries:
- - number: 14
- permit: true
- protocol: tcp
- source: any
- src_condition: neq 6
- destination: any
- dest_condition: eq 4
- other_options: count
- state: present
- stage_ingress:
- - name: ethernet 1/1/1
- state: absent
- - name: ethernet 1/1/2
- state: absent
- stage_egress:
- - name: ethernet 1/1/3
- state: absent
- lineterminal:
- state: absent
- state: present
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_acl
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_bfd.md b/ansible_collections/dellemc/os10/docs/os10_bfd.md
deleted file mode 100644
index c69079924..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_bfd.md
+++ /dev/null
@@ -1,89 +0,0 @@
-BFD role
-===========
-
-This role facilitates the configuration of bidirectional forwarding detection (BFD) global attributes. It specifically enables configuration of BFD interval, min_rx, multiplier, and role. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The BFD role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_bfd keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``bfd`` | dictionary | Configures global BFD parameters (see ``bfd.*``) | os10 |
-| ``bfd.interval`` | integer | Configures the time interval in ms (100 to 1000) | os10 |
-| ``bfd.min_rx`` | integer | Configures maximum waiting time for receiving control packets from BFD peers in ms (100 to 1000)| os10 |
-| ``bfd.multiplier`` | integer | Configures the maximum number of consecutive packets that are not received from BFD peers before session state changes to Down (3 to 50) | os10 |
-| ``bfd.role`` | string: passive,active\* | Configures the BFD role | os10 |
-| ``bfd.state`` | string: absent,present\* | Removes the global BFD if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
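-
-Because ``bfd.state`` defaults to present, the global BFD configuration can be removed by re-running the role with the state flipped; the fragment below is a minimal sketch of that case:
-
-    os10_bfd:
-      bfd:
-        state: "absent"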
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_bfd* role to completely set the global BFD attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_bfd* role; by including the role, you automatically get access to all of the tasks to configure the BFD feature.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_bfd:
- bfd:
- interval: 100
- min_rx: 100
- multiplier: 3
- role: "active"
- state: "present"
-
-**Simple playbook to setup bfd — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_bfd
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_bgp.md b/ansible_collections/dellemc/os10/docs/os10_bgp.md
deleted file mode 100644
index e4e7c94e1..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_bgp.md
+++ /dev/null
@@ -1,729 +0,0 @@
-BGP role
-========
-
-This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum paths. This role is abstracted for Dell EMC PowerSwitch platforms running SmartFabric OS10.
-
-The BGP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_bgp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os10 |
-| ``router_id`` | string | Configures the IP address of the local BGP router instance | os10 |
-| ``graceful_restart`` | boolean | Configures graceful restart capability | os10 |
-| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os10 |
-| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os10 |
-| ``log_neighbor_changes`` | boolean | Configures log neighbors up/down | os10 |
-| ``fast_ext_fallover`` | boolean | Configures a reset session if a link to a directly connected external peer goes down | os10 |
-| ``always_compare_med`` | boolean | Configures comparing MED from different neighbors | os10 |
-| ``default_loc_pref`` | integer | Configures the default local preference value | os10 |
-| ``as_notation`` | string | Configures AS number notation format | os10 |
-| ``enforce_first_as`` | boolean | Configures the first AS for eBGP routes | os10 |
-| ``non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm | os10 |
-| ``outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members | os10 |
-| ``confederation`` | dictionary | Configures AS confederation parameters (see ``confederation.*``) | os10 |
-| ``confederation.identifier`` | integer | Configures the routing domain confederation AS | os10 |
-| ``confederation.peers`` | string | Configures the peer AS in BGP confederation | os10 |
-| ``confederation.peers_state`` | string: absent,present\* | Deletes the peer AS in BGP confederation if set to absent | os10 |
-| ``route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) | os10 |
-| ``route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection | os10 |
-| ``route_reflector.cluster_id`` | string | Configures the route reflector cluster-id | os10 |
-| ``address_family_ipv4`` | dictionary | Configures IPv4 address family parameters (see ``address_family_ipv4.*``) | os10 |
-| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary only if true | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent | os10 |
-| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures internal BGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the internal BGP redistribution for an IPv4 address family | os10 |
-| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 |
-| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``address_family_ipv6`` | dictionary | Configures IPv6 address family parameters (see ``address_family_ipv6.*``) | os10 |
-| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 |
-| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 |
-| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 |
-| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for an IPv6 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``best_path`` | list | Configures the default best-path selection (see ``best_path.*``) | os10 |
-| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 |
-| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 |
-| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 |
-| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 |
-| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os10 |
-| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 |
-| ``ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os10 |
-| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 |
-| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 |
-| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 |
-| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 |
-| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 |
-| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os10 |
-| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 |
-| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 |
-| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 |
-| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 |
-| ``neighbor.auto_peer`` |string: unnumbered-auto | Enables auto discovery of neighbors | os10 |
-| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 |
-| ``neighbor.peergroup_type`` | string (ibgp, ebgp) | Configures the BGP neighbor peer-group type| os10 |
-| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 |
-| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 |
-| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 |
-| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 |
-| ``neighbor.route_reflector_client`` | boolean | Configures router reflector client on the BGP neighbor | os10 |
-| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 |
-| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 |
-| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 |
-| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 |
-| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 |
-| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 |
-| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6/EVPN address family command mode on the BGP neighbor | os10 |
-| ``address_family.activate`` | boolean | Configures activation/deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 |
-| ``address_family.sender_loop_detect`` | boolean | Enables/disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6/l2vpn address family | os10 |
-| ``address_family.allow_as_in`` | integer | Configures the local AS number in the as-path | os10 |
-| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 |
-| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 |
-| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both <no of paths>', 'send <no of paths>', 'receive')| os10 |
-| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 |
-| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 |
-| ``route_map.filter`` | string | Configures the filter for routing updates | os10 |
-| ``route_map.state`` | string, choices: absent,present\* | Deletes the route-map of the BGP neighbor if set to absent | os10 |
-| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.\*``) | os10 |
-| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 |
-| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 |
-| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 |
-| ``max_prefix.warning``| boolean | Configures a warning without dropping the session when maximum limit exceeds if set to true | os10|
-| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.\*``) | os10 |
-| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 |
-| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 |
-| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 |
-| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 |
-| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 |
-| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 |
-| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 |
-| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 |
-| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 |
-| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 |
-| ``neighbor.adv_start`` | integer | Sets the advertisement start of the neighbor | os10 |
-| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of a neighbor | os10 |
-| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 |
-| ``neighbor.remove_pri_as`` | string: absent,present | Removes the private AS number from outbound updates | os10 |
-| ``neighbor.fall_over`` | string: absent,present | Configures the session fall on peer-route loss | os10 |
-| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 |
-| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 |
-| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 |
-| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 |
-| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 |
-| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 |
-| ``listen.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | os10 |
-| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 |
-| ``neighbor.description`` | string | Configures neighbor description | os10 |
-| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 |
-| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os10 |
-| ``redistribute.route_type`` | string (required): static,connected,imported_bgp,l2vpn,ospf | Configures the name of the routing protocol to redistribute | os10 |
-| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 |
-| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 |
-| ``redistribute.ospf_id`` | string | Configures the redistribute OSPF | os10 |
-| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 |
-| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 |
-| ``bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors | os10 |
-| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 |
-| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 |
-| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 |
-| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 |
-| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 |
-| ``vrfs`` | list | Enables VRF under BGP | os10 |
-| ``vrf.name`` | string (Required)| Configures VRF name | os10 |
-| ``vrf.router_id`` | string | Configures the IP address of the local BGP router instance in VRF | os10 |
-| ``vrf.graceful_restart`` | boolean | Configures graceful restart capability in VRF | os10 |
-| ``vrf.maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) in VRF | os10 |
-| ``vrf.maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) in VRF | os10 |
-| ``vrf.log_neighbor_changes`` | boolean | Configures log neighbors up/down in VRF | os10 |
-| ``vrf.fast_ext_fallover`` | boolean | Configures a reset session if a link to a directly connected external peer goes down in VRF | os10 |
-| ``vrf.always_compare_med`` | boolean | Configures comparing MED from different neighbors in VRF | os10 |
-| ``vrf.default_loc_pref`` | integer | Configures the default local preference value in VRF | os10 |
-| ``vrf.as_notation`` | string | Changes the AS number notation format in VRF | os10 |
-| ``vrf.enforce_first_as`` | boolean | Configures the first AS for eBGP routes in VRF | os10 |
-| ``vrf.non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm in VRF | os10 |
-| ``vrf.outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members in VRF | os10 |
-| ``vrf.route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) in VRF | os10 |
-| ``vrf.route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection in VRF | os10 |
-| ``vrf.route_reflector.cluster_id`` | string | Configures the route-reflector cluster-id in VRF | os10 |
-| ``vrf.address_family_ipv4`` | dictionary | Configures IPv4 address family parameters in VRF (see ``address_family_ipv4.*``) in VRF | os10 |
-| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) in VRF | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address in VRF | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true in VRF | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent in VRF | os10 |
-| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv4 address family | os10 |
-| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 |
-| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``vrf.address_family_ipv6`` | dictionary | Configures IPv6 address family parameters in VRF (see ``address_family_ipv6.*``) | os10 |
-| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 |
-| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 |
-| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 |
-| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv6 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``vrf.best_path`` | list | Configures the default best-path selection in VRF (see ``best_path.*``) | os10 |
-| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 |
-| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 |
-| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 |
-| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 |
-| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os10 |
-| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 |
-| ``vrf.ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os10 |
-| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 |
-| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 |
-| ``vrf.ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 |
-| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 |
-| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 |
-| ``vrf.neighbor`` | list | Configures IPv4 BGP neighbors in VRF (see ``neighbor.*``) | os10 |
-| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 |
-| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 |
-| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 |
-| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 |
-| ``neighbor.auto_peer`` |string: unnumbered-auto | Enables auto-discovery of neighbors | os10 |
-| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 |
-| ``neighbor.peergroup_type`` | string (ibgp, ebgp) | Configures the BGP neighbor peer-group type| os10 |
-| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 |
-| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 |
-| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 |
-| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 |
-| ``neighbor.route_reflector_client`` | boolean | Configures router reflector client on the BGP neighbor | os10 |
-| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 |
-| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 |
-| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 |
-| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 |
-| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 |
-| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 |
-| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6/EVPN address family command mode on the BGP neighbor | os10 |
-| ``address_family.activate`` | boolean | Configures activation or deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 |
-| ``address_family.sender_loop_detect`` | boolean | Enables or disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6/l2vpn address family | os10 |
-| ``address_family.allow_as_in`` | integer | Configures the local AS number in the as-path | os10 |
-| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 |
-| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 |
-| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both <no of paths>', 'send <no of paths>', 'receive')| os10 |
-| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 |
-| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 |
-| ``route_map.filter`` | string | Configures the filter for routing updates | os10 |
-| ``route_map.state`` | string, choices: absent,present* | Deletes the route-map of the BGP neighbor if set to absent | os10 |
-| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.*``) | os10 |
-| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 |
-| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 |
-| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 |
-| ``max_prefix.warning``| boolean | Configures a warning without dropping session when maximum limit exceeds if set to true | os10|
-| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.\*``) | os10 |
-| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 |
-| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 |
-| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 |
-| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 |
-| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer-group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 |
-| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 |
-| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 |
-| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 |
-| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 |
-| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 |
-| ``neighbor.adv_start`` | integer | Sets the advertisement start of the neighbor | os10 |
-| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of the neighbor | os10 |
-| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 |
-| ``neighbor.remove_pri_as`` | string: absent,present | Removes private AS number from outbound updates | os10 |
-| ``neighbor.fall_over`` | string: absent,present | Configures the session fall on peer-route loss | os10 |
-| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 |
-| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 |
-| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 |
-| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 |
-| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 |
-| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 |
-| ``listen.limit`` | integer | Configures maximum dynamic peers count (key is required together with ``neighbor.subnet``) | os10 |
-| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 |
-| ``neighbor.description`` | string | Configures neighbor description | os10 |
-| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 |
-| ``vrf.redistribute`` | list | Configures the redistribute list to get information from other routing protocols in VRF (see ``redistribute.*``) | os10 |
-| ``redistribute.route_type`` | string (required): static,connected,imported_bgp | Configures the name of the routing protocol to redistribute | os10 |
-| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 |
-| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 |
-| ``redistribute.ospf_id`` | string | Configures the redistribute OSPF | os10 |
-| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 |
-| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 |
-| ``vrf.bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors in VRF (see ``bfd_all_neighbors.*``) | os10 |
-| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 |
-| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 |
-| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 |
-| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 |
-| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 |
-| ``vrf.state`` | string: absent,present\* | Deletes the VRF instance under router BGP if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
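-
-As with the other keys, a configured object is removed by setting its ``state`` to absent; the fragment below is a minimal sketch that deletes a single IPv4 neighbor, reusing the AS number and neighbor address from the example further down:
-
-    os10_bgp:
-      asn: 12
-      neighbor:
-        - type: ipv4
-          ip: 192.168.10.1
-          remote_asn: 11
-          state: absent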
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
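-
-These connection variables can also be placed directly on an inventory line rather than in *host_vars* or *group_vars*. As an illustration only (all values below are placeholders), a single inventory entry could look like:
-
-    leaf1 ansible_host=<ip_address> ansible_ssh_user=xxxxx ansible_ssh_pass=xxxxx ansible_network_os=dellemc.os10.os10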
-
-
-Example playbook
-----------------
-
-This example uses the *os10_bgp* role to configure the BGP network and neighbors. It creates a *hosts* file with the switch details, a *host_vars* file with connection variables and the corresponding role variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_bgp* role.
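-
-As a minimal sketch, offline configuration generation is enabled from *host_vars* with the two variables mentioned above; the *build_dir* value shown here is only an example path:
-
-    os10_cfg_generate: true
-    build_dir: ../temp/temp_os10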
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- as_notation: asdot
- enforce_first_as: false
- non_deterministic_med: true
- outbound_optimization: true
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- client_to_client: false
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- distance_bgp:
- value: 3 4 6
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- address_family:
- - type: ipv4
- activate: false
- state: present
- max_prefix:
- count: 20
- threshold: 90
- warning: true
- state: present
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: present
- - subnet: 20::/64
- limit: 4
- subnet_state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2-spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- adv_start: 100
- adv_start_state: present
- conn_retry_timer: 20
- remove_pri_as: present
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: true
- state: present
- max_prefix:
- count: 10
- threshold: 40
- warning: true
- state: present
- default_originate:
- route_map: aa
- state: present
- distribute_list:
- in: XX
- in_state: present
- out: YY
- out_state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: absent
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- sender_loop_detect: false
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- vrfs:
- - name: "GREEN"
- router_id: 50.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: false
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- route_reflector:
- client_to_client: false
- cluster_id: 1
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-worst
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan10
- description: U_site2 vlan
- send_community:
- - type: extended
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.20.1
- name: peer1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: false
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.15.3
- address_family:
- - type: ipv4
- activate: false
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: present
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- activate: false
- sender_loop_detect: false
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: present
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: present
- - route_type: imported_bgp
- imported_bgp_vrf_name: test6
- route_map_name: dd
- address_type: ipv4
- state: present
- - route_type: ospf
- ospf_id: 12
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- state: present
- state: present
-
-
-**Simple playbook to configure BGP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_bgp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_copy_config.md b/ansible_collections/dellemc/os10/docs/os10_copy_config.md
deleted file mode 100644
index eadefecb0..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_copy_config.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Copy-config role
-================
-
-This role is used to push the backup running configuration into a Dell EMC PowerSwitch platform running Dell EMC SmartFabric OS10, and merges the configuration in the template file with the running configuration of the device.
-
-The copy-config role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- No predefined variables are part of this role
-- Use *host_vars* or *group_vars* as part of the template file
-- Configuration file is host-specific
-- Copy the host-specific configuration to the respective file under the template directory in *<host_name>.j2* format
-- Variables and values are case-sensitive
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_copy_config* role to push the configuration file to the device. It creates a *hosts* file with the switch details and corresponding variables, and a simple playbook that only references the *os10_copy_config* role. By including the role, you automatically get access to all of the tasks that push the configuration file.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
-
- # Variables defined here are applied to the Jinja2 template below for each host
- os10_bgp:
- asn: 64801
-
-**Sample roles/os10_copy_config/templates/leaf1.j2**
-
- ! Leaf1 BGP profile on Dell OS10 switch
- snmp-server community public ro
- hash-algorithm ecmp crc
- !
- interface ethernet1/1/1:1
- no switchport
- ip address 100.1.1.2/24
- ipv6 address 2001:100:1:1::2/64
- mtu 9216
- no shutdown
- !
- interface ethernet1/1/9:1
- no switchport
- ip address 100.2.1.2/24
- ipv6 address 2001:100:2:1::2/64
- mtu 9216
- no shutdown
- !
- router bgp {{ os10_bgp.asn }}
- bestpath as-path multipath-relax
- bestpath med missing-as-worst
- router-id 100.0.2.1
- !
- address-family ipv4 unicast
- !
- address-family ipv6 unicast
- !
- neighbor 100.1.1.1
- remote-as 64901
- no shutdown
- !
- neighbor 100.2.1.1
- remote-as 64901
- no shutdown
- !
- neighbor 2001:100:1:1::1
- remote-as 64901
- no shutdown
- !
- address-family ipv4 unicast
- no activate
- exit
- !
- address-family ipv6 unicast
- activate
- exit
- !
- neighbor 2001:100:2:1::1
- remote-as 64901
- no shutdown
- !
- address-family ipv4 unicast
- no activate
- exit
- !
- address-family ipv6 unicast
- activate
- exit
- !
-
-**Simple playbook to push the configuration file to the device — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_copy_config
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_dns.md b/ansible_collections/dellemc/os10/docs/os10_dns.md
deleted file mode 100644
index b65d7622a..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_dns.md
+++ /dev/null
@@ -1,125 +0,0 @@
-DNS role
-========
-
-This role facilitates the configuration of the domain name service (DNS). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The DNS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_dns keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``name_server`` | list | Configures DNS (see ``name_server.*``) | os10 |
-| ``name_server.ip`` | list | Configures the name server IP | os10 |
-| ``name_server.vrf`` | list | Configures VRF for each IP | os10 |
-| ``name_server.state`` | string: absent,present\* | Deletes the name server IP if set to absent | os10 |
-| ``domain_list`` | list | Configures domain-list (see ``domain_list.*``) | os10 |
-| ``domain_list.name`` | list | Configures the domain-list name | os10 |
-| ``domain_list.vrf`` | list | Configures VRF for each domain-list name | os10 |
-| ``domain_list.state`` | string: absent,present\* | Deletes the domain-list if set to absent | os10 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_dns* role to completely set up the DNS server configuration. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_dns* role. By including the role, you automatically get access to all of the tasks to configure DNS.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_dns:
- domain_lookup: true
- name_server:
- - ip:
- - 3.1.1.1
- - 3.1.1.2
- vrf:
- - test
- - test1
- - ip:
- - 3.1.1.2
- vrf:
- - test1
- state: absent
- - ip:
- - 2.2.2.2
- - ip:
- - 3.3.2.2
- state: absent
- domain_list:
- - name:
- - dname7
- - dname8
- vrf:
- - test
- - test1
- - name:
- - dname7
- vrf:
- - test
- - test1
- state: absent
- - name:
- - dname3
- - dname4
- - name:
- - dname5
- - dname6
- state: absent
-
-> **NOTE**: The VRF should already be present; it can be configured using the *os10_vrf* role (see the sketch below).
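-
-A minimal sketch of that prerequisite, assuming the *os10_vrf* role accepts ``vrfdetails`` entries keyed by ``vrf_name`` (these key names are an assumption and are not defined in this document), could look like:
-
-    os10_vrf:
-      vrfdetails:
-        - vrf_name: "test"
-          state: present
-        - vrf_name: "test1"
-          state: present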
-
-**Simple playbook to setup DNS — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_dns
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_ecmp.md b/ansible_collections/dellemc/os10/docs/os10_ecmp.md
deleted file mode 100644
index 6932fdf6f..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_ecmp.md
+++ /dev/null
@@ -1,78 +0,0 @@
-ECMP role
-=========
-
-This role facilitates the configuration of equal cost multi-path (ECMP), and it supports the configuration of ECMP for IPv4. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The ECMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_ecmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``ecmp_group_max_paths`` | integer | Configures the number of maximum paths per ECMP group | os10 |
-| ``trigger_threshold`` | integer | Configures the link bundle utilization trigger threshold | os10 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_ecmp* role to configure ECMP for IPv4. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_ecmp* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_ecmp:
- ecmp_group_max_paths: 3
- trigger_threshold: 50
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_ecmp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_fabric_summary.md b/ansible_collections/dellemc/os10/docs/os10_fabric_summary.md
deleted file mode 100644
index 0ff99bf2f..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_fabric_summary.md
+++ /dev/null
@@ -1,119 +0,0 @@
-os10_fabric_summary
-=====================================
-This role is used to get show system information of all devices in the fabric. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Fabric summary role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used |
-| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-- The *xmltodict* library must be installed to convert the show command XML output into dictionary format
-- To install the package, use the *pip install xmltodict* command (an Ansible-based alternative is sketched below)
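-
-If you prefer to manage the dependency with Ansible itself, a small sketch (installing on the control node, where this role parses the XML output) could be:
-
-    - hosts: localhost
-      gather_facts: false
-      tasks:
-        - name: Install xmltodict used to parse show command XML output
-          ansible.builtin.pip:
-            name: xmltodict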
-
-Example playbook
-----------------
-
-This example uses the *os10_fabric_summary* role to retrieve the system summary information from all devices in the fabric. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-The *os10_fabric_summary* role has a simple playbook that only references the *os10_fabric_summary* role.
-
-**Sample hosts file**
-
- site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- [spine]
- site1-spine1
- site1-spine2
- site2-spine1
- site2-spine2
- [LeafAndSpineSwitch:children]
- spine
-
-**Sample host_vars/site1-spine1**
-
-
- cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
- os10_cli_user: xxxx
- os10_cli_pass: xxxx
- ansible_network_os: dellemc.os10.os10
-
-**Simple playbook to setup fabric summary — provision.yaml**
-
- ---
- - name: show system summary command
- hosts: localhost
- gather_facts: False
- connection: local
- roles:
- - os10_fabric_summary
-
-**Run**
-
- ansible-playbook -i hosts provision.yaml
-
-**Sample output**
-
- "results": [
- {
- "device type": "S6010-ON",
- "host": "10.11.180.21",
- "hostname": "host3",
- "inv_name": "site1-spine1",
- "node-mac": "e4:f0:04:9b:e5:dc",
- "service-tag": "D33FXC2",
- "software-version": "10.4.9999EX"
- },
- {
- "device type": "S6010-ON",
- "host": "10.11.180.22",
- "hostname": "host22",
- "inv_name": "site1-spine2",
- "node-mac": "e4:f0:04:9b:eb:dc",
- "service-tag": "J33FXC2",
- "software-version": "10.4.9999EX"
- },
- {
- "device type": "S6010-ON",
- "host": "10.11.180.24",
- "hostname": "site2-spine1",
- "inv_name": "site2-spine1",
- "node-mac": "e4:f0:04:9b:ee:dc",
- "service-tag": "343FXC2",
- "software-version": "10.4.9999EX"
- },
- {
- "device type": "S6010-ON",
- "host": "10.11.180.23",
- "hostname": "site2-spine2",
- "inv_name": "site2-spine2",
- "node-mac": "e4:f0:04:9b:f1:dc",
- "service-tag": "543FXC2",
- "software-version": "10.4.9999EX"
- }
- ]
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_flow_monitor.md b/ansible_collections/dellemc/os10/docs/os10_flow_monitor.md
deleted file mode 100644
index dd98aa956..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_flow_monitor.md
+++ /dev/null
@@ -1,152 +0,0 @@
-ACL flow-based monitor role
-===========================
-
-This role facilitates the configuration of ACL flow-based monitoring attributes. Flow-based mirroring is a mirroring session in which traffic that matches specified policies is mirrored to a destination port. Port-based mirroring maintains a database that contains all monitoring sessions (including port monitor sessions). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The ACL flow-based role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_flow_monitor` (dictionary) with session ID key (in *session <ID>* format; 1 to 18)
-- Variables and values are case-sensitive
-
-**session ID keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``session_type`` | string: local\*,rspan-source,erspan-source | Configures the monitoring session type | os10 |
-| ``description`` | string | Configures the monitor session description | os10 |
-| ``port_match`` | list | Displays a list of interfaces with location source and destination | os10 |
-| ``port_match.interface_name`` | string | Configures the interface | os10 |
-| ``port_match.location`` | string: source,destination | Configures the source/destination of an interface | os10 |
-| ``port_match.state`` | string: absent,present\* | Deletes the interface if set to absent | os10 |
-| ``flow_based`` | boolean | Enables flow-based monitoring | os10 |
-| ``shutdown`` | string: up,down\* | Enable/disables the monitoring session | os10 |
-| ``state`` | string: absent,present\* | Deletes the monitoring session corresponding to the session ID if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_flow_monitor* role to configure a session monitor. It creates a *hosts* file with the switch details and corresponding variables. The hosts file defines the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false.
-It writes a simple playbook that only references the *os10_flow_monitor* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
- os10_flow_monitor:
- session 1:
- session_type: local
- description: "Discription goes here"
- port_match:
- - interface_name: ethernet 1/1/4
- location: source
- state: present
- - interface_name: ethernet 1/1/3
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
- session 2:
- session_type: local
- description: "Discription of session goes here"
- port_match:
- - interface_name: ethernet 1/1/6
- location: source
- state: present
- - interface_name: ethernet 1/1/7
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
- session 3:
- state: absent
- os10_acl:
- - name: testflow
- type: ipv4
- description: testflow description
- extended: true
- entries:
- - number: 5
- permit: true
- protocol: icmp
- source: any
- destination: any
- other_options: capture session 1 count
- state: present
- - number: 10
- permit: true
- protocol: ip
- source: 102.1.1.0/24
- destination: any
- other_options: capture session 1 count byte
- state: present
- - number: 15
- permit: false
- protocol: udp
- source: any
- destination: any
- other_options: capture session 2 count byte
- state: present
- - number: 20
- permit: false
- protocol: tcp
- source: any
- destination: any
- other_options: capture session 2 count byte
- state: present
- stage_ingress:
- - name: ethernet 1/1/1
- state: present
-
-> **NOTE**: The destination port should not be an L2/L3 port; it can be configured accordingly using the *os10_interface* role (see the sketch below).
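-
-As a hedged illustration of that prerequisite, the destination interface could be taken out of L2/L3 mode with an *os10_interface* fragment similar to the following (the interface name matches the session 1 destination above; the exact required state depends on your setup):
-
-    os10_interface:
-      ethernet 1/1/3:
-        desc: "monitor session 1 destination"
-        switchport: False
-        admin: up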
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_flow_monitor
- - dellemc.os10.os10_acl
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_image_upgrade.md b/ansible_collections/dellemc/os10/docs/os10_image_upgrade.md
deleted file mode 100644
index 9ae8f731c..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_image_upgrade.md
+++ /dev/null
@@ -1,73 +0,0 @@
-Image upgrade role
-===================================
-
-This role facilitates upgrades or installation of a software image. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Image upgrade role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_image_upgrade keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``operation_type`` | string: cancel,install | Displays the type of image operation | os10 |
-| ``software_image_url`` | string | Configures the URL path to the image file | os10 |
-| ``software_version`` | string | Displays the software version of the image file | os10 |
-| ``number_of_retries`` | int | Configures the number of retries to check the status of the image install process | os10 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_image_upgrade* role to upgrade or install a software image. It creates a *hosts* file with the switch details, a corresponding *host_vars* file, and a simple playbook that references the *os10_image_upgrade* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- os10_image_upgrade:
- operation_type: install
- software_image_url: tftp://10.16.148.8/PKGS_OS10-Enterprise-10.2.9999E.5790-installer-x86_64.bin
- software_version: 10.2.9999E
- number_of_retries: 50
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_image_upgrade
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_interface.md b/ansible_collections/dellemc/os10/docs/os10_interface.md
deleted file mode 100644
index bbb4f8ee6..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_interface.md
+++ /dev/null
@@ -1,178 +0,0 @@
-Interface role
-==============
-
-This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra, and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Interface role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_interface` (dictionary) holds a dictionary with the interface name; interface name can correspond to any of the valid OS interfaces with the unique interface identifier name
-- For physical interfaces, the interface name must be in *<interfacename> <tuple>* format; for logical interfaces, the interface must be in *<logical_interfacename> <id>* format; a physical interface name can be *ethernet 1/1/32*
-- For interface ranges, the interface name must be in *range <interface_type> <node/slot/port[:subport]-node/slot/port[:subport]>* format; for example, *range ethernet 1/1/1-1/1/4*
-- Logical interface names can be *vlan 1* or *port-channel 1*
-- Variables and values are case-sensitive
-
-> **NOTE**: Only define supported variables for the interface type, and do not define the *switchport* variable for a logical interface.
-
-**interface name keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``desc`` | string | Configures a single line interface description | os10 |
-| ``portmode`` | string | Configures port-mode according to the device type | os10 |
-| ``switchport`` | boolean: true,false\* | Configures an interface in L2 mode | os10 |
-| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os10 |
-| ``mtu`` | integer | Configures the MTU size for L2 and L3 interfaces (1280 to 65535) | os10 |
-| ``fanout`` | string:dual, single; string:10g-4x, 40g-1x, 25g-4x, 100g-1x, 50g-2x (os10) | Configures fanout to the appropriate value | os10 |
-| ``suppress_ra`` | string; present,absent | Configures IPv6 router advertisements if set to present | os10 |
-| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os10 |
-| ``ipv6_type_dynamic`` | boolean: true,false | Configures an IPv6 address for DHCP if set to true (*ipv6_and_mask* is ignored if set to true) | os10 |
-| ``ipv6_autoconfig`` | boolean: true,false | Configures stateless configuration of IPv6 addresses if set to true (*ipv6_and_mask* is ignored if set to true) | os10 |
-| ``vrf`` | string | Configures the specified VRF to be associated to the interface | os10 |
-| ``min_ra`` | string | Configures RA minimum interval time period | os10 |
-| ``max_ra`` | string | Configures RA maximum interval time period | os10 |
-| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 |
-| ``ipv6_and_mask`` | string | Configures a specified IPv6 address to the interface | os10 |
-| ``virtual_gateway_ip`` | string | Configures an anycast gateway IP address for a VxLAN virtual network as well as VLAN interfaces| os10 |
-| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 |
-| ``state_ipv6`` | string: absent,present\* | Deletes the IPV6 address if set to absent | os10 |
-| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os10 |
-| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os10 |
-| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os10 |
-| ``flowcontrol`` | dictionary | Configures the flowcontrol attribute (see ``flowcontrol.*``) | os10 |
-| ``flowcontrol.mode`` | string: receive,transmit | Configures the flowcontrol mode | os10 |
-| ``flowcontrol.enable`` | string: on,off | Configures the flowcontrol mode on | os10 |
-| ``flowcontrol.state`` | string: absent,present\* | Deletes the flowcontrol if set to absent | os10 |
-| ``ipv6_bgp_unnum`` | dictionary | Configures the IPv6 BGP unnum attributes (see ``ipv6_bgp_unnum.*``) below | os10 |
-| ``ipv6_bgp_unnum.state`` | string: absent,present\* | Disables auto discovery of BGP unnumbered peer if set to absent | os10 |
-| ``ipv6_bgp_unnum.peergroup_type`` | string: ebgp,ibgp | Specifies the type of template to inherit from | os10 |
-
-| ``stp_rpvst_default_behaviour`` | boolean: false,true | Configures the default RPVST behavior for BPDUs when set to true; true is the default | os10 |
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_interface* role to set up description, MTU, admin status, port mode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os10_interface* role.
-
-**Sample hosts file**
-
- leaf3 ansible_host=<ip_address>
-
-**Sample host_vars/leaf3**
-
- hostname: "leaf3"
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_interface:
- ethernet 1/1/32:
- desc: "Connected to Core 2"
- mtu: 2500
- stp_rpvst_default_behaviour: False
- portmode:
- admin: up
- switchport: False
- ip_and_mask:
- ip_type_dynamic: True
- ipv6_type_dynamic: True
- ethernet 1/1/12:
- desc: "ipv6 auto config"
- switchport: False
- mtu: 2500
- admin: up
- ipv6_autoconfig: True
- ethernet 1/1/14:
- fanout: 10g-4x
- ethernet 1/1/13:
- desc: "set ipv6 address"
- switchport: False
- admin: up
- ipv6_and_mask: 2001:4898:5809:faa2::10/126
- state_ipv6: present
- ethernet 1/1/1:
- desc: "Connected to Leaf1"
- portmode: "trunk"
- switchport: True
- suppress_ra: present
- admin: up
- stp_rpvst_default_behaviour: False
- ethernet 1/1/3:
- desc: site2-spine2
- ip_and_mask: 10.9.0.4/31
- mtu: 9216
- switchport: False
- admin: up
- flowcontrol:
- mode: "receive"
- enable: "on"
- state: "present"
-
- vlan 100:
- ip_and_mask:
- ipv6_and_mask: 2001:4898:5808:ffaf::1/64
- state_ipv6: present
- ip_helper:
- - ip: 10.0.0.33
- state: present
- admin: up
- range ethernet 1/1/1-1/1/32:
- mtu: 2500
- port-channel 10:
- admin: up
- switchport: False
- suppress_ra:
- stp_rpvst_default_behaviour: True
- ipv6_bgp_unnum:
- state: present
- peergroup_type: ebgp
- vlan 10:
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- admin: up
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf3
- roles:
- - dellemc.os10.os10_interface
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_lag.md b/ansible_collections/dellemc/os10/docs/os10_lag.md
deleted file mode 100644
index eb679dcff..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_lag.md
+++ /dev/null
@@ -1,103 +0,0 @@
-LAG role
-========
-
-This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type as a static or dynamic LAG and minimum required link. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The LAG role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Object drives the tasks in this role
-- `os10_lag` (dictionary) contains the hostname (dictionary)
-- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device
-- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable
-- Setting an empty value to any variable negates the corresponding configuration
-- `os10_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po <ID>` format (1 to 128)
-- Variables and values are case-sensitive
-
-**port-channel ID keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os10 |
-| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 32) | os10 |
-| ``max_bundle_size`` | integer | Configures the maximum bundle size for the port channel | os10 |
-| ``lacp_system_priority`` | integer | Configures the LACP system-priority value | os10 |
-| ``lacp_fallback_enable`` | boolean | Configures LACP fallback | os10 |
-| ``channel_members`` | list | Specifies the list of port members to be associated to the port-channel (see ``channel_members.*``) | os10 |
-| ``channel_members.port`` | string | Specifies valid interface names to be configured as port-channel members | os10 |
-| ``channel_members.mode`` | string: active,passive,on | Configures mode of channel members | os10 |
-| ``channel_members.port_priority`` | integer | Configures port priority on devices for channel members | os10 |
-| ``channel_members.lacp_rate_fast`` | boolean | Configures the LACP rate as fast if set to true | os10 |
-| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port-channel ID if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_lag* role to set up the port-channel ID and description, and to configure the hash algorithm and minimum links for the LAG. Channel members can be configured for the port-channel in either static or dynamic mode. You can also delete the LAG by its port-channel ID, or delete the members associated with it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_lag* role.
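-
-For example, configuration-command generation can be enabled by adding the variable to the *host_vars* file shown below (a minimal sketch; the `build_dir` path is only illustrative):
-
-    os10_cfg_generate: true
-    build_dir: ../temp/temp_os10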
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority: 2
- channel_members:
- - port: ethernet 1/1/31
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_lag
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_lldp.md b/ansible_collections/dellemc/os10/docs/os10_lldp.md
deleted file mode 100644
index 0c08af4dc..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_lldp.md
+++ /dev/null
@@ -1,149 +0,0 @@
-LLDP role
-=========
-
-This role facilitates the configuration of link layer discovery protocol (LLDP) attributes at the global and interface levels. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at the global and interface levels. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The LLDP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_lldp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``enable`` | boolean | Enables or disables LLDP at a global level | os10 |
-| ``multiplier`` | integer | Configures the global LLDP multiplier (2 to 10) | os10 |
-| ``reinit`` | integer | Configures the reinit value (1 to 10) | os10 |
-| ``timer`` | integer | Configures the timer value (5 to 254) | os10 |
-| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os10 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement (see ``med_tlv.*``) | os10 |
-| ``med.fast_start_repeat_count`` | integer | Configures the MED fast-start repeat count value (1 to 10) | os10 |
-| ``med.application`` | list | Configures global MED TLVs advertisement for an application (see ``application.*``) | os10 |
-| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os10 |
-| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement (1 to 4094) | os10 |
-| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement (0 to 7) | os10 |
-| ``application.code_point_value`` | integer | Configures differentiated services code point values for MED TLVs advertisement (0 to 63) | os10 |
-| ``application.vlan_type`` | string: tag, untag | Configures the VLAN type for the application MED TLVs advertisement | os10 |
-| ``application.network_policy_id`` | integer | Configures network policy ID for the application MED TLVs advertisement | os10 |
-| ``application.state`` | string: present\*,absent | Deletes the application if set to absent | os10 |
-| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os10 |
-| ``local_interface.<interface name>`` | dictionary | Configures LLDP at the interface level (see ``<interface name>.*``) | os10 |
-| ``<interface name>.mode`` | string: rx,tx | Configures LLDP mode configuration at the interface level | os10 |
-| ``<interface name>.mode_state`` | string: absent,present | Configures transmit/receive at the interface level| os10 |
-| ``<interface name>.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os10 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med_tlv.*``) | os10 |
-| ``med.enable`` | boolean | Enables interface level MED capabilities | os10 |
-| ``med.tlv`` | string | Configures MED TLV advertisement at interface level | os10 |
-| ``med.tlv_state`` | string: present\*,absent | Deletes the interface level MED configuration if set to absent | os10 |
-| ``med.application`` | list | Configures MED TLVs advertisement for the application at the interface level (see ``application.*``) | os10 |
-| ``application.network_policy_id`` | integer | Configures the *network_policy_id* for the application of MED | os10 |
-| ``application.state`` | string: present\*,absent | Deletes the associated network policy ID for the application if set to absent.| os10 |
-| ``advertise.tlv`` | list | Configures TLVs advertisement at interface level (see `<interface_name>.tlv.*`) | os10 |
-| ``tlv.name`` | string: basic-tlv,dcbxp,dcbxp-appln,dot1-tlv,dot3-tlv | Configures the TLV advertisement corresponding to the TLV name specified at the interface | os10 |
-| ``tlv.value`` | string | Specifies corresponding TLV value according to the name as a string | os10 |
-| ``tlv.state`` | string: present\*,absent | Deletes the interface level TLVs advertisement if set to absent | os10 |
-
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_lldp* role to configure the LLDP protocol. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_lldp* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_lldp:
- enable: false
- multiplier: 3
- reinit: 2
- timer: 5
- advertise:
- med:
- fast_start_repeat_count: 4
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: absent
- local_interface:
- ethernet 1/1/1:
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: inventory
- tlv_state: present
- application:
- - network_policy_id: 4
- state: present
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_lldp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_logging.md b/ansible_collections/dellemc/os10/docs/os10_logging.md
deleted file mode 100644
index c8a2dbf23..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_logging.md
+++ /dev/null
@@ -1,97 +0,0 @@
-Logging role
-============
-
-This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Logging role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_logging keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``logging`` | list | Configures the logging server (see ``logging.*``) | os10 |
-| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os10 |
-| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os10 |
-| ``console`` | dictionary | Configures logging to the console (see ``console.*``) | os10 |
-| ``console.enable`` | boolean | Enables/disables logging to the console | os10 |
-| ``console.severity`` | string | Configures the minimum severity level for logging to the console | os10 |
-| ``log_file`` | dictionary | Configures logging to a log file (see ``log_file.*``) | os10 |
-| ``log_file.enable`` | boolean | Enables/disables logging to a log file | os10 |
-| ``log_file.severity`` | string | Configures the minimum severity level for logging to a log file | os10 |
-| ``source_interface`` | string | Configures the source interface for logging | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_logging:
- logging:
- - ip: 1.1.1.1
- state: absent
- console:
- enable: True
- severity: log-err
- log_file:
- enable: True
- severity: log-err
- source_interface: "ethernet1/1/30"
-
-**Simple playbook to set up logging — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_logging
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_network_validation.md b/ansible_collections/dellemc/os10/docs/os10_network_validation.md
deleted file mode 100644
index e9014c42b..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_network_validation.md
+++ /dev/null
@@ -1,304 +0,0 @@
-Network validation role
-=========================
-
-This role is used to perform network validation. It validates network features such as wiring connections, BGP neighbors, MTU between neighbors, and VLT pairing. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. The Network validation role requires an SSH connection for connectivity to a Dell EMC OS10 device. You can use any of the built-in OS connection variables.
-
-- **Wiring validation** — Based on LLDP neighbor establishment, the intended neighbor input model defined by the user in _group_vars/all_ is compared with the actual LLDP neighbors; a report is generated if there is any mismatch with the intended neighbors
-
-- **BGP validation** — Based on the BGP neighbor state, a report is generated if a BGP neighbor is not in the established state
-
-- **MTU validation** — Based on the interface MTU, a report is generated if there is an MTU mismatch between LLDP neighbors
-
-- **VLT validation** — Based on the VLT information, a report is generated if the backup VLT link is down or not present
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- Variables and values are case-sensitive
-
-**wiring_validation keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``intended_neighbors`` | list | Defines topology details planned | os10 |
-| ``source_switch`` | string | Defines the source switch inventory name planned | os10 |
-| ``source_port`` | string | Defines the source port planned | os10 |
-| ``dest_switch`` | string | Defines the destination switch inventory name planned | os10 |
-| ``dest_port`` | string | Defines the destination port planned | os10 |
-
-**bgp_validation keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``intended_bgp_neighbors`` | list | Defines topology details planned | os10 |
-| ``source_switch`` | string | Defines the source switch inventory name planned | os10 |
-
-**vlt_validation keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``intended_vlt_pairs`` | list | Defines topology details planned | os10 |
-| ``primary`` | string | Defines the primary role of switch inventory name planned | os10 |
-| ``secondary`` | string | Defines the secondary role of switch inventory name planned | os10 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible _group_vars_ or _host_vars_ directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if the value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; defaults to 22 |
-| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used |
-| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the _become_ method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use, if required, to enter privileged mode on the remote device; if `ansible_become` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-- The _xmltodict_ library should be installed to convert show command output from XML to dictionary format
-- To install the package, use the `pip install xmltodict` command (see the example below)
-- The *os10_fabric_summary* role must be included to query system network summary information
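-
-For example, the library can be installed on the Ansible control node as follows (a minimal sketch; use the pip that corresponds to the Python interpreter Ansible runs with):
-
-    pip install xmltodict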
-
-Example playbook
-----------------
-
-This example uses the *os10_network_validation* role to perform network validation. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-
-**Sample hosts file**
-
- site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- [spine]
- site1-spine1
- site1-spine2
- site2-spine1
- site2-spine2
- [LeafAndSpineSwitch:children]
- spine
-
-
-**Sample host_vars/site1-spine1**
-
- cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
-
- os10_cli_user: xxxx
- os10_cli_pass: xxxx
- ansible_network_os: dellemc.os10.os10
-
-
-#### Sample ``group_vars/all``
-
-**Sample input for wiring validation**
-
-
- intended_neighbors:
- - source_switch: site1-spine2
- source_port: ethernet1/1/5
- dest_port: ethernet1/1/29
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/6
- dest_port: ethernet1/1/30
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/7
- dest_port: ethernet1/1/31
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/8
- dest_port: ethernet1/1/32
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/9
- dest_port: ethernet1/1/21
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/7
- dest_port: ethernet1/1/29
- dest_switch: site1-spine3
-
-**Sample input for BGP validation**
-
- intended_bgp_neighbors:
- - source_switch: site1-spine1
- neighbor_ip: ["10.11.0.1","10.9.0.1","10.9.0.3","10.9.0.5","1.1.1.1"]
- - source_switch: site1-spine2
- neighbor_ip: ["10.11.0.0","10.9.0.9","10.9.0.11","10.9.0.15"]
-
-**Sample input for VLT validation**
-
- intended_vlt_pairs:
- - primary: site1-spine1
- secondary: site2-spine2
- - primary: site2-spine1
- secondary: site2-spine2
-
-
-#### Simple playbook to set up network validation
-
-**Sample playbook of ``validation.yaml`` to run complete validation**
-
- ---
- - name: setup network validation
- hosts: localhost
- gather_facts: no
- connection: local
- roles:
- - os10_network_validation
-
-**Sample playbook to run wiring validation**
-
- ---
- - name: setup wiring validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: wiring_validation.yaml
-
-**Sample playbook to run BGP validation**
-
- ---
- - name: setup bgp validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: bgp_validation.yaml
-
-**Sample playbook to run VLT validation**
-
- ---
- - name: setup vlt validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: vlt_validation.yaml
-
-**Sample playbook to run MTU validation**
-
- ---
- - name: setup mtu validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: mtu_validation.yaml
-
-
-**Run**
-
-Execute the playbook and examine the results.
-
- ansible-playbook -i inventory.yaml validation.yaml
-
-**Sample output of wiring validation**
-
- "results": [
- {
- "dest_port": "ethernet1/1/1",
- "dest_switch": "site2-spine2",
- "error_type": "link-missing",
- "reason": "link is not found for source switch: site2-spine1,port: ethernet1/1/1",
- "source_port": "ethernet1/1/1",
- "source_switch": "site2-spine1"
- },
- {
- "dest_port": "ethernet1/1/2",
- "dest_switch": "site2-spine1",
- "error_type": "link-mismatch",
- "reason": "Destination switch is not an expected value, expected switch: site2-spine1,port: ethernet1/1/2; actual switch: site1-spine2(svc-tag:J33FXC2, node_mac:e4:f0:04:9b:eb:dc), port: ethernet1/1/1",
- "source_port": "ethernet1/1/1",
- "source_switch": "site1-spine1"
- }
- ]
-
-**Sample output of BGP validation**
-
- "results": [
- {
- "bgp_neighbor": "10.9.0.1",
- "bgp_state": "idle",
- "error_type": "remote_port_down",
- "possible_reason": "remote port site2-spine1 ethernet1/1/2 is down",
- "source_switch": "site1-spine1"
- },
- {
- "bgp_neighbor": "-",
- "bgp_state": "idle",
- "error_type": "not_an_intended_neighbor",
- "possible_reason": "neighbor 10.9.0.7 is not an intended, please add this neighbor in the intended_bgp_neighbors",
- "source_switch": "site1-spine1"
- },
- {
- "bgp_neighbor": "1.1.1.1",
- "error_type": "config_missing",
- "possible_reason": "neighbor config missing",
- "source_switch": "site1-spine1"
- },
- {
- "bgp_neighbor": "10.9.0.9",
- "bgp_state": "idle",
- "error_type": "remote_port_down",
- "possible_reason": "remote port site2-spine1 ethernet1/1/3 is down",
- "source_switch": "site1-spine2"
- }
- ]
-
-**Sample output of VLT validation**
-
- "results": [
- {
- "error_type": "secondary_mismatch",
- "intended_primary": "site1-spine1",
- "intended_secondary": "site2-spine2",
- "possible_reason": "config mismatch as site2-spine2 is expected, but the actual secondary is site1-spine2 ",
- "secondary": "site1-spine2"
- },
- {
- "error_type": "peer_missing",
- "intended_primary": "site2-spine1",
- "intended_secondary": "site2-spine2",
- "possible_reason": "peer info is not configured or peer interface is down"
- }
- ]
-
-**Sample output of MTU validation**
-
- "msg": {
- "results": "There is no MTU mistmatch between neighbors"
- }
-
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/docs/os10_ntp.md b/ansible_collections/dellemc/os10/docs/os10_ntp.md
deleted file mode 100644
index 17e879c6b..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_ntp.md
+++ /dev/null
@@ -1,124 +0,0 @@
-NTP role
-========
-
-This role facilitates the configuration of network time protocol (NTP) attributes. It specifically enables configuration of NTP server, NTP source, authentication, and broadcast service. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The NTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_ntp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``server`` | list | Configures the NTP server (see ``server.*``) | os10 |
-| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os10 |
-| ``server.key`` | integer | Configures the peer authentication key for the NTP server | os10 |
-| ``server.prefer`` | boolean | Configures the peer preference | os10 |
-| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os10 |
-| ``source`` | string | Configures the interface for the source address | os10 |
-| ``master`` | integer | Configures the local clock to act as the server | os10 |
-| ``authenticate`` | boolean | Configures authenticate time sources | os10 |
-| ``authentication_key`` | list | Configures authentication key for trusted time sources (see ``authentication_key.*``) | os10 |
-| ``authentication_key.key_num`` | integer | Configures authentication key number | os10 |
-| ``authentication_key.key_string_type`` | integer: 0,9 | Configures hidden authentication key string if the value is 9, and configures unencrypted authentication key string if the value is 0 | os10 |
-| ``authentication_key.key_string`` | string | Configures the authentication key string | os10 |
-| ``authentication_key.type`` | string: md5,sha1,sha2-256 | Configures the authentication type | os10 |
-| ``authentication_key.state`` | string: absent,present\* | Deletes the authentication key if set to absent | os10 |
-| ``trusted_key`` | list | Configures key numbers for trusted time sources (see ``trusted_key.*``) | os10 |
-| ``trusted_key.key_num`` | integer | Configures the key number | os10 |
-| ``trusted_key.state`` | string: absent,present\* | Deletes the trusted key if set to absent | os10 |
-| ``intf`` | dictionary | Configures NTP on the interface (see ``intf.*``) | os10 |
-| ``intf.<interface name>`` | dictionary | Configures NTP on the interface (see ``<interface name>.*``) | os10 |
-| ``<interface name>.disable`` | boolean | Configures NTP disable on the interface | os10 |
-| ``<interface name>.broadcast`` | boolean | Configures NTP broadcast client service on the interface | os10 |
-| ``vrf`` | dictionary | Enables NTP on VRF (see ``vrf.*``) | os10 |
-| ``vrf.name`` | string | Name of the VRF to enable NTP | os10 |
-| ``vrf.state`` | string: absent,present\* | Disables NTP on the VRF if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_ntp* role to set the NTP server, source IP, authentication, and broadcast service. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When the `os10_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false. The example writes a simple playbook that only references the *os10_ntp* role.
-
-By including the role, you automatically get access to all of the tasks to configure NTP attributes. The sample *host_vars* file is for OS10.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- host: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_ntp:
- source: ethernet 1/1/2
- master: 5
- authenticate: true
- authentication_key:
- - key_num: 123
- key_string_type: 9
- key_string: test
- type: md5
- state: present
- trusted_key:
- - key_num: 1323
- state: present
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: present
- intf:
- ethernet 1/1/2:
- disable: true
- broadcast: true
- vrf:
- name: red
- state: present
-
-**Simple playbook to set up NTP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_ntp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_prefix_list.md b/ansible_collections/dellemc/os10/docs/os10_prefix_list.md
deleted file mode 100644
index dce141e8f..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_prefix_list.md
+++ /dev/null
@@ -1,104 +0,0 @@
-Prefix-list role
-================
-
-This role facilitates the configuration of a prefix-list. It supports the configuration of an IP prefix-list, and assigns the prefix-list to line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The prefix-list role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_prefix_list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4,ipv6 | Configures an L3 (IPv4/IPv6) prefix-list | os10 |
-| ``name`` | string (required) | Configures the prefix-list name | os10 |
-| ``description`` | string | Configures the prefix-list description | os10 |
-| ``entries`` | list | Configures rules in the prefix-list (see ``entries.*``) | os10 |
-| ``entries.number`` | int (required) | Specifies the sequence number of the prefix-list rule | os10 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true, and specifies to reject packets if set to false | os10 |
-| ``entries.net_num`` | string (required) | Specifies the network number | os10 |
-| ``entries.mask`` | string (required) | Specifies the mask | os10 |
-| ``entries.condition_list`` | list | Configures conditions to filter packets (see ``condition_list.*``)| os10 |
-| ``condition_list.condition`` | list | Specifies the condition to filter packets from the source address | os10 |
-| ``condition_list.prelen`` | string (required) | Specifies the allowed prefix length | os10 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the prefix-list if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the prefix-list if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_prefix_list* role to configure prefix-lists for both IPv4 and IPv6. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_prefix_list* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_prefix_list
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_qos.md b/ansible_collections/dellemc/os10/docs/os10_qos.md
deleted file mode 100644
index 584159707..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_qos.md
+++ /dev/null
@@ -1,90 +0,0 @@
-QoS role
-========
-
-This role facilitates the configuration of quality of service (QoS) attributes such as policy-map and class-map. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The QoS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_qos keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``policy_map`` | list | Configures the policy-map (see ``policy_map.*``) | os10 |
-| ``policy_map.name`` | string (required) | Configures the policy-map name | os10 |
-| ``policy_map.type`` | string: qos\*,application,control-plane,network-qos,queuing | Configures the policy-map type | os10 |
-| ``policy_map.state`` | string: present\*,absent | Deletes the policy-map if set to absent | os10 |
-| ``class_map`` | list | Configures the class-map (see ``class_map.*``) | os10 |
-| ``class_map.name`` | string (required) | Configures the class-map name | os10 |
-| ``class_map.type`` | string: qos\*,application,control-plane,network-qos,queuing | Configures the class-map type | os10 |
-| ``class_map.state`` | string: present\*,absent | Deletes the class-map if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_qos* role to configure policy-maps and class-maps. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_qos* role. By including the role, you automatically get access to all of the tasks to configure QoS features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_qos:
- policy_map:
- - name: testpolicy
- type: qos
- state: present
- class_map:
- - name: testclass
- type: application
- state: present
-
-**Simple playbook to set up QoS — leaf.yaml**
-
- - hosts: leaf1
- roles:
-         - dellemc.os10.os10_qos
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_raguard.md b/ansible_collections/dellemc/os10/docs/os10_raguard.md
deleted file mode 100644
index abf7cf4af..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_raguard.md
+++ /dev/null
@@ -1,126 +0,0 @@
-IPv6 RA Guard role
-===================
-
-This role facilitates the configuration of IPv6 RA Guard attributes. It specifically enables you to enable or disable the IPv6 RA Guard feature, define IPv6 RA Guard policies and configure policy parameters, and attach an IPv6 RA Guard policy to an interface. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The IPv6 RA Guard role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_raguard keys**
-
-
-| Key | Type | Description |Support |
-|--------------------------------------|-------------------------|----------------------------------------------------------|---------|
-| ``enable`` | boolean | Enables the IPv6 RA Guard feature | os10 |
-| ``policy`` | list | Configures the IPv6 RA Guard policy (see ``policy.*``) | os10 |
-| ``policy.state`` | string: absent,present\*| Deletes the policy if set to absent | os10 |
-| ``policy.name`` | string (required) | Configures the IPv6 RA Guard policy name | os10 |
-| ``policy.device_role.value`` | string (required) | Configures the device role for a policy | os10 |
-| ``policy.device_role.state`` | string: absent,present\*| Deletes the device role if set to absent | os10 |
-| ``policy.managed_config_flag.value`` | string | Configures the managed config flag param for a policy | os10 |
-| ``policy.managed_config_flag.state`` | string: absent,present\*| Deletes the managed config flag if set to absent | os10 |
-| ``policy.other_config_flag.value`` | string | Configures the other config flag param for a policy | os10 |
-| ``policy.other_config_flag.state`` | string: absent,present\*| Deletes the other config flag if set to absent | os10 |
-| ``policy.mtu.value`` | integer | Configures the MTU param for a policy | os10 |
-| ``policy.mtu.state`` | string: absent,present\*| Deletes the MTU if set to absent | os10 |
-| ``policy.reachable_time.value`` | integer | Configures the reachable time param for a policy | os10 |
-| ``policy.reachable_time.state`` | string: absent,present\*| Deletes the reachable time if set to absent | os10 |
-| ``policy.retrans_timer.value`` | integer | Configures the retransmit timer param for a policy | os10 |
-| ``policy.retrans_timer.state`` | string: absent,present\*| Deletes the retransmit timer if set to absent | os10 |
-| ``policy.router_lifetime.value`` | integer | Configures the router lifetime param for a policy | os10 |
-| ``policy.router_lifetime.state`` | string: absent,present\*| Deletes the router lifetime if set to absent | os10 |
-| ``policy.router_preference.value`` | string | Configures the router preference param for a policy | os10 |
-| ``policy.router_preference.state`` | string: absent,present\*| Deletes the router preference if set to absent | os10 |
-| ``policy.match`` | list | Configures the prefix/ACL/MAC list param for a policy | os10 |
-| ``policy.match.type`` | string | Configures the prefix/ACL/MAC type for a policy | os10 |
-| ``policy.match.name`` | string | Configures the prefix/ACL/MAC name for a policy | os10 |
-| ``policy.match.state`` | string: absent,present\*| Deletes the prefix/ACL/MAC if set to absent | os10 |
-| ``intf`` | dictionary | Configures IPv6 RA Guard on the interface (see``intf.*``) | os10 |
-| ``intf.<interface name>`` | dictionary | Configures RA Guard on the interface (see``<interface name>.*``)| os10 |
-| ``<interface name>.policy_name`` | string | Configures the RA Guard policy name to be attached to an interface | os10 |
-| ``<interface name>.vlan`` | string | Configures the VLAN name to which the policy is attached on an interface | os10 |
-| ``<interface name>.state`` | string: absent,present\* | Deletes the policy from an interface if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_raguard* role to enable or disable the IPv6 RA Guard feature, define an IPv6 RA Guard policy and its parameters, and attach an IPv6 RA Guard policy to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false. The example writes a simple playbook that only references the *os10_raguard* role. By including the role, you automatically get access to all of the tasks to configure IPv6 RA Guard attributes.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- host: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_raguard:
- enable: true
- policy:
- - policy_name: test
- device_role:
- value: router
- state: present
- managed_config_flag:
- value: "on"
- state: present
- mtu:
- value: 1280
- state: present
- match:
- - type: prefix_list
- name: test_prefix
- state: present
- state: present
- intf:
- ethernet 1/1/2:
- policy_name: test
- vlan: 10
- state: present
-
-**Simple playbook to set up IPv6 RA Guard — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_raguard
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_route_map.md b/ansible_collections/dellemc/os10/docs/os10_route_map.md
deleted file mode 100644
index 1160ca48c..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_route_map.md
+++ /dev/null
@@ -1,190 +0,0 @@
-Route-map role
-==============
-
-This role facilitates the configuration of route-map attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The route-map role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_route_map keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``route_map`` | list | Configures the route-map (see ``route_map.*``) | os10 |
-| ``route_map.name`` | string (required) | Configures the route-map name | os10 |
-| ``route_map.permit`` | boolean | Configures permit/deny set operations | os10 |
-| ``route_map.seq_num`` | integer | Configures the sequence number | os10 |
-| ``route_map.continue`` | integer | Configures the next sequence number | os10 |
-| ``route_map.set`` | dictionary | Configures route-map to set values in the destination routing protocol (see ``set.*``) | os10 |
-| ``set.local_pref`` | integer | Configures the BGP local preference path attribute | os10 |
-| ``set.metric`` | string | Configures a specific value to add or subtract from the existing metric value ("+ <value>", "- <value>", <value> format) | os10 |
-| ``set.metric_type`` | string: internal,type-1,type-2 | Configures the metric type for the destination routing protocol | os10 |
-| ``set.origin`` | string: igp,egp,incomplete | Configures the BGP origin attribute | os10 |
-| ``set.weight`` | integer | Configures the weight for the BGP route | os10 |
-| ``set.comm_list`` | dictionary | Configures the BGP community list (see ``comm_list.*``) | os10 |
-| ``comm_list.add`` | string | Adds the community attribute of a BGP update | os10 |
-| ``comm_list.delete`` | string | Deletes a community attribute of a BGP update | os10 |
-| ``set.community`` | string | Configures the community attribute for a BGP route update | os10 |
-| ``set.extcomm_list`` | dictionary | Configures the BGP extcommunity list (see ``extcomm_list.*``) | os10 |
-| ``extcomm_list.add`` | string | Adds an extended community attribute of a BGP update | os10 |
-| ``extcomm_list.delete`` | string | Deletes the extended community attribute of a BGP update | os10 |
-| ``set.extcommunity`` | string | Configures the extended community attribute for a BGP route update | os10 |
-| ``set.next_hop`` | list | Configures the next-hop address (see ``next_hop.*``) | os10 |
-| ``next_hop.type`` | string: ip,ipv6 | Configures the type of the next-hop address | os10 |
-| ``next_hop.address`` | string | Configures the next-hop address | os10 |
-| ``next_hop.track_id`` | integer | Configures the object track ID | os10 |
-| ``next_hop.state`` | string: present\*,absent | Deletes the next-hop address if set to absent | os10 |
-| ``route_map.match`` | list | Configures the route-map to match values from the route table (see ``match.*``) | os10 |
-| ``match.ip_type`` | string (required): ipv4,ipv6 | Configures the IPv4/IPv6 address to match | os10 |
-| ``match.access_group`` | string | Configures the access-group or list to match | os10 |
-| ``match.source_protocol_ebgp`` | string | Configures the source protocol to eBGP to match | os10 |
-| ``match.source_protocol_ibgp`` | string | Configures the source protocol to iBGP to match | os10 |
-| ``match.source_protocol_evpn`` | string | Configures the source protocol to EVPN to match | os10 |
-| ``match.source_protocol_static`` | string | Configures the source protocol to static to match | os10 |
-| ``match.source_protocol_connected`` | string | Configures the source protocol to connected to match | os10 |
-| ``match.source_protocol_ospf`` | string | Configures the source protocol to OSPF to match | os10 |
-| ``match.prefix_list`` | string | Configures the IP prefix-list to match against | os10 |
-| ``route_map.state`` | string, choices: present\*,absent | Deletes the route-map if set to absent | os10 |
-| ``as_path`` | list | Configures the BGP AS path filter (see ``as_path.*``) | os10 |
-| ``as_path.access_list`` | string (required) | Configures the access-list name | os10 |
-| ``as_path.permit`` | boolean (required) | Configures an AS path to accept or reject | os10 |
-| ``as_path.regex``| string (required) | Configures a regular expression | os10 |
-| ``as_path.state`` | string: absent,present\* | Deletes the BGP as path filter if set to absent | os10 |
-| ``community_list`` | list | Configures a community list entry (see ``community_list.*``) | os10 |
-| ``community_list.type`` | string (required): standard,expanded | Configures the type of community-list entry | os10 |
-| ``community_list.name`` | string (required) | Configures the name of community-list entry | os10 |
-| ``community_list.permit`` | boolean (required) | Configures the community to accept or reject | os10 |
-| ``community_list.regex`` | string (required) | Configures the regular expression for extended community list; mutually exclusive with *community_list.community* | os10 |
-| ``community_list.community`` | string (required) | Configures a well-known community or community number for standard community list; mutually exclusive with *community_list.regex* | os10 |
-| ``community_list.state`` | string: absent,present\* | Deletes the community list entry if set to absent | os10 |
-| ``extcommunity_list`` | list | Configures extcommunity-list entry (see ``extcommunity_list.*``) | os10 |
-| ``extcommunity_list.type`` | string (required): standard,expanded | Configures the type of extcommunity-list entry | os10 |
-| ``extcommunity_list.name`` | string (required) | Configures the name of extcommunity-list entry | os10 |
-| ``extcommunity_list.permit`` | boolean (required) | Configures the extcommunity to accept or reject | os10 |
-| ``extcommunity_list.regex`` | string (required) | Configures the regular expression for the extended extcommunity list; mutually exclusive with *extcommunity_list.community* | os10 |
-| ``extcommunity_list.community`` | string (required) | Configures the extended community for standard community-list; mutually exclusive with *extcommunity_list.regex* | os10 |
-| ``extcommunity_list.state`` | string: absent,present\* | Deletes the extcommunity-list entry if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_route_map* role to configure route-map, BGP AS-path, and community-list attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_route_map* role. By including the role, you automatically get access to all of the tasks to configure route-map features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: present
- community_list:
- - type: expanded
- name: qq
- permit: true
- regex: aaa
- state: present
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: present
- extcommunity_list:
- - type: expanded
- name: qq
- permit: true
- regex: aaa
- state: present
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: present
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- source_protocol_ebgp: present
- source_protocol_ibgp: present
- source_protocol_evpn: present
- source_protocol_static: present
- source_protocol_ospf: present
- source_protocol_connected: present
- set:
- local_pref: 1200
- metric_type: internal
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: present
-
-**Simple playbook to setup route-map — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_route_map
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_snmp.md b/ansible_collections/dellemc/os10/docs/os10_snmp.md
deleted file mode 100644
index a875a2340..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_snmp.md
+++ /dev/null
@@ -1,269 +0,0 @@
-SNMP role
-=========
-
-This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, group, community, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The SNMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_snmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``snmp_contact`` | string | Configures SNMP contact information | os10 |
-| ``snmp_location`` | string | Configures SNMP location information | os10 |
-| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os10 |
-| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os10 |
-| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os10 |
-| ``snmp_community.access_list`` | dictionary | Configures ACL for the community (see ``snmp_community.access_list.*``) | os10 |
-| ``snmp_community.access_list.name`` | string | Specifies the name of the ACL for the community | os10 |
-| ``snmp_community.access_list.state`` | string: absent,present\* | Deletes the ACL from the community if set to absent | os10 |
-| ``snmp_community.state`` | string: absent,present\* | Deletes the SNMP community information if set to absent | os10 |
-| ``snmp_engine_id`` | string | Configures SNMP local EngineID | os10 |
-| ``snmp_remote_engine_id`` | list | Configures SNMP remote engine information (see ``snmp_remote_engine_id.*``) | os10 |
-| ``snmp_remote_engine_id.ip`` | string | Configures the IP address of the SNMP remote engine | os10 |
-| ``snmp_remote_engine_id.engine_id`` | string | Configures the EngineID of the SNMP remote engine | os10 |
-| ``snmp_remote_engine_id.udpport`` | string | Configures the UDP port of the SNMP remote engine | os10 |
-| ``snmp_remote_engine_id.state`` | string: absent,present\* | Deletes the SNMP remote engine information if set to absent | os10 |
-| ``snmp_group`` | list | Configures the SNMP group information (see ``snmp_group.*``) | os10 |
-| ``snmp_group.name`` | string | Configures the name of the SNMP group | os10 |
-| ``snmp_group.version`` | string: 1,2c,3 | Configures the version of the SNMP group | os10 |
-| ``snmp_group.security_level`` | string: auth,noauth,priv | Configures the security level of SNMP group for version 3 | os10 |
-| ``snmp_group.access_list`` | dictionary | Configures the access list of the SNMP group (see ``snmp_group.access_list.*``)| os10 |
-| ``snmp_group.access_list.name`` | string | Specifies the name of the access list for the SNMP group with version 1 or 2c | os10 |
-| ``snmp_group.access_list.state`` | string: absent,present\* | Deletes the access list from the SNMP group if set to absent | os10 |
-| ``snmp_group.read_view`` | dictionary | Configures the read view of the SNMP group (see ``snmp_group.read_view.*``) | os10 |
-| ``snmp_group.read_view.name`` | string | Specifies the name of the read view for the SNMP group | os10 |
-| ``snmp_group.read_view.state`` | string: absent,present\* | Deletes the read view from the SNMP group if set to absent | os10 |
-| ``snmp_group.write_view`` | dictionary | Configures the write view of the SNMP group (see ``snmp_group.write_view``) | os10 |
-| ``snmp_group.write_view.name`` | string | Specifies the name of the write view for the SNMP group | os10 |
-| ``snmp_group.write_view.state`` | string: absent,present\* | Deletes the write view from the SNMP group if set to absent | os10 |
-| ``snmp_group.notify_view`` | dictionary | Configures the notify view of the SNMP group (see ``snmp_group.notify_view.*``) | os10 |
-| ``snmp_group.notify_view.name`` | string | Specifies the name of the notify view for the SNMP group | os10 |
-| ``snmp_group.notify_view.state`` | string: absent,present\* | Deletes the notify view from the SNMP group if set to absent | os10 |
-| ``snmp_group.state`` | string: absent,present\* | Deletes the SNMP group if set to absent | os10 |
-| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os10 |
-| ``snmp_host.ip`` | string | Configures the IP address of the SNMP trap host | os10 |
-| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host for version 1 or 2c | os10 |
-| ``snmp_host.udpport`` | string | Configures the UDP port number of the SNMP trap host (0 to 65535) | os10 |
-| ``snmp_host.version`` | string: 1,2c,3 (required) | Specifies the SNMP version of the host (1 or 2c or 3 in os10) | os10 |
-| ``snmp_host.security_level`` | string: auth,noauth,priv | Configures the security level of the SNMP host for version 3 | os10 |
-| ``snmp_host.security_name`` | string | Configures the security name of the SNMP host for version 3 | os10 |
-| ``snmp_host.notification_type`` | string: traps,informs | Configures the notification type of the SNMP host | os10 |
-| ``snmp_host.trap_categories`` | dictionary | Enables or disables different trap categories for the SNMP host (see ``snmp_host.trap_categories.*``) | os10 |
-| ``snmp_host.trap_categories.dom`` | boolean: true,false | Enables or disables dom category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.entity`` | boolean: true,false | Enables or disables entity category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.envmon`` | boolean: true,false | Enables or disables envmon category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.lldp`` | boolean: true,false | Enables or disables lldp category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.snmp`` | boolean: true,false | Enables or disables snmp category traps for the SNMP host | os10 |
-| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os10 |
-| ``snmp_source_interface`` | string | Configures the source interface for SNMP | os10 |
-| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os10 |
-| ``snmp_traps.name`` | string | Enables SNMP traps | os10 |
-| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os10 |
-| ``snmp_user`` | list | Configures the SNMP user information (see ``snmp_user.*``) | os10 |
-| ``snmp_user.name`` | string | Specifies the name of the SNMP user | os10 |
-| ``snmp_user.group_name`` | string | Specifies the group of the SNMP user | os10 |
-| ``snmp_user.version`` | string: 1,2c,3 | Configures the version for the SNMP user | os10 |
-| ``snmp_user.access_list`` | string | Configures the access list for the SNMP user with version 1 or 2c | os10 |
-| ``snmp_user.authentication`` | dictionary | Configures the authentication information for the SNMP user with version 3 (see ``snmp_user.authentication.*``) | os10 |
-| ``snmp_user.authentication.localized`` | boolean: true,false | Configures the password to be in localized key format or not | os10 |
-| ``snmp_user.authentication.algorithm`` | string: md5, sha | Configures the authentication algorithm for the SNMP user | os10 |
-| ``snmp_user.authentication.password`` | string | Configures the authentication password for the SNMP user; if localized is true, it should be a hexadecimal string prefixed with 0x and quoted | os10 |
-| ``snmp_user.authentication.encryption`` | dictionary | Configures the encryption parameters for the SNMP user | os10 |
-| ``snmp_user.authentication.encryption.algorithm`` | string: aes,des | Configures the encryption algorithm for the SNMP user | os10 |
-| ``snmp_user.authentication.encryption.password`` | string | Configures the encryption password for the SNMP user; if localized is true, it should be a hexadecimal string prefixed with 0x and quoted | os10 |
-| ``snmp_user.remote`` | dictionary | Configures the remote SNMP entity the user belongs to (see ``snmp_user.remote.*``) | os10 |
-| ``snmp_user.remote.ip`` | string | Configures the IP address of the remote entity for the SNMP user | os10 |
-| ``snmp_user.remote.udpport`` | string | Configures the UDP port of the remote entity for the SNMP user | os10 |
-| ``snmp_user.state`` | string: absent,present\* | Deletes the SNMP user if set to absent | os10 |
-| ``snmp_view`` | list | Configures SNMPv3 view information (see ``snmp_view.*``) | os10 |
-| ``snmp_view.name`` | string | Configures the SNMP view name (up to 20 characters) | os10 |
-| ``snmp_view.oid_subtree`` | integer | Configures the SNMP view for the OID subtree | os10 |
-| ``snmp_view.include`` | boolean: true,false | Specifies if the MIB family should be included or excluded from the view | os10 |
-| ``snmp_view.state`` | string: absent,present\* | Deletes the SNMP view if set to absent | os10 |
-| ``snmp_vrf`` | string | Configures the VRF for SNMP | os10 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_snmp:
- snmp_contact: test
- snmp_location: Chennai
- snmp_source_interface: loopback 10
- snmp_vrf: test
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- state: present
- snmp_engine_id: 123456789
- snmp_remote_engine_id:
- - ip: 1.1.1.1
- engine_id: '0xab'
- udpport: 162
- state: present
- snmp_traps:
- - name: all
- state: present
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: absent
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- trap_categories:
- dom: true
- lldp: true
- state: present
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: true
- snmp: true
- state: present
- snmp_group:
- - name: group_1
- version: "2c"
- state: present
- access_list:
- name: test_acl
- state: present
- read_view:
- name: view_1
- state: present
- write_view:
- name: view_2
- state: present
- notify_view:
- name: view_3
- state: present
- - name: group_2
- version: 3
- security_level: priv
- state: present
- read_view:
- name: view_1
- state: absent
- notify_view:
- name: view_3
- state: present
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: present
- - name: user_2
- group_name: group_1
- version: 3
- remote:
- ip: 1.1.1.1
- udpport: 200
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: present
- - name: user_3
- group_name: group_1
- version: 2c
- state: present
- - name: user_4
- group_name: group_1
- version: 3
- state: present
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: present
-
-**Simple playbook to setup SNMP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_snmp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_system.md b/ansible_collections/dellemc/os10/docs/os10_system.md
deleted file mode 100644
index 119138afc..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_system.md
+++ /dev/null
@@ -1,126 +0,0 @@
-System role
-===========
-
-This role facilitates the configuration of global system attributes. It specifically enables configuration of hostname and hashing algorithm. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The System role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_system keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``hostname`` | string | Configures a hostname to the device (no negate command) | os10 |
-| ``hardware_forwarding`` | string: scaled-l2,scaled-l3-routes,scaled-l3-hosts | Configures hardware forwarding mode | os10 |
-| ``hash_algo`` | dictionary | Configures hash algorithm commands (see ``hash_algo.*``) | os10 |
-| ``hash_algo.algo`` | list | Configures hashing algorithm (see ``algo.*``) | os10 |
-| ``algo.name`` | string (required) | Configures the name of the hashing algorithm | os10 |
-| ``algo.mode`` | string (required) | Configures the hashing algorithm mode | os10 |
-| ``algo.state`` | string: absent,present\* | Deletes the hashing algorithm if set to absent | os10 |
-| ``load_balance`` | dictionary | Configures the global traffic load balance (see ``load_balance.*``) | os10 |
-| ``load_balance.ingress_port`` | boolean: true,false | Specifies whether to use the source port ID for the hashing algorithm | os10 |
-| ``load_balance.ip_selection`` | list | Configures IPv4 key fields to use in hashing algorithm (see ``ip_selection.*``) | os10 |
-| ``ip_selection.field`` | string | Configures IPv4 key fields to use in hashing algorithm | os10 |
-| ``ip_selection.state`` | string: absent,present\* | Deletes the IPv4 key fields if set to absent | os10 |
-| ``load_balance.ipv6_selection`` | list | Configures IPv6 key fields to use in hashing algorithm | os10 |
-| ``ipv6_selection.field`` | string | Configures IPv6 key fields to use in hashing algorithm | os10 |
-| ``ipv6_selection.state`` | string: absent,present\* | Deletes the IPv6 key fields if set to absent | os10 |
-| ``load_balance.mac_selection`` | list | Configures MAC key fields to use in hashing algorithm (see ``mac_selection.*``) | os10 |
-| ``mac_selection.field`` | string | Configures MAC key fields to use in hashing algorithm | os10 |
-| ``mac_selection.state`` | string: absent,present\* | Deletes the MAC key fields if set to absent | os10 |
-| ``load_balance.tcp_udp_selection`` | list | Configures TCP UDP ports for load balancing configurations (see ``tcp_udp_selection.*``) | os10 |
-| ``tcp_udp_selection.field`` | string | Configures TCP UDP port fields to use in hashing algorithm | os10 |
-| ``tcp_udp_selection.state`` | string: absent,present\* | Deletes the TCP UDP ports if set to absent | os10 |
-| ``min_ra`` | string | Configures global RA minimum interval value, applicable to all interfaces across VRFs | os10 |
-| ``max_ra`` | string | Configures global RA maximum interval value, applicable to all interfaces across VRFs | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_system* role to completely set the NTP server, hostname, enable password, management route, hash algorithm, clock, line terminal, banner and reload type. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The system role writes a simple playbook that only references the *os10_system* role. By including the role, you automatically get access to all of the tasks to configure system features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_system:
- hostname: os10
- hardware_forwarding: scaled-l3-hosts
- hash_algo:
- algo:
- - name: lag
- mode: crc
- state: present
- - name: ecmp
- mode: xor
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: source-ip
- state: present
- ipv6_selection:
- - field: source-ip
- state: present
- mac_selection:
- - field: source-mac
- state: present
- tcp_udp_selection:
- - field: l4-source-port
- state: present
- max_ra: 15
- min_ra: 10
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_system
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_template.md b/ansible_collections/dellemc/os10/docs/os10_template.md
deleted file mode 100644
index d7faf0132..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_template.md
+++ /dev/null
@@ -1,75 +0,0 @@
-Template role
-==============
-
-This role provides access to structured data from show commands. It uses the TextFSM parsing engine. TextFSM is a template-based state machine: it takes raw string input from the CLI of network devices, runs it through a TextFSM template, and returns structured text in the form of a Python dictionary. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Template role is highly customizable, and it works with separate template definitions which contain variables and rules with regular expressions. The library is useful for parsing any text-based CLI output from network devices. The Template role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- Variables and values are case-sensitive
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_template* role to parse any text-based CLI output. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name. All the supported CLI commands are imported as tasks in tasks/main.yml.
-
-For the *os10_template* role plugins to be used, you may need to specify the actual path of the role in the *ansible.cfg* file.
-
-**Sample ansible.cfg**
-
- action_plugins = ../../plugins/modules/
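-
-In a complete *ansible.cfg*, this entry would normally sit under the `[defaults]` section; a minimal sketch (the plugin path shown is the same illustrative one as above):
-
- [defaults]
- action_plugins = ../../plugins/modules/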
-
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address> ansible_network_os=dellemc.os10.os10 ansible_ssh_user=xxxxx ansible_ssh_pass=xxxxx
-
-
-**Simple playbook to run all supported show commands — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_template
-
-**Example playbook to run specific show command — leaf.yaml**
-
-
- ---
- - name: PARSE SHOW IP INTERFACE BRIEF
- hosts: leaf1
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
-
-
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_uplink.md b/ansible_collections/dellemc/os10/docs/os10_uplink.md
deleted file mode 100644
index 8ffeb0e71..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_uplink.md
+++ /dev/null
@@ -1,109 +0,0 @@
-Uplink role
-===========
-
-This role facilitates the configuration of uplink failure detection feature attributes. It specifically enables configuration of association between upstream and downstream interfaces known as uplink-state group. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Uplink role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_uplink keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``uplink_state_group`` | list | Configures the uplink state group (see ``uplink_state_group.*``) | os10 |
-| ``uplink_state_group.id`` | integer | Configures the uplink state group instance | os10 |
-| ``uplink_state_group.enable`` | boolean: True,False | Enables the uplink state group instance | os10 |
-| ``uplink_state_group.defer_time`` | integer | Configures defer timer for the uplink state group | os10 |
-| ``uplink_state_group.uplink_type`` | list | Configures the upstream and downstream attribute (see ``uplink_type.*``) | os10 |
-| ``uplink_type.type`` | string: upstream,downstream | Configures the uplink type | os10 |
-| ``uplink_type.intf`` | string | Configures the uplink interface | os10 |
-| ``uplink_type.state`` | string: absent,present\* | Removes the uplink stream if set to absent | os10 |
-| ``uplink_state_group.downstream`` | dictionary | Configures downstream information for the uplink state group (see ``downstream.*``) | os10 |
-| ``downstream.disable_links`` | integer | Configures the number of downstream links to be disabled; the string 'all' can be used to disable all downstream links | os10 |
-| ``downstream.auto_recover`` | boolean: True,False | Enables or disables auto recover for downstream interfaces | os10 |
-| ``uplink_state_group.state`` | string: absent,present\* | Removes the uplink state group instance if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_uplink* role to completely set up the uplink state group instance and the upstream and downstream interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. The uplink role writes a simple playbook that only references the *os10_uplink* role. By including the role, you automatically get access to all of the tasks to configure uplink features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "present"
- - type: "downstream"
- intf: "ethernet1/1/2-1/1/5"
- state: "present"
- state: "present"
- downstream:
- disable_links: all
- auto_recover: false
- defer_time: 50
- - id: 2
- enable: True
- state: "present"
-
-> **NOTE**: Interfaces should be created using the *os10_interface* role.
-
-**Simple playbook to setup uplink — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_uplink
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_users.md b/ansible_collections/dellemc/os10/docs/os10_users.md
deleted file mode 100644
index 09d55f1dd..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_users.md
+++ /dev/null
@@ -1,89 +0,0 @@
-Users role
-==========
-
-This role facilitates the configuration of global system user attributes, and it supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Users role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_users list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``username`` | string (required) | Configures the username which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os10 |
-| ``password`` | string | Configures the password set for the username; password length must be at least eight characters | os10 |
-| ``role`` | string | Configures the role assigned to the user | os10 |
-| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_users* role to configure global system user attributes. It creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file. It writes a simple playbook that only references the *os10_users* role. By including the role, you automatically get access to all of the tasks to configure user features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_users:
- - username: test
- password: a1a2a3a4!@#$
- role: sysadmin
- state: present
- - username: u1
- password: a1a2a3a4!@#$
- role: netadmin
- state: present
-
-**Simple playbook to setup users — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_users
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_vlan.md b/ansible_collections/dellemc/os10/docs/os10_vlan.md
deleted file mode 100644
index 71a7adf9a..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_vlan.md
+++ /dev/null
@@ -1,123 +0,0 @@
-VLAN role
-=========
-
-This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration
-- `os10_vlan` (dictionary) holds the VLAN ID keys and the default-vlan key
-- A VLAN ID key should be in the format "vlan ID" (1 to 4094)
-- Variables and values are case-sensitive
-
-**os10_vlan**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``default_vlan_id`` | integer | Configures the vlan-id as the default VLAN for an existing VLAN | os10 |
-
-**VLAN ID keys**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``description`` | string | Configures a single line description for the VLAN | os10 |
-| ``tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os10 |
-| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os10 |
-| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os10 |
-| ``untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os10 |
-| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os10 |
-| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os10 |
-| ``virtual_gateway_ip`` | string | Configures an anycast gateway IPv4 address for VLAN interfaces| os10 |
-| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 |
-| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_vlan* role to set up the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN with the ID or delete the members associated with it. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in *build_dir* path. By default, the variable is set to false. It writes a simple playbook that only references the *os10_vlan* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_vlan:
- default_vlan_id: 2
- vlan 100:
- description: "Blue"
- tagged_members:
- - port: ethernet 1/1/32
- state: present
- - port: ethernet 1/1/31
- state: present
- untagged_members:
- - port: ethernet 1/1/30
- state: present
- - port: ethernet 1/1/29
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
- vlan 10:
- description: "vlan with anycast GW"
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- state: "present"
-
-> **NOTE**: Interfaces should be created using the *os10_interface* role.
-
-**Simple playbook to setup VLAN — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vlan
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_vlt.md b/ansible_collections/dellemc/os10/docs/os10_vlt.md
deleted file mode 100644
index 85ed917a2..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_vlt.md
+++ /dev/null
@@ -1,108 +0,0 @@
-VLT role
-========
-
-This role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VLT role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_vlt keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``domain`` | integer (required) | Configures the VLT domain identification number (1 to 1000) | os10 |
-| ``backup_destination`` | string | Configures an IPv4 address for the VLT backup link (A.B.C.D format or X:X:X:X::X format) | os10 |
-| ``destination_type`` | string | Configures the backup destination based on this destination type (IPv4 or IPv6)| os10 |
-| ``backup_destination_vrf`` | string | Configures the virtual routing and forwarding (VRF) instance through which the backup destination IP is reachable (*vrfname* must be present) | os10 |
-| ``discovery_intf`` | string | Configures the discovery interface for the VLT domain (range of interfaces)| os10 |
-| ``discovery_intf_state`` | string: absent,present | Deletes the discovery interfaces for the VLT domain if set to absent | os10 |
-| ``peer_routing`` | boolean | Configures VLT peer routing | os10 |
-| ``priority`` | integer (default:32768) | Configures VLT priority | os10 |
-| ``vlt_mac`` | string | Configures the VLT MAC address | os10 |
-| ``vlt_peers`` | dictionary | Contains objects to configure the VLT peer port-channel (see ``vlt_peers.*``) | os10 |
-| ``vlt_peers.<portchannelid>`` | dictionary | Configures the VLT peer port-channel (`Po <portchannelid> value`) | os10 |
-| ``vlt_peers.<portchannelid>.peer_lag`` | integer | Configures the port-channel ID of the VLT peer lag | os10 |
-| ``state`` | string: absent,present | Deletes the VLT instance if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network OS roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-The *os10_vlt* role is built on modules included in the core Ansible code. These modules were added in Ansible version 2.2.0.
-
-Example playbook
-----------------
-
-This example uses the *os10_vlt* role to set up a VLT domain. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vlt* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- backup_destination_vrf:
- discovery_intf: 1/1/12
- discovery_intf_state: present
- peer_routing: True
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: present
-
-> **NOTE**: Discovery interface must not be in switchport mode and can be configured using the *os10_interface* role.
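-
-As a rough sketch of that prerequisite, the discovery interface could be taken out of switchport mode with the *os10_interface* role; the per-interface keys shown here (`desc`, `switchport`, `admin`) are assumptions for illustration, so check that role's documentation for the exact keys.
-
-    os10_interface:
-      ethernet 1/1/12:
-        # take the discovery interface out of L2 (switchport) mode before enabling VLT
-        desc: "VLT discovery link"
-        switchport: False
-        admin: up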
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vlt
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_vrf.md b/ansible_collections/dellemc/os10/docs/os10_vrf.md
deleted file mode 100644
index 464efc5bd..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_vrf.md
+++ /dev/null
@@ -1,143 +0,0 @@
-VRF role
-========
-
-This role facilitates the configuration of basic virtual routing and forwarding (VRF), which helps partition physical routers into multiple virtual routers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VRF role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_vrf keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrfdetails`` | list | Configures the list of VRF instances (see ``vrfdetails.*``) | os10 |
-| ``vrfdetails.vrf_name`` | string | Specifies the VRF instance name (default is management) | os10 |
-| ``vrfdetails.state`` | string | Deletes the VRF instance name if set to absent | os10 |
-| ``vrfdetails.ip_route_import`` | string | Configures VRF IP subcommands | os10 |
-| ``ip_route_import.community_value`` | string | Configures the route community value | os10 |
-| ``ip_route_import.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ip_route_import.state`` | string | Deletes the IP configuration if set to absent | os10 |
-| ``vrfdetails.ip_route_export`` | string | Configures VRF IP subcommands | os10 |
-| ``ip_route_export.community_value`` | string | Configures the route community value | os10 |
-| ``ip_route_export.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ip_route_export.state`` | string | Deletes the IP config if set to absent | os10 |
-| ``vrfdetails.ipv6_route_import`` | string | Configures VRF IPv6 subcommands | os10 |
-| ``ipv6_route_import.community_value`` | string | Configures the route community value | os10 |
-| ``ipv6_route_import.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ipv6_route_import.state`` | string | Deletes the IP config if set to absent | os10 |
-| ``vrfdetails.ipv6_route_export`` | string | Configures VRF IPv6 subcommands | os10 |
-| ``ipv6_route_export.community_value`` | string | Configures the route community value | os10 |
-| ``ipv6_route_export.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ipv6_route_export.state`` | string | Deletes the IP config if set to absent | os10 |
-| ``vrfdetails.map_ip_interface`` | list | Specifies a list of valid interface names | os10 |
-| ``map_ip_interface.intf_id`` | string | Specifies a valid interface name | os10 |
-| ``map_ip_interface.state`` | string | Deletes VRF association in the interface if set to absent | os10 |
-| ``upd_src_ip_loopback_id`` | string | Configures the source IP for any leaked route in VRF from the provided loopback ID, delete if empty string| os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-The *os10_vrf* role is built on modules included in the core Ansible code. These modules were added in Ansible version 2.2.0.
-
-Example playbook
-----------------
-
-This example uses the *os10_vrf* role to set up a VRF and associate it with an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vrf* role.
-*upd_src_ip_loopback_id* depends on the interface being associated with the VRF, so the *os10_vrf* role needs to be invoked twice with different input dictionaries: one to create the VRF and associate the interface, and one to set *upd_src_ip_loopback_id*.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
- os10_vrf:
- vrfdetails:
- - vrf_name: "os10vrf"
- state: "present"
- ip_route_import:
- community_value: "10:20"
- state: "present"
- route_map_value: "test4"
- ip_route_export:
- community_value: "30:40"
- state: "present"
- route_map_value: "test3"
- ipv6_route_import:
- community_value: "40:50"
- state: "absent"
- route_map_value: "test2"
- ipv6_route_export:
- community_value: "60:70"
- state: "absent"
- route_map_value: "test2"
- map_ip_interface:
- - intf_id : "loopback11"
- state : "present"
-
- os_vrf_upd_src_loopback:
- vrfdetails:
- - vrf_name: "os10vrf"
- state: "present"
- upd_src_ip_loopback_id: 11
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vrf
-
-**Simple playbook with `upd_src_ip_loopback_id` — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vrf
- - hosts: leaf1
- vars:
- os10_vrf: "{{ os_vrf_upd_src_loopback }}"
- roles:
- - dellemc.os10.os10_vrf
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_vrrp.md b/ansible_collections/dellemc/os10/docs/os10_vrrp.md
deleted file mode 100644
index 299166bff..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_vrrp.md
+++ /dev/null
@@ -1,139 +0,0 @@
-VRRP role
-=========
-
-This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VRRP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_vrrp` (dictionary) holds a dictionary with the interface name key
-- Interface name can correspond to any valid OS10 interface with a unique interface identifier name
-- Physical interface names must be in *<interfacename> <tuple>* format (for example *fortyGigE 1/1*)
-- Variables and values are case-sensitive
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrrp`` | dictionary | Configures VRRP commands (see ``vrrp.*``) | os10 |
-| ``version`` | dictionary | Configures VRRP version | os10 |
-| ``vrrp_active_active_mode`` | dictionary | Configures VRRP active-active mode | os10 |
-| ``delay_reload`` | integer | Configures the minimum delay timer applied after boot (0 to 900) | os10 |
-| ``vrrp_group`` | list | Configures VRRP group commands (see ``vrrp_group.*``) | os10 |
-| ``vrrp_group.type`` | string: ipv6,ipv4 | Specifies the type of the VRRP group | os10 |
-| ``vrrp_group.group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os10 |
-| ``vrrp_group.virtual_address`` | string | Configures a virtual-address to the VRRP group (A.B.C.D format) | os10 |
-| ``virtual_address.ip`` | string | Configures a virtual IP address (A.B.C.D format) | os10 |
-| ``virtual_address.state`` | string: present\*,absent | Configures/unconfigures a virtual-address (A.B.C.D format) | os10 |
-| ``vrrp_group.preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os10 |
-| ``vrrp_group.priority`` |integer | Configures priority for the VRRP group (1 to 255; default 100) | os10 |
-| ``vrrp_group.adv_interval_centisecs`` | integer | Configures the advertisement interval for the VRRP group in centiseconds (25 to 4075; default 100) and in multiple of 25; centisecs gets converted into seconds in version 2 | os10 |
-| ``vrrp_group.track_interface`` | list | Configures the track interface of the VRRP group (see ``track.*``) | os10 |
-| ``track_interface.resource_id`` | integer | Configures the object tracking resource ID of the VRRP group; mutually exclusive with *track.interface* | os10 |
-| ``track_interface.interface`` | string | Configures the track interface of the VRRP group (<interface name> <interface number> format) | os10 |
-| ``track_interface.priority_cost`` | integer | Configures the priority cost for track interface of the VRRP group (1 to 254; default 10) | os10 |
-| ``track_interface.state`` | string: present\*,absent | Deletes the specific track interface from the VRRP group if set to absent | os10 |
-| ``vrrp_group.track_interface.state`` | string: present\*,absent | Deletes all track interfaces from the VRRP group if set to absent | os10 |
-| ``vrrp_group.state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_vrrp* role to configure VRRP commands on the interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vrrp* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- ethernet1/1/1:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- adv_interval_centisecs: 200
- state: present
- vlan100:
- vrrp_active_active_mode: true
-
-> **NOTE**: VRRP cannot be configured on an interface in L2 mode; the interface can be configured using the *os10_interface* role.
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vrrp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_vxlan.md b/ansible_collections/dellemc/os10/docs/os10_vxlan.md
deleted file mode 100644
index 09b23bb36..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_vxlan.md
+++ /dev/null
@@ -1,259 +0,0 @@
-VxLAN role
-==========
-
-This role facilitates the configuration of virtual extensible LAN (VxLAN) attributes. It supports the configuration of virtual networks, Ethernet virtual private network (EVPN), and network virtualization edge (NVE). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VxLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_vxlan keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``anycast_gateway_mac`` | string | Configures the anycast gateway MAC address for a VxLAN virtual network | os10 |
-| ``loopback`` | dictionary | Configures the loopback interface (see ``loopback.*``) | os10 |
-| ``loopback.loopback_id`` | integer | Configures the loopback interface number (0 to 16383) | os10 |
-| ``loopback.description`` | string | Configures the interface description | os10 |
-| ``loopback.ip_address`` | string | Configures the IP address | os10 |
-| ``loopback.state`` | string: absent,present\* | Removes loopback interface if set to absent | os10 |
-| ``nve`` | dictionary | Configures network virtualization edge (see ``nve.*``) | os10 |
-| ``nve.source_interface`` | integer | Configures source loopback interface | os10 |
-| ``nve.controller`` | dictionary | Configures controller; supports only one controller connection at a time (see ``controller.*``) | os10 |
-| ``controller.name`` | string: NSX, ovsdb | Configures the NVE controller | os10 |
-| ``controller.max_backoff`` | integer | Configures max_backoff value (setting an empty value negates the corresponding configuration) | os10 |
-| ``controller.control_cfg`` | list | Configures the controller IP and port (see ``control_cfg.*``) | os10 |
-| ``control_cfg.ip_addr`` | string | Configures the controller IP | os10 |
-| ``control_cfg.port`` | integer | Configures the controller port | os10 |
-| ``control_cfg.state`` | string: absent,present\* | Removes the controller IP and port configuration if set to absent | os10 |
-| ``controller.state`` | string: absent,present\* | Removes the controller if set to absent | os10 |
-| ``nve.state`` | string: absent,present\* | Removes the NVE if set to absent | os10 |
-| ``evpn`` | dictionary | Enables EVPN in control plane (see ``evpn.*``) | os10 |
-| ``evpn.autoevi`` | boolean: True, False | Configures auto-EVI; no further manual configuration is allowed in auto-EVI mode | os10 |
-| ``evpn.rmac`` | string | Configures router MAC address | os10 |
-| ``evpn.evi`` | list | Configures EVPN instance (see ``evi.*``)| os10 |
-| ``evpn.dis_rt_asn`` | boolean | Enables/disables AS number usage in route target | os10 |
-| ``evpn.vrf`` | dictionary | Enables VRF for EVPN| os10 |
-| ``vrf.name`` | string | Configures VRF name | os10 |
-| ``vrf.state`` | string(present,absent) | Configures/removes VRF for EVPN | os10 |
-| ``vrf.vni`` | integer | Configures VNI for the VRF | os10 |
-| ``vrf.rd`` | string | Configures RD for the VRF | os10 |
-| ``vrf.route_target`` | dictionary | Enables route target for the VRF | os10 |
-| ``route_target.type`` | string (manual, auto) | Configures the route target type | os10 |
-| ``route_target.asn_value`` | string | Configures the AS number | os10 |
-| ``route_target.state`` | string (present,absent) | Configures/unconfigures the route target | os10 |
-| ``route_target.route_target_type`` | string | Configures the route target type | os10 |
-| ``vrf.adv_ipv4`` | dictionary | Enables IPv4 advertisement VRF | os10 |
-| ``adv_ipv4.type`` | string | Configures IPv4 advertisement type | os10 |
-| ``adv_ipv4.rmap_name`` | string | Configures route-map for advertisement | os10 |
-| ``adv_ipv4.unconfig`` | boolean | Configures/unconfigures route-map for advertisement | os10 |
-| ``evi.id`` | integer | Configures the EVPN instance ID (1 to 65535) | os10 |
-| ``evi.rd`` | string | Configures the route distinguisher | os10 |
-| ``evi.vni`` | dictionary | Configures VNI value (see ``vni.*``) | os10 |
-| ``vni.id`` | integer | Configures VNI value; configure the same VNI value configured for the VxLAN virtual network | os10 |
-| ``vni.state`` | string: absent,present\* | Removes the VNI if set to absent | os10 |
-| ``evi.route_target`` | list | Configures route target (see ``route_target.*``) | os10 |
-| ``route_target.type`` | string: manual,auto | Configures the route target (auto mode auto-configures an import and export value for EVPN routes) | os10 |
-| ``route_target.asn_value`` | string | Configures the route target ASN value | os10 |
-| ``route_target.route_target_type`` | string: import,export,both | Configures the route target type | os10 |
-| ``route_target.state`` | string: absent,present\* | Removes the route target if set to absent | os10 |
-| ``evi.state`` | string: absent,present\* | Removes EVPN instance ID if set to absent | os10 |
-| ``evpn.state`` | string: absent,present\* | Removes the EVPN configuration if set to absent | os10 |
-| ``virtual_network`` | dictionary | Configures the virtual network attributes (see ``virtual_network.*``) | os10 |
-| ``virtual_network.untagged_vlan`` | integer | Configures the reserved untagged VLAN ID (1 to 4093) | os10 |
-| ``virtual_network.virtual_net`` | list | Configures the virtual network attributes for VxLAN tunneling (see ``virtual_net.*``) | os10 |
-| ``virtual_net.id`` | integer | Configures a virtual network (virtual-network ID, from 1 to 65535) | os10 |
-| ``virtual_net.description`` | string | Configures the description for virtual network | os10 |
-| ``virtual_net.vlt_vlan_id`` | integer | Configures the VLTi VLAN ID | os10 |
-| ``virtual_net.member_interface`` | list | Configures the trunk member interface attributes to the virtual network (see ``member_interface.*``) | os10 |
-| ``member_interface.ifname`` | string | Configures interface name to provision the virtual network member interface | os10 |
-| ``member_interface.type`` | string: tagged,untagged | Configures the type to provision the virtual network member interface | os10 |
-| ``member_interface.vlanid`` | integer | Configures the VLAN ID to provision the virtual network member interface | os10 |
-| ``member_interface.state`` | string: absent,present\* | Removes the virtual network member interface if set to absent | os10 |
-| ``virtual_net.vxlan_vni`` | dictionary | Configures the VxLAN attributes to virtual network (see ``vxlan_vni.*``) | os10 |
-| ``vxlan_vni.id`` | integer | Configures the VxLAN ID to a virtual network | os10 |
-| ``vxlan_vni.remote_endpoint`` | list | Configures the IP address of a remote tunnel endpoint in a VxLAN network (see ``remote_endpoint.*``) | os10 |
-| ``remote_endpoint.ip`` | string | Configures the IP address of a remote tunnel endpoint (1.1.1.1) | os10 |
-| ``remote_endpoint.state`` | string: absent,present\* | Removes the remote tunnel endpoint in a VxLAN network if set to absent | os10 |
-| ``vxlan_vni.state`` | string: absent,present\* | Removes the VxLAN ID if set to absent | os10 |
-| ``virtual_net.state`` | string: absent,present\* | Removes a virtual network if set to absent | os10 |
-| ``vlan_association`` | list | Configures the VLAN association with virtual network (see ``vlan_association.*``) | os10 |
-| ``vlan_association.vlan_id`` | integer | Specifies the VLAN ID | os10 |
-| ``vlan_association.virtual_net`` | integer | Specifies the virtual network ID to be associated with the VLAN | os10 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_vxlan* role to configure the VxLAN network, the source IP address on the VxLAN tunnel endpoint, and virtual networks. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables and the corresponding role variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vxlan* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_vxlan:
- anycast_gateway_mac: "00:22:33:44:55:66"
- loopback:
- loopback_id: 10
- description: "HARDWARE_VXLAN"
- ip_address: "10.8.0.1/32"
- state: "present"
- nve:
- source_interface: 10
- controller:
- name: "ovsdb"
- max_backoff: 2000
- control_cfg:
- - ip_addr: "1.2.3.4"
- port: 30
- state: "present"
- state: "present"
- state: "present"
- evpn:
- autoevi: False
- evi:
- - id: 111
- rd: "auto"
- vni:
- id: 111
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "111:111"
- route_target_type: "both"
- state: "present"
- - type: "manual"
- asn_value: "11:11"
- route_target_type: "export"
- state: "present"
- state: "present"
- - id: 222
- rd: "2.2.2.2:222"
- vni:
- id: 222
- state: "present"
- route_target:
- - type: "auto"
- asn_value:
- route_target_type:
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- - name: "blue"
- state: "absent"
- rmac: 00:11:11:11:11:11
- dis_rt_asn: "true"
- state: "present"
- virtual_network:
- untagged_vlan: 1001
- virtual_net:
- - id: 111
- description: "NSX_Cluster_VNI_111"
- vlt_vlan_id: 11
- member_interface:
- - ifname: "ethernet 1/1/15"
- type: "tagged"
- vlanid: 15
- state: "present"
- - ifname: "port-channel 12"
- type: "tagged"
- vlanid: 11
- state: "present"
- vxlan_vni:
- id: 111
- remote_endpoint:
- - ip: "1.1.1.1"
- state: "present"
- - ip: "11.11.11.11"
- state: "present"
- - ip: "111.111.111.111"
- state: "present"
- state: "present"
- state: "present"
- - id: 222
- description: "NSX_Cluster_VNI_222"
- vlt_vlan_id: 22
- member_interface:
- - ifname: "ethernet 1/1/16"
- type: "tagged"
- vlanid: 16
- state: "present"
- vxlan_vni:
- id: 222
- remote_endpoint:
- - ip: "2.2.2.2"
- state: "present"
- - ip: "22.22.22.22"
- state: "present"
- state: "present"
- state: "present"
- vlan_association:
- vlan_id: 111
- virtual_net: 111
-
-> **NOTE**: Member interfaces should be in switchport trunk mode, which can be configured using the *os10_interface* role.
-
-**Simple playbook to configure VxLAN — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vxlan
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/os10_xstp.md b/ansible_collections/dellemc/os10/docs/os10_xstp.md
deleted file mode 100644
index 0dd919b27..000000000
--- a/ansible_collections/dellemc/os10/docs/os10_xstp.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# xSTP role
-
-This role facilitates the configuration of xSTP attributes. It supports multiple versions of spanning-tree protocol (STP): rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LANs (VLANs) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The xSTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- `os10_xstp` (dictionary) contains the hostname (dictionary)
-- Hostname is the value of the *hostname* variable that corresponds to the name of the device
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value to any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**hostname keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|----------------------|
-| ``type`` | string (required) | Configures the spanning-tree mode type, which can vary according to the device (RSTP, rapid-PVST, or MST) | os10 |
-| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in the type variable | os10 |
-| ``mac_flush_timer`` | integer | Configures the mac_flush_timer value (0 to 500) | os10 |
-| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os10 |
-| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os10 |
-| ``rstp.max_age`` | integer | Configures the max_age timer for RSTP (6 to 40) | os10 |
-| ``rstp.hello_time`` | integer | Configures the hello-time for RSTP (1 to 10) | os10 |
-| ``rstp.forward_time`` | integer | Configures the forward-time for RSTP (4 to 30) | os10 |
-| ``rstp.force_version`` | string: stp | Configures the force version for the BPDUs transmitted by RSTP | os10 |
-| ``rstp.mac_flush_threshold`` | integer | Configures the MAC flush threshold for RSTP (1 to 65535) | os10 |
-| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os10 |
-| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os10 |
-| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os10 |
-| ``vlan.max_age`` | integer | Configures the max_age timer for a VLAN (6 to 40) | os10 |
-| ``vlan.hello_time`` | integer | Configures the hello-time for a VLAN (1 to 10) | os10 |
-| ``vlan.forward_time`` | integer | Configures the forward-time for a VLAN (4 to 30) | os10 |
-| ``vlan.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated VLAN range_or_id | os10 |
-| ``vlan.mac_flush_threshold`` | integer | Configures the MAC flush threshold for a VLAN (1 to 65535) | os10 |
-| ``vlan.root`` | string: primary,secondary | Designates the primary or secondary root for the associated VLAN range_or_id; mutually exclusive with *vlan.bridge_priority* | os10 |
-| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os10 |
-| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os10 |
-| ``mstp.max_age`` | integer | Configures the max_age timer for MSTP (6 to 40) | os10 |
-| ``mstp.max_hops`` | integer | Configures the max-hops for MSTP (6 to 40) | os10 |
-| ``mstp.hello_time`` | integer | Configures the hello-time for MSTP (1 to 10) | os10 |
-| ``mstp.forward_time`` | integer | Configures the forward-time for MSTP (4 to 30) | os10 |
-| ``mstp.force_version`` | string: stp,rstp | Configures the force-version for the BPDUs transmitted by MSTP | os10 |
-| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os10 |
-| ``mstp_instances.number_or_range`` | integer | Configures the multiple spanning-tree instance number| os10 |
-| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os10 |
-| ``mstp_instances.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated MSTP instance | os10 |
-| ``mstp_instances.mac_flush_threshold`` | integer | Configures the MAC flush-threshold for an MSTP instance (1 to 65535) | os10 |
-| ``mstp_instances.root`` | string: primary,secondary | Designates the primary or secondary root for the associated MSTP instance; mutually exclusive with *mstp_instances.bridge_priority* | os10 |
-| ``mstp.mst_config`` | dictionary | Configures multiple spanning-tree (see ``mstp.mst_config.*``); supported | os10 |
-| ``mst_config.name`` | string | Configures the name which is specified for the MSTP | os10 |
-| ``mst_config.revision`` | integer | Configures the revision number for MSTP | os10 |
-| ``mst_config.cfg_list`` | list | Configures the multiple spanning-tree list (see ``mst_config.cfg_list.*``) | os10 |
-| ``cfg_list.number`` | integer | Specifies the MSTP instance number | os10 |
-| ``cfg_list.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to an instance number | os10 |
-| ``cfg_list.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os10 |
-| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os10 |
-| ``intf <interface name>``| dictionary | Configures the interface name (see ``intf.<interface name>.*``) | os10 |
-| ``intf.<interface name>.edge_port`` | boolean: true,false | Configures the EdgePort as dynamic if set to true | os10 |
-| ``intf.<interface name>.bpdu_filter``| boolean: true,false | Enables/disables bpdufilter at the interface | os10 |
-| ``intf.<interface name>.bpdu_guard``| boolean: true,false | Enables/disables bpduguard at the interface | os10 |
-| ``intf.<interface name>.guard``| string: loop,root,none | Configures guard on the interface | os10 |
-| ``intf.<interface name>.enable`` | boolean: true,false | Enables/disables spanning-tree at the interface level | os10 |
-| ``intf.<interface name>.link_type``| string: auto,point-to-point,shared | Configures the link type at the interface | os10 |
-| ``intf.<interface name>.rstp`` | dictionary | Configures the RSTP interface name (see ``intf.<interface name>.rstp.*``) | os10 |
-| ``rstp.priority``| integer | Configures the RSTP priority value at the interface | os10 |
-| ``rstp.cost`` | integer | Configures the RSTP cost value at the interface | os10 |
-| ``intf.<interface name>.msti`` | list | Configures the MSTi interface name (see ``intf.<interface name>.msti``) | os10 |
-| ``msti.instance_number`` | integer or range | Specifies the MSTP instance number or range | os10 |
-| ``msti.priority`` | integer | Specifies the priority value to be configured at the interface | os10 |
-| ``msti.cost`` | integer | Specifies the cost value to be configured at the interface | os10 |
-| ``intf.<interface name>.vlan`` | list | Configures the VLAN interface name (see ``intf.<interface name>.vlan``) | os10 |
-| ``vlan.range_or_id`` | integer or range | Specifies the VLAN ID or range | os10 |
-| ``vlan.priority`` | integer | Specifies the priority value to be configured at the interface | os10 |
-| ``vlan.cost`` | integer | Specifies the cost value to be configured at the interface | os10 |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path.
-
-It writes a simple playbook that only references the *os10_xstp* role. By including the role, you automatically get access to all of the tasks to configure xSTP.
-
-**Sample hosts file**
-
- spine1 ansible_host= <ip_address>
-
-**Sample host_vars/spine1**
-
- hostname: spine1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
-**Sample vars/main.yml**
-
- os10_xstp:
- type: rstp
- enable: true
- path_cost: true
- mac_flush_timer: 4
- rstp:
- max_age: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- bridge_priority: 4096
- mac_flush_threshold: 5
- pvst:
- vlan:
- - range_or_id: 10
- max_age: 6
- enable: true
- hello_time: 7
- forward_time: 7
- bridge_priority: 4096
- mac_flush_threshold: 9
- mstp:
- max_age: 6
- max_hops: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- mstp_instances:
- - number_or_range: 1
- enable: true
- mac_flush_threshold: 9
- bridge_priority: 4096
- mst_config:
- name: cfg1
- revision: 5
- cfg_list:
- - number: 1
- vlans: 10,12
- vlans_state: present
- intf:
- ethernet 1/1/8:
- edge_port: true
- bpdu_filter: true
- bpdu_guard: true
- guard: loop
- enable: true
- link_type: point-to-point
- msti:
- - instance_number: 1
- priority: 32
- cost: 1
- rstp:
- priority: 32
- cost: 7
- vlan:
- - range_or_id: 6
- priority: 16
- cost: 8
-
-
-**Simple playbook to setup system — spine.yml**
-
- - hosts: spine
- roles:
- - dellemc.os10.os10_xstp
-
-**Run**
-
- ansible-playbook -i hosts spine.yml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/docs/roles.rst b/ansible_collections/dellemc/os10/docs/roles.rst
deleted file mode 100644
index 12e372128..000000000
--- a/ansible_collections/dellemc/os10/docs/roles.rst
+++ /dev/null
@@ -1,193 +0,0 @@
-##############################################################
-Ansible Network Collection Roles for Dell EMC SmartFabric OS10
-##############################################################
-
-The roles facilitate provisioning of devices running Dell EMC SmartFabric OS10. This document describes each of the roles.
-
-AAA role
---------
-
-The `os10_aaa <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_aaa/README.md>`_ role facilitates the configuration of authentication, authorization, and accounting (AAA), and supports the configuration of TACACS and RADIUS servers and AAA.
-
-
-ACL role
---------
-
-The `os10_acl <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_acl/README.md>`_ role facilitates the configuration of access control lists (ACLs). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to line terminals.
-
-
-BFD role
---------
-
-The `os10_bfd <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_bfd/README.md>`_ role facilitates the configuration of BFD global attributes. It specifically enables configuration of the BFD interval, min_rx, multiplier, and role.
-
-
-BGP role
---------
-
-The `os10_bgp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_bgp/README.md>`_ role facilitates the configuration of border gateway protocol (BGP) attributes, and supports router ID, networks, neighbors, and maximum path configurations.
-
-
-Copy configuration role
------------------------
-
-The `os10_copy_config <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_copy_config/README.md>`_ role pushes the backup running configuration into a device. This role merges the configuration in the template file with the running configuration of the Dell EMC Networking OS10 device.
-
-
-DNS role
---------
-
-The `os10_dns <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_dns/README.md>`_ role facilitates the configuration of domain name service (DNS).
-
-
-ECMP role
----------
-
-The `os10_ecmp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_ecmp/README.md>`_ role facilitates the configuration of equal cost multi-path (ECMP). It supports the configuration of ECMP for IPv4.
-
-
-Fabric-summary role
--------------------
-
-The `os10_fabric_summary <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_fabric_summary/README.md>`_ role facilitates retrieving the *show system* information of all the switches in the fabric.
-
-
-Flow-monitor role
------------------
-
-The `os10_flow_monitor <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_flow_monitor/README.md>`_ role facilitates the configuration of ACL flow-based monitoring attributes. In flow-based mirroring, the ingress traffic matching the specified policies is mirrored to a destination port. Port-based mirroring maintains a database that contains all monitoring sessions, including port monitor sessions.
-
-
-Image-upgrade role
-------------------
-
-The `os10_image_upgrade <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_image_upgrade/README.md>`_ role facilitates installation of OS10 software images.
-
-
-Interface role
---------------
-
-The `os10_interface <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_interface/README.md>`_ role facilitates the configuration of interface attributes. It supports the configuration of administrative state, description, MTU, IP address, IP helper, and port mode.
-
-
-LAG role
---------
-
-The `os10_lag <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_lag/README.md>`_ role facilitates the configuration of link aggregation group (LAG) attributes. This role supports the creation and deletion of a LAG and its member ports, and supports the configuration of type (static/dynamic), hash scheme, and minimum required link.
-
-
-LLDP role
----------
-
-The `os10_lldp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_lldp/README.md>`_ role facilitates the configuration of link layer discovery protocol (LLDP) attributes at the global and interface levels. This role supports the configuration of hello, mode, multiplier, advertised TLVs, management interface, FCoE, and iSCSI at the global and interface levels.
-
-
-Logging role
-------------
-
-The `os10_logging <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_logging/README.md>`_ role facilitates the configuration of global logging attributes, and supports the configuration of logging servers.
-
-
-Network-Validation role
------------------------
-
-The `os10_network_validation <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_network_validation/README.md>`_ role facilitates network validation. It validates networking features such as wiring connections, BGP neighbors, MTU between neighbors, and VLT pairs.
-
-
-NTP role
---------
-
-The `os10_ntp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_ntp/README.md>`_ role facilitates the configuration of network time protocol attributes.
-
-
-Prefix-list role
-----------------
-
-The `os10_prefix_list <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_prefix_list/README.md>`_ role facilitates the configuration of a prefix-list, supports the configuration of IP prefix-list, and assigns the prefix-list to line terminals.
-
-
-QoS role
---------
-
-The `os10_qos <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_qos/README.md>`_ role facilitates the configuration of quality of service attributes including policy-map and class-map.
-
-
-RA Guard role
--------------
-
-The `os10_raguard <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_raguard/README.md>`_ role facilitates the configuration of IPv6 RA Guard attributes.
-
-
-Route-map role
---------------
-
-The `os10_route_map <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_route_map/README.md>`_ role facilitates the configuration of route-map attributes.
-
-
-SNMP role
----------
-
-The `os10_snmp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_snmp/README.md>`_ role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes such as users, groups, communities, location, traps, and so on.
-
-
-System role
------------
-
-The `os10_system <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_system/README.md>`_ role facilitates the configuration of global system attributes. This role specifically enables configuration of hostname and hashing algorithm for OS10.
-
-
-TEMPLATE role
--------------
-
-The `os10_template <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_template/README.md>`_ role facilitates the use of the TextFSM parsing engine. TextFSM is a template-based state machine. It takes raw string input from the CLI of OS10 network devices, runs it through a TextFSM template, and returns structured text in the form of a Python dictionary.
-
-
-UPLINK role
------------
-
-The `os10_uplink <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_uplink/README.md>`_ role facilitates the configuration of uplink attributes, and is abstracted for OS10. It specifically enables configuration of association between upstream and downstream interfaces known as uplink-state group.
-
-
-Users role
-----------
-
-The `os10_users <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_users/README.md>`_ role facilitates the configuration of global system user attributes. This role supports the configuration of CLI users.
-
-
-VLAN role
----------
-
-The `os10_vlan <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_vlan/README.md>`_ role facilitates configuring virtual LAN (VLAN) attributes. This role supports the creation and deletion of a VLAN and its member ports.
-
-
-VLT role
---------
-
-The `os10_vlt <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_vlt/README.md>`_ role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology.
-
-
-VRF role
---------
-
-The `os10_vrf <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_vrf/README.md>`_ role facilitates the configuration of basic virtual routing and forwarding (VRF) that helps in the partition of physical routers to multiple virtual routers.
-
-
-VRRP role
----------
-
-The `os10_vrrp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_vrrp/README.md>`_ role facilitates configuration of virtual router redundancy protocol (VRRP) attributes. This role supports the creation of VRRP groups for interfaces, and setting the VRRP group attributes.
-
-
-VXLAN role
-----------
-
-The `os10_vxlan <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_vxlan/README.md>`_ role facilitates the configuration of virtual extensible LAN (VXLAN) attributes. It supports the configuration of virtual networks, Ethernet virtual private network (EVPN), and network virtualization edge (NVE).
-
-
-xSTP role
----------
-
-The `os10_xstp <https://github.com/ansible-collections/dellemc.os10/blob/master/roles/os10_xstp/README.md>`_ role facilitates the configuration of xSTP attributes. This role supports multiple versions of spanning-tree protocol (STP): rapid spanning-tree (RSTP), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). This role supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LANs (VLANs) to instances.
-
-
-\(c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
diff --git a/ansible_collections/dellemc/os10/meta/runtime.yml b/ansible_collections/dellemc/os10/meta/runtime.yml
deleted file mode 100644
index e211415c7..000000000
--- a/ansible_collections/dellemc/os10/meta/runtime.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-plugin_routing:
- action:
- os10_config:
- redirect: dellemc.os10.os10
- os10_command:
- redirect: dellemc.os10.os10
- os10_facts:
- redirect: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md
deleted file mode 100644
index f0affd956..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/README.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-
-# Provision CLOS fabric using the Ansible collection for Dell EMC SmartFabric OS10
-
-This example describes how to use Ansible to build a CLOS fabric with Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. The sample topology is a two-tier CLOS fabric with two spines and four leaves connected in a mesh. eBGP runs between the two tiers. All spine switches share the same AS number, and each leaf switch has a unique AS number. All AS numbers used are private.
-
-For application load-balancing purposes, the same prefix is advertised from multiple leaf switches using the _BGP multipath relax_ feature.
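-
-In the role variables, this behavior is enabled through the `os10_bgp` best-path settings; a minimal excerpt:
-
-    os10_bgp:
-      best_path:
-        as_path: multipath-relax
-        as_path_state: present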
-
-![CLOS FABRIC Topology](https://ansible-dellos-docs.readthedocs.io/en/latest/_images/topo.png)
-
-## Create a simple Ansible playbook
-
-**1**. Create an inventory file called `inventory.yaml`, then specify the device IP address.
-
-**2**. Create a group variable file called `group_vars/all`, then define credentials and SNMP variables.
-
-**3**. Create a group variable file called `group_vars/spine.yaml`, then define credentials, hostname, and BGP neighbors of spine group.
-
-**4**. Create a host variable file called `host_vars/spine1.yaml`, then define the host, credentials, and transport (a sample excerpt is shown after these steps).
-
-**5**. Create a host variable file called `host_vars/spine2.yaml`, then define the host, credentials, and transport.
-
-**6**. Create a host variable file called `host_vars/leaf1.yaml`, then define the host, credentials, and transport.
-
-**7**. Create a host variable file called `host_vars/leaf2.yaml`, then define the host, credentials, and transport.
-
-**8**. Create a host variable file called `host_vars/leaf3.yaml`, then define the host, credentials, and transport.
-
-**9**. Create a host variable file called `host_vars/leaf4.yaml`, then define the host, credentials, and transport.
-
-**10**. Create a playbook called `datacenter.yaml`.
-
-**11**. Run the playbook.
-
- ansible-playbook -i inventory.yaml datacenter.yaml
-
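-For reference, each host variable file created in steps 4 through 9 follows the same minimal pattern, combining credentials, the network OS, and the role variables for that device; an excerpt from `host_vars/spine1.yaml`:
-
-    ansible_ssh_user: xxxxx
-    ansible_ssh_pass: xxxxx
-    ansible_network_os: dellemc.os10.os10
-    spine_hostname: "spine-1"
-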
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml
deleted file mode 100644
index 7174af84f..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/datacenter.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: datacenter
- gather_facts: no
- connection: network_cli
- collections:
- - dellemc.os10
- roles:
- - os10_interface
- - os10_bgp
- - os10_snmp
- - os10_system
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all
deleted file mode 100644
index 6985e8adc..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/all
+++ /dev/null
@@ -1,9 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_snmp:
- snmp_community:
- - name: public
- access_mode: ro
- state: present
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml
deleted file mode 100644
index 3524eaafb..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/group_vars/spine.yaml
+++ /dev/null
@@ -1,85 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "{{ spine_hostname }}"
-
-os10_bgp:
- asn: 64901
- router_id: "{{ bgp_router_id }}"
- best_path:
- as_path: multipath-relax
- as_path_state: present
- med:
- - attribute: missing-as-worst
- state: present
- neighbor:
- - type: ipv4
- remote_asn: "{{ bgp_neigh1_remote_asn }}"
- ip: "{{ bgp_neigh1_ip }}"
- admin: up
- state: present
- - type: ipv4
- remote_asn: "{{ bgp_neigh2_remote_asn }}"
- ip: "{{ bgp_neigh2_ip }}"
- admin: up
- state: present
- - type: ipv4
- remote_asn: "{{ bgp_neigh3_remote_asn }}"
- ip: "{{ bgp_neigh3_ip }}"
- admin: up
- state: present
- - type: ipv4
- remote_asn: "{{ bgp_neigh4_remote_asn }}"
- ip: "{{ bgp_neigh4_ip }}"
- admin: up
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh5_remote_asn }}"
- ip: "{{ bgp_neigh5_ip }}"
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh6_remote_asn }}"
- ip: "{{ bgp_neigh6_ip }}"
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh7_remote_asn }}"
- ip: "{{ bgp_neigh7_ip }}"
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh8_remote_asn }}"
- ip: "{{ bgp_neigh8_ip }}"
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml
deleted file mode 100644
index 38691a5e2..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-leaf_hostname: "leaf-1"
-os10_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: crc
- state: present
-os10_interface:
- ethernet 1/1/1:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.1.2/24
- ipv6_and_mask: 2001:100:1:1::2/64
- state_ipv6: present
- ethernet 1/1/9:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.1.2/24
- ipv6_and_mask: 2001:100:2:1::2/64
- state_ipv6: present
-os10_bgp:
- asn: 64801
- router_id: 100.0.2.1
- address_family_ipv4: true
- address_family_ipv6: true
- best_path:
- as_path: multipath-relax
- as_path_state: present
- med:
- - attribute: missing-as-worst
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.1.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.1.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:1::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:1::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml
deleted file mode 100644
index d760626d5..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-hostname: leaf2
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-leaf_hostname: "leaf-2"
-os10_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: crc
- state: present
-os10_interface:
- ethernet 1/1/1:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.17.2/24
- ipv6_and_mask: 2001:100:1:11::2/64
- state_ipv6: present
- ethernet 1/1/9:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.17.2/24
- ipv6_and_mask: 2001:100:2:11::2/64
-os10_bgp:
- asn: 64802
- router_id: 100.0.2.2
- address_family_ipv4: true
- address_family_ipv6: true
- best_path:
- as_path: multipath-relax
- as_path_state: present
- med:
- - attribute: missing-as-worst
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.18.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.17.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.17.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:11::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:11::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml
deleted file mode 100644
index 7b199125c..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml
+++ /dev/null
@@ -1,81 +0,0 @@
-hostname: leaf3
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-leaf_hostname: "leaf-3"
-os10_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: crc
- state: present
-os10_interface:
- ethernet 1/1/1:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.33.2/24
- ipv6_and_mask: 2001:100:1:21::2/64
- state_ipv6: present
- ethernet 1/1/9:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.33.2/24
- ipv6_and_mask: 2001:100:2:21::2/64
-os10_bgp:
- asn: 64803
- router_id: 100.0.2.3
- address_family_ipv4: true
- address_family_ipv6: true
- best_path:
- as_path: multipath-relax
- as_path_state: present
- med:
- - attribute: missing-as-worst
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.33.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.33.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:21::1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:22::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:21::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml
deleted file mode 100644
index e06099e53..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml
+++ /dev/null
@@ -1,77 +0,0 @@
-hostname: leaf4
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-leaf_hostname: "leaf-4"
-os10_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: crc
- state: present
-os10_interface:
- ethernet 1/1/5:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.49.2/24
- ipv6_and_mask: 2001:100:1:31::2/64
- state_ipv6: present
- ethernet 1/1/17:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.49.2/24
- ipv6_and_mask: 2001:100:2:31::2/64
- state_ipv6: present
-os10_bgp:
- asn: 64804
- router_id: 100.0.2.4
- address_family_ipv4: true
- address_family_ipv6: true
- best_path:
- as_path: multipath-relax
- as_path_state: present
- med:
- - attribute: missing-as-worst
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.49.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.49.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:31::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:31::1
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml
deleted file mode 100644
index 2d926034c..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-hostname: spine1
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-spine_hostname: "spine-1"
-
-os10_interface:
- ethernet 1/1/1:
- desc: "Connected to leaf 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.1.1/24
- ipv6_and_mask: 2001:100:1:1::1/64
- state_ipv6: present
- ethernet 1/1/17:
- desc: "Connected to leaf 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.33.1/24
- ipv6_and_mask: 2001:100:1:21::1/64
- state_ipv6: present
- ethernet 1/1/25:
- desc: "Connected to leaf 3"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.17.1/24
- ipv6_and_mask: 2001:100:1:11::1/64
- state_ipv6: present
- ethernet 1/1/9:
- desc: "Connected to leaf 4"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.49.1/24
- ipv6_and_mask: 2001:100:1:31::1/64
- state_ipv6: present
-
-bgp_router_id: "100.0.1.1"
-bgp_neigh1_remote_asn: 64801
-bgp_neigh1_ip: "100.1.1.2"
-bgp_neigh2_remote_asn: 64803
-bgp_neigh2_ip: "100.1.33.2"
-bgp_neigh3_remote_asn: 64802
-bgp_neigh3_ip: "100.1.17.2"
-bgp_neigh4_remote_asn: 64804
-bgp_neigh4_ip: "100.1.49.2"
-bgp_neigh5_remote_asn: 64801
-bgp_neigh5_ip: "2001:100:1:1::2"
-bgp_neigh6_remote_asn: 64802
-bgp_neigh6_ip: "2001:100:1:11::2"
-bgp_neigh7_remote_asn: 64803
-bgp_neigh7_ip: "2001:100:1:21::2"
-bgp_neigh8_remote_asn: 64804
-bgp_neigh8_ip: "2001:100:1:31::2"
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml
deleted file mode 100644
index 7c616e9f7..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-hostname: spine2
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-spine_hostname: "spine-2"
-os10_interface:
- ethernet 1/1/1:
- desc: "Connected to leaf 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.1.1/24
- ipv6_and_mask: 2001:100:2:1::1/64
- state_ipv6: present
- ethernet 1/1/25:
- desc: "Connected to leaf 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.17.1/24
- ipv6_and_mask: 2001:100:2:11::1/64
- state_ipv6: present
- ethernet 1/1/17:
- desc: "Connected to leaf 3"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.33.1/24
- ipv6_and_mask: 2001:100:2:21::1/64
- state_ipv6: present
- ethernet 1/1/9:
- desc: "Connected to leaf 4"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.49.1/24
- ipv6_and_mask: 2001:100:2:31::1/64
- state_ipv6: present
-
-bgp_router_id: "100.0.1.2"
-bgp_neigh1_remote_asn: 64801
-bgp_neigh1_ip: "100.2.1.2"
-bgp_neigh2_remote_asn: 64802
-bgp_neigh2_ip: "100.2.33.2"
-bgp_neigh3_remote_asn: 64803
-bgp_neigh3_ip: "100.2.17.2"
-bgp_neigh4_remote_asn: 64804
-bgp_neigh4_ip: "100.2.49.2"
-bgp_neigh5_remote_asn: 64801
-bgp_neigh5_ip: "2001:100:2:1::2"
-bgp_neigh6_remote_asn: 64802
-bgp_neigh6_ip: "2001:100:2:11::2"
-bgp_neigh7_remote_asn: 64803
-bgp_neigh7_ip: "2001:100:2:21::2"
-bgp_neigh8_remote_asn: 64804
-bgp_neigh8_ip: "2001:100:2:31::2"
diff --git a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml b/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml
deleted file mode 100644
index 9516f660c..000000000
--- a/ansible_collections/dellemc/os10/playbooks/clos_fabric_ebgp/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=10.11.182.25
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md
deleted file mode 100644
index 6d1af994a..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/README.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# VxLAN Symmetric-IRB configuration with BGP EVPN using the Ansible collection for Dell EMC SmartFabric OS10
-
-
-This example describes how to use Ansible to build a Leaf-Spine topology with Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10, using a VxLAN Symmetric-IRB configuration model.
-
-VxLAN Symmetric-IRB is configured using BGP EVPN with a Leaf-Spine topology. BGP unnumbered is configured between the Leaf and Spine over VLANs for both the underlay and the overlay. VLT is configured between the pair of Leaf nodes.
-
-In all Leaf nodes, the L3 VRF VNI "test" is configured to route between the different VNs spanned across the topology, and the VTEP router MAC is configured to identify the remote VTEPs.
-VN 100 and VN 300 are configured in one pair of Leaf nodes, and VN 200 and VN 300 are configured in the other pair of Leaf nodes.
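-
-In the `os10_vxlan` role variables, this L3 VRF-to-VNI mapping is expressed as shown in the leaf host variable files; a minimal excerpt:
-
-    os10_vxlan:
-      vrf:
-        - name: "test"
-          vni: 1000
-          adv_ipv4:
-            - type: "connected"
-              state: "present"
-            - type: "bgp"
-              state: "present"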
-
-## Create a simple Ansible playbook
-
-**1**. Create an inventory file called `inventory.yaml`, then specify the device IP address.
-
-**2**. Create a host variable file called `host_vars/spine1.yaml`, then define the host, credentials, and transport.
-
-**3**. Create a host variable file called `host_vars/spine2.yaml`, then define the host, credentials, and transport.
-
-**4**. Use the *os10_interface* and *os10_vlan* roles to configure the required VLANs.
-
-**5**. Use the *os10_bgp* role to configure BGP unnumbered.
-
-**Configurations for VTEP-1**
-
-**1**. Create a host variable file called `host_vars/prim-vtep1.yaml`.
-
-**2**. Create a host variable file called `host_vars/sec-vtep1.yaml`.
-
-**3**. Define the host, credentials, and transport.
-
-**4**. Use the *os10_interface* and *os10_vlan* roles to configure the required VLANs.
-
-**5**. Use the *os10_bgp* role to configure BGP unnumbered.
-
-**6**. Use the *os10_vxlan* role to configure VN networks, EVPN and Symmetric IRB functionality.
-
-**7**. Use the *os10_vlt* role to configure VLT between leaves prim-vtep1 and sec-vtep1.
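-
-A minimal `os10_vlt` variable sketch for the leaf pair, following the prim-vtep1 and sec-vtep1 host variable files (MAC, port-channel, and interface-range values are illustrative):
-
-    os10_vlt:
-      domain: 1
-      destination_type: "ipv4"
-      peer_routing: True
-      discovery_intf: "1/1/4-1/1/5"
-      vlt_mac: 00:00:00:11:22:33
-      vlt_peers:
-        Po 10:
-          peer_lag: 10
-      state: "present"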
-
-**Configurations for VTEP-2**
-
-**1**. Create a host variable file called `host_vars/prim-vtep2.yaml`.
-
-**2**. Create a host variable file called `host_vars/sec-vtep2.yaml`.
-
-**3**. Define the host, credentials, and transport.
-
-**4**. Use the *os10_interface* and *os10_vlan* roles to configure the required VLANs.
-
-**5**. Use the *os10_bgp* role to configure BGP unnumbered.
-
-**6**. Use *os10_vxlan* role to configure VN networks, EVPN and Symmetric IRB functionality.
-
-**7**. Use the *os10_vlt* role to configure VLT between leaves prim-vtep2 and sec-vtep2.
-
-**Create and run the playbook**
-
-**1**. Create a playbook called `datacenter.yaml`.
-
-**2**. Run the playbook.
-
- ansible-playbook -i inventory.yaml datacenter.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml
deleted file mode 100644
index d8b1d4139..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/datacenter.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- collections:
- - dellemc.os10
- vars:
- build_dir: "/home/administrator/ansible/debug"
- roles:
- - os10_vrf
- - os10_interface
- - os10_system
- - os10_bgp
- - os10_lag
- - os10_vlan
- - os10_vxlan
- - os10_vlt
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml
deleted file mode 100644
index 5ad285057..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep1.yaml
+++ /dev/null
@@ -1,210 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "VLT1-Primary"
-
-os10_bgp:
- asn: 100
- router_id: 1.1.1.10
- neighbor:
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan10
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan11
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- ipv4_network: 1.1.1.1/32
- redistribute:
- - route_type: connected
- address_type: ipv4
- state: present
- state: "present"
-
-
-os10_interface:
- loopback 0:
- admin: up
- ip_and_mask: 1.1.1.1/32
- ethernet 1/1/6:
- switchport: False
- admin: up
- ethernet 1/1/4:
- admin: up
- switchport: False
- ethernet 1/1/5:
- admin: up
- switchport: False
- ethernet 1/1/1:
- admin: up
- switchport: False
- portmode: "trunk"
-# ethernet 1/1/2:
- ethernet 1/1/3:
- admin: up
- switchport: False
- portmode: "trunk"
- port-channel 10:
- portmode: "trunk"
- admin: up
- vlan 10:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 11:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 20:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- virtual-network 100:
- vrf: "test"
- ip_and_mask: "15.1.1.1/24"
- ip_virtual_gateway_ip: "15.1.1.254"
- admin: up
- virtual-network 300:
- vrf: "test"
- ip_and_mask: "25.1.1.1/24"
- ip_virtual_gateway_ip: "25.1.1.254"
- admin: up
-
-os10_lag:
- port-channel 10:
- type: dynamic
- channel_members:
-# - port: ethernet1/1/5
- - port: ethernet1/1/6
- mode: active
- state: present
-
-os10_vlan:
- vlan 10:
- tagged_members:
- - port: ethernet 1/1/1
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 20:
- tagged_members:
-# - port: ethernet 1/1/2
- - port: ethernet 1/1/3
- state: "present"
- access_vlan: "false"
- state: "present"
-
-os10_vrf:
- vrfdetails:
- - vrf_name: "test"
- state: "present"
-
-os10_vxlan:
- anycast_gateway_mac: "00:00:aa:bb:ee:ff"
- nve:
- source_interface: 0
- state: "present"
- evpn:
- evi:
- - id: 100
- vni:
- id: 100
- state: "present"
- rd: "auto"
- route_target:
- - type: "manual"
- asn_value: "65530:65532"
- route_target_type: "both"
- state: "present"
- state: "present"
- - id: 300
- vni:
- id: 300
- state: "present"
- rd: "auto"
- route_target:
- - type: "auto"
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- rmac: 00:11:11:11:11:11
- dis_rt_asn: "true"
- virtual_network:
- virtual_net:
- - id: 100
- vlt_vlan_id: 100
- member_interface:
- - ifname: "port-channel10"
- type: "tagged"
- vlanid: 100
- state: "present"
- vxlan_vni:
- id: 100
- state: "present"
- state: "present"
- - id: 300
- vlt_vlan_id: 300
- member_interface:
- - ifname: "port-channel10"
- type: "tagged"
- vlanid: 300
- state: "present"
- vxlan_vni:
- id: 300
- state: "present"
- state: "present"
-
-os10_vlt:
- domain: 1
- destination_type: "ipv4"
- peer_routing: True
-# discovery_intf: "1/1/3-1/1/4"
- discovery_intf: "1/1/4-1/1/5"
- vlt_mac: 00:00:00:11:22:33
- vlt_peers:
- Po 10:
- peer_lag: 10
- state: "present"
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml
deleted file mode 100644
index ea49d19d6..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/prim-vtep2.yaml
+++ /dev/null
@@ -1,194 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "VLT2-Primary"
-
-os10_bgp:
- asn: 300
- router_id: 2.2.2.10
- ipv4_network: 2.2.2.2/32
- redistribute:
- - route_type: connected
- address_type: ipv4
- state: present
- state: "present"
- neighbor:
- - type: ipv4
- interface: vlan50
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan60
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan11
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
-
-os10_interface:
- loopback 0:
- admin: up
- ip_and_mask: 2.2.2.2/32
- loopback 10:
- admin: up
- vrf: "test"
- ip_and_mask: 50.1.1.10/32
- loopback 20:
- admin: up
- vrf: "test"
- ip_and_mask: 60.1.1.10/32
- ethernet 1/1/1:
- admin: up
- switchport: False
- portmode: "trunk"
- ethernet 1/1/2:
- admin: up
- switchport: False
- portmode: "trunk"
- ethernet 1/1/3:
- switchport: False
- admin: up
- ethernet 1/1/4:
- admin: up
- switchport: False
- ethernet 1/1/5:
- switchport: False
- vrf: "test"
- ip_and_mask: "21.21.21.20/24"
- admin: up
- vlan 11:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 50:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 60:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- virtual-network 200:
- vrf: "test"
- ip_and_mask: "30.1.1.100/24"
- ip_virtual_gateway_ip: "30.1.1.254"
- admin: up
- virtual-network 300:
- vrf: "test"
- ip_and_mask: "25.1.1.100/24"
- ip_virtual_gateway_ip: "25.1.1.254"
- admin: up
-
-os10_vlan:
- vlan 50:
- tagged_members:
- - port: ethernet 1/1/1
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 60:
- tagged_members:
- - port: ethernet 1/1/2
- state: "present"
- access_vlan: "false"
- state: "present"
-
-os10_vrf:
- vrfdetails:
- - vrf_name: "test"
- state: "present"
-
-os10_vxlan:
- anycast_gateway_mac: "00:00:aa:bb:ee:ff"
- nve:
- source_interface: 0
- state: "present"
- evpn:
- evi:
- - id: 200
- vni:
- id: 200
- state: "present"
- rd: "auto"
- route_target:
- - type: "manual"
- asn_value: "65530:65533"
- route_target_type: "both"
- state: "present"
- state: "present"
- - id: 300
- vni:
- id: 300
- state: "present"
- rd: "auto"
- route_target:
- - type: "auto"
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- rmac: 00:00:22:22:22:22
- dis_rt_asn: "true"
- virtual_network:
- virtual_net:
- - id: 200
- vlt_vlan_id: 200
- vxlan_vni:
- id: 200
- state: "present"
- state: "present"
- - id: 300
- vlt_vlan_id: 300
- vxlan_vni:
- id: 300
- state: "present"
- state: "present"
-
-os10_vlt:
- domain: 1
- destination_type: "ipv4"
- peer_routing: True
- discovery_intf: "1/1/3-1/1/4"
- vlt_mac: 00:00:00:44:55:66
- vlt_peers:
- Po 10:
- peer_lag: 10
- state: "present"
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml
deleted file mode 100644
index ac04c3c65..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep1.yaml
+++ /dev/null
@@ -1,206 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "VLT1-SEC"
-
-os10_bgp:
- asn: 100
- router_id: 1.1.1.20
- neighbor:
- - type: ipv4
- interface: vlan40
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan30
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan11
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- ipv4_network: 1.1.1.1/32
- redistribute:
- - route_type: connected
- address_type: ipv4
- state: present
- state: "present"
-
-
-os10_interface:
- loopback 0:
- admin: up
- ip_and_mask: 1.1.1.1/32
- ethernet 1/1/3:
- switchport: False
- admin: up
- ethernet 1/1/4:
- admin: up
- switchport: False
- ethernet 1/1/5:
- admin: up
- switchport: False
- ethernet 1/1/1:
- admin: up
- switchport: False
- portmode: "trunk"
- ethernet 1/1/2:
- admin: up
- switchport: False
- portmode: "trunk"
- port-channel 10:
- portmode: "trunk"
- admin: up
- vlan 30:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 11:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 40:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- virtual-network 100:
- vrf: "test"
- ip_and_mask: "15.1.1.2/24"
- ip_virtual_gateway_ip: "15.1.1.254"
- admin: up
- virtual-network 300:
- vrf: "test"
- ip_and_mask: "25.1.1.2/24"
- ip_virtual_gateway_ip: "25.1.1.254"
- admin: up
-
-os10_lag:
- port-channel 10:
- type: dynamic
- channel_members:
- - port: ethernet1/1/5
- mode: active
- state: present
-
-os10_vlan:
- vlan 30:
- tagged_members:
- - port: ethernet 1/1/1
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 40:
- tagged_members:
- - port: ethernet 1/1/2
- state: "present"
- access_vlan: "false"
- state: "present"
-
-os10_vrf:
- vrfdetails:
- - vrf_name: "test"
- state: "present"
-
-os10_vxlan:
- anycast_gateway_mac: "00:00:aa:bb:ee:ff"
- nve:
- source_interface: 0
- state: "present"
- evpn:
- evi:
- - id: 100
- vni:
- id: 100
- state: "present"
- rd: "auto"
- route_target:
- - type: "manual"
- asn_value: "65530:65532"
- route_target_type: "both"
- state: "present"
- state: "present"
- - id: 300
- vni:
- id: 300
- state: "present"
- rd: "auto"
- route_target:
- - type: "auto"
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- rmac: 00:11:11:11:11:11
- dis_rt_asn: "true"
- virtual_network:
- virtual_net:
- - id: 100
- vlt_vlan_id: 100
- member_interface:
- - ifname: "port-channel10"
- type: "tagged"
- vlanid: 100
- state: "present"
- vxlan_vni:
- id: 100
- state: "present"
- state: "present"
- - id: 300
- vlt_vlan_id: 300
- member_interface:
- - ifname: "port-channel10"
- type: "tagged"
- vlanid: 300
- state: "present"
- vxlan_vni:
- id: 300
- state: "present"
- state: "present"
-
-os10_vlt:
- domain: 1
- destination_type: "ipv4"
- peer_routing: True
- discovery_intf: "1/1/3-1/1/4"
- vlt_mac: 00:00:00:11:22:33
- vlt_peers:
- Po 10:
- peer_lag: 10
- state: "present"
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml
deleted file mode 100644
index e23ed9c53..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/sec-vtep2.yaml
+++ /dev/null
@@ -1,200 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "VLT2-SEC"
-
-os10_bgp:
- asn: 300
- router_id: 2.2.2.20
- neighbor:
- - type: ipv4
- interface: vlan70
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan80
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- interface: vlan11
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- vrf:
- name: "test"
- address_type: ipv4
- redistribute:
- - route_type: l2vpn
- ipv4_network: 2.2.2.2/32
- redistribute:
- - route_type: connected
- address_type: ipv4
- state: present
- state: "present"
-
-
-os10_interface:
- loopback 0:
- admin: up
- ip_and_mask: 2.2.2.2/32
- loopback 10:
- admin: up
- vrf: "test"
- ip_and_mask: 80.1.1.10/32
- loopback 20:
- admin: up
- vrf: "test"
- ip_and_mask: 90.1.1.10/32
- ethernet 1/1/1:
- admin: up
- switchport: False
- portmode: "trunk"
- ethernet 1/1/2:
- admin: up
- switchport: False
- portmode: "trunk"
- ethernet 1/1/3:
- switchport: False
- admin: up
- ethernet 1/1/4:
- admin: up
- switchport: False
- ethernet 1/1/5:
- switchport: False
- vrf: "test"
- ip_and_mask: "20.20.20.20/24"
- admin: up
- vlan 11:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 70:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 80:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- virtual-network 200:
- vrf: "test"
- ip_and_mask: "30.1.1.101/24"
- ip_virtual_gateway_ip: "30.1.1.254"
- admin: up
- virtual-network 300:
- vrf: "test"
- ip_and_mask: "25.1.1.101/24"
- ip_virtual_gateway_ip: "25.1.1.254"
- admin: up
-
-os10_vlan:
- vlan 70:
- tagged_members:
- - port: ethernet 1/1/1
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 80:
- tagged_members:
- - port: ethernet 1/1/2
- state: "present"
- access_vlan: "false"
- state: "present"
-
-os10_vrf:
- vrfdetails:
- - vrf_name: "test"
- state: "present"
-
-os10_vxlan:
- anycast_gateway_mac: "00:00:aa:bb:ee:ff"
- nve:
- source_interface: 0
- state: "present"
- evpn:
- evi:
- - id: 200
- vni:
- id: 200
- state: "present"
- rd: "auto"
- route_target:
- - type: "manual"
- asn_value: "65530:65533"
- route_target_type: "both"
- state: "present"
- state: "present"
- - id: 300
- vni:
- id: 300
- state: "present"
- rd: "auto"
- route_target:
- - type: "auto"
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- rmac: 00:00:22:22:22:22
- dis_rt_asn: "true"
- virtual_network:
- virtual_net:
- - id: 200
- vlt_vlan_id: 200
- vxlan_vni:
- id: 200
- state: "present"
- state: "present"
- - id: 300
- vlt_vlan_id: 300
- vxlan_vni:
- id: 300
- state: "present"
- state: "present"
-
-os10_vlt:
- domain: 1
- destination_type: "ipv4"
- peer_routing: True
- discovery_intf: "1/1/3-1/1/4"
- vlt_mac: 00:00:00:44:55:66
- vlt_peers:
- Po 10:
- peer_lag: 10
- state: "present"
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml
deleted file mode 100644
index 4672562b5..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine1.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "spine1"
-
-os10_bgp:
- asn: 200
- router_id: 9.9.9.10
- neighbor:
- - type: "peergroup"
- name: "ebgp_session"
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- state: present
- - type: ipv4
- interface: vlan10
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- - type: ipv4
- interface: vlan30
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- - type: ipv4
- interface: vlan50
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- - type: ipv4
- interface: vlan70
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- state: "present"
-
-
-os10_interface:
- vlan 10:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 30:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 50:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 70:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
-
-os10_vlan:
- vlan 10:
- tagged_members:
- - port: ethernet 1/1/1
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 30:
- tagged_members:
- - port: ethernet 1/1/2
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 50:
- tagged_members:
- - port: ethernet 1/1/3
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 70:
- tagged_members:
- - port: ethernet 1/1/4
- state: "present"
- access_vlan: "false"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml
deleted file mode 100644
index 0e953b888..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/host_vars/spine2.yaml
+++ /dev/null
@@ -1,95 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os10.os10
-
-os10_system:
- hostname: "spine2"
-
-os10_bgp:
- asn: 201
- router_id: 9.9.9.20
- neighbor:
- - type: "peergroup"
- name: "ebgp_session"
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- state: present
- - type: ipv4
- interface: vlan20
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- - type: ipv4
- interface: vlan40
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- - type: ipv4
- interface: vlan60
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- - type: ipv4
- interface: vlan80
- peergroup: ebgp_session
- peergroup_type: ebgp
- admin: up
- state: present
- state: "present"
-
-
-os10_interface:
- vlan 20:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 40:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 60:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
- vlan 80:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
-
-os10_vlan:
- vlan 20:
- tagged_members:
- - port: ethernet 1/1/1
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 40:
- tagged_members:
- - port: ethernet 1/1/2
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 60:
- tagged_members:
- - port: ethernet 1/1/3
- state: "present"
- access_vlan: "false"
- state: "present"
- vlan 80:
- tagged_members:
- - port: ethernet 1/1/4
- state: "present"
- access_vlan: "false"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml
deleted file mode 100644
index 104712dcc..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=101.104.34.141
-prim-vtep1 ansible_host=101.104.34.217
-sec-vtep1 ansible_host=101.104.34.218
-spine2 ansible_host=101.104.34.142
-prim-vtep2 ansible_host=101.104.34.219
-sec-vtep2 ansible_host=101.104.34.220
-
-[site1]
-prim-vtep1
-sec-vtep1
-spine1
-
-[site2]
-prim-vtep2
-spine2
-sec-vtep2
-
-[datacenter:children]
-site1
-site2
diff --git a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png b/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png
deleted file mode 100644
index b2f695b46..000000000
--- a/ansible_collections/dellemc/os10/playbooks/vxlan_evpn/vxlan_evpn_topology.png
+++ /dev/null
Binary files differ
diff --git a/ansible_collections/dellemc/os10/plugins/action/os10.py b/ansible_collections/dellemc/os10/plugins/action/os10.py
deleted file mode 100644
index 5669001c0..000000000
--- a/ansible_collections/dellemc/os10/plugins/action/os10.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import sys
-import copy
-
-
-from ansible import constants as C
-from ansible.module_utils._text import to_text
-from ansible.module_utils.connection import Connection
-from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_provider_spec
-from ansible.utils.display import Display
-
-display = Display()
-
-
-class ActionModule(ActionNetworkModule):
-
- def run(self, tmp=None, task_vars=None):
- del tmp # tmp no longer has any effect
-
- self._config_module = True if self._task.action == 'os10_config' else False
- socket_path = None
-
- if self._play_context.connection == 'network_cli':
- provider = self._task.args.get('provider', {})
- if any(provider.values()):
- display.warning('provider is unnecessary when using network_cli and will be ignored')
- del self._task.args['provider']
- elif self._play_context.connection == 'local':
- provider = load_provider(os10_provider_spec, self._task.args)
- pc = copy.deepcopy(self._play_context)
- pc.connection = 'network_cli'
- pc.network_os = 'dellemc.os10.os10'
- pc.remote_addr = provider['host'] or self._play_context.remote_addr
- pc.port = int(provider['port'] or self._play_context.port or 22)
- pc.remote_user = provider['username'] or self._play_context.connection_user
- pc.password = provider['password'] or self._play_context.password
- pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
- command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
- pc.become = provider['authorize'] or False
- if pc.become:
- pc.become_method = 'enable'
- pc.become_pass = provider['auth_pass']
-
- display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
- connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
- connection.set_options(direct={'persistent_command_timeout': command_timeout})
-
- socket_path = connection.run()
- display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
- if not socket_path:
- return {'failed': True,
- 'msg': 'unable to open shell. Please see: ' +
- 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
-
- task_vars['ansible_socket'] = socket_path
-
- # make sure we are in the right cli context which should be
- # enable mode and not config module
-        # enable mode and not config mode
- socket_path = self._connection.socket_path
-
- conn = Connection(socket_path)
- out = conn.get_prompt()
- while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
- display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
- conn.send_command('exit')
- out = conn.get_prompt()
-
- result = super(ActionModule, self).run(task_vars=task_vars)
- return result
diff --git a/ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py b/ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py
deleted file mode 100644
index 602186c89..000000000
--- a/ansible_collections/dellemc/os10/plugins/action/textfsm_parser.py
+++ /dev/null
@@ -1,81 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# (c) 2020, Ansible by Red Hat, inc
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.module_utils.six import StringIO, string_types
-
-from ansible.plugins.action import ActionBase
-from ansible.errors import AnsibleError
-
-try:
- import textfsm
- HAS_TEXTFSM = True
-except ImportError:
- HAS_TEXTFSM = False
-
-
-class ActionModule(ActionBase):
-
- def run(self, tmp=None, task_vars=None):
- ''' handler for textfsm action '''
-
- if task_vars is None:
- task_vars = dict()
-
- result = super(ActionModule, self).run(tmp, task_vars)
- del tmp # tmp no longer has any effect
-
- try:
- if not HAS_TEXTFSM:
- raise AnsibleError('textfsm_parser engine requires the TextFSM library to be installed')
-
- try:
- filename = self._task.args.get('file')
- src = self._task.args.get('src')
- content = self._task.args['content']
- name = self._task.args.get('name')
- except KeyError as exc:
- raise AnsibleError('missing required argument: %s' % exc)
-
- if src and filename:
- raise AnsibleError('`src` and `file` are mutually exclusive arguments')
-
- if not isinstance(content, string_types):
- return {'failed': True, 'msg': '`content` must be of type str, got %s' % type(content)}
-
- if filename:
- tmpl = open(filename)
- else:
- tmpl = StringIO()
- tmpl.write(src.strip())
- tmpl.seek(0)
-
- try:
- re_table = textfsm.TextFSM(tmpl)
- fsm_results = re_table.ParseText(content)
-
- except Exception as exc:
- raise AnsibleError(str(exc))
-
- final_facts = []
- for item in fsm_results:
- facts = {}
- facts.update(dict(zip(re_table.header, item)))
- final_facts.append(facts)
-
- if name:
- result['ansible_facts'] = {name: final_facts}
- else:
- result['ansible_facts'] = {}
-
- finally:
- self._remove_tmp_path(self._connection._shell.tmpdir)
-
- return result
diff --git a/ansible_collections/dellemc/os10/plugins/cliconf/os10.py b/ansible_collections/dellemc/os10/plugins/cliconf/os10.py
deleted file mode 100644
index 7d009f5a2..000000000
--- a/ansible_collections/dellemc/os10/plugins/cliconf/os10.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-cliconf: os10
-short_description: Use os10 cliconf to run commands on the Dell EMC SmartFabric OS10 platform
-description:
-  - This os10 plugin provides low-level abstraction APIs for
-    sending and receiving CLI commands from Dell OS10 network devices.
-"""
-
-import re
-import json
-
-from itertools import chain
-
-from ansible.module_utils._text import to_bytes, to_text
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
-from ansible.plugins.cliconf import CliconfBase, enable_mode
-
-
-class Cliconf(CliconfBase):
-
- def get_device_info(self):
- device_info = {}
-
- device_info['network_os'] = 'dellemc.os10.os10'
- reply = self.get('show version')
- data = to_text(reply, errors='surrogate_or_strict').strip()
-
- match = re.search(r'OS Version (\S+)', data)
- if match:
- device_info['network_os_version'] = match.group(1)
-
- match = re.search(r'System Type (\S+)', data, re.M)
- if match:
- device_info['network_os_model'] = match.group(1)
-
- reply = self.get('show running-configuration | grep hostname')
- data = to_text(reply, errors='surrogate_or_strict').strip()
- match = re.search(r'^hostname (.+)', data, re.M)
- if match:
- device_info['network_os_hostname'] = match.group(1)
-
- return device_info
-
- @enable_mode
- def get_config(self, source='running', format='text', flags=None):
- if source not in ('running', 'startup'):
- return self.invalid_params("fetching configuration from %s is not supported" % source)
- if source == 'running':
- cmd = 'show running-config all'
- else:
- cmd = 'show startup-config'
- return self.send_command(cmd)
-
- @enable_mode
- def edit_config(self, command):
- for cmd in chain(['configure terminal'], to_list(command), ['end']):
- self.send_command(to_bytes(cmd))
-
- def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
- return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
-
- def get_capabilities(self):
- result = super(Cliconf, self).get_capabilities()
- return json.dumps(result)
diff --git a/ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py b/ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py
deleted file mode 100644
index 9a6baf44c..000000000
--- a/ansible_collections/dellemc/os10/plugins/doc_fragments/os10.py
+++ /dev/null
@@ -1,75 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Peter Sprygada <psprygada@ansible.com>
-# Copyright: (c) 2020, Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = r'''
-options:
- provider:
- description:
- - A dict object containing connection details.
- type: dict
- suboptions:
- host:
- description:
- - Specifies the DNS host name or address for connecting to the remote
- device over the specified transport. The value of host is used as
- the destination address for the transport.
- type: str
- port:
- description:
- - Specifies the port to use when building the connection to the remote
- device.
- type: int
- username:
- description:
- - User to authenticate the SSH session to the remote device. If the
- value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_USERNAME) will be used instead.
- type: str
- password:
- description:
- - Password to authenticate the SSH session to the remote device. If the
- value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_PASSWORD) will be used instead.
- type: str
- ssh_keyfile:
- description:
- - Path to an ssh key used to authenticate the SSH session to the remote
- device. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
- type: path
- timeout:
- description:
- - Specifies idle timeout (in seconds) for the connection. Useful if the
- console freezes before continuing. For example when saving
- configurations.
- type: int
- authorize:
- description:
- - Instructs the module to enter privileged mode on the remote device before
- sending any commands. If not specified, the device will attempt to execute
- all commands in non-privileged mode. If the value is not specified in the
- task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be
- used instead.
- type: bool
- default: false
- auth_pass:
- description:
- - Specifies the password to use if required to enter privileged mode on the
- remote device. If I(authorize) is false, then this argument does nothing.
- If the value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_AUTH_PASS) will be used instead.
- type: str
-notes:
- - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking).
-'''
diff --git a/ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py b/ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py
deleted file mode 100644
index b287c38c9..000000000
--- a/ansible_collections/dellemc/os10/plugins/module_utils/network/base_network_show.py
+++ /dev/null
@@ -1,42 +0,0 @@
-from __future__ import (absolute_import, division, print_function)
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from collections import OrderedDict
-import traceback
-
-LIB_IMP_ERR = None
-ERR_MSG = None
-try:
- import xmltodict
- import yaml
- HAS_LIB = True
-except Exception as e:
- HAS_LIB = False
- ERR_MSG = to_native(e)
- LIB_IMP_ERR = traceback.format_exc()
-
-__copyright__ = "(c) Copyright 2020 Dell Inc. or its subsidiaries. All rights reserved."
-__metaclass__ = type
-
-
-class BaseNetworkShow(object):
-    """Base class meant to be extended in order to implement 'show' system network view Ansible modules."""
-
- def __init__(self):
- self.module = AnsibleModule(argument_spec=self.get_fields())
- if not HAS_LIB:
- self.module.fail_json(
- msg=ERR_MSG,
- exception=LIB_IMP_ERR)
- self.exit_msg = OrderedDict()
-
- def xml_to_dict(self, value):
-
- return xmltodict.parse(value)
-
- def dict_to_yaml(self, value):
- return yaml.safe_dump(value, default_flow_style=False)
-
-
-if __name__ == '__main__':
- pass
diff --git a/ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py b/ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py
deleted file mode 100644
index 35976488d..000000000
--- a/ansible_collections/dellemc/os10/plugins/module_utils/network/os10.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# (c) 2020 Red Hat, Inc
-#
-# Copyright (c) 2020 Dell Inc.
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-from __future__ import (absolute_import, division, print_function)
-
-import re
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible.module_utils.connection import exec_command
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine
-
-__metaclass__ = type
-
-_DEVICE_CONFIGS = {}
-
-WARNING_PROMPTS_RE = [
- r"[\r\n]?\[confirm yes/no\]:\s?$",
- r"[\r\n]?\[y/n\]:\s?$",
- r"[\r\n]?\[yes/no\]:\s?$"
-]
-
-os10_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
- 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
- 'timeout': dict(type='int'),
-}
-os10_argument_spec = {
- 'provider': dict(type='dict', options=os10_provider_spec),
-}
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- cmd = 'show running-configuration'
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- rc, out, err = exec_command(module, cmd)
- if rc != 0:
- module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
- cfg = to_text(out, errors='surrogate_or_strict').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- commands = to_commands(module, to_list(commands))
- for cmd in commands:
- cmd = module.jsonify(cmd)
- rc, out, err = exec_command(module, cmd)
- if check_rc and rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
- responses.append(to_text(out, errors='surrogate_or_strict'))
- return responses
-
-
-def load_config(module, commands):
- rc, out, err = exec_command(module, 'configure terminal')
- if rc != 0:
- module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
-
- commands.append('commit')
- for command in to_list(commands):
- if command == 'end':
- continue
- rc, out, err = exec_command(module, command)
- if rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
-
- exec_command(module, 'end')
-
-
-def get_sublevel_config(running_config, module):
- contents = list()
- current_config_contents = list()
- running_config = NetworkConfig(contents=running_config, indent=1)
- obj = running_config.get_object(module.params['parents'])
- if obj:
- contents = obj.children
- contents[:0] = module.params['parents']
-
- indent = 0
- for c in contents:
- if isinstance(c, str):
- current_config_contents.append(c.rjust(len(c) + indent, ' '))
- if isinstance(c, ConfigLine):
- current_config_contents.append(c.raw)
- indent = 1
- sublevel_config = '\n'.join(current_config_contents)
-
- return sublevel_config
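
These helpers are the shared plumbing for the os10_* modules removed below: a module merges os10_argument_spec into its own spec, reads the running configuration through get_config() (cached per command in _DEVICE_CONFIGS), and pushes changes through load_config(), which wraps them in configure terminal / commit / end. A minimal sketch of a hypothetical module built on them, assuming only that it appends one configuration line when missing:

# Hypothetical module using the os10 module_utils helpers (sketch only).
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import (
    get_config, load_config, os10_argument_spec)


def main():
    argument_spec = dict(line=dict(type='str', required=True))
    argument_spec.update(os10_argument_spec)      # adds the 'provider' option
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=True)

    running = get_config(module)                  # 'show running-configuration', cached
    line = module.params['line']
    changed = line not in running.splitlines()

    if changed and not module.check_mode:
        load_config(module, [line])               # configure terminal / <line> / commit / end

    module.exit_json(changed=changed, commands=[line] if changed else [])


if __name__ == '__main__':
    main()
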
diff --git a/ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py b/ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py
deleted file mode 100644
index b7d82f774..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/base_xml_to_dict.py
+++ /dev/null
@@ -1,124 +0,0 @@
-#!/usr/bin/python
-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__copyright__ = "(c) Copyright 2020 Dell Inc. or its subsidiaries. All rights reserved."
-
-__metaclass__ = type
-
-
-DOCUMENTATION = '''
-module: base_xml_to_dict
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Operations for show command output conversion from XML to JSON format.
-description:
-
-  - Get the show system information of a Leaf-Spine.
-
-options:
- cli_responses:
- type: str
- description:
- - show command xml output
- required: True
-'''
-EXAMPLES = '''
-Copy below YAML into a playbook (e.g. play.yml) and run as follows:
-
-#$ ansible-playbook -i inv play.yml
-name: set up the playbook to get show command output in dict format
-hosts: localhost
-connection: local
-gather_facts: False
-vars:
- cli:
- username: admin
- password: admin
-tasks:
-- name: "Get Dell EMC OS10 Show output in dict format"
- os10_command:
- commands: "{{ command_list }}"
- register: show
-- debug: var=show
-- name: call to lib to get output in dict
- base_xml_to_dict:
- cli_responses: "{{ item }}"
- loop: "{{ show.stdout }}"
-'''
-
-from ansible.module_utils._text import to_native
-from ansible.module_utils.basic import AnsibleModule
-from collections import OrderedDict
-import traceback
-
-LIB_IMP_ERR = None
-ERR_MSG = None
-try:
- import xmltodict
- HAS_LIB = True
-except Exception as e:
- HAS_LIB = False
- ERR_MSG = to_native(e)
- LIB_IMP_ERR = traceback.format_exc()
-
-
-class XmlToDictAnsibleModule(object):
- """The goal of this class is to convert xml input to dict"""
-
- def __init__(self):
- self.module = AnsibleModule(argument_spec=self.get_fields())
- self.cli_responses = self.module.params['cli_responses']
- self.exit_msg = OrderedDict()
-
- def get_fields(self):
- """Return valid fields"""
- base_fields = {
- 'cli_responses': {
- 'type': 'str',
- 'required': True
- }
- }
- return base_fields
-
- def build_xml_list(self, xml_output):
- xml_str_list = []
- xml_declaration_tag = '<?xml version="1.0"?>\n'
- for data in xml_output.split('<?xml version="1.0"'):
- if not data:
- continue
- xml_data = ''.join(data.splitlines(True)[1:])
- xml_str_list.append(xml_declaration_tag + xml_data)
-
- return xml_str_list
-
- def perform_action(self):
- try:
- out = list()
- # the below line should be removed or not valid when the password
- # decrypt issue is resolved
- self.cli_responses = self.cli_responses.replace(
- "*-", '').replace("*", '')
- xml_str_list = self.build_xml_list(self.cli_responses)
- for xml_list in xml_str_list:
- out.append(xmltodict.parse(xml_list))
-
- self.exit_msg.update({"result": out})
- self.module.exit_json(changed=False, msg=self.exit_msg)
- except Exception as e:
- self.module.fail_json(
- msg=to_native(e),
- exception=traceback.format_exc())
-
-
-def main():
- module_instance = XmlToDictAnsibleModule()
- if not HAS_LIB:
- module_instance.module.fail_json(msg=ERR_MSG,
- exception=LIB_IMP_ERR)
- module_instance.perform_action()
-
-
-if __name__ == '__main__':
- main()
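
The module mostly exists to cope with CLI output that concatenates several XML documents: build_xml_list() splits on the XML declaration, re-attaches one declaration per fragment, and each fragment is then parsed with xmltodict. A standalone sketch of that splitting step, using made-up sample output:

# Standalone sketch of the XML-splitting logic in base_xml_to_dict (sample data is made up).
import xmltodict

sample = (
    '<?xml version="1.0"?>\n<rpc-reply><data><hostname>leaf1</hostname></data></rpc-reply>\n'
    '<?xml version="1.0"?>\n<rpc-reply><data><hostname>leaf2</hostname></data></rpc-reply>\n'
)


def build_xml_list(xml_output):
    declaration = '<?xml version="1.0"?>\n'
    docs = []
    for chunk in xml_output.split('<?xml version="1.0"'):
        if not chunk:
            continue
        # Drop the rest of the original declaration line, keep the document body.
        docs.append(declaration + ''.join(chunk.splitlines(True)[1:]))
    return docs


parsed = [xmltodict.parse(doc) for doc in build_xml_list(sample)]
print([d['rpc-reply']['data']['hostname'] for d in parsed])   # ['leaf1', 'leaf2']
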
diff --git a/ansible_collections/dellemc/os10/plugins/modules/bgp_validate.py b/ansible_collections/dellemc/os10/plugins/modules/bgp_validate.py
deleted file mode 100644
index 85832c786..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/bgp_validate.py
+++ /dev/null
@@ -1,303 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved."
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: bgp_validate
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Validate the BGP neighbor state, and raise an error if it is not in the established state
-description:
-
-  - Troubleshoot the BGP neighbor state info using show ip bgp summary and show ip interface brief.
-
-options:
- show_ip_bgp:
- description:
- - show ip bgp summary output
- type: 'list'
- required: True
- show_ip_intf_brief:
- description:
- - show ip interface brief output
- type: 'list'
- required: True
- bgp_neighbors:
- description:
-      - planned neighbors input from group_var to compare against the actual state
- type: 'list'
- required: True
-'''
-EXAMPLES = '''
-Copy below YAML into a playbook (e.g. play.yml) and run as follows:
-
-#$ ansible-playbook -i inv play.yml
-name: Validate BGP configuration
-hosts: localhost
-connection: local
-gather_facts: False
-tasks:
- - name: "Get Dell EMC OS10 Show ip bgp summary"
- os10_command:
- commands:
- - command: "show ip bgp summary | display-xml"
- - command: "show ip interface brief | display-xml"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_bgp
- - set_fact:
- output_bgp: "{{ output_bgp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item,
- 'stdout_show_bgp': item.stdout.0, 'stdout_show_ip': item.stdout.1}] }}"
- loop: "{{ show_bgp.results }}"
- - debug: var=output_bgp
- - local_action: copy content={{ output_bgp }} dest=show
- - name: call lib to convert bgp info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.stdout_show_bgp }}"
- with_items:
- - "{{ output_bgp }}"
- register: show_bgp_list
- - name: call lib to convert ip interface info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.stdout_show_ip }}"
- with_items:
- - "{{ output_bgp }}"
- register: show_ip_intf_list
- - name: call lib for bgp validation
- bgp_validate:
- show_ip_bgp: "{{ show_bgp_list.results }}"
- show_ip_intf_brief: "{{ show_ip_intf_list.results }}"
- bgp_neighbors: "{{ intended_bgp_neighbors }}"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from collections import OrderedDict
-import traceback
-
-
-class BgpValidation(object):
- def __init__(self):
- self.module = AnsibleModule(argument_spec=self.get_fields())
- self.show_ip_bgp = self.module.params['show_ip_bgp']
- self.show_ip_intf_brief = self.module.params['show_ip_intf_brief']
- self.bgp_neighbors = self.module.params['bgp_neighbors']
- self.exit_msg = OrderedDict()
-
- def get_fields(self):
- spec_fields = {
- 'show_ip_bgp': {
- 'type': 'list',
- 'required': True
- },
- 'show_ip_intf_brief': {
- 'type': 'list',
- 'required': True
- },
- 'bgp_neighbors': {
- 'type': 'list',
- 'required': True
- }
- }
- return spec_fields
-
- def parse_bgp_output(self):
- show_bgp_dict = {}
- for show_list in self.show_ip_bgp:
- show_bgp_list = list()
- item = show_list.get("item")
- inv_name = None
- if item is not None:
- inv_name = item.get("inv_name")
- msg = show_list.get("msg")
- if msg is not None:
- result = msg.get("result")
- if result is not None:
- for sub_result in result:
- bgp_dict = {}
- rpc_reply = sub_result.get("rpc-reply")
- if rpc_reply is not None:
- bulk = rpc_reply.get("bulk")
- if bulk is not None:
- data = bulk.get("data")
- if data is not None and "peer-oper" in data:
- peer_oper = data.get("peer-oper")
- if peer_oper is not None and "remote-address" in peer_oper:
- bgp_dict["remote_address"] = peer_oper.get(
- "remote-address")
- bgp_dict["bgp-state"] = peer_oper.get(
- "bgp-state")
- show_bgp_list.append(bgp_dict)
- show_bgp_dict[inv_name] = show_bgp_list
- return show_bgp_dict
-
- def parse_ip_intf_output(self):
- show_ip_dict = {}
- for show_list in self.show_ip_intf_brief:
- show_ip_list = list()
- item = show_list.get("item")
- inv_name = None
- if item is not None:
- inv_name = item.get("inv_name")
- msg = show_list.get("msg")
- if msg is not None:
- result = msg.get("result")
- if result is not None:
- for sub_result in result:
- rpc_reply = sub_result.get("rpc-reply")
- if rpc_reply is not None:
- bulk = rpc_reply.get("bulk")
- if bulk is not None:
- data = bulk.get("data")
- if data is not None:
- sub_val = data.get("interface")
- if sub_val is not None:
- for val in sub_val:
- intf_dict = {}
- if "ipv4-info" in val:
- ipv4_info = val.get(
- "ipv4-info")
- if ipv4_info is not None and "addr" in ipv4_info:
- intf_dict["address"] = ipv4_info.get(
- "addr")
- intf_dict["if_name"] = val.get(
- "name")
- intf_dict["oper_status"] = val.get(
- "oper-status")
- if bool(intf_dict):
- show_ip_list.append(intf_dict)
- show_ip_dict[inv_name] = show_ip_list
- return show_ip_dict
-
- def get_intf_info_per_ip(self, intf_dict):
- final_intf_dict = {}
- for key1, value1 in intf_dict.items():
- intf_list = value1
- intf_dict = {}
- for ip in intf_list:
- intf_info = {}
- ip_address = ip.get("address")
- intf_address = ip_address.split('/')
- intf_ip = intf_address[0]
- intf_info["if_name"] = ip.get("if_name")
- intf_info["oper_status"] = ip.get("oper_status")
- intf_info["dest_switch"] = key1
- intf_dict[intf_ip] = intf_info
- if bool(intf_dict):
- final_intf_dict[key1] = intf_dict
- return final_intf_dict
-
- def get_intf_info_from_neighbor_ip(
- self, source_switch, neighbor_ip, intf_dict):
- final_intf_info = {}
- intf_dict_per_ip = self.get_intf_info_per_ip(intf_dict)
- for key, value in intf_dict_per_ip.items():
- switch_name = key
- if source_switch == switch_name:
- continue
- intf_info = value.get(neighbor_ip)
- if intf_info is None:
- continue
- final_intf_info = intf_info
- break
- return final_intf_info
-
- def get_bgp_final_nbr_list(self, bgp_dict, intf_dict):
- actual_bgp_dict = {}
- final_bgp_dict = {}
- for key, value in bgp_dict.items():
- actual_bgp_list = list()
- bgp_list = value
- source_switch = key
- for bgp in bgp_list:
- final_dict = {}
- bgp_state = bgp.get("bgp-state")
- remote_address = bgp.get("remote_address")
- reason = "neighbor config missing"
- error_type = "config_missing"
- intf_info = self.get_intf_info_from_neighbor_ip(
- source_switch, remote_address, intf_dict)
- if bool(intf_info):
- dest_switch = intf_info.get("dest_switch")
- remote_port = intf_info.get("if_name")
- oper_status = intf_info.get("oper_status")
- final_dict["source_switch"] = source_switch
- final_dict["bgp_neighbor"] = remote_address
- final_dict["bgp_state"] = bgp_state
- if bgp_state != "established":
- if oper_status != "up":
- reason = (
- "remote port {0} {1} is {2}" .format(
- dest_switch, remote_port, oper_status))
- error_type = "remote_port_down"
- final_dict["error_type"] = error_type
- final_dict["possible_reason"] = reason
- else:
- final_dict["source_switch"] = source_switch
- final_dict["bgp_neighbor"] = remote_address
- final_dict["bgp_state"] = bgp_state
- final_dict["error_type"] = error_type
- final_dict["possible_reason"] = reason
- actual_bgp_list.append(final_dict)
- actual_bgp_dict[source_switch] = actual_bgp_list
- # check actual with intended neighbor to display the result
- intended_list = list()
- for intended_bgp_neighbor in self.bgp_neighbors:
- planned_source_switch = intended_bgp_neighbor.get("source_switch")
- planned_nbr_list = intended_bgp_neighbor.get("neighbor_ip")
- actual_nbr_list = actual_bgp_dict.get(planned_source_switch)
- if planned_nbr_list is None or actual_nbr_list is None:
- continue
- for actual_nbr in actual_nbr_list:
- actual_source_switch = actual_nbr.get("source_switch")
- actual_bgp_neighbor = actual_nbr.get("bgp_neighbor")
- actual_bgp_state = actual_nbr.get("bgp_state")
- if actual_bgp_neighbor in planned_nbr_list:
- # Don't add established neighbor in result
- if actual_bgp_state != "established":
- intended_list.append(actual_nbr)
- planned_nbr_list.remove(actual_bgp_neighbor)
- else:
-                    reason = "neighbor {0} is not an intended neighbor, please add this neighbor to intended_bgp_neighbors".format(
-                        actual_bgp_neighbor)
- actual_nbr["bgp_neighbor"] = "-"
- actual_nbr["error_type"] = "not_an_intended_neighbor"
- actual_nbr["possible_reason"] = reason
- intended_list.append(actual_nbr)
- # Add the missed planned info which are not present in actual
- # results
- for planned_nbr in planned_nbr_list:
- reason = "neighbor config missing"
- temp_dict = {}
- temp_dict["source_switch"] = planned_source_switch
- temp_dict["bgp_neighbor"] = planned_nbr
- temp_dict["error_type"] = "config_missing"
- temp_dict["possible_reason"] = reason
- intended_list.append(temp_dict)
- return intended_list
-
- def perform_action(self):
- try:
- bgp_dict = self.parse_bgp_output()
- intf_dict = self.parse_ip_intf_output()
- final_bgp_list = self.get_bgp_final_nbr_list(bgp_dict, intf_dict)
- self.exit_msg.update({"results": final_bgp_list})
- self.module.exit_json(changed=False, msg=self.exit_msg)
- except Exception as e:
- self.module.fail_json(
- msg=to_native(e),
- exception=traceback.format_exc())
-
-
-def main():
- module_instance = BgpValidation()
- module_instance.perform_action()
-
-
-if __name__ == '__main__':
- main()
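
The bgp_neighbors option (intended_bgp_neighbors in the example playbook) is expected to be a list of per-switch entries carrying the planned neighbor IPs; get_bgp_final_nbr_list() removes every established neighbor it finds from that list and reports the remainder as missing configuration. A sketch of the expected shape, with made-up inventory names and addresses:

# Illustrative shape of the bgp_neighbors / intended_bgp_neighbors input (values are made up).
intended_bgp_neighbors = [
    {
        'source_switch': 'spine1',                  # inventory name, matched against 'inv_name'
        'neighbor_ip': ['10.0.0.1', '10.0.0.3'],    # planned neighbor addresses for this switch
    },
    {
        'source_switch': 'leaf1',
        'neighbor_ip': ['10.0.0.0'],
    },
]
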
diff --git a/ansible_collections/dellemc/os10/plugins/modules/mtu_validate.py b/ansible_collections/dellemc/os10/plugins/modules/mtu_validate.py
deleted file mode 100644
index f0a9620d2..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/mtu_validate.py
+++ /dev/null
@@ -1,220 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved."
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: mtu_validate
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Validate the MTU value for lldp neighbors
-description:
-
- - Get the wiring info using lldp output and show system network summary.
-
-options:
- show_lldp_neighbors_list:
- description:
- - show lldp neighbor output
- type: 'list'
- required: True
- show_system_network_summary:
- description:
- - show system network summary output
- type: 'list'
- required: True
- show_ip_intf_brief:
- description:
-      - show ip interface brief output
- type: 'list'
- required: True
-'''
-EXAMPLES = '''
-Copy below YAML into a playbook (e.g. play.yml) and run as follows:
-
-#$ ansible-playbook -i inv play.yml
-name: show mtu mismatch info
-hosts: localhost
-connection: local
-gather_facts: False
-tasks:
- - name: "Get Dell EMC OS10 MTU mismatch info"
- os10_command:
- commands:
- - command: "show lldp neighbors"
- - command: "show ip interface brief | display-xml"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_lldp
- - set_fact:
- output: "{{ output|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item,
- 'stdout_show_lldp': item.stdout.0, 'stdout_show_ip': item.stdout.1 }] }}"
- loop: "{{ show_lldp.results }}"
- - debug: var=output
- - local_action: copy content={{ output }} dest=show1
- - name: call lib to convert ip interface info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.stdout_show_ip }}"
- with_items: "{{ output }}"
- register: show_ip_intf_list
- - local_action: copy content={{ show_ip_intf_list }} dest=show_ip
-
- - name: "Get Dell EMC OS10 Show system"
- import_role:
- name: os10_fabric_summary
- register: show_system_network_summary
- - debug: var=show_system_network_summary
- - name: call lib to process
- mtu_validate:
- show_lldp_neighbors_list: "{{ output }}"
- show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
- show_ip_intf_brief: "{{ show_ip_intf_list.results }}"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from collections import OrderedDict
-import re
-import traceback
-
-
-class MtuValidation(object):
- def __init__(self):
- self.module = AnsibleModule(argument_spec=self.get_fields())
- self.show_lldp_neighbors_list = self.module.params['show_lldp_neighbors_list']
- self.show_system_network_summary = self.module.params['show_system_network_summary']
- self.show_ip_intf_brief = self.module.params['show_ip_intf_brief']
- self.exit_msg = OrderedDict()
-
- def get_fields(self):
- spec_fields = {
- 'show_lldp_neighbors_list': {
- 'type': 'list',
- 'required': True
- },
- 'show_system_network_summary': {
- 'type': 'list',
- 'required': True
- },
- 'show_ip_intf_brief': {
- 'type': 'list',
- 'required': True
- }
- }
- return spec_fields
-
- def get_int_mtu(self, spine, port):
- for show_list in self.show_ip_intf_brief:
- inv_name = show_list["item"]["inv_name"]
- if spine != inv_name:
- continue
- value = show_list["msg"]["result"]
- for data in value:
- intf_list = data["rpc-reply"]["bulk"]["data"]["interface"]
- for val in intf_list:
- intf_name = val["name"]
- if intf_name == port:
- mtu = val["mtu"]
- return mtu
- return None
-
-    # form actual neighbors per network with the help of lldp output and show
-    # system output
- def get_actual_neigbor(self, lldp_list):
- final_out = list()
- for show_system in self.show_system_network_summary:
- for lldp in lldp_list:
- if show_system["host"] != lldp["host"] and "node-mac" in show_system and "rem_mac" in lldp:
- rem_host = show_system["host"]
- loc_host = lldp["host"]
- # check whether lldp output mac match with system summary
- # mac and collect port and host info
- dest_switch = show_system["inv_name"]
- source_switch = lldp["inv_name"]
- lldp_mac = lldp["rem_mac"]
- for index, rem_mac in enumerate(lldp_mac):
- final_dict = {}
- if (str.lower(
- show_system["node-mac"])) == (str.lower(rem_mac)):
- final_dict["source_switch"] = source_switch
- final_dict["dest_switch"] = dest_switch
- final_dict["source_port"] = lldp["loc_port"][index]
- final_dict["dest_port"] = lldp["rem_port"][index]
- source_mtu = self.get_int_mtu(
- source_switch, final_dict["source_port"])
- dest_mtu = self.get_int_mtu(
- dest_switch, final_dict["dest_port"])
- if source_mtu is not None:
- final_dict["source_mtu"] = source_mtu
- if dest_mtu is not None:
- final_dict["dest_mtu"] = dest_mtu
- final_out.append(final_dict)
- return final_out
-
- def parse_lldp_output(self):
- nbr_list = list()
- for cli in self.show_lldp_neighbors_list:
- out_dict = {}
- loc_port = list()
- rem_port = list()
- rem_mac = list()
- for key, value in cli.items():
- if key == "host":
- out_dict[key] = value
- if key == "inv_name":
- out_dict[key] = value
- if key == "stdout_show_lldp":
- output = str(value)
- lldp_regexp = r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)"
- lines = output.splitlines()
- for line in lines:
- if "Loc PortID" in line:
- continue
- match = re.match(lldp_regexp, line)
- if match:
- val = match.groups()
- loc_port.append(val[0])
- rem_port.append(val[2])
- rem_mac.append(val[3])
- out_dict["loc_port"] = loc_port
- out_dict["rem_port"] = rem_port
- out_dict["rem_mac"] = rem_mac
- nbr_list.append(out_dict)
- return nbr_list
-
- def get_mtu_mismatch_info(self, nbr_list):
- mtu_list = list()
- for nbr in nbr_list:
- if nbr["source_mtu"] != nbr["dest_mtu"]:
- nbr["error"] = "mismatch"
- mtu_list.append(nbr)
- return mtu_list
-
- def perform_action(self):
- try:
- lldp_list = self.parse_lldp_output()
- actual_nbr = self.get_actual_neigbor(lldp_list)
- mtu_mismatch_list = self.get_mtu_mismatch_info(actual_nbr)
- if len(mtu_mismatch_list) > 0:
- self.exit_msg.update({"results": mtu_mismatch_list})
- else:
- self.exit_msg.update(
-                    {"results": "There is no MTU mismatch between neighbors"})
- self.module.exit_json(changed=False, msg=self.exit_msg)
- except Exception as e:
- self.module.fail_json(
- msg=to_native(e),
- exception=traceback.format_exc())
-
-
-def main():
- module_instance = MtuValidation()
- module_instance.perform_action()
-
-
-if __name__ == '__main__':
- main()
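
parse_lldp_output() relies on a simple four-column regex over the plain-text 'show lldp neighbors' table and skips the header line containing 'Loc PortID'. A standalone sketch of that parsing step against a made-up output line (the real column layout may differ by release):

# Standalone sketch of the LLDP table parsing used by mtu_validate (sample output is made up).
import re

lldp_regexp = r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)"
sample_output = (
    "Loc PortID      Rem Host Name   Rem Port Id     Rem Chassis Id\n"
    "ethernet1/1/1   spine1          ethernet1/1/5   90:b1:1c:f4:a5:23\n"
)

loc_port, rem_port, rem_mac = [], [], []
for line in sample_output.splitlines():
    if "Loc PortID" in line:        # skip the table header, as the module does
        continue
    match = re.match(lldp_regexp, line)
    if match:
        cols = match.groups()
        loc_port.append(cols[0])    # local port
        rem_port.append(cols[2])    # remote port id
        rem_mac.append(cols[3])     # remote chassis MAC, matched against show system 'node-mac'

print(loc_port, rem_port, rem_mac)  # ['ethernet1/1/1'] ['ethernet1/1/5'] ['90:b1:1c:f4:a5:23']
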
diff --git a/ansible_collections/dellemc/os10/plugins/modules/os10_command.py b/ansible_collections/dellemc/os10/plugins/modules/os10_command.py
deleted file mode 100644
index a99f1a67f..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/os10_command.py
+++ /dev/null
@@ -1,229 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Peter Sprygada <psprygada@ansible.com>
-# Copyright: (c) 2020, Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: os10_command
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Run commands on devices running Dell EMC SmartFabric OS10
-description:
-  - Sends arbitrary commands to an OS10 device and returns the results
- read from the device. This module includes an
- argument that will cause the module to wait for a specific condition
- before returning or timing out if the condition is not met.
- - This module does not support running commands in configuration mode.
- Please use M(dellemc.os10.os10_config) to configure OS10 devices.
-extends_documentation_fragment: dellemc.os10.os10
-options:
- commands:
- description:
- - List of commands to send to the remote OS10 device over the
- configured provider. The resulting output from the command
- is returned. If the I(wait_for) argument is provided, the
-        module does not return until the condition is satisfied or
- the number of retries has expired.
- type: list
- required: true
- wait_for:
- description:
- - List of conditions to evaluate against the output of the
- command. The task will wait for each condition to be true
- before moving forward. If the conditional is not true
- within the configured number of I(retries), the task fails.
- See examples.
- type: list
- elements: str
- match:
- description:
- - The I(match) argument is used in conjunction with the
- I(wait_for) argument to specify the match policy. Valid
- values are C(all) or C(any). If the value is set to C(all)
- then all conditionals in the wait_for must be satisfied. If
- the value is set to C(any) then only one of the values must be
- satisfied.
- type: str
- default: all
- choices: [ all, any ]
- retries:
- description:
-      - Specifies the number of times a command should be retried
-        before it is considered failed. The command is run on the
- target device every retry and evaluated against the
- I(wait_for) conditions.
- type: int
- default: 10
- interval:
- description:
- - Configures the interval in seconds to wait between retries
- of the command. If the command does not pass the specified
- conditions, the interval indicates how long to wait before
- trying the command again.
- type: int
- default: 1
-"""
-
-EXAMPLES = """
-tasks:
- - name: run show version on remote devices
- os10_command:
- commands: show version
-
- - name: run show version and check to see if output contains OS10
- os10_command:
- commands: show version
- wait_for: result[0] contains OS10
-
- - name: run multiple commands on remote nodes
- os10_command:
- commands:
- - show version
- - show interface
-
- - name: run multiple commands and evaluate the output
- os10_command:
- commands:
- - show version
- - show interface
- wait_for:
- - result[0] contains OS10
- - result[1] contains Ethernet
-"""
-
-RETURN = """
-stdout:
- description: The set of responses from the commands
- returned: always apart from low level errors (such as action plugin)
- type: list
- sample: ['...', '...']
-stdout_lines:
- description: The value of stdout split into a list
- returned: always apart from low level errors (such as action plugin)
- type: list
- sample: [['...', '...'], ['...'], ['...']]
-failed_conditions:
- description: The list of conditionals that have failed
- returned: failed
- type: list
- sample: ['...', '...']
-warnings:
- description: The list of warnings (if any) generated by module based on arguments
- returned: always
- type: list
- sample: ['...', '...']
-"""
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import run_commands
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_argument_spec, check_args
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
-from ansible.module_utils.six import string_types
-
-
-def to_lines(stdout):
- for item in stdout:
- if isinstance(item, string_types):
- item = str(item).split('\n')
- yield item
-
-
-def parse_commands(module, warnings):
- command = ComplexList(dict(
- command=dict(key=True),
- prompt=dict(),
- answer=dict()
- ), module)
- commands = command(module.params['commands'])
- for index, item in enumerate(commands):
- if module.check_mode and not item['command'].startswith('show'):
- warnings.append(
- 'only show commands are supported when using check mode, not '
- 'executing `%s`' % item['command']
- )
- elif item['command'].startswith('conf'):
- module.fail_json(
- msg='os10_command does not support running config mode '
- 'commands. Please use os10_config instead'
- )
- return commands
-
-
-def main():
- """main entry point for module execution
- """
- argument_spec = dict(
- # { command: <str>, prompt: <str>, response: <str> }
- commands=dict(type='list', required=True),
-
- wait_for=dict(type='list', elements='str'),
- match=dict(default='all', choices=['all', 'any']),
-
- retries=dict(default=10, type='int'),
- interval=dict(default=1, type='int')
- )
-
- argument_spec.update(os10_argument_spec)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- result = {'changed': False}
-
- warnings = list()
- check_args(module, warnings)
- commands = parse_commands(module, warnings)
- result['warnings'] = warnings
-
- wait_for = module.params['wait_for'] or list()
- conditionals = [Conditional(c) for c in wait_for]
-
- retries = module.params['retries']
- interval = module.params['interval']
- match = module.params['match']
-
- while retries > 0:
- responses = run_commands(module, commands)
-
- for item in list(conditionals):
- if item(responses):
- if match == 'any':
- conditionals = list()
- break
- conditionals.remove(item)
-
- if not conditionals:
- break
-
- time.sleep(interval)
- retries -= 1
-
- if conditionals:
- failed_conditions = [item.raw for item in conditionals]
- msg = 'One or more conditional statements have not been satisfied'
- module.fail_json(msg=msg, failed_conditions=failed_conditions)
-
- result.update({
- 'changed': False,
- 'stdout': responses,
- 'stdout_lines': list(to_lines(responses))
- })
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/dellemc/os10/plugins/modules/os10_config.py b/ansible_collections/dellemc/os10/plugins/modules/os10_config.py
deleted file mode 100644
index 925568f14..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/os10_config.py
+++ /dev/null
@@ -1,346 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# Copyright (c) 2020 Dell Inc.
-#
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: os10_config
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Manage Dell EMC SmartFabric OS10 configuration sections
-description:
- - OS10 configurations use a simple block indent file syntax
- for segmenting configuration into sections. This module provides
- an implementation for working with OS10 configuration sections in
- a deterministic way.
-extends_documentation_fragment: dellemc.os10.os10
-options:
- lines:
- description:
- - The ordered set of commands that should be configured in the
- section. The commands must be the exact same commands as found
- in the device running-config. Be sure to note the configuration
- command syntax as some commands are automatically modified by the
- device config parser. This argument is mutually exclusive with I(src).
- type: list
- aliases: ['commands']
- parents:
- description:
- - The ordered set of parents that uniquely identify the section or hierarchy
- the commands should be checked against. If the parents argument
- is omitted, the commands are checked against the set of top
- level or global commands.
- type: list
- src:
- description:
- - Specifies the source path to the file that contains the configuration
- or configuration template to load. The path to the source file can
- either be the full path on the Ansible control host or a relative
- path from the playbook or role root directory. This argument is
- mutually exclusive with I(lines).
- type: path
- before:
- description:
- - The ordered set of commands to push on to the command stack if
- a change needs to be made. This allows the playbook designer
- the opportunity to perform configuration commands prior to pushing
- any changes without affecting how the set of commands are matched
- against the system.
- type: list
- after:
- description:
- - The ordered set of commands to append to the end of the command
- stack if a change needs to be made. Just like with I(before) this
- allows the playbook designer to append a set of commands to be
- executed after the command set.
- type: list
- match:
- description:
- - Instructs the module on the way to perform the matching of
- the set of commands against the current device config. If
- match is set to I(line), commands are matched line by line. If
- match is set to I(strict), command lines are matched with respect
- to position. If match is set to I(exact), command lines
- must be an equal match. Finally, if match is set to I(none), the
- module will not attempt to compare the source configuration with
- the running configuration on the remote device.
- type: str
- default: line
- choices: ['line', 'strict', 'exact', 'none']
- replace:
- description:
- - Instructs the module on the way to perform the configuration
- on the device. If the replace argument is set to I(line) then
- the modified lines are pushed to the device in configuration
- mode. If the replace argument is set to I(block) then the entire
- command block is pushed to the device in configuration mode if any
- line is not correct.
- type: str
- default: line
- choices: ['line', 'block']
- update:
- description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When you set this argument to
- I(merge), the configuration changes merge with the current
- device running configuration. When you set this argument to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
- type: str
- default: merge
- choices: ['merge', 'check']
- save:
- description:
-      - The C(save) argument instructs the module to save the
-        running-config to the startup-config at the conclusion of the
-        module run. If check mode is specified, this argument is ignored.
- type: bool
- default: 'no'
- config:
- description:
- - The module, by default, will connect to the remote device and
- retrieve the current running-config to use as a base for comparing
- against the contents of source. There are times when it is not
- desirable to have the task get the current running-config for
- every task in a playbook. The I(config) argument allows the
- implementer to pass in the configuration to use as the base
- config for comparison.
- type: str
- backup:
- description:
- - This argument will cause the module to create a full backup of
- the current C(running-config) from the remote device before any
- changes are made. If the C(backup_options) value is not given,
- the backup file is written to the C(backup) folder in the playbook
- root directory. If the directory does not exist, it is created.
- type: bool
- default: 'no'
- backup_options:
- description:
- - This is a dict object containing configurable options related to backup file path.
- The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
- to I(no) this option will be silently ignored.
- suboptions:
- filename:
- description:
-          - The filename to be used to store the backup configuration. If the filename
-            is not given, it will be generated based on the hostname, current time, and date
-            in the format defined by <hostname>_config.<current-date>@<current-time>
- type: str
- dir_path:
- description:
-          - This option provides the path, ending with a directory name, in which the backup
-            configuration file will be stored. If the directory does not exist, it is created
-            first, and the filename is either the value of C(filename) or the default filename
-            described in the C(filename) option. If no path value is given, a I(backup)
-            directory is created in the current working directory and the backup configuration
-            is written to C(filename) within that I(backup) directory.
- type: path
- type: dict
-"""
-
-EXAMPLES = """
-- os10_config:
- lines: ['hostname {{ inventory_hostname }}']
-
-- os10_config:
- lines:
- - 10 permit ip host 1.1.1.1 any log
- - 20 permit ip host 2.2.2.2 any log
- - 30 permit ip host 3.3.3.3 any log
- - 40 permit ip host 4.4.4.4 any log
- - 50 permit ip host 5.5.5.5 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- match: exact
-
-- os10_config:
- lines:
- - 10 permit ip host 1.1.1.1 any log
- - 20 permit ip host 2.2.2.2 any log
- - 30 permit ip host 3.3.3.3 any log
- - 40 permit ip host 4.4.4.4 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- replace: block
-
-- os10_config:
- lines: ['hostname {{ inventory_hostname }}']
- backup: yes
- backup_options:
- filename: backup.cfg
- dir_path: /home/user
-"""
-
-RETURN = """
-updates:
- description: The set of commands that will be pushed to the remote device.
- returned: always
- type: list
- sample: ['hostname foo', 'router bgp 1', 'router-id 1.1.1.1']
-commands:
- description: The set of commands that will be pushed to the remote device
- returned: always
- type: list
- sample: ['hostname foo', 'router bgp 1', 'router-id 1.1.1.1']
-saved:
- description: Returns whether the configuration is saved to the startup
- configuration or not.
- returned: When not check_mode.
- type: bool
- sample: True
-backup_path:
- description: The full path to the backup file
- returned: when backup is yes
- type: str
- sample: /playbooks/ansible/backup/os10_config.2016-07-16@22:28:34
-"""
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import get_config, get_sublevel_config
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_argument_spec, check_args
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import load_config, run_commands
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import WARNING_PROMPTS_RE
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
-
-
-def get_candidate(module):
- candidate = NetworkConfig(indent=1)
- if module.params['src']:
- candidate.load(module.params['src'])
- elif module.params['lines']:
- parents = module.params['parents'] or list()
- commands = module.params['lines'][0]
- if (isinstance(commands, dict)) and (isinstance((commands['command']), list)):
- candidate.add(commands['command'], parents=parents)
- elif (isinstance(commands, dict)) and (isinstance((commands['command']), str)):
- candidate.add([commands['command']], parents=parents)
- else:
- candidate.add(module.params['lines'], parents=parents)
- return candidate
-
-
-def get_running_config(module):
- contents = module.params['config']
- if not contents:
- contents = get_config(module)
- return contents
-
-
-def main():
-
- backup_spec = dict(
- filename=dict(),
- dir_path=dict(type='path')
- )
- argument_spec = dict(
- lines=dict(aliases=['commands'], type='list'),
- parents=dict(type='list'),
-
- src=dict(type='path'),
-
- before=dict(type='list'),
- after=dict(type='list'),
-
- match=dict(default='line',
- choices=['line', 'strict', 'exact', 'none']),
- replace=dict(default='line', choices=['line', 'block']),
-
- update=dict(choices=['merge', 'check'], default='merge'),
- save=dict(type='bool', default=False),
- config=dict(),
- backup=dict(type='bool', default=False),
- backup_options=dict(type='dict', options=backup_spec)
- )
-
- argument_spec.update(os10_argument_spec)
-
- mutually_exclusive = [('lines', 'src')]
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True)
-
- parents = module.params['parents'] or list()
-
- match = module.params['match']
- replace = module.params['replace']
-
- warnings = list()
- check_args(module, warnings)
-
- result = dict(changed=False, saved=False, warnings=warnings)
-
- if module.params['backup']:
- if not module.check_mode:
- result['__backup__'] = get_config(module)
-
- commands = list()
- candidate = get_candidate(module)
-
- if any((module.params['lines'], module.params['src'])):
- if match != 'none':
- config = get_running_config(module)
- if parents:
- contents = get_sublevel_config(config, module)
- config = NetworkConfig(contents=contents, indent=1)
- else:
- config = NetworkConfig(contents=config, indent=1)
- configobjs = candidate.difference(config, match=match, replace=replace)
- else:
- configobjs = candidate.items
-
- if configobjs:
- commands = dumps(configobjs, 'commands')
- if ((isinstance((module.params['lines']), list)) and
- (isinstance((module.params['lines'][0]), dict)) and
- (set(['prompt', 'answer']).issubset(module.params['lines'][0]))):
-
- cmd = {'command': commands,
- 'prompt': module.params['lines'][0]['prompt'],
- 'answer': module.params['lines'][0]['answer']}
- commands = [module.jsonify(cmd)]
- else:
- commands = commands.split('\n')
-
- if module.params['before']:
- commands[:0] = module.params['before']
-
- if module.params['after']:
- commands.extend(module.params['after'])
-
- if not module.check_mode and module.params['update'] == 'merge':
- load_config(module, commands)
-
- result['changed'] = True
- result['commands'] = commands
- result['updates'] = commands
-
- if module.params['save']:
- result['changed'] = True
- if not module.check_mode:
- cmd = {r'command': 'copy running-config startup-config',
- r'prompt': r'\[confirm yes/no\]:\s?$', 'answer': 'yes'}
- run_commands(module, [cmd])
- result['saved'] = True
- else:
- module.warn('Skipping command `copy running-config startup-config`'
- 'due to check_mode. Configuration not copied to '
- 'non-volatile storage')
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
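
Besides plain strings, get_candidate() and the command assembly above also accept a single dict entry in lines carrying prompt and answer keys, so a command that triggers an interactive confirmation can be answered in the same push. A sketch of that input shape; the command and prompt values are made up for illustration:

# Illustrative 'lines' payload using the dict form with prompt/answer (values are made up).
lines = [
    {
        'command': ['boot system stand-by'],      # command(s) to push in configuration mode
        'prompt': r'\[confirm yes/no\]:\s?$',     # prompt the device is expected to print
        'answer': 'yes',                          # reply sent when the prompt is seen
    }
]
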
diff --git a/ansible_collections/dellemc/os10/plugins/modules/os10_facts.py b/ansible_collections/dellemc/os10/plugins/modules/os10_facts.py
deleted file mode 100644
index c124422bd..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/os10_facts.py
+++ /dev/null
@@ -1,505 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# Copyright (c) 2020 Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: os10_facts
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Collect facts from devices running Dell EMC SmartFabric OS10
-description:
- - Collects a base set of device facts from a remote device that
- is running OS10. This module prepends all of the
- base network fact keys with C(ansible_net_<fact>). The facts
- module will always collect a base set of facts from the device
- and can enable or disable collection of additional facts.
-extends_documentation_fragment: dellemc.os10.os10
-options:
- gather_subset:
- description:
- - When supplied, this argument will restrict the facts collected
- to a given subset. Possible values for this argument include
- all, hardware, config, and interfaces. Can specify a list of
- values to include a larger subset. Values can also be used
- with an initial C(M(!)) to specify that a specific subset should
- not be collected.
- type: list
- default: [ '!config' ]
-"""
-
-EXAMPLES = """
-# Collect all facts from the device
-- os10_facts:
- gather_subset: all
-
-# Collect only the config and default facts
-- os10_facts:
- gather_subset:
- - config
-
-# Do not collect hardware facts
-- os10_facts:
- gather_subset:
- - "!hardware"
-"""
-
-RETURN = """
-ansible_net_gather_subset:
- description: The list of fact subsets collected from the device
- returned: always
- type: list
-
-# default
-ansible_net_name:
- description: The name of the OS that is running.
- returned: Always.
- type: str
-ansible_net_version:
- description: The operating system version running on the remote device
- returned: always
- type: str
-ansible_net_servicetag:
- description: The service tag number of the remote device.
- returned: always
- type: str
-ansible_net_model:
- description: The model name returned from the device.
- returned: always
- type: str
-ansible_net_hostname:
- description: The configured hostname of the device
- returned: always
- type: str
-
-# hardware
-ansible_net_cpu_arch:
- description: CPU Architecture of the remote device.
- returned: when hardware is configured
- type: str
-ansible_net_memfree_mb:
- description: The available free memory on the remote device in Mb
- returned: when hardware is configured
- type: int
-ansible_net_memtotal_mb:
- description: The total memory on the remote device in Mb
- returned: when hardware is configured
- type: int
-
-# config
-ansible_net_config:
- description: The current active config from the device
- returned: when config is configured
- type: str
-
-# interfaces
-ansible_net_all_ipv4_addresses:
- description: All IPv4 addresses configured on the device
- returned: when interfaces is configured
- type: list
-ansible_net_all_ipv6_addresses:
- description: All IPv6 addresses configured on the device
- returned: when interfaces is configured
- type: list
-ansible_net_interfaces:
- description: A hash of all interfaces running on the system
- returned: when interfaces is configured
- type: dict
-ansible_net_neighbors:
- description: The list of LLDP neighbors from the remote device
- returned: when interfaces is configured
- type: dict
-"""
-
-import re
-
-try:
- from lxml import etree as ET
-except ImportError:
- import xml.etree.ElementTree as ET
-
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import run_commands
-from ansible_collections.dellemc.os10.plugins.module_utils.network.os10 import os10_argument_spec, check_args
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils.six import iteritems
-
-
-class FactsBase(object):
-
- COMMANDS = []
-
- def __init__(self, module):
- self.module = module
- self.facts = dict()
- self.responses = None
-
- def populate(self):
- self.responses = run_commands(self.module, self.COMMANDS, check_rc=False)
-
- def run(self, cmd):
- return run_commands(self.module, cmd, check_rc=False)
-
-
-class Default(FactsBase):
-
- COMMANDS = [
- 'show version | display-xml',
- 'show system | display-xml',
- ]
-
- def populate(self):
- super(Default, self).populate()
- data = self.responses[0]
- xml_data = ET.fromstring(data.encode('utf8'))
-
- self.facts['name'] = self.parse_name(xml_data)
- self.facts['version'] = self.parse_version(xml_data)
- self.facts['model'] = self.parse_model(xml_data)
- self.facts['hostname'] = self.parse_hostname(xml_data)
-
- data = self.responses[1]
- xml_data = ET.fromstring(data.encode('utf8'))
-
- self.facts['servicetag'] = self.parse_servicetag(xml_data)
-
- def parse_name(self, data):
- sw_name = data.find('./data/system-sw-state/sw-version/sw-name')
- if sw_name is not None:
- return sw_name.text
- else:
- return ""
-
- def parse_version(self, data):
- sw_ver = data.find('./data/system-sw-state/sw-version/sw-version')
- if sw_ver is not None:
- return sw_ver.text
- else:
- return ""
-
- def parse_hostname(self, data):
- hostname = data.find('./data/system-state/system-status/hostname')
- if hostname is not None:
- return hostname.text
- else:
- return ""
-
- def parse_model(self, data):
- prod_name = data.find('./data/system-sw-state/sw-version/sw-platform')
- if prod_name is not None:
- return prod_name.text
- else:
- return ""
-
- def parse_servicetag(self, data):
- svc_tag = data.find('./data/system/node/unit/mfg-info/service-tag')
- if svc_tag is not None:
- return svc_tag.text
- else:
- return ""
-
-
-class Hardware(FactsBase):
-
- COMMANDS = [
- 'show version | display-xml',
- 'show processes node-id 1 | grep "Mem :"'
- ]
-
- def populate(self):
-
- super(Hardware, self).populate()
- data = self.responses[0]
-
- xml_data = ET.fromstring(data.encode('utf8'))
-
- self.facts['cpu_arch'] = self.parse_cpu_arch(xml_data)
-
- data = self.responses[1]
- match = self.parse_memory(data)
- if match:
- self.facts['memtotal_mb'] = int(match[0]) // 1024
- self.facts['memfree_mb'] = int(match[1]) // 1024
-
- def parse_cpu_arch(self, data):
- cpu_arch = data.find('./data/system-sw-state/sw-version/cpu-arch')
- if cpu_arch is not None:
- return cpu_arch.text
- else:
- return ""
-
- def parse_memory(self, data):
- return re.findall(r'(\d+)', data, re.M)
-
-
-class Config(FactsBase):
-
- COMMANDS = ['show running-config']
-
- def populate(self):
- super(Config, self).populate()
- self.facts['config'] = self.responses[0]
-
-
-class Interfaces(FactsBase):
-
- COMMANDS = [
- 'show interface | display-xml',
- 'show lldp neighbors | display-xml'
- ]
-
- def __init__(self, module):
- self.intf_facts = dict()
- self.lldp_facts = dict()
- super(Interfaces, self).__init__(module)
-
- def populate(self):
- super(Interfaces, self).populate()
- self.facts['all_ipv4_addresses'] = list()
- self.facts['all_ipv6_addresses'] = list()
-
- int_show_data = (self.responses[0]).splitlines()
- pattern = '?xml version'
- data = ''
- skip = True
-
- # The output returns multiple xml trees
- # parse them before handling.
- for line in int_show_data:
- if pattern in line:
- if skip is False:
- xml_data = ET.fromstring(data.encode('utf8'))
- self.populate_interfaces(xml_data)
- data = ''
- else:
- skip = False
-
- data += line
-
- if skip is False:
- xml_data = ET.fromstring(data.encode('utf8'))
- self.populate_interfaces(xml_data)
-
- self.facts['interfaces'] = self.intf_facts
-
- lldp_data = (self.responses[1]).splitlines()
- data = ''
- skip = True
- # The output returns multiple xml trees
- # parse them before handling.
- for line in lldp_data:
- if pattern in line:
- if skip is False:
- xml_data = ET.fromstring(data.encode('utf8'))
- self.populate_neighbors(xml_data)
- data = ''
- else:
- skip = False
-
- data += line
-
- if skip is False:
- xml_data = ET.fromstring(data.encode('utf8'))
- self.populate_neighbors(xml_data)
-
- self.facts['neighbors'] = self.lldp_facts
-
- def populate_interfaces(self, interfaces):
-
- for interface in interfaces.findall('./data/interfaces/interface'):
- intf = dict()
- name = self.parse_item(interface, 'name')
-
- intf['description'] = self.parse_item(interface, 'description')
- intf['duplex'] = self.parse_item(interface, 'duplex')
- intf['primary_ipv4'] = self.parse_primary_ipv4(interface)
- intf['secondary_ipv4'] = self.parse_secondary_ipv4(interface)
- intf['ipv6'] = self.parse_ipv6_address(interface)
- intf['mtu'] = self.parse_item(interface, 'mtu')
- intf['type'] = self.parse_item(interface, 'type')
-
- self.intf_facts[name] = intf
-
- for interface in interfaces.findall('./bulk/data/interface'):
- name = self.parse_item(interface, 'name')
- try:
- intf = self.intf_facts[name]
- intf['bandwidth'] = self.parse_item(interface, 'speed')
- intf['adminstatus'] = self.parse_item(interface, 'admin-status')
- intf['operstatus'] = self.parse_item(interface, 'oper-status')
- intf['macaddress'] = self.parse_item(interface, 'phys-address')
- except KeyError:
- # skip the reserved interfaces
- pass
-
- for interface in interfaces.findall('./data/ports/ports-state/port'):
- name = self.parse_item(interface, 'name')
- # media-type name interface name format phy-eth 1/1/1
- mediatype = self.parse_item(interface, 'media-type')
-
- typ, sname = name.split('-eth')
- name = "ethernet" + sname
- try:
- intf = self.intf_facts[name]
- intf['mediatype'] = mediatype
- except Exception:
- # fanout
- for subport in range(1, 5):
- name = "ethernet" + sname + ":" + str(subport)
- try:
- intf = self.intf_facts[name]
- intf['mediatype'] = mediatype
- except Exception:
- # valid case to handle 2x50G
- pass
-
- def add_ip_address(self, address, family):
- if family == 'ipv4':
- self.facts['all_ipv4_addresses'].append(address)
- else:
- self.facts['all_ipv6_addresses'].append(address)
-
- def parse_item(self, interface, item):
- elem = interface.find(item)
- if elem is not None:
- return elem.text
- else:
- return ""
-
- def parse_primary_ipv4(self, interface):
- ipv4 = interface.find('ipv4')
- ip_address = ""
- if ipv4 is not None:
- prim_ipaddr = ipv4.find('./address/primary-addr')
- if prim_ipaddr is not None:
- ip_address = prim_ipaddr.text
- self.add_ip_address(ip_address, 'ipv4')
-
- return ip_address
-
- def parse_secondary_ipv4(self, interface):
- ipv4 = interface.find('ipv4')
- ip_address = ""
- if ipv4 is not None:
- sec_ipaddr = ipv4.find('./address/secondary-addr')
- if sec_ipaddr is not None:
- ip_address = sec_ipaddr.text
- self.add_ip_address(ip_address, 'ipv4')
-
- return ip_address
-
- def parse_ipv6_address(self, interface):
-
- ip_address = list()
-
- for addr in interface.findall('./ipv6/ipv6-addresses/address'):
-
- ipv6_addr = addr.find('./ipv6-address')
-
- if ipv6_addr is not None:
- ip_address.append(ipv6_addr.text)
- self.add_ip_address(ipv6_addr.text, 'ipv6')
-
- return ip_address
-
- def populate_neighbors(self, interfaces):
- for interface in interfaces.findall('./bulk/data/interface'):
- name = interface.find('name').text
- rem_sys_name = interface.find('./lldp-rem-neighbor-info/info/rem-system-name')
- if rem_sys_name is not None:
- self.lldp_facts[name] = list()
- fact = dict()
- fact['host'] = rem_sys_name.text
- rem_sys_port = interface.find('./lldp-rem-neighbor-info/info/rem-lldp-port-id')
- fact['port'] = rem_sys_port.text
- self.lldp_facts[name].append(fact)
-
-
-FACT_SUBSETS = dict(
- default=Default,
- hardware=Hardware,
- interfaces=Interfaces,
- config=Config,
-)
-
-VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
-
-
-def main():
- """main entry point for module execution
- """
- argument_spec = dict(
- gather_subset=dict(default=['!config'], type='list')
- )
-
- argument_spec.update(os10_argument_spec)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- gather_subset = module.params['gather_subset']
-
- runable_subsets = set()
- exclude_subsets = set()
-
- for subset in gather_subset:
- if subset == 'all':
- runable_subsets.update(VALID_SUBSETS)
- continue
-
- if subset.startswith('!'):
- subset = subset[1:]
- if subset == 'all':
- exclude_subsets.update(VALID_SUBSETS)
- continue
- exclude = True
- else:
- exclude = False
-
- if subset not in VALID_SUBSETS:
- module.fail_json(msg='Bad subset')
-
- if exclude:
- exclude_subsets.add(subset)
- else:
- runable_subsets.add(subset)
-
- if not runable_subsets:
- runable_subsets.update(VALID_SUBSETS)
-
- runable_subsets.difference_update(exclude_subsets)
- runable_subsets.add('default')
-
- facts = dict()
- facts['gather_subset'] = list(runable_subsets)
-
- instances = list()
- for key in runable_subsets:
- instances.append(FACT_SUBSETS[key](module))
-
- for inst in instances:
- inst.populate()
- facts.update(inst.facts)
-
- ansible_facts = dict()
- for key, value in iteritems(facts):
- key = 'ansible_net_%s' % key
- ansible_facts[key] = value
-
- warnings = list()
- check_args(module, warnings)
-
- module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
-
-
-if __name__ == '__main__':
- main()
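
The gather_subset handling in main() above expands 'all', splits off '!'-prefixed exclusions, falls back to every subset when nothing is requested, and always re-adds 'default'. A minimal standalone sketch of that resolution logic (plain Python, outside AnsibleModule; the sample input below is hypothetical):

VALID_SUBSETS = frozenset(['default', 'hardware', 'interfaces', 'config'])

def resolve_subsets(gather_subset):
    # Mirrors the include/exclude resolution performed by the facts module above.
    runable, exclude = set(), set()
    for subset in gather_subset:
        if subset == 'all':
            runable.update(VALID_SUBSETS)
            continue
        if subset.startswith('!'):
            subset = subset[1:]
            if subset == 'all':
                exclude.update(VALID_SUBSETS)
                continue
            target = exclude
        else:
            target = runable
        if subset not in VALID_SUBSETS:
            raise ValueError('Bad subset: %s' % subset)
        target.add(subset)
    if not runable:
        runable.update(VALID_SUBSETS)
    runable.difference_update(exclude)
    runable.add('default')
    return runable

# The module default of ['!config'] collects everything except 'config'.
print(sorted(resolve_subsets(['!config'])))  # ['default', 'hardware', 'interfaces']
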
diff --git a/ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py b/ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py
deleted file mode 100644
index 9922b9f8b..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/show_system_network_summary.py
+++ /dev/null
@@ -1,145 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved."
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: show_system_network_summary
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Operations for show_system_network output in json/yaml format.
-description:
-
- - Get the show system information of a Leaf-Spine fabric.
-
-options:
- output_type:
- type: str
- description:
- - json or yaml
- - Default value is json
- default: json
- required: False
- cli_responses:
- type: list
- required: True
- description:
- - show system command xml output
-'''
-EXAMPLES = '''
-Copy the YAML below into a playbook (e.g. play.yml) and run it as follows:
-
-#$ ansible-playbook -i inv play.yml
-name: show system Configuration
-hosts: localhost
-connection: local
-gather_facts: False
-vars:
- cli:
- username: admin
- password: admin
-tasks:
-- name: "Get Dell EMC OS10 Show system summary"
- os10_command:
- commands: ['show system | display-xml']
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_system
-- set_fact:
- output: "{{ output|default([])+ [{'inv_name': item.item, 'host': item.invocation.module_args.provider.host, 'stdout_show_system': item.stdout}] }}"
- loop: "{{ show_system.results }}"
-- debug: var=output
-- name: "show system network call to lib "
- show_system_network_summary:
- cli_responses: "{{ output }}"
- output_type: "{{ output_method if output_method is defined else 'json' }}"
- register: show_system_network_summary
-- debug: var=show_system_network_summary
-'''
-
-import re
-from ansible_collections.dellemc.os10.plugins.module_utils.network.base_network_show import BaseNetworkShow
-
-
-class ShowSystemNetworkSummary(BaseNetworkShow):
- def __init__(self):
- BaseNetworkShow.__init__(self)
- self.cli_responses = self.module.params['cli_responses']
- self.output_type = self.module.params['output_type']
- self.changed = False
-
- def get_fields(self):
- spec_fields = {
- 'cli_responses': {
- 'type': 'list',
- 'required': True
- },
- 'output_type': {
- 'type': 'str',
- 'default': "json",
- 'required': False
- }
- }
- return spec_fields
-
- def perform_action(self):
- out = list()
- show_system_summary = self.cli_responses
- if len(show_system_summary) > 0:
- for item in show_system_summary:
- out_dict = {}
- host = item.get("host")
- inv_name = item.get("inv_name")
- show_system_response = item.get("stdout_show_system")
- if show_system_response is not None:
- result = BaseNetworkShow.xml_to_dict(
- self, show_system_response[0])
- rpc_reply = result.get("rpc-reply")
- if rpc_reply is not None:
- data = rpc_reply.get("data")
- if data is not None:
- out_dict["host"] = host
- out_dict["inv_name"] = inv_name
- system_state = data.get("system-state")
- if system_state is not None:
- system_status = system_state.get(
- "system-status")
- if system_status is not None:
- out_dict["hostname"] = system_status.get(
- "hostname")
- system = data.get("system")
- if system is not None:
- node = system.get("node")
- if node is not None:
- out_dict["node-mac"] = node.get("node-mac")
- unit = node.get("unit")
- if unit is not None:
- out_dict["software-version"] = unit.get(
- "software-version")
- mfg_info = node.get("mfg-info")
- if mfg_info is not None:
- out_dict["service-tag"] = mfg_info.get(
- "service-tag")
- out_dict["device type"] = mfg_info.get(
- "product-name")
- if bool(out_dict):
- out.append(out_dict)
- if self.output_type != "json":
- self.exit_msg.update(
- {"results": (BaseNetworkShow.dict_to_yaml(self, out))})
- else:
- self.exit_msg.update({"results": (out)})
- self.module.exit_json(changed=False, msg=self.exit_msg)
-
-
-def main():
- module_instance = ShowSystemNetworkSummary()
- module_instance.perform_action()
-
-
-if __name__ == '__main__':
- main()
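
For reference, the per-switch dictionary assembled in perform_action above carries only the fields it finds in the rpc-reply, and the list is returned under results (as YAML text when output_type is not json). An illustrative entry, with hypothetical values, looks roughly like:

results:
  - host: 10.16.148.71
    inv_name: spine1
    hostname: os10-spine1
    node-mac: "e4:f0:04:aa:bb:cc"
    software-version: 10.5.0.0
    service-tag: ABC1234
    device type: S5232F-ON
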
diff --git a/ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py b/ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py
deleted file mode 100644
index 2042dfe77..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/vlt_validate.py
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved."
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: vlt_validate
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Validate the VLT information and raise an error if the peer is not up
-description:
-
- - Troubleshoot the show vlt info and raise an error if peer is not up.
-
-options:
- show_vlt:
- description:
- - show vlt output
- type: 'list'
- required: True
- show_system_network_summary:
- description:
- - show system summary output
- type: 'list'
- required: True
- intended_vlt_pairs:
- description:
- - intended VLT pair input to verify against the actual configuration
- type: 'list'
- required: True
-
-'''
-EXAMPLES = '''
-Copy the YAML below into a playbook (e.g. play.yml) and run it as follows:
-
-#$ ansible-playbook -i inv play.yml
-name: show system Configuration
-hosts: localhost
-connection: local
-gather_facts: False
-tasks:
- - name: "Get Dell EMC OS10 Show run vlt"
- os10_command:
- commands:
- - command: "show running-configuration vlt | grep vlt-domain"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_run_vlt
- - set_fact:
- output_vlt: "{{ output_vlt|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item,
- 'stdout_show_vlt': item.stdout.0}] }}"
- loop: "{{ show_run_vlt.results }}"
- - debug: var=output_vlt
- - name: "Get Dell EMC OS10 Show vlt info"
- os10_command:
- commands:
- - command: "show vlt {{ item.stdout_show_vlt.split()[1] }} | display-xml"
- provider: "{{ hostvars[item.inv_name].cli }}"
- with_items: "{{ output_vlt }}"
- register: show_vlt
- - set_fact:
- vlt_out: "{{ vlt_out|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'show_vlt_stdout': item.stdout.0}] }}"
- loop: "{{ show_vlt.results }}"
- register: vlt_output
- - name: call lib to convert vlt info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.show_vlt_stdout }}"
- with_items:
- - "{{ vlt_out }}"
- register: vlt_dict_output
- - name: "Get Dell EMC OS10 Show system"
- import_role:
- name: os10_fabric_summary
- register: show_system_network_summary
- - name: call lib to process
- vlt_validate:
- show_vlt : "{{ vlt_dict_output.results }}"
- show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
- intended_vlt_pairs: "{{ intended_vlt_pairs }}"
- register: show_vlt_info
-
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from collections import OrderedDict
-import traceback
-
-
-class VltValidation(object):
- def __init__(self):
- self.module = AnsibleModule(argument_spec=self.get_fields())
- self.show_vlt = self.module.params['show_vlt']
- self.show_system_network_summary = self.module.params['show_system_network_summary']
- self.intended_vlt_pairs = self.module.params['intended_vlt_pairs']
- self.exit_msg = OrderedDict()
-
- def get_fields(self):
- spec_fields = {
- 'show_vlt': {
- 'type': 'list',
- 'required': True
- },
- 'show_system_network_summary': {
- 'type': 'list',
- 'required': True
- },
- 'intended_vlt_pairs': {
- 'type': 'list',
- 'required': True
- }
- }
- return spec_fields
-
- # get switch inv name from mac
- def get_switch_inv_name_from_mac(self, mac):
- inv_name = None
- for show_system in self.show_system_network_summary:
- if (str.lower(show_system["node-mac"])) == (str.lower(mac)):
- inv_name = show_system.get("inv_name")
- break
- return inv_name
-
- def validate_vlt_pairs(self, actual_vlt_dict):
- final_out = list()
- intended_vlt_list = self.intended_vlt_pairs
- for intended_vlt in intended_vlt_list:
- intended_primary = intended_vlt.get("primary")
- intended_secondary = intended_vlt.get("secondary")
- actual_vlt = actual_vlt_dict.get(intended_primary)
- temp_dict = {}
- if actual_vlt is not None:
- actual_secondary = actual_vlt.get("secondary")
- secondary_status = actual_vlt.get("secondary_status")
- if actual_secondary is not None and intended_secondary != actual_secondary:
- temp_dict["error_type"] = "secondary_mismatch"
- temp_dict["intended_primary"] = intended_primary
- temp_dict["intended_secondary"] = intended_secondary
- temp_dict["secondary"] = actual_secondary
- reason = "config mismatch as {0} is expected, but the actual secondary is {1} " .format(
- intended_secondary, actual_secondary)
- temp_dict["possible_reason"] = reason
- final_out.append(temp_dict)
- else:
- if actual_secondary is None:
- temp_dict["intended_primary"] = intended_primary
- temp_dict["intended_secondary"] = intended_secondary
- temp_dict["error_type"] = "peer_missing"
- reason = "peer info is not configured or peer interface is down"
- temp_dict["possible_reason"] = reason
- final_out.append(temp_dict)
- elif intended_secondary == actual_secondary and secondary_status != "up":
- temp_dict["intended_primary"] = intended_primary
- temp_dict["intended_secondary"] = intended_secondary
- temp_dict["secondary"] = actual_secondary
- temp_dict["error_type"] = "peer_down"
- reason = "peer interface is down"
- temp_dict["possible_reason"] = reason
- final_out.append(temp_dict)
- else:
- temp_dict["intended_primary"] = intended_primary
- temp_dict["intended_secondary"] = intended_secondary
- temp_dict["error_type"] = "vlt_config_missing"
- temp_dict["possible_reason"] = "vlt is not configured"
- final_out.append(temp_dict)
- return final_out
-
- def parse_vlt_output(self):
- show_vlt_dict = {}
- for show_list in self.show_vlt:
- source_switch = None
- item = show_list.get("item")
- if item is not None:
- inv_info = item.get("inv_name")
- source_switch = inv_info.get("inv_name")
- msg = show_list.get("msg")
- if msg is not None:
- result = msg.get("result")
- for sub_result in result:
- vlt_dict = {}
- rpc_reply = sub_result.get("rpc-reply")
- data = rpc_reply.get("data")
- if data is not None:
- topo_oper_data = data.get("topology-oper-data")
- if topo_oper_data is not None:
- vlt_domain = topo_oper_data.get("vlt-domain")
- if vlt_domain is not None:
- local_info = vlt_domain.get("local-info")
- if local_info is not None:
- local_role = local_info.get("role")
- vlt_dict[local_role] = source_switch
- local_mac = local_info.get("system-mac")
- vlt_dict[local_role + "_mac"] = local_mac
- peer_info = vlt_domain.get("peer-info")
- if peer_info is not None:
- peer_mac = peer_info.get("system-mac")
- peer_switch = self.get_switch_inv_name_from_mac(
- peer_mac)
- peer_role = peer_info.get("role")
- vlt_dict[peer_role] = peer_switch
- vlt_dict[peer_role + "_mac"] = peer_mac
- peer_status = peer_info.get("peer-status")
- vlt_dict[peer_role +
- "_status"] = peer_status
- if bool(vlt_dict):
- primary_switch = vlt_dict.get("primary")
- vlt_data = show_vlt_dict.get(primary_switch)
- if vlt_data is None:
- # update database specific to primary, it helps
- # to avoid to skip duplicate data
- show_vlt_dict[primary_switch] = vlt_dict
- return show_vlt_dict
-
- def perform_action(self):
- try:
- actual_vlt_dict = self.parse_vlt_output()
- final_out = self.validate_vlt_pairs(actual_vlt_dict)
- self.exit_msg.update({"results": final_out})
- self.module.exit_json(changed=False, msg=self.exit_msg)
- except Exception as e:
- self.module.fail_json(
- msg=to_native(e),
- exception=traceback.format_exc())
-
-
-def main():
- module_instance = VltValidation()
- module_instance.perform_action()
-
-
-if __name__ == '__main__':
- main()
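
The validation above keys the parsed VLT data by the primary switch and reports an error_type of secondary_mismatch, peer_missing, peer_down, or vlt_config_missing for each failing pair. A sketch of the group_vars input it expects through intended_vlt_pairs (the switch names are hypothetical and must match the inv_name values resolved from the fabric summary):

intended_vlt_pairs:
  - primary: spine1
    secondary: spine2
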
diff --git a/ansible_collections/dellemc/os10/plugins/modules/wiring_validate.py b/ansible_collections/dellemc/os10/plugins/modules/wiring_validate.py
deleted file mode 100644
index 7947c1b19..000000000
--- a/ansible_collections/dellemc/os10/plugins/modules/wiring_validate.py
+++ /dev/null
@@ -1,246 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import (absolute_import, division, print_function)
-
-__copyright__ = "(c) 2020 Dell Inc. or its subsidiaries. All rights reserved."
-
-__metaclass__ = type
-
-DOCUMENTATION = '''
-module: wiring_validate
-author: "Senthil Kumar Ganesan (@skg-net)"
-short_description: Validate the wiring based on the planned wiring details
-description:
-
- - Get the wiring info using lldp output and show system network summary.
-
-options:
- show_lldp_neighbors_list:
- description:
- - show lldp neighbor output
- type: 'list'
- required: True
- show_system_network_summary:
- description:
- - show system network summary output
- type: 'list'
- required: True
- planned_neighbors:
- description:
- - planned neighbors input from group_var to compare against the actual wiring
- type: 'list'
- required: True
-'''
-EXAMPLES = '''
-Copy the YAML below into a playbook (e.g. play.yml) and run it as follows:
-
-#$ ansible-playbook -i inv play.yml
-name: show system Configuration
-hosts: localhost
-connection: local
-gather_facts: False
-tasks:
-- name: "Get Dell EMC OS10 Show lldp"
- os10_command:
- commands:
- - command: "show lldp neighbors"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_lldp
-- local_action: copy content={{ show_lldp }} dest=show
-- set_fact:
- output_lldp: "{{ output_lldp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item,
- 'stdout_show_lldp': item.stdout}] }}"
- loop: "{{ show_lldp.results }}"
-- debug: var=output_lldp
-- name: "Get Dell EMC OS10 Show system"
- import_role:
- name: os10_fabric_summary
- register: show_system_network_summary
-- debug: var=show_system_network_summary
-- name: call lib to process
- wiring_validate:
- show_lldp_neighbors_list: "{{ output_lldp }}"
- show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
- planned_neighbors: "{{ intended_neighbors }}"
-'''
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible.module_utils._text import to_native
-from collections import OrderedDict
-import re
-import traceback
-
-
-class WiringValidation(object):
- def __init__(self):
- self.module = AnsibleModule(argument_spec=self.get_fields())
- self.show_lldp_neighbors_list = self.module.params['show_lldp_neighbors_list']
- self.show_system_network_summary = self.module.params['show_system_network_summary']
- self.planned_neighbors = self.module.params['planned_neighbors']
- self.exit_msg = OrderedDict()
-
- def get_fields(self):
- spec_fields = {
- 'show_lldp_neighbors_list': {
- 'type': 'list',
- 'required': True
- },
- 'show_system_network_summary': {
- 'type': 'list',
- 'required': True
- },
- 'planned_neighbors': {
- 'type': 'list',
- 'required': True
- }
- }
- return spec_fields
-
- # get switch inv name from mac
- def get_switch_inv_name_from_mac(self, mac):
- inv_name = None
- for show_system in self.show_system_network_summary:
- if (str.lower(show_system["node-mac"])) == (str.lower(mac)):
- inv_name = show_system.get("inv_name")
- break
- return inv_name
-
- # get service tag for switch
-
- def get_service_tag_and_mac(self):
- svc_tag_mac = {}
- for show_system in self.show_system_network_summary:
- temp_dict = {}
- temp_dict["svc-tag"] = show_system.get("service-tag")
- temp_dict["node-mac"] = show_system.get("node-mac")
- if bool(temp_dict):
- svc_tag_mac[show_system["inv_name"]] = temp_dict
- return svc_tag_mac
-
- # form actual neighbors per network with help of lldp output and show
- # system output
- def get_actual_neigbor(self, lldp_list):
- final_out = list()
- for lldp in lldp_list:
- # check whether lldp output mac match with system summary mac and
- # collect port and host info
- source_switch = lldp["inv_name"]
- lldp_mac = lldp["rem_mac"]
- for index, rem_mac in enumerate(lldp_mac):
- final_dict = {}
- final_dict["source_switch"] = source_switch
- final_dict["source_port"] = lldp["loc_port"][index]
- final_dict["dest_port"] = lldp["rem_port"][index]
- dest_switch = self.get_switch_inv_name_from_mac(rem_mac)
- if dest_switch is not None:
- final_dict["dest_switch"] = dest_switch
- else:
- final_dict["dest_switch"] = "unknown"
- final_out.append(final_dict)
- return final_out
-
- def parse_lldp_output(self):
- nbr_list = list()
- for item in self.show_lldp_neighbors_list:
- out_dict = {}
- loc_port = list()
- rem_port = list()
- rem_mac = list()
- out_dict["host"] = item.get("host")
- out_dict["inv_name"] = item.get("inv_name")
- show_lldp_output = item.get("stdout_show_lldp")
- if show_lldp_output is not None:
- output = str(show_lldp_output[0])
- lldp_regexp = r"(\S+)\s+(\S+)\s+(\S+)\s+(\S+)"
- lines = output.splitlines()
- for line in lines:
- if "Loc PortID" in line:
- continue
- match = re.match(lldp_regexp, line)
- if match:
- val = match.groups()
- loc_port.append(val[0])
- rem_port.append(val[2])
- rem_mac.append(val[3])
- out_dict["loc_port"] = loc_port
- out_dict["rem_port"] = rem_port
- out_dict["rem_mac"] = rem_mac
- if bool(out_dict):
- nbr_list.append(out_dict)
- return nbr_list
-
- def perform_action(self):
- try:
- lldp_list = self.parse_lldp_output()
- actual_nbr = self.get_actual_neigbor(lldp_list)
- svc_tag_mac = self.get_service_tag_and_mac()
- # Validate the planned neighbors with actual neighbors
- mismatch_list = list()
- for planned_neighbors in self.planned_neighbors:
- bflag = False
- if planned_neighbors not in actual_nbr:
- for actual_neighbors in actual_nbr:
- if (actual_neighbors["source_switch"] == planned_neighbors["source_switch"]
- and actual_neighbors["source_port"] == planned_neighbors["source_port"]):
- if (actual_neighbors["dest_switch"] !=
- planned_neighbors["dest_switch"]):
- bflag = True
- if (actual_neighbors["dest_switch"]
- != "unknown"):
- reason = (
- "Destination switch is not an expected value, "
- "expected switch: {0},port: {1}; actual switch: {2}(svc-tag:{3}, node_mac:{4}), port: {5}" .format(
- planned_neighbors["dest_switch"],
- planned_neighbors["dest_port"],
- actual_neighbors["dest_switch"],
- svc_tag_mac.get(
- actual_neighbors["dest_switch"]).get("svc-tag"),
- svc_tag_mac.get(
- actual_neighbors["dest_switch"]).get("node-mac"),
- actual_neighbors["dest_port"]))
- else:
- reason = (
- "Destination switch is not an expected value, "
- "expected switch: {0},port: {1}; actual switch: {2}, port: {3}" .format(
- planned_neighbors["dest_switch"],
- planned_neighbors["dest_port"],
- actual_neighbors["dest_switch"],
- actual_neighbors["dest_port"]))
- planned_neighbors["reason"] = reason
- planned_neighbors["error_type"] = "link-mismatch"
- break
- if(actual_neighbors["dest_port"] != planned_neighbors["dest_port"]):
- bflag = True
- reason = (
- "Destination switch port is not an expected value, "
- "expected port: {0} actual port: {1}" .format(
- planned_neighbors["dest_port"],
- actual_neighbors["dest_port"]))
- planned_neighbors["reason"] = reason
- planned_neighbors["error_type"] = "link-mismatch"
- break
- if not bflag:
- reason = "link is not found for source switch: {0},port: {1}".format(
- planned_neighbors["source_switch"], planned_neighbors["source_port"])
- planned_neighbors["reason"] = reason
- planned_neighbors["error_type"] = "link-missing"
- mismatch_list.append(planned_neighbors)
-
- self.exit_msg.update({"results": mismatch_list})
- self.module.exit_json(changed=False, msg=self.exit_msg)
- except Exception as e:
- self.module.fail_json(
- msg=to_native(e),
- exception=traceback.format_exc())
-
-
-def main():
- module_instance = WiringValidation()
- module_instance.perform_action()
-
-
-if __name__ == '__main__':
- main()
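
Each planned link above is matched on source_switch and source_port against the neighbors derived from "show lldp neighbors" and the fabric summary, and mismatches are reported with an error_type of link-mismatch or link-missing. A sketch of the group_vars input passed in as planned_neighbors (switch names and port IDs are hypothetical):

intended_neighbors:
  - source_switch: leaf1
    source_port: ethernet1/1/1
    dest_switch: spine1
    dest_port: ethernet1/1/1
  - source_switch: leaf1
    source_port: ethernet1/1/2
    dest_switch: spine2
    dest_port: ethernet1/1/1
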
diff --git a/ansible_collections/dellemc/os10/plugins/terminal/os10.py b/ansible_collections/dellemc/os10/plugins/terminal/os10.py
deleted file mode 100644
index c3e1d3ac2..000000000
--- a/ansible_collections/dellemc/os10/plugins/terminal/os10.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Copyright (c) 2020 Dell Inc.
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import re
-import json
-
-from ansible.module_utils._text import to_text, to_bytes
-from ansible.plugins.terminal import TerminalBase
-from ansible.errors import AnsibleConnectionFailure
-
-
-class TerminalModule(TerminalBase):
-
- terminal_stdout_re = [
- re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:#) ?$"),
- re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
- ]
-
- terminal_stderr_re = [
- re.compile(br"% ?Error"),
- re.compile(br"% ?Bad secret"),
- re.compile(br"Syntax error:"),
- re.compile(br"invalid input", re.I),
- re.compile(br"(?:incomplete|ambiguous) command", re.I),
- re.compile(br"connection timed out", re.I),
- re.compile(br"[^\r\n]+ not found", re.I),
- re.compile(br"'[^']' +returned error code: ?\d+"),
- ]
-
- def on_open_shell(self):
- try:
- self._exec_cli_command(b'terminal length 0')
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to set terminal parameters')
-
- def on_become(self, passwd=None):
- if self._get_prompt().endswith(b'#'):
- return
-
- cmd = {u'command': u'enable'}
- if passwd:
- cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
- cmd[u'answer'] = passwd
-
- try:
- self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
-
- def on_unbecome(self):
- prompt = self._get_prompt()
- if prompt is None:
- # if prompt is None most likely the terminal is hung up at a prompt
- return
-
- if prompt.strip().endswith(b')#'):
- self._exec_cli_command(b'end')
- self._exec_cli_command(b'disable')
-
- elif prompt.endswith(b'#'):
- self._exec_cli_command(b'disable')
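
The terminal_stdout_re patterns above decide when the device has finished echoing a command. A quick standalone check of the first pattern (the sample prompts are hypothetical OS10-style prompts):

import re

prompt_re = re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:#) ?$")

for prompt in (b"OS10# ", b"OS10(config)# ", b"OS10(conf-if-eth1/1/1)# "):
    # All three end in '#', so they match and output collection stops there.
    print(prompt, bool(prompt_re.search(prompt)))
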
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE b/ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/README.md b/ansible_collections/dellemc/os10/roles/os10_aaa/README.md
deleted file mode 100644
index cabee7ea9..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/README.md
+++ /dev/null
@@ -1,136 +0,0 @@
-AAA role
-========
-
-This role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of RADIUS and TACACS servers, and of AAA authentication and accounting. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The AAA role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable, as shown in the sketch below
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
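-
-A minimal sketch of such a negation (reusing the sample RADIUS host shown later in this document; the values are illustrative only):
-
-    os10_aaa:
-      radius_server:
-        host:
-          - ip: 2001:4898:f0:f09b::1001
-            key: 0
-            value: "abc"
-            state: absent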
-
-**os10_aaa keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os10 |
-| ``radius_server.retransmit`` | integer | Configures the number of retransmissions | os10 |
-| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions | os10 |
-| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os10 |
-| ``host.ip`` | string | Configures the RADIUS server host address | os10 |
-| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 |
-| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os10 |
-| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``) | os10 |
-| ``tacacs_server.timeout`` | integer | Configures the timeout for retransmissions | os10 |
-| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os10 |
-| ``host.ip`` | string | Configures the TACACS server host address | os10 |
-| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os10 |
-| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os10 |
-| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os10 |
-| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os10 |
-| ``aaa_accounting.accounting.accounting_type`` | string | Configures accounting type | os10 |
-| ``aaa_accounting.accounting.connection_type`` | string | Configures accounting connection type | os10 |
-| ``aaa_accounting.accounting.account_mode`` | string | Configures accounting mode | os10 |
-| ``aaa_accounting.accounting.server_group`` | string | Configures accounting server group | os10 |
-| ``aaa_accounting.accounting.state`` | string: present,absent | Configures/unconfigures accounting parameters | os10 |
-| ``aaa_authentication`` | dictionary | Configures authentication parameters (see ``aaa_authentication.*``) | os10 |
-| ``aaa_authentication.login`` | dictionary | Configures authentication login (see ``aaa_authentication.login.*``)| os10 |
-| ``aaa_authentication.login.console`` | boolean | Configures the authentication method for console login if set to true; otherwise for the default login | os10 |
-| ``aaa_authentication.login.state`` | string: present,absent | Unconfigures authentication login if set to absent | os10 |
-| ``aaa_authentication.login.type`` | string | Configures the authentication type | os10 |
-| ``aaa_authentication.re_authenticate`` | boolean | Enables AAA re-authentication if set to true | os10 |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_aaa* role to configure AAA for RADIUS and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default it is set to false. The example writes a simple playbook that only references the *os10_aaa* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_aaa:
- radius_server:
- retransmit: 5
- timeout: 10
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- tacacs_server:
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- timeout: 6
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: present
- re_authenticate: true
- aaa_accounting:
- accounting:
- - accounting_type: commands
- connection_type: console
- account_mode: start-stop
- server_group: group tacacs+
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_aaa
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
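-
-To also write the generated commands to a .part file under *build_dir*, `os10_cfg_generate` can be overridden at run time, for example through the standard `-e` extra-vars option:
-
-    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"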
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml
deleted file mode 100644
index 4f8b5c8cd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# defaults file for dellemc.os10.os10_aaa
-attribute_type:
- mandatory: mandatory
- on_for_login_auth: on-for-login-auth
- include_in_access_req: include-in-access-req
- mac: "mac format"
- mac_ietf: "mac format ietf"
- mac_ietf_lower_case: "mac format ietf lower-case"
- mac_ietf_upper_case: "mac format ietf upper-case"
- mac_legacy: "mac format legacy"
- mac_legacy_lower_case: "mac format legacy lower-case"
- mac_legacy_upper_case: "mac format legacy upper-case"
- mac_unformatted: "mac format unformatted"
- mac_unformatted_lower_case: "mac format unformatted lower-case"
- mac_unformatted_upper_case: "mac format unformatted upper-case"
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml
deleted file mode 100644
index 0b86ef12a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_aaa
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml
deleted file mode 100644
index 54fde54e6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_aaa role facilitates the configuration of Authentication, Authorization, and Accounting (AAA) attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml
deleted file mode 100644
index 77eb07fcd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os10
-
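-# The first task renders the os10_aaa.j2 template to a .part file in build_dir
-# (only when os10_cfg_generate is true); the second task pushes the same
-# template to the device through the os10_config module.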
- - name: "Generating AAA configuration for os10"
- template:
- src: os10_aaa.j2
- dest: "{{ build_dir }}/aaa10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning AAA configuration for os10"
- os10_config:
- src: os10_aaa.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2 b/ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2
deleted file mode 100644
index 438c0f8e5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/templates/os10_aaa.j2
+++ /dev/null
@@ -1,148 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure AAA commands for os10 Devices
-
-os10_aaa:
- radius_server:
- retransmit: 5
- timeout: 10
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- tacacs_server:
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- timeout: 6
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: present
- re_authenticate: true
- aaa_accounting:
- accounting:
- - accounting_type: commands
- connection_type: console
- account_mode: start-stop
- server_group: group tacacs+
- state: present
-
-##################################################}
-{% if os10_aaa is defined and os10_aaa %}
- {% if os10_aaa.radius_server is defined and os10_aaa.radius_server %}
- {% set item = os10_aaa.radius_server %}
- {% if item.retransmit is defined and item.retransmit %}
-radius-server retransmit {{ item.retransmit }}
- {% else %}
-no radius-server retransmit
- {% endif %}
- {% if item.timeout is defined and item.timeout %}
-radius-server timeout {{ item.timeout }}
- {% else %}
-no radius-server timeout
- {% endif %}
- {% if item.host is defined and item.host %}
- {% for it in item.host %}
- {% if it.ip is defined and it.ip %}
- {% if it.state is defined and it.state == "absent" %}
-no radius-server host {{ it.ip }}
- {% else %}
- {% if it.auth_port is defined and it.auth_port %}
-radius-server host {{ it.ip }} auth-port {{ it.auth_port }} key {{ it.key }} {{ it.value }}
- {% else %}
-radius-server host {{ it.ip }} key {{ it.key }} {{ it.value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if os10_aaa.tacacs_server is defined and os10_aaa.tacacs_server %}
- {% set item = os10_aaa.tacacs_server %}
- {% if item.timeout is defined and item.timeout %}
-tacacs-server timeout {{ item.timeout }}
- {% else %}
-no tacacs-server timeout
- {% endif %}
- {% if item.host is defined and item.host %}
- {% for it in item.host %}
- {% if it.ip is defined and it.ip %}
- {% if item.state is defined and item.state == "absent"%}
-no tacacs-server host {{it.ip}}
- {% else %}
- {% if it.auth_port is defined and it.auth_port %}
-tacacs-server host {{it.ip}} auth-port {{it.auth_port}} key {{it.key}} {{it.value}}
- {% else %}
-tacacs-server host {{it.ip}} key {{it.key}} {{it.value}}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if os10_aaa.aaa_authentication is defined and os10_aaa.aaa_authentication %}
- {% set aaa_list = os10_aaa.aaa_authentication %}
- {% if aaa_list.login is defined and aaa_list.login %}
- {% for aaa_vars in aaa_list.login %}
- {% if aaa_vars.console is defined and aaa_vars.console %}
- {% if aaa_vars.state is defined and aaa_vars.state == "absent"%}
-no aaa authentication login console
- {% else %}
- {% if aaa_vars.type is defined and aaa_vars.type %}
-aaa authentication login console {{aaa_vars.type}}
- {% endif %}
- {% endif %}
- {% else %}
- {% if aaa_vars.state is defined and aaa_vars.state == "absent"%}
-no aaa authentication login default
- {% else %}
- {% if aaa_vars.type is defined and aaa_vars.type %}
-aaa authentication login default {{aaa_vars.type}}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% set aaa_vars = os10_aaa.aaa_authentication %}
- {% if aaa_vars.re_authenticate is defined %}
- {% if aaa_vars.re_authenticate %}
-aaa re-authenticate enable
- {% else %}
-no aaa re-authenticate enable
- {% endif %}
- {% endif %}
- {% endif %}
- {% if os10_aaa.aaa_accounting is defined and os10_aaa.aaa_accounting %}
- {% set acc_list = os10_aaa.aaa_accounting %}
- {% if acc_list.accounting is defined and acc_list.accounting %}
- {% for aaa_vars in acc_list.accounting %}
- {% if aaa_vars.accounting_type is defined and aaa_vars.accounting_type %}
- {% if aaa_vars.accounting_type == "commands" %}
-            {% set accounting_type = aaa_vars.accounting_type + " all" %}
-          {% else %}
-            {% set accounting_type = aaa_vars.accounting_type %}
-          {% endif %}
- {% if aaa_vars.connection_type is defined and aaa_vars.connection_type %}
- {% if aaa_vars.state is defined and aaa_vars.state == "absent"%}
-no aaa accounting {{accounting_type}} {{aaa_vars.connection_type}}
- {% else %}
- {% if aaa_vars.account_mode is defined and aaa_vars.account_mode == "none" %}
-aaa accounting {{accounting_type}} {{aaa_vars.connection_type}} {{aaa_vars.account_mode}}
- {% else %}
- {% if aaa_vars.server_group is defined and aaa_vars.server_group %}
-aaa accounting {{accounting_type}} {{aaa_vars.connection_type}} {{aaa_vars.account_mode}} {{aaa_vars.server_group}}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml
deleted file mode 100644
index a845c14d2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/main.os10.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-# vars file for dellemc.os10.os10_aaa,
-# below gives a sample configuration
-# Sample vars for OS10 device
-os10_aaa:
- radius_server:
- retransmit: 5
- timeout: 10
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- tacacs_server:
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- timeout: 6
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: present
- re_authenticate: true
- aaa_accounting:
- accounting:
- - accounting_type: commands
- connection_type: console
- account_mode: start-stop
- server_group: group tacacs+
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml
deleted file mode 100644
index b3d685fb2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_aaa
diff --git a/ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml
deleted file mode 100644
index 6854698ef..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_aaa/vars/main.yml
+++ /dev/null
@@ -1 +0,0 @@
-# vars file for dellemc.os10.os10_aaa
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/LICENSE b/ansible_collections/dellemc/os10/roles/os10_acl/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/README.md b/ansible_collections/dellemc/os10/roles/os10_acl/README.md
deleted file mode 100644
index 14a1fe2a5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/README.md
+++ /dev/null
@@ -1,130 +0,0 @@
-ACL role
-========
-
-This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The ACL role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_acl keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os10 |
-| ``name`` | string (required) | Configures the name of the access-control list | os10 |
-| ``description`` | string | Configures a description of the access-control list | os10 |
-| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os10|
-| ``remark.number`` | integer (required) | Configures the remark sequence number | os10 |
-| ``remark.description`` | string | Configures the remark description | os10 |
-| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os10 |
-| ``entries`` | list | Configures ACL rules (see ``entries.*``) | os10 |
-| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os10 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os10 |
-| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os10 |
-| ``entries.source`` | string (required) | Specifies the source address to match in the packets | os10 |
-| ``entries.src_condition`` | string | Specifies the condition to filter packets from the source address; ignored if MAC | os10 |
-| ``entries.destination`` | string (required) | Specifies the destination address to match in the packets | os10 |
-| ``entries.dest_condition`` | string | Specifies the condition to filter packets to the destination address | os10 |
-| ``entries.other_options`` | string | Specifies the other options applied on packets (count, log, order, monitor, and so on) | os10 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os10 |
-| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os10 |
-| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os10 |
-| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os10 |
-| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os10 |
-| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os10 |
-| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os10 |
-| ``lineterminal`` | list | Configures the terminal to apply the ACL (see ``lineterminal.*``) | os10 |
-| ``lineterminal.state`` | string: absent,present\* | Deletes the access-class from line terminal if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
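-
-As a rough illustration, the same connection settings can also be grouped in a *group_vars* file shared by several switches; the placeholder values mirror the *host_vars* sample below, and the `network_cli` connection plugin shown here is an assumption rather than something this README documents:
-
-    ansible_ssh_user: xxxxx
-    ansible_ssh_pass: xxxxx
-    ansible_become: yes
-    ansible_become_method: enable
-    ansible_network_os: dellemc.os10.os10
-    ansible_connection: network_cli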
-
-
-Example playbook
-----------------
-
-This example uses the *os10_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6, and to assign the access-class to the line terminals. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default it is set to false. Write a simple playbook that only references the *os10_acl* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_acl:
- - name: ssh
- type: ipv4
- description: acl
- remark:
- - description: 1
- number: 3
- state: absent
- entries:
- - number: 14
- permit: true
- protocol: tcp
- source: any
- src_condition: neq 6
- destination: any
- dest_condition: eq 4
- other_options: count
- state: present
- stage_ingress:
- - name: ethernet 1/1/1
- state: absent
- - name: ethernet 1/1/2
- state: absent
- stage_egress:
- - name: ethernet 1/1/3
- state: absent
- lineterminal:
- state: absent
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_acl
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
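-
-For reference, with the sample *host_vars/leaf1* values above, the configuration that the role renders and pushes would look roughly like the following. This is a hand-derived sketch based on the role template, not output captured from a device:
-
-    ip access-list ssh
-     description "acl"
-     no seq 3
-     seq 14 permit tcp any neq 6 any eq 4 count
-    line vty
-     no ip access-class ssh
-    interface ethernet 1/1/1
-     no ip access-group ssh in
-    interface ethernet 1/1/2
-     no ip access-group ssh in
-    interface ethernet 1/1/3
-     no ip access-group ssh out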
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml
deleted file mode 100644
index 9c7559e38..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_acl
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml
deleted file mode 100644
index 162d4a3fa..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_acl
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml
deleted file mode 100644
index c354b58fc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_acl role facilitates the configuration of access control list (ACL) attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml
deleted file mode 100644
index ace51340e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating ACL configuration for os10"
- template:
- src: os10_acl.j2
- dest: "{{ build_dir }}/acl10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning ACL configuration for os10"
- os10_config:
- src: os10_acl.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
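-
-# These two tasks mean the role can both render the commands into {{ build_dir }}
-# (when os10_cfg_generate is true) and provision the device through os10_config.
-# A minimal sketch of a play that exercises the generate path, with assumed
-# variable values, might look like:
-#
-#  - hosts: leaf1
-#    vars:
-#      os10_cfg_generate: true
-#      build_dir: ../temp/temp_os10
-#    roles:
-#      - dellemc.os10.os10_acl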
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2 b/ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2
deleted file mode 100644
index 7d6cb31db..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/templates/os10_acl.j2
+++ /dev/null
@@ -1,212 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure ACL commands for OS10 devices
-
-os10_acl:
- - name: ssh-only
- type: ipv4
- description: acl
- remark:
- - description: 1
- number: 3
- state: present
- entries:
- - number: 10
- permit: true
- protocol: tcp
- source: any
- destination: any
- src_condition: eq 22
- dest_condition: eq 2 ack
- other_options: count
- state: present
- stage_ingress:
- - name: ethernet 1/1/1
- state: present
- - name: ethernet 1/0/1
- state: present
- stage_egress:
- - name: ethernet 1/1/2
- state: present
- lineterminal:
- state: present
- state: present
-#####################################}
-{% if os10_acl is defined and os10_acl %}
- {% for val in os10_acl %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.type is defined and val.type == "ipv4" %}
-no ip access-list {{ val.name }}
- {% elif val.type is defined and val.type == "ipv6" %}
-no ipv6 access-list {{ val.name }}
- {% elif val.type is defined and val.type == "mac" %}
-no mac access-list {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.type is defined and val.type == "ipv4" %}
-ip access-list {{ val.name }}
- {% elif val.type is defined and val.type == "ipv6" %}
-ipv6 access-list {{ val.name }}
- {% elif val.type is defined and val.type == "mac" %}
-mac access-list {{ val.name }}
- {% endif %}
- {% if val.description is defined %}
- {% if val.description %}
- description "{{ val.description }}"
- {% endif %}
- {% endif %}
- {% if val.remark is defined and val.remark %}
- {% for remark in val.remark %}
- {% if remark.description is defined and remark.description %}
- {% if remark.number is defined and remark.number %}
- {% if remark.state is defined and remark.state == "absent" %}
- no seq {{ remark.number }}
- {% else %}
- seq {{ remark.number }} remark "{{ remark.description }}"
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if val.entries is defined and val.entries %}
- {% for rule in val.entries %}
- {% if rule.number is defined and rule.number %}
- {% if rule.state is defined and rule.state == "absent" %}
- no seq {{ rule.number }}
- {% else %}
- {% if rule.permit is defined %}
- {% if rule.permit %}
- {% set is_permit = "permit" %}
- {% else %}
- {% set is_permit = "deny" %}
- {% endif %}
- {% endif %}
- {% if val.type is defined and val.type %}
- {% if rule.protocol is defined and rule.protocol %}
- {% if rule.source is defined and rule.source %}
- {% if rule.src_condition is defined and rule.src_condition %}
- {% if rule.destination is defined and rule.destination %}
- {% if rule.dest_condition is defined and rule.dest_condition %}
- {% if rule.other_options is defined and rule.other_options %}
- {% set other_options = rule.other_options %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }}
- {% endif %}
- {% else %}
- {% if rule.other_options is defined and rule.other_options %}
- {% set other_options = rule.other_options %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- {% if rule.destination is defined and rule.destination %}
- {% if rule.dest_condition is defined and rule.dest_condition %}
- {% if rule.other_options is defined and rule.other_options %}
- {% set other_options = rule.other_options %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }}
- {% endif %}
- {% else %}
- {% if rule.other_options is defined and rule.other_options %}
- {% set other_options = rule.other_options %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if val.lineterminal is defined and val.lineterminal %}
- {% if val.type is defined and not val.type == "mac" %}
-line vty
- {% if val.lineterminal.state is defined and val.lineterminal.state == "absent" %}
- {% if val.type == "ipv6" %}
- no ipv6 access-class {{ val.name }}
- {% elif val.type == "ipv4" %}
- no ip access-class {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.type == "ipv6" %}
- ipv6 access-class {{ val.name }}
- {% elif val.type == "ipv4" %}
- ip access-class {{ val.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if val.stage_ingress is defined and val.stage_ingress %}
- {% for intf in val.stage_ingress %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- no mac access-group {{ val.name }} in
- {% elif val.type is defined and val.type == "ipv6" %}
- no ipv6 access-group {{ val.name }} in
- {% else %}
- no ip access-group {{ val.name }} in
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- mac access-group {{ val.name }} in
- {% elif val.type is defined and val.type == "ipv6" %}
- ipv6 access-group {{ val.name }} in
- {% else %}
- ip access-group {{ val.name }} in
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if val.stage_egress is defined and val.stage_egress %}
- {% for intf in val.stage_egress %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- no mac access-group {{ val.name }} out
- {% elif val.type is defined and val.type == "ipv6" %}
- no ipv6 access-group {{ val.name }} out
- {% else %}
- no ip access-group {{ val.name }} out
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- mac access-group {{ val.name }} out
- {% elif val.type is defined and val.type == "ipv6" %}
- ipv6 access-group {{ val.name }} out
- {% else %}
- ip access-group {{ val.name }} out
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml
deleted file mode 100644
index c3db9c98e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/tests/main.os10.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# vars file for dellemc.os10.os10_acl,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl
- remark:
- - description: 1
- number: 3
- state: absent
- entries:
- - number: 14
- permit: true
- protocol: tcp
- source: any
- src_condition: neq 6
- destination: any
- dest_condition: eq 4
- other_options: count
- state: present
- stage_ingress:
- - name: ethernet 1/1/1
- state: absent
- - name: ethernet 1/1/2
- state: absent
- stage_egress:
- - name: ethernet 1/1/3
- state: absent
- lineterminal:
- state: absent
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml
deleted file mode 100644
index 653f9d69d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_acl
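-
-# A typical way to exercise this test playbook against the bundled inventory
-# might be the following command (an assumption, not documented by the role):
-#
-#   ansible-playbook -i inventory.yaml test.yaml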
diff --git a/ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml
deleted file mode 100644
index 0cd37964d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_acl/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_acl
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE b/ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/README.md b/ansible_collections/dellemc/os10/roles/os10_bfd/README.md
deleted file mode 100644
index c69079924..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-BFD role
-===========
-
-This role facilitates the configuration of bidirectional forwarding detection (BFD) global attributes. It specifically enables configuration of BFD interval, min_rx, multiplier, and role. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The BFD role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_bfd keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``bfd`` | dictionary | Configures global BFD parameters (see ``bfd.*``) | os10 |
-| ``bfd.interval`` | integer | Configures the time interval in ms (100 to 1000) | os10 |
-| ``bfd.min_rx`` | integer | Configures maximum waiting time for receiving control packets from BFD peers in ms (100 to 1000)| os10 |
-| ``bfd.multiplier`` | integer | Configures the maximum number of consecutive packets that are not received from BFD peers before session state changes to Down (3 to 50) | os10 |
-| ``bfd.role`` | string: passive,active\* | Configures the BFD role | os10 |
-| ``bfd.state`` | string: absent,present\* | Removes the global BFD if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
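-
-As a minimal sketch (illustrative values only), the ``bfd.state`` key alone can be used to negate the global BFD configuration:
-
-    os10_bfd:
-      bfd:
-        state: absent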
-
-Connection variables
-********************
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_bfd* role to completely configure the global BFD attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, the variable is set to false. The example writes a simple playbook that only references the *os10_bfd* role. By including the role, you automatically get access to all of the tasks that configure the BFD feature.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_bfd:
- bfd:
- interval: 100
- min_rx: 100
- multiplier: 3
- role: "active"
- state: "present"
-
-**Simple playbook to set up BFD — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_bfd
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
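-
-To also generate the configuration commands as a *.part* file under *build_dir*, the same playbook can be run with `os10_cfg_generate` enabled on the command line (it can equally be set in *host_vars*), for example:
-
-    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"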
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml
deleted file mode 100644
index 4aa9bfbb6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_bfd
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml
deleted file mode 100644
index b490464e1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_bfd
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml
deleted file mode 100644
index fce020597..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- role_name: os10_bfd
- author: Dell EMC Networking Engineering
- description: The os10_bfd role facilitates the configuration of global bfd attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml
deleted file mode 100644
index 88ac0eb59..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating bfd global configuration for os10"
- template:
- src: os10_bfd.j2
- dest: "{{ build_dir }}/bfd10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning bfd global configuration for os10"
- os10_config:
- src: os10_bfd.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2 b/ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2
deleted file mode 100644
index 18c946446..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/templates/os10_bfd.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-#Purpose:
-Configure bfd global commands for os10 Devices
-
-os10_bfd:
- bfd:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: "active"
- state: "present"
-###############################################}
-{% if os10_bfd is defined and os10_bfd %}
- {% set bfd_vars = os10_bfd %}
- {% if bfd_vars.bfd is defined and bfd_vars.bfd %}
- {% set bfd = bfd_vars.bfd %}
- {% if bfd.state is defined and bfd.state == "absent" %}
-no bfd enable
-no bfd interval
- {% else %}
-bfd enable
- {% if bfd.interval is defined and bfd.min_rx is defined and bfd.multiplier is defined %}
- {% if bfd.interval and bfd.min_rx and bfd.multiplier %}
- {% if bfd.role is defined and bfd.role %}
-bfd interval {{ bfd.interval }} min_rx {{ bfd.min_rx }} multiplier {{ bfd.multiplier }} role {{ bfd.role }}
- {% else %}
-bfd interval {{ bfd.interval }} min_rx {{ bfd.min_rx }} multiplier {{ bfd.multiplier }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endif %}
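-{# Example rendering (illustrative): with the sample variable values shown in
-the purpose block above (state "present", role "active"), this template
-produces:
-bfd enable
-bfd interval 200 min_rx 200 multiplier 3 role active
-#}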
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml
deleted file mode 100644
index 844b91c11..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/main.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# vars file for dellemc.os10.os10_bfd,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_bfd:
- bfd:
- interval: 100
- min_rx: 100
- multiplier: 3
- role: "active"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml
deleted file mode 100644
index a0de5db58..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_bfd
diff --git a/ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml
deleted file mode 100644
index 781a25c8b..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bfd/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_bfd
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/README.md b/ansible_collections/dellemc/os10/roles/os10_bgp/README.md
deleted file mode 100644
index e4e7c94e1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/README.md
+++ /dev/null
@@ -1,729 +0,0 @@
-BGP role
-========
-
-This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum paths. This role is abstracted for Dell EMC PowerSwitch platforms running SmartFabric OS10.
-
-The BGP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
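-A minimal sketch of the variable structure (placeholder values only; the full key reference follows):
-
-    os10_bgp:
-      asn: 64801
-      router_id: 10.0.0.1
-      maxpath_ebgp: 4
-      ipv4_network:
-        - address: 10.10.10.0/24
-          state: present
-      neighbor:
-        - ip: 10.1.1.2
-          type: ipv4
-          remote_asn: 64802
-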
-**os10_bgp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os10 |
-| ``router_id`` | string | Configures the IP address of the local BGP router instance | os10 |
-| ``graceful_restart`` | boolean | Configures graceful restart capability | os10 |
-| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os10 |
-| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os10 |
-| ``log_neighbor_changes`` | boolean | Configures logging of neighbor up/down state changes | os10 |
-| ``fast_ext_fallover`` | boolean | Configures resetting the session if a link to a directly connected external peer goes down | os10 |
-| ``always_compare_med`` | boolean | Configures comparing MED from different neighbors | os10 |
-| ``default_loc_pref`` | integer | Configures the default local preference value | os10 |
-| ``as_notation`` | string | Configures AS number notation format | os10 |
-| ``enforce_first_as`` | boolean | Configures enforcement of the first AS for eBGP routes | os10 |
-| ``non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm | os10 |
-| ``outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members | os10 |
-| ``confederation`` | dictionary | Configures AS confederation parameters (see ``confederation.*``) | os10 |
-| ``confederation.identifier`` | integer | Configures the routing domain confederation AS | os10 |
-| ``confederation.peers`` | string | Configures the peer AS in BGP confederation | os10 |
-| ``confederation.peers_state`` | string: absent,present\* | Deletes the peer AS in BGP confederation if set to absent | os10 |
-| ``route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) | os10 |
-| ``route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection | os10 |
-| ``route_reflector.cluster_id`` | string | Configures the route reflector cluster-id | os10 |
-| ``address_family_ipv4`` | dictionary | Configures IPv4 address family parameters (see ``address_family_ipv4.*``) | os10 |
-| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent | os10 |
-| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures internal BGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the internal BGP redistribution for an IPv4 address family | os10 |
-| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for IPv4 address family | os10 |
-| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv4 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``address_family_ipv6`` | dictionary | Configures IPv6 address family parameters (see ``address_family_ipv6.*``) | os10 |
-| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 |
-| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 |
-| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 |
-| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for an IPv6 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``best_path`` | list | Configures the default best-path selection (see ``best_path.*``) | os10 |
-| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 |
-| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 |
-| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 |
-| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 |
-| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os10 |
-| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 |
-| ``ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os10 |
-| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 |
-| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 |
-| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 |
-| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 |
-| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 |
-| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os10 |
-| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 |
-| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 |
-| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 |
-| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 |
-| ``neighbor.auto_peer`` |string: unnumbered-auto | Enables auto discovery of neighbors | os10 |
-| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 |
-| ``neighbor.peergroup_type`` | string: ibgp,ebgp | Configures the BGP neighbor peer-group type | os10 |
-| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 |
-| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 |
-| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 |
-| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 |
-| ``neighbor.route_reflector_client`` | boolean | Configures route reflector client on the BGP neighbor | os10 |
-| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 |
-| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 |
-| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 |
-| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 |
-| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 |
-| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 |
-| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6/EVPN address family command mode on the BGP neighbor | os10 |
-| ``address_family.activate`` | boolean | Configures activation/deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 |
-| ``address_family.sender_loop_detect`` | boolean | Enables/disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6/l2vpn address family | os10 |
-| ``address_family.allow_as_in`` | integer | Configures the number of times the local AS number can appear in the AS path | os10 |
-| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 |
-| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 |
-| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both <no of paths>', 'send <no of paths>', 'receive')| os10 |
-| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 |
-| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 |
-| ``route_map.filter`` | string | Configures the filter for routing updates | os10 |
-| ``route_map.state`` | string, choices: absent,present\* | Deletes the route-map of the BGP neighbor if set to absent | os10 |
-| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.*``) | os10 |
-| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 |
-| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 |
-| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 |
-| ``max_prefix.warning`` | boolean | Issues a warning without dropping the session when the maximum prefix limit is exceeded, if set to true | os10 |
-| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.*``) | os10 |
-| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 |
-| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 |
-| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 |
-| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 |
-| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 |
-| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 |
-| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 |
-| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 |
-| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 |
-| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 |
-| ``neighbor.adv_start`` | integer | Sets the advertisement start of the neighbor | os10 |
-| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of a neighbor | os10 |
-| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 |
-| ``neighbor.remove_pri_as`` | string: absent,present | Removes private AS numbers from outbound updates if set to present | os10 |
-| ``neighbor.fall_over`` | string: absent,present | Configures the session to fall over on peer-route loss | os10 |
-| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 |
-| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 |
-| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 |
-| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 |
-| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 |
-| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 |
-| ``listen.limit`` | integer | Configures the maximum dynamic peer count (required together with ``listen.subnet``) | os10 |
-| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 |
-| ``neighbor.description`` | string | Configures neighbor description | os10 |
-| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 |
-| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os10 |
-| ``redistribute.route_type`` | string (required): static,connected,imported_bgp,l2vpn,ospf | Configures the name of the routing protocol to redistribute | os10 |
-| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 |
-| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 |
-| ``redistribute.ospf_id`` | string | Configures the OSPF instance ID to redistribute | os10 |
-| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 |
-| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 |
-| ``bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors | os10 |
-| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 |
-| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 |
-| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 |
-| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 |
-| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 |
-| ``vrfs`` | list | Configures VRF instances under router BGP (see ``vrf.*``) | os10 |
-| ``vrf.name`` | string (required) | Configures the VRF name | os10 |
-| ``vrf.router_id`` | string | Configures the IP address of the local BGP router instance in VRF | os10 |
-| ``vrf.graceful_restart`` | boolean | Configures graceful restart capability in VRF | os10 |
-| ``vrf.maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) in VRF | os10 |
-| ``vrf.maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) in VRF | os10 |
-| ``vrf.log_neighbor_changes`` | boolean | Configures logging of neighbor up/down events in VRF | os10 |
-| ``vrf.fast_ext_fallover`` | boolean | Resets the session if a link to a directly connected external peer goes down in VRF | os10 |
-| ``vrf.always_compare_med`` | boolean | Configures comparing MED from different neighbors in VRF | os10 |
-| ``vrf.default_loc_pref`` | integer | Configures the default local preference value in VRF | os10 |
-| ``vrf.as_notation`` | string | Changes the AS number notation format in VRF | os10 |
-| ``vrf.enforce_first_as`` | boolean | Enforces the first AS for eBGP routes in VRF | os10 |
-| ``vrf.non_deterministic_med`` | boolean | Configures nondeterministic path selection algorithm in VRF | os10 |
-| ``vrf.outbound_optimization`` | boolean | Configures outbound optimization for iBGP peer-group members in VRF | os10 |
-| ``vrf.route_reflector`` | dictionary | Configures route reflection parameters (see ``route_reflector.*``) in VRF | os10 |
-| ``vrf.route_reflector.client_to_client`` | boolean | Configures client-to-client route reflection in VRF | os10 |
-| ``vrf.route_reflector.cluster_id`` | string | Configures the route-reflector cluster-id in VRF | os10 |
-| ``vrf.address_family_ipv4`` | dictionary | Configures IPv4 address family parameters in VRF (see ``address_family_ipv4.*``) | os10 |
-| ``address_family_ipv4.aggregate_addr`` | list | Configures IPv4 BGP aggregate entries (see ``aggregate_addr.*``) in VRF | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv4 BGP aggregate address in VRF | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv4 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true in VRF | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv4 BGP aggregate entry if set to absent in VRF | os10 |
-| ``address_family_ipv4.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv4.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv4 address family | os10 |
-| ``address_family_ipv4.default_metric`` | integer | Configures the metric of redistributed routes for the IPv4 address family | os10 |
-| ``address_family_ipv4.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for the IPv4 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``vrf.address_family_ipv6`` | dictionary | Configures IPv6 address family parameters in VRF (see ``address_family_ipv6.*``) | os10 |
-| ``address_family_ipv6.aggregate_addr`` | list | Configures IPv6 BGP aggregate entries (see ``aggregate_addr.*``) | os10 |
-| ``aggregate_addr.ip_and_mask`` | string | Configures the IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.adv_map`` | string | Configures the advertise map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.attr_map`` | string | Configures the attribute map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.suppress_map`` | string | Configures the suppress map for IPv6 BGP aggregate address | os10 |
-| ``aggregate_addr.summary_only`` | boolean | Sets address to summary-only if true | os10 |
-| ``aggregate_addr.state`` | string: absent,present\* | Deletes an IPv6 BGP aggregate entry if set to absent | os10 |
-| ``address_family_ipv6.dampening`` | dictionary | Configures route-flap dampening (see ``dampening.*``) | os10 |
-| ``dampening.value`` | dictionary | Configures dampening values (<half-life time> <start value to reuse> <start value to suppress> <max duration> format; default 15 750 2000 60) | os10 |
-| ``dampening.route_map`` | string | Configures the route-map to specify criteria for dampening | os10 |
-| ``dampening.state`` | string: absent,present\* | Deletes dampening if set to absent | os10 |
-| ``address_family_ipv6.ibgp_redist_internal`` | dictionary | Configures iBGP redistribution (see ``ibgp_redist_internal.*``) | os10 |
-| ``ibgp_redist_internal.state`` | boolean | Configures the iBGP redistribution for an IPv6 address family | os10 |
-| ``address_family_ipv6.default_metric`` | integer | Configures the metric of redistributed routes for IPv6 address family | os10 |
-| ``address_family_ipv6.distance_bgp`` | dictionary | Configures BGP distances (see ``distance_bgp.*``) | os10 |
-| ``distance_bgp.value`` | dictionary | Configures the BGP administrative distance for IPv6 address family (<routes external to AS> <routes internal to AS> <local routes> format; distance bgp 2 3 4) | os10 |
-| ``distance_bgp.state`` | string: absent,present\* | Deletes distance BGP if set to absent | os10 |
-| ``vrf.best_path`` | list | Configures the default best-path selection in VRF (see ``best_path.*``) | os10 |
-| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os10 |
-| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os10 |
-| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os10 |
-| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os10 |
-| ``med.attribute`` | string (required): confed,missing-as-worst | Configures the MED attribute used for the best-path computation | os10 |
-| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os10 |
-| ``vrf.ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os10 |
-| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os10 |
-| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os10 |
-| ``vrf.ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os10 |
-| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os10 |
-| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os10 |
-| ``vrf.neighbor`` | list | Configures IPv4 BGP neighbors in VRF (see ``neighbor.*``) | os10 |
-| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os10 |
-| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | os10 |
-| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os10 |
-| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os10 |
-| ``neighbor.auto_peer`` | string: unnumbered-auto | Enables auto-discovery of neighbors | os10 |
-| ``neighbor.password`` | string | Configures the BGP neighbor password | os10 |
-| ``neighbor.peergroup_type`` | string: ibgp,ebgp | Configures the BGP neighbor peer-group type | os10 |
-| ``neighbor.ebgp_peergroup`` | string | Configures the peer-group to all auto-discovered external neighbors | os10 |
-| ``neighbor.ebgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered external neighbors | os10 |
-| ``neighbor.ibgp_peergroup`` | string | Configures the peer-group to all auto-discovered internal neighbors | os10 |
-| ``neighbor.ibgp_peergroup_state`` | string: present,absent | Removes the peer-group from all auto-discovered internal neighbors | os10 |
-| ``neighbor.route_reflector_client`` | boolean | Configures the route-reflector client on the BGP neighbor | os10 |
-| ``neighbor.local_as`` | integer | Configures the local AS for the BGP peer | os10 |
-| ``neighbor.weight`` | integer | Configures the default weight for routes from the neighbor interface | os10 |
-| ``neighbor.send_community`` | list | Configures the send community attribute to the BGP neighbor (see ``send_community.*``) | os10 |
-| ``send_community.type`` | string (required) | Configures the send community attribute to the BGP neighbor | os10 |
-| ``send_community.state`` | string: absent,present\* | Deletes the send community attribute of the BGP neighbor if set to absent | os10 |
-| ``neighbor.address_family`` | list | Configures address family commands on the BGP neighbor (see ``address_family.*``)| os10 |
-| ``address_family.type`` | string (required): ipv4,ipv6,l2vpn | Configures IPv4/IPv6/EVPN address family command mode on the BGP neighbor | os10 |
-| ``address_family.activate`` | boolean | Configures activation or deactivation of IPv4/IPv6 address family command mode on the BGP neighbor | os10 |
-| ``address_family.sender_loop_detect`` | boolean | Enables or disables the sender-side loop detection process for a BGP neighbor of IPv4/IPv6/l2vpn address family | os10 |
-| ``address_family.allow_as_in`` | integer | Configures the number of times the local AS number can appear in the AS path | os10 |
-| ``address_family.next_hop_self`` | boolean | Configures disabling the next-hop calculation for the neighbor | os10 |
-| ``address_family.soft_reconf`` | boolean | Configures per neighbor soft reconfiguration | os10 |
-| ``address_family.add_path`` | string | Configures send or receive multiple paths (value can be 'both <no of paths>', 'send <no of paths>', 'receive')| os10 |
-| ``address_family.route_map`` | list | Configures the route-map on the BGP neighbor (see ``route_map.*``) | os10 |
-| ``route_map.name`` | string | Configures the name of the route-map for the BGP neighbor | os10 |
-| ``route_map.filter`` | string | Configures the filter for routing updates | os10 |
-| ``route_map.state`` | string, choices: absent,present\* | Deletes the route-map of the BGP neighbor if set to absent | os10 |
-| ``address_family.max_prefix`` | dictionary | Configures maximum-prefix parameters (see ``max_prefix.*``) | os10 |
-| ``max_prefix.count`` | integer | Configures maximum number of prefix accepted from the peer | os10 |
-| ``max_prefix.state`` | string: absent,present | Deletes maximum prefix configured for the peer | os10 |
-| ``max_prefix.threshold`` | integer | Configures threshold percentage at which warning log is thrown | os10 |
-| ``max_prefix.warning`` | boolean | Issues a warning without dropping the session when the maximum prefix limit is exceeded, if set to true | os10 |
-| ``address_family.default_originate`` | dictionary | Configures default-originate parameters (see ``default_originate.*``) | os10 |
-| ``default_originate.route_map`` | string | Configures the name of the route-map to specify criteria to originate default | os10 |
-| ``default_originate.state`` | string, choices: absent,present\* | Deletes the default-originate if set to absent | os10 |
-| ``address_family.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``address_family.state`` | string: absent,present\* | Deletes the address family command mode of the BGP neighbor if set to absent | os10 |
-| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os10 |
-| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer-group if set to absent; supported only when *neighbor.type* is "peergroup" | os10 |
-| ``neighbor.timer`` | string | Configures neighbor timers; 5 10, where 5 is the keepalive interval and 10 is the holdtime | os10 |
-| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os10 |
-| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os10 |
-| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os10 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os10 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os10 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os10 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os10 |
-| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os10 |
-| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os10 |
-| ``neighbor.adv_start`` | integer | Sets the advertisement start of the neighbor | os10 |
-| ``neighbor.adv_start_state`` | string: absent,present\* | Configures or unconfigures the advertisement start of the neighbor | os10 |
-| ``neighbor.conn_retry_timer`` | integer | Configures the peer connection retry timer | os10 |
-| ``neighbor.remove_pri_as`` | string: absent,present | Removes private AS number from outbound updates | os10 |
-| ``neighbor.fall_over`` | string: absent,present | Configures the session to fall over on peer-route loss | os10 |
-| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables the sender-side loop detect for neighbors | os10 |
-| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os10 |
-| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os10 |
-| ``neighbor.listen`` | list | Configures listen commands on the BGP template (see ``listen.*``)| os10 |
-| ``listen.subnet`` | string (required) | Configures the passive BGP neighbor IPv4/IPv6 to this subnet | os10 |
-| ``listen.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4/IPv6 BGP neighbor if set to absent | os10 |
-| ``listen.limit`` | integer | Configures the maximum dynamic peer count (required together with ``listen.subnet``) | os10 |
-| ``neighbor.bfd`` | boolean | Enables BFD for neighbor | os10 |
-| ``neighbor.description`` | string | Configures neighbor description | os10 |
-| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os10 |
-| ``vrf.redistribute`` | list | Configures the redistribute list to get information from other routing protocols in VRF (see ``redistribute.*``) | os10 |
-| ``redistribute.route_type`` | string (required): static,connected,imported_bgp | Configures the name of the routing protocol to redistribute | os10 |
-| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os10 |
-| ``redistribute.imported_bgp_vrf_name`` | string | Configures the redistribute imported BGP VRF name | os10 |
-| ``redistribute.ospf_id`` | string | Configures the OSPF instance ID to redistribute | os10 |
-| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os10 |
-| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os10 |
-| ``vrf.bfd_all_neighbors`` | dictionary | Enables BFD for all BGP neighbors in VRF (see ``bfd_all_neighbors.*``) | os10 |
-| ``bfd_all_neighbors.interval`` | integer: 100 to 1000 | Configures time interval for sending control packets to BFD peers in ms| os10 |
-| ``bfd_all_neighbors.min_rx`` | integer: 100 to 1000 | Configures maximum waiting time for receiving control packets from BFD peers in ms| os10 |
-| ``bfd_all_neighbors.multiplier`` | integer: 3 to 50 | Configures maximum number of consecutive packets that are not received from BFD peers before session state changes to Down| os10 |
-| ``bfd_all_neighbors.role``| string: active, passive | Configures BFD role | os10 |
-| ``bfd_all_neighbors.state`` |string: absent,present\* | Deletes BFD for all neighbors if set to absent | os10 |
-| ``vrf.state`` | string: absent,present\* | Deletes the VRF instance under router BGP if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
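-The dotted keys in this table describe nesting under the ``os10_bgp`` dictionary; for example, ``bfd_all_neighbors.interval`` is the ``interval`` key inside the ``bfd_all_neighbors`` dictionary, and the ``med.*`` entries are items of the ``best_path.med`` list. A minimal sketch of that nesting (values are illustrative only and taken from the example playbook below):
-
-    os10_bgp:
-      asn: 12
-      best_path:
-        as_path: ignore
-        med:
-          - attribute: confed
-            state: present
-      bfd_all_neighbors:
-        interval: 200
-        min_rx: 200
-        multiplier: 3
-        role: active
-        state: present
-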
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_bgp* role to configure the BGP network and neighbors. It creates a *hosts* file with the switch details, and a *host_vars* file with the connection variables and the corresponding role variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a *.part* file in the *build_dir* path. By default, this variable is set to false. This example writes a simple playbook that only references the *os10_bgp* role.
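-
-For example, a minimal *host_vars* sketch that enables this generation (the flag can also be set in the inventory or passed as an extra variable):
-
-    os10_cfg_generate: true
-    build_dir: ../temp/temp_os10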
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- as_notation: asdot
- enforce_first_as: false
- non_deterministic_med: true
- outbound_optimization: true
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- client_to_client: false
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- distance_bgp:
- value: 3 4 6
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- address_family:
- - type: ipv4
- activate: false
- state: present
- max_prefix:
- count: 20
- threshold: 90
- warning: true
- state: present
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: present
- - subnet: 20::/64
- limit: 4
- subnet_state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2-spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- adv_start: 100
- adv_start_state: present
- conn_retry_timer: 20
- remove_pri_as: present
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: true
- state: present
- max_prefix:
- count: 10
- threshold: 40
- warning: true
- state: present
- default_originate:
- route_map: aa
- state: present
- distribute_list:
- in: XX
- in_state: present
- out: YY
- out_state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: absent
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- sender_loop_detect: false
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- vrfs:
- - name: "GREEN"
- router_id: 50.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: false
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- route_reflector:
- client_to_client: false
- cluster_id: 1
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-worst
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan10
- description: U_site2 vlan
- send_community:
- - type: extended
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.20.1
- name: peer1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: false
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.15.3
- address_family:
- - type: ipv4
- activate: false
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: present
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- activate: false
- sender_loop_detect: false
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: present
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: present
- - route_type: imported_bgp
- imported_bgp_vrf_name: test6
- route_map_name: dd
- address_type: ipv4
- state: present
- - route_type: ospf
- ospf_id: 12
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- state: present
- state: present
-
-
-**Simple playbook to configure BGP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_bgp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
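-
-To preview changes without applying them, the standard `ansible-playbook` dry-run options can be added (a sketch; check-mode support depends on the modules used by the role):
-
-    ansible-playbook -i hosts leaf.yaml --check --diff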
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml
deleted file mode 100644
index 0b8cbfc82..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_bgp
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml
deleted file mode 100644
index b0141ca3f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_bgp
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml
deleted file mode 100644
index 047c70dc9..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_bgp role facilitates the configuration of BGP attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml
deleted file mode 100644
index 05c44354b..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating BGP configuration for os10"
- template:
- src: os10_bgp.j2
- dest: "{{ build_dir }}/bgp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning BGP configuration for os10"
- os10_config:
- src: os10_bgp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2 b/ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2
deleted file mode 100644
index d4859eba3..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/templates/os10_bgp.j2
+++ /dev/null
@@ -1,1244 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{###########################################
-Purpose:
-Configure BGP commands for os10 Devices
-os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- client_to_client: false
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2-spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: true
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- max_prefix:
- - count: 10
- threshold: 80
- warning: true
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: absent
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- sender_loop_detect: false
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- vrfs :
- - name: "GREEN"
- router_id: 1.1.1.1
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- state: present
- neighbor:
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2-spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: true
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: absent
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- sender_loop_detect: false
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- state: present
- state: present
-################################}
-{% macro render_default_metric_configs(af_vars) %}
- {% if af_vars.default_metric is defined %}
- {% if af_vars.default_metric %}
- default-metric {{ af_vars.default_metric }}
- {% else %}
- no default-metric
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_outbound_optimization_configs(out_opt_vars) %}
- {% if out_opt_vars %}
- outbound-optimization
- {% else %}
-    no outbound-optimization
- {% endif %}
-{% endmacro %}
-
-{% macro render_non_deterministic_med_configs(non_deter_med_vars) %}
- {% if non_deter_med_vars %}
- non-deterministic-med
- {% else %}
- no non-deterministic-med
- {% endif %}
-{% endmacro %}
-
-{% macro render_enforce_first_as_configs(enforce_first_as_vars) %}
- {% if enforce_first_as_vars %}
- enforce-first-as
- {% else %}
- no enforce-first-as
- {% endif %}
-{% endmacro %}
-
-{% macro render_as_notation_configs(as_vars) %}
- {% if as_vars %}
- as-notation {{ as_vars }}
- {% endif %}
-{% endmacro %}
-
-{% macro render_neigh_configs(neigh_vars,indent_space) %}
- {% if neigh_vars is defined and neigh_vars %}
- {% for neighbor in neigh_vars %}
- {% if neighbor.type is defined %}
- {% if neighbor.type == "ipv4" or neighbor.type =="ipv6" %}
- {% if neighbor.ip is defined and neighbor.ip %}
- {% set tag_or_ip = neighbor.ip %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
- {{ indent_space }}no neighbor {{ tag_or_ip }}
- {% else %}
- {{ indent_space }}neighbor {{ tag_or_ip }}
- {% if neighbor.description is defined %}
- {% if neighbor.description %}
- {{ indent_space }}description "{{ neighbor.description }}"
- {% else %}
- {{ indent_space }}no description
- {% endif %}
- {% endif %}
- {% if neighbor.peergroup is defined and neighbor.peergroup %}
- {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %}
- {{ indent_space }}no inherit template {{ neighbor.peergroup }}
- {% else %}
- {{ indent_space }}inherit template {{ neighbor.peergroup }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if neighbor.interface is defined and neighbor.interface %}
- {% set tag_or_ip = neighbor.interface %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
- {{ indent_space }}no neighbor interface {{ neighbor.interface }}
- {% else %}
- {{ indent_space }}neighbor interface {{ neighbor.interface }}
- {% if neighbor.description is defined %}
- {% if neighbor.description %}
- {{ indent_space }}description "{{ neighbor.description }}"
- {% else %}
- {{ indent_space }}no description
- {% endif %}
- {% endif %}
- {% if neighbor.admin is defined %}
- {% if neighbor.admin == "up" %}
- {{ indent_space }}no shutdown
- {% else %}
- {{ indent_space }}shutdown
- {% endif %}
- {% endif %}
- {% if neighbor.peergroup is defined and neighbor.peergroup %}
- {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %}
- {{ indent_space }}no inherit template {{ neighbor.peergroup }}
- {% elif neighbor.peergroup_type is defined %}
- {{ indent_space }}inherit template {{ neighbor.peergroup }} inherit-type {{ neighbor.peergroup_type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if neighbor.auto_peer is defined and neighbor.auto_peer %}
- {% set tag_or_ip = neighbor.auto_peer %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
- {{ indent_space }}no neighbor {{ neighbor.auto_peer }}
- {% else %}
- {{ indent_space }}neighbor {{ neighbor.auto_peer }}
- {% if neighbor.description is defined %}
- {% if neighbor.description %}
- {{ indent_space }}description "{{ neighbor.description }}"
- {% else %}
- {{ indent_space }}no description
- {% endif %}
- {% endif %}
- {% if neighbor.admin is defined %}
- {% if neighbor.admin == "up" %}
- {{ indent_space }}no shutdown
- {% else %}
- {{ indent_space }}shutdown
- {% endif %}
- {% endif %}
- {% if neighbor.ebgp_peergroup is defined and neighbor.ebgp_peergroup %}
- {% if neighbor.ebgp_peergroup_state is defined and neighbor.ebgp_peergroup_state == "absent" %}
- {{ indent_space }}no inherit ebgp-template {{ neighbor.ebgp_peergroup }}
- {% else %}
- {{ indent_space }}inherit ebgp-template {{ neighbor.ebgp_peergroup }}
- {% endif %}
- {% endif %}
- {% if neighbor.ibgp_peergroup is defined and neighbor.ibgp_peergroup %}
- {% if neighbor.ibgp_peergroup_state is defined and neighbor.ibgp_peergroup_state == "absent" %}
- {{ indent_space }}no inherit ibgp-template {{ neighbor.ibgp_peergroup }}
- {% else %}
- {{ indent_space }}inherit ibgp-template {{ neighbor.ibgp_peergroup }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif neighbor.type == "peergroup" %}
- {% if neighbor.name is defined and neighbor.name %}
- {% set tag_or_ip = neighbor.name %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
- {{ indent_space }}no template {{ tag_or_ip }}
- {% else %}
- {{ indent_space }}template {{ tag_or_ip }}
- {% if neighbor.description is defined %}
- {% if neighbor.description %}
- {{ indent_space }}description "{{ neighbor.description }}"
- {% else %}
- {{ indent_space }}no description
- {% endif %}
- {% endif %}
- {% if neighbor.listen is defined and neighbor.listen %}
- {% for item in neighbor.listen %}
- {% if item.subnet is defined and item.subnet %}
- {% if item.subnet_state is defined and item.subnet_state =="absent" %}
- {{ indent_space }}no listen {{ item.subnet }}
- {% else %}
- {% set listen_str = item.subnet %}
- {% if item.limit is defined and item.limit %}
- {% set listen_str = listen_str ~ " limit " ~ item.limit %}
- {% endif %}
- {{ indent_space }}listen {{ listen_str }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
- {% endif %}
- {% endif %}
- {% if tag_or_ip is defined and tag_or_ip %}
- {% if (neighbor.state is defined and not neighbor.state == "absent" ) or neighbor.state is not defined %}
- {% if neighbor.remote_asn is defined and neighbor.remote_asn %}
- {% if neighbor.remote_asn_state is defined and neighbor.remote_asn_state == "absent" %}
- {{ indent_space }}no remote-as {{ neighbor.remote_asn }}
- {% else %}
- {{ indent_space }}remote-as {{ neighbor.remote_asn }}
- {% endif %}
- {% endif %}
- {% if neighbor.local_as is defined %}
- {% if neighbor.local_as %}
- {{ indent_space }}local-as {{ neighbor.local_as }}
- {% else %}
- {{ indent_space }}no local-as
- {% endif %}
- {% endif %}
- {% if neighbor.weight is defined %}
- {% if neighbor.weight %}
- {{ indent_space }}weight {{ neighbor.weight }}
- {% else %}
- {{ indent_space }}no weight
- {% endif %}
- {% endif %}
- {% if neighbor.src_loopback is defined %}
- {% if neighbor.src_loopback|int(-1) != -1 %}
- {{ indent_space }}update-source loopback{{ neighbor.src_loopback }}
- {% else %}
- {{ indent_space }}no update-source loopback
- {% endif %}
- {% endif %}
-
- {% if neighbor.ebgp_multihop is defined %}
- {% if neighbor.ebgp_multihop %}
- {{ indent_space }}ebgp-multihop {{ neighbor.ebgp_multihop }}
- {% else %}
- {{ indent_space }}no ebgp-multihop
- {% endif %}
- {% endif %}
- {% if neighbor.route_reflector_client is defined %}
- {% if neighbor.route_reflector_client %}
- {{ indent_space }}route-reflector-client
- {% else %}
- {{ indent_space }}no route-reflector-client
- {% endif %}
- {% endif %}
- {% if neighbor.password is defined %}
- {% if neighbor.password %}
- {{ indent_space }}password {{ neighbor.password }}
- {% else %}
- {{ indent_space }}no password a
- {% endif %}
- {% endif %}
- {% if neighbor.send_community is defined and neighbor.send_community %}
- {% for comm in neighbor.send_community %}
- {% if comm.type is defined and comm.type %}
- {% if comm.state is defined and comm.state == "absent" %}
- {{ indent_space }}no send-community {{ comm.type }}
- {% else %}
- {{ indent_space }}send-community {{ comm.type }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if neighbor.address_family is defined and neighbor.address_family %}
- {% for af in neighbor.address_family %}
- {% if af.type is defined and af.type %}
- {% if af.state is defined and af.state == "absent" %}
- {% if af.type == "l2vpn" %}
- {{ indent_space }}no address-family {{ af.type }} evpn
- {% else %}
- {{ indent_space }}no address-family {{ af.type }} unicast
- {% endif %}
- {% else %}
- {% if af.type == "l2vpn" %}
- {{ indent_space }}address-family {{ af.type }} evpn
- {% else %}
- {{ indent_space }}address-family {{ af.type }} unicast
- {% endif %}
- {% if af.activate is defined %}
- {% if af.activate %}
- {{ indent_space }}activate
- {% else %}
- {{ indent_space }}no activate
- {% endif %}
- {% endif %}
- {% if af.sender_loop_detect is defined %}
- {% if af.sender_loop_detect %}
- {{ indent_space }}sender-side-loop-detection
- {% else %}
- {{ indent_space }}no sender-side-loop-detection
- {% endif %}
- {% endif %}
- {% if af.allow_as_in is defined %}
- {% if af.allow_as_in %}
- {{ indent_space }}allowas-in {{ af.allow_as_in }}
- {% else %}
- {{ indent_space }}no allowas-in
- {% endif %}
- {% endif %}
- {% if af.route_map is defined and af.route_map %}
- {% for item in af.route_map %}
- {% if item.name is defined and item.name %}
- {% if item.filter is defined and item.filter %}
- {% if item.state is defined and item.state == "absent" %}
- {{ indent_space }}no route-map {{ item.name }} {{ item.filter }}
- {% else %}
- {{ indent_space }}route-map {{ item.name }} {{ item.filter }}
- {% endif%}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if af.next_hop_self is defined %}
- {% if af.next_hop_self %}
- {{ indent_space }}next-hop-self
- {% else %}
- {{ indent_space }}no next-hop-self
- {% endif %}
- {% endif %}
- {% if af.soft_reconf is defined %}
- {% if af.soft_reconf %}
- {{ indent_space }}soft-reconfiguration inbound
- {% else %}
- {{ indent_space }}no soft-reconfiguration inbound
- {% endif %}
- {% endif %}
- {% if af.add_path is defined %}
- {% if af.add_path %}
- {{ indent_space }}add-path {{ af.add_path }}
- {% else %}
- {{ indent_space }}no add-path
- {% endif %}
- {% endif %}
- {% if af.max_prefix is defined %}
- {% if af.max_prefix.count is defined and af.max_prefix.count %}
- {% if af.max_prefix.state is defined and af.max_prefix.state == "absent" %}
- {{ indent_space }}no maximum-prefix {{ af.max_prefix.count }}
- {% else %}
- {% set max_pfrx_str = af.max_prefix.count %}
- {% if af.max_prefix.threshold is defined and af.max_prefix.threshold %}
- {% set max_pfrx_str = max_pfrx_str ~ " " ~ af.max_prefix.threshold %}
- {% endif %}
- {% if af.max_prefix.warning is defined and af.max_prefix.warning %}
- {% set max_pfrx_str = max_pfrx_str ~ " warning-only" %}
- {% endif %}
- {{ indent_space }}maximum-prefix {{ max_pfrx_str }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if af.default_originate is defined %}
- {% if af.default_originate.state is defined and af.default_originate.state == "absent" %}
- {{ indent_space }}no default-originate
- {% else %}
- {% set def_origin_str = " " %}
- {% if af.default_originate.route_map is defined and af.default_originate.route_map %}
- {% set def_origin_str = "route-map " ~ af.default_originate.route_map %}
- {% endif %}
- {{ indent_space }}default-originate {{ def_origin_str }}
- {% endif %}
- {% endif %}
- {% if af.distribute_list is defined and af.distribute_list %}
- {% if af.distribute_list.in is defined and af.distribute_list.in %}
- {% if af.distribute_list.in_state is defined and af.distribute_list.in_state == "absent" %}
- {{ indent_space }}no distribute-list {{ af.distribute_list.in }} in
- {% else %}
- {{ indent_space }}distribute-list {{ af.distribute_list.in }} in
- {% endif %}
- {% endif %}
- {% if af.distribute_list.out is defined and af.distribute_list.out %}
- {% if af.distribute_list.out_state is defined and af.distribute_list.out_state == "absent" %}
- {{ indent_space }}no distribute-list {{ af.distribute_list.out }} out
- {% else %}
- {{ indent_space }}distribute-list {{ af.distribute_list.out }} out
- {% endif %}
- {% endif %}
- {% endif %}
-
-
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if neighbor.adv_interval is defined %}
- {% if neighbor.adv_interval %}
- {{ indent_space }}advertisement-interval {{ neighbor.adv_interval }}
- {% else %}
- {{ indent_space }}no advertisement-interval
- {% endif %}
- {% endif %}
- {% if neighbor.adv_start is defined and neighbor.adv_start >= 0 %}
- {% if neighbor.adv_start_state is defined and neighbor.adv_start_state == "absent" %}
- {{ indent_space }}no advertisement-start
- {% else %}
- {{ indent_space }}advertisement-start {{ neighbor.adv_start }}
- {% endif %}
- {% endif %}
- {% if neighbor.conn_retry_timer is defined %}
- {% if neighbor.conn_retry_timer %}
- {{ indent_space }}connection-retry-timer {{ neighbor.conn_retry_timer }}
- {% else %}
- {{ indent_space }}no connection-retry-timer
- {% endif %}
- {% endif %}
- {% if neighbor.remove_pri_as is defined and neighbor.remove_pri_as == "present" %}
- {{ indent_space }}remove-private-as
- {% elif neighbor.remove_pri_as is defined and neighbor.remove_pri_as == "absent" %}
- {{ indent_space }}no remove-private-as
- {% endif %}
- {% if neighbor.fall_over is defined and neighbor.fall_over == "present" %}
- {{ indent_space }}fall-over
- {% elif neighbor.fall_over is defined and neighbor.fall_over == "absent" %}
- {{ indent_space }}no fall-over
- {% endif %}
- {% if neighbor.bfd is defined and neighbor.bfd %}
- {{ indent_space }}bfd
- {% elif neighbor.bfd is defined and not neighbor.bfd %}
- {{ indent_space }}no bfd
- {% endif %}
- {% if neighbor.timer is defined %}
- {% if neighbor.timer %}
- {{ indent_space }}timers {{ neighbor.timer }}
- {% else %}
- {{ indent_space }}no timers
- {% endif %}
- {% endif %}
- {% if neighbor.admin is defined %}
- {% if neighbor.admin == "up" %}
- {{ indent_space }}no shutdown
- {% else %}
- {{ indent_space }}shutdown
- {% endif %}
- {% endif %}
- {% if neighbor.distribute_list is defined and neighbor.distribute_list %}
- {{ indent_space }}address-family ipv4 unicast
- {% if neighbor.distribute_list.in is defined and neighbor.distribute_list.in %}
- {% if neighbor.distribute_list.in_state is defined and neighbor.distribute_list.in_state == "absent" %}
- {{ indent_space }}no distribute-list {{ neighbor.distribute_list.in }} in
- {% else %}
- {{ indent_space }}distribute-list {{ neighbor.distribute_list.in }} in
- {% endif %}
- {% endif %}
- {% if neighbor.distribute_list.out is defined and neighbor.distribute_list.out %}
- {% if neighbor.distribute_list.out_state is defined and neighbor.distribute_list.out_state == "absent" %}
- {{ indent_space }}no distribute-list {{ neighbor.distribute_list.out }} out
- {% else %}
- {{ indent_space }}distribute-list {{ neighbor.distribute_list.out }} out
- {% endif %}
- {% endif %}
- {% endif %}
- {% if neighbor.sender_loop_detect is defined %}
- {{ indent_space }}address-family ipv4 unicast
- {% if neighbor.sender_loop_detect %}
- {{ indent_space }}sender-side-loop-detection
- {% else %}
- {{ indent_space }}no sender-side-loop-detection
- {% endif %}
- {% endif %}
-
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_ibgp_redist_internal_configs(af_vars) %}
-  {% if af_vars.ibgp_redist_internal is defined and af_vars.ibgp_redist_internal %}
- {% if af_vars.ibgp_redist_internal.state is defined and af_vars.ibgp_redist_internal.state == "absent" %}
- no bgp redistribute-internal
- {% else %}
- bgp redistribute-internal
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_distance_bgp_configs(af_vars) %}
- {% if af_vars.distance_bgp is defined and af_vars.distance_bgp %}
- {% if af_vars.distance_bgp.state is defined and af_vars.distance_bgp.state == "absent" %}
- no distance bgp
- {% else %}
- {% if af_vars.distance_bgp.value is defined and af_vars.distance_bgp.value %}
- distance bgp {{ af_vars.distance_bgp.value }}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-
-{% macro render_dampening_configs(af_vars) %}
- {% if af_vars.dampening is defined and af_vars.dampening %}
- {% if af_vars.dampening.state is defined and af_vars.dampening.state == "absent" %}
- no dampening
- {% else %}
- {% if af_vars.dampening.value is defined and af_vars.dampening.value %}
- {% if af_vars.dampening.route_map is defined and af_vars.dampening.route_map %}
- dampening {{ af_vars.dampening.value }} route-map {{ af_vars.dampening.route_map }}
- {% else %}
- dampening {{ af_vars.dampening.value }}
- {% endif %}
- {% else %}
- {% if af_vars.dampening.route_map is defined and af_vars.dampening.route_map %}
- dampening 15 750 2000 60 route-map {{ af_vars.dampening.route_map }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_af_configs(af_vars) %}
- {% if af_vars is defined and af_vars %}
- {% if af_vars.aggregate_address is defined and af_vars.aggregate_address %}
- {% for addr in af_vars.aggregate_address %}
- {% if addr.ip_and_mask is defined and addr.ip_and_mask %}
- {% if addr.state is defined and addr.state == "absent" %}
- no aggregate-address {{ addr.ip_and_mask }}
- {% else %}
- {% set aggr_str = addr.ip_and_mask %}
- {% if addr.adv_map is defined and addr.adv_map %}
- {% set aggr_str = aggr_str ~ " advertise-map " ~ addr.adv_map %}
- {% endif %}
- {% if addr.as_set is defined and addr.as_set %}
- {% set aggr_str = aggr_str ~ " as-set " %}
- {% endif %}
- {% if addr.attr_map is defined and addr.attr_map %}
- {% set aggr_str = aggr_str ~ " attribute-map " ~ addr.attr_map %}
- {% endif %}
- {% if addr.summary is defined and addr.summary %}
- {% set aggr_str = aggr_str ~ " summary-only" %}
- {% endif %}
- {% if addr.suppress_map is defined and addr.suppress_map %}
- {% set aggr_str = aggr_str ~ " suppress-map " ~ addr.suppress_map %}
- {% endif %}
- aggregate-address {{ aggr_str }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_rtid_configs(routerid_vars) %}
- {% if routerid_vars %}
- router-id {{ routerid_vars }}
- {% else %}
- no router-id
- {% endif %}
-{% endmacro %}
-
-{% macro render_bfd_all_neigh_configs(bfd_all_neigh_vars) %}
- {% if bfd_all_neigh_vars.state is defined and bfd_all_neigh_vars.state == "absent"%}
- no bfd all-neighbors
- {% else %}
- {% set bfd_vars = bfd_all_neigh_vars %}
- {% if bfd_vars.interval is defined and bfd_vars.min_rx is defined and bfd_vars.multiplier is defined %}
- bfd all-neighbors interval {{ bfd_vars.interval }} min_rx {{ bfd_vars.min_rx }} multiplier {{ bfd_vars.multiplier }} role {{ bfd_vars.role }}
- {% else %}
- bfd all-neighbors
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_log_neigh_change_configs(log_neigh_change_vars) %}
- {% if log_neigh_change_vars %}
- log-neighbor-changes
- {% else %}
- no log-neighbor-changes
- {% endif %}
-{% endmacro %}
-
-{% macro render_maxpath_ebgp_configs(maxpath_ebgp_vars) %}
- {% if maxpath_ebgp_vars %}
- maximum-paths ebgp {{ maxpath_ebgp_vars }}
- {% else %}
- no maximum-paths ebgp
- {% endif %}
-{% endmacro %}
-
-{% macro render_maxpath_ibgp_configs(maxpath_ibgp_vars) %}
- {% if maxpath_ibgp_vars %}
- maximum-paths ibgp {{ maxpath_ibgp_vars }}
- {% else %}
- no maximum-paths ibgp
- {% endif %}
-{% endmacro %}
-
-{% macro render_graceful_restart_configs(graceful_restart_vars) %}
- {% if graceful_restart_vars %}
- graceful-restart role receiver-only
- {% else %}
- no graceful-restart role receiver-only
- {% endif %}
-{% endmacro %}
-
-{% macro render_always_compare_med_configs(always_compare_med_vars) %}
- {% if always_compare_med_vars %}
- always-compare-med
- {% else %}
- no always-compare-med
- {% endif %}
-{% endmacro %}
-
-{% macro render_default_loc_pref_configs(default_loc_pref_vars) %}
- {% if default_loc_pref_vars %}
- default local-preference {{ default_loc_pref_vars }}
- {% else %}
- no default local-preference
- {% endif %}
-{% endmacro %}
-
-{% macro render_fast_ext_fallover_configs(fast_ext_fallover_vars) %}
- {% if fast_ext_fallover_vars %}
- fast-external-fallover
- {% else %}
- no fast-external-fallover
- {% endif %}
-{% endmacro %}
-
-{% macro render_confederation_configs(confederation_vars) %}
- {% if confederation_vars.identifier is defined %}
- {% if confederation_vars.identifier %}
- confederation identifier {{ confederation_vars.identifier }}
- {% else %}
- no confederation identifier 1
- {% endif %}
- {% endif %}
- {% if confederation_vars.peers is defined and confederation_vars.peers %}
- {% if confederation_vars.peers_state is defined and confederation_vars.peers_state == "absent" %}
- no confederation peers {{ confederation_vars.peers }}
- {% else %}
- confederation peers {{ confederation_vars.peers }}
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_route_reflector_client_configs(route_reflector_vars) %}
- {% if route_reflector_vars.client_to_client is defined %}
- {% if route_reflector_vars.client_to_client %}
- client-to-client reflection
- {% else %}
- no client-to-client reflection
- {% endif %}
- {% endif %}
-{% endmacro %}
-{% macro render_route_reflector_cluster_configs(route_reflector_vars) %}
- {% if route_reflector_vars.cluster_id is defined %}
- {% if route_reflector_vars.cluster_id %}
- cluster-id {{ route_reflector_vars.cluster_id }}
- {% else %}
- no cluster-id 1
- {% endif %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_best_path_as_configs(best_path_vars) %}
- {% if best_path_vars.as_path is defined and best_path_vars.as_path %}
- {% if best_path_vars.as_path_state is defined and best_path_vars.as_path_state == "absent" %}
- no bestpath as-path {{ best_path_vars.as_path }}
- {% else %}
- bestpath as-path {{ best_path_vars.as_path }}
- {% endif %}
- {% endif %}
-{% endmacro %}
-{% macro render_best_path_routerid_configs(best_path_vars) %}
- {% if best_path_vars.ignore_router_id is defined %}
- {% if best_path_vars.ignore_router_id %}
- bestpath router-id ignore
- {% else %}
- no bestpath router-id ignore
- {% endif %}
- {% endif %}
-{% endmacro %}
-{% macro render_best_path_med_configs(best_path_vars,indent_space) %}
- {% if best_path_vars.med is defined and best_path_vars.med %}
- {% for med in best_path_vars.med %}
- {% if med.attribute is defined and med.attribute %}
- {% if med.state is defined and med.state == "absent" %}
- {{ indent_space }}no bestpath med {{ med.attribute }}
- {% else %}
- {{ indent_space }}bestpath med {{ med.attribute }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endmacro %}
-
-{% macro render_ipv4_network_configs(ipv4_network_vars) %}
- {% for net in ipv4_network_vars %}
- {% if net.address is defined and net.address %}
- {% if net.state is defined and net.state == "absent"%}
- no network {{ net.address }}
- {% else %}
- network {{ net.address }}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endmacro %}
-
-{% macro render_ipv6_network_configs(ipv6_network_vars) %}
- {% for net in ipv6_network_vars %}
- {% if net.address is defined and net.address %}
- {% if net.state is defined and net.state == "absent"%}
- no network {{ net.address }}
- {% else %}
- network {{ net.address }}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endmacro %}
-
-{% macro render_redistribute_configs(redistribute_vars,indent_space) %}
- {% for route in redistribute_vars %}
- {% if route.route_type is defined and route.route_type %}
- {% if route.address_type is defined and route.address_type %}
- {{ indent_space }}address-family {{ route.address_type }} unicast
- {% if route.state is defined and route.state == "absent" %}
- {% if route.route_type == "imported_bgp" %}
- {% set redist_str = "imported-bgp-routes vrf " ~ route.imported_bgp_vrf_name %}
- {% elif route.route_type == "ospf" and route.ospf_id %}
- {% set redist_str = route.route_type ~ " " ~ route.ospf_id %}
- {% elif route.route_type == "l2vpn" %}
- {% set redist_str = route.route_type ~ " evpn" %}
- {% else %}
- {% set redist_str = route.route_type %}
- {% endif %}
- {{ indent_space }}no redistribute {{ redist_str }}
- {% else %}
- {% if route.route_map_name is defined and route.route_map_name %}
- {% if route.route_type == "imported_bgp" %}
- {% set redist_str = "imported-bgp-routes vrf " ~ route.imported_bgp_vrf_name ~ " route-map " ~ route.route_map_name %}
- {% elif route.route_type == "ospf" and route.ospf_id %}
- {% set redist_str = route.route_type ~ " " ~ route.ospf_id ~ " route-map " ~ route.route_map_name %}
- {% elif route.route_type == "l2vpn" %}
- {% set redist_str = route.route_type ~ " evpn route-map " ~ route.route_map_name %}
- {% else %}
- {% set redist_str = route.route_type ~ " route-map " ~ route.route_map_name %}
- {% endif %}
- {{ indent_space }}redistribute {{ redist_str }}
- {% else %}
- {% if route.route_type == "imported_bgp" %}
- {% set redist_str = "imported-bgp-routes vrf " ~ route.imported_bgp_vrf_name %}
- {% elif route.route_type == "ospf" and route.ospf_id %}
- {% set redist_str = route.route_type ~ " " ~ route.ospf_id %}
- {% elif route.route_type == "l2vpn" %}
- {% set redist_str = route.route_type ~ " evpn" %}
- {% else %}
- {% set redist_str = route.route_type %}
- {% endif %}
- {{ indent_space }}redistribute {{ redist_str }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endmacro %}
-
-{% if os10_bgp is defined and os10_bgp %}
-{% set bgp_vars = os10_bgp %}
-{% if bgp_vars.state is defined and bgp_vars.state == "absent" %}
-no router bgp
-{% else %}
- {# Add Feature to the switch #}
- {% if bgp_vars.asn is defined and bgp_vars.asn %}
-router bgp {{ bgp_vars.asn }}
- {% set indent_space = "" %}
- {% if bgp_vars.router_id is defined %}
- {% set routerid_vars = bgp_vars.router_id %}
-{{ render_rtid_configs(routerid_vars) }}
- {% endif %}
-
- {% if bgp_vars.as_notation is defined %}
- {% set as_vars = bgp_vars.as_notation %}
-{{ render_as_notation_configs(as_vars) }}
- {% endif %}
-
- {% if bgp_vars.enforce_first_as is defined %}
- {% set enforce_first_as_vars = bgp_vars.enforce_first_as %}
-{{ render_enforce_first_as_configs(enforce_first_as_vars) }}
- {% endif %}
-
- {% if bgp_vars.non_deterministic_med is defined %}
- {% set non_deter_med_vars = bgp_vars.non_deterministic_med %}
-{{ render_non_deterministic_med_configs(non_deter_med_vars) }}
- {% endif %}
-
- {% if bgp_vars.outbound_optimization is defined %}
- {% set out_opt_vars = bgp_vars.outbound_optimization %}
-{{ render_outbound_optimization_configs(out_opt_vars) }}
- {% endif %}
-
-  {% if bgp_vars.bfd_all_neighbors is defined and bgp_vars.bfd_all_neighbors %}
- {% set bfd_all_neigh_vars = bgp_vars.bfd_all_neighbors %}
-{{ render_bfd_all_neigh_configs(bfd_all_neigh_vars) }}
- {% endif %}
-
- {% if bgp_vars.log_neighbor_changes is defined %}
- {% set log_neigh_change_vars = bgp_vars.log_neighbor_changes %}
-{{ render_log_neigh_change_configs(log_neigh_change_vars) }}
- {% endif %}
-
- {% if bgp_vars.maxpath_ebgp is defined %}
- {% set maxpath_ebgp_vars = bgp_vars.maxpath_ebgp %}
-{{ render_maxpath_ebgp_configs(maxpath_ebgp_vars) }}
- {% endif %}
-
- {% if bgp_vars.maxpath_ibgp is defined %}
- {% set maxpath_ibgp_vars = bgp_vars.maxpath_ibgp %}
-{{ render_maxpath_ibgp_configs(maxpath_ibgp_vars) }}
- {% endif %}
-
- {% if bgp_vars.graceful_restart is defined %}
- {% set graceful_restart_vars = bgp_vars.graceful_restart %}
-{{ render_graceful_restart_configs(graceful_restart_vars) }}
- {% endif %}
-
- {% if bgp_vars.always_compare_med is defined %}
- {% set always_compare_med_vars = bgp_vars.always_compare_med %}
-{{ render_always_compare_med_configs(always_compare_med_vars) }}
- {% endif %}
-
- {% if bgp_vars.default_loc_pref is defined %}
- {% set default_loc_pref_vars = bgp_vars.default_loc_pref %}
-{{ render_default_loc_pref_configs(default_loc_pref_vars) }}
- {% endif %}
-
- {% if bgp_vars.fast_ext_fallover is defined %}
- {% set fast_ext_fallover_vars = bgp_vars.fast_ext_fallover %}
-{{ render_fast_ext_fallover_configs(fast_ext_fallover_vars) }}
- {% endif %}
-
- {% if bgp_vars.confederation is defined and bgp_vars.confederation %}
- {% set confederation_vars = bgp_vars.confederation %}
-{{ render_confederation_configs(confederation_vars) }}
- {% endif %}
-
- {% if bgp_vars.route_reflector is defined and bgp_vars.route_reflector %}
- {% set route_reflector_vars = bgp_vars.route_reflector %}
-{{ render_route_reflector_client_configs(route_reflector_vars) }}
-{{ render_route_reflector_cluster_configs(route_reflector_vars) }}
- {% endif %}
-
- {% if bgp_vars.best_path is defined and bgp_vars.best_path %}
- {% set best_path_vars = bgp_vars.best_path %}
-{{ render_best_path_as_configs(best_path_vars) }}
-{{ render_best_path_routerid_configs(best_path_vars) }}
-{{ render_best_path_med_configs(best_path_vars,indent_space) }}
- {% endif %}
-
- {% if bgp_vars.address_family_ipv4 is defined and bgp_vars.address_family_ipv4 %}
- {% set af_vars = bgp_vars.address_family_ipv4 %}
- address-family ipv4 unicast
-{{ render_af_configs(af_vars) }}
-{{ render_ibgp_redist_internal_configs(af_vars) }}
-{{ render_dampening_configs(af_vars) }}
-{{ render_default_metric_configs(af_vars) }}
-{{ render_distance_bgp_configs(af_vars) }}
- {% endif %}
-
- {% if bgp_vars.address_family_ipv6 is defined and bgp_vars.address_family_ipv6 %}
- {% set af_vars = bgp_vars.address_family_ipv6 %}
- address-family ipv6 unicast
-{{ render_af_configs(af_vars) }}
-{{ render_ibgp_redist_internal_configs(af_vars) }}
-{{ render_dampening_configs(af_vars) }}
-{{ render_default_metric_configs(af_vars) }}
-{{ render_distance_bgp_configs(af_vars) }}
- {% endif %}
-
- {% if bgp_vars.ipv4_network is defined and bgp_vars.ipv4_network %}
- {% set ipv4_network_vars = bgp_vars.ipv4_network %}
- address-family ipv4 unicast
-{{ render_ipv4_network_configs(ipv4_network_vars) }}
- {% endif %}
-
- {% if bgp_vars.ipv6_network is defined and bgp_vars.ipv6_network %}
- {% set ipv6_network_vars = bgp_vars.ipv6_network %}
- address-family ipv6 unicast
-{{ render_ipv6_network_configs(ipv6_network_vars) }}
- {% endif %}
-
- {% if bgp_vars.redistribute is defined and bgp_vars.redistribute %}
- {% set redistribute_vars = bgp_vars.redistribute %}
-{{ render_redistribute_configs(redistribute_vars,indent_space) }}
- {% endif %}
-
- {% if bgp_vars.neighbor is defined and bgp_vars.neighbor %}
- {% set neigh_vars = bgp_vars.neighbor %}
-{{ render_neigh_configs(neigh_vars,indent_space) }}
- {% endif %}
-
- {% if bgp_vars.vrfs is defined %}
- {% set indent_space = " " %}
- {% for vrf in bgp_vars.vrfs %}
- {% if vrf.state is defined and vrf.state == "absent" %}
- no vrf {{ vrf.name }}
- {% else %}
- vrf {{ vrf.name }}
-
- {% if vrf.router_id is defined %}
- {% set routerid_vars = vrf.router_id %}
- {{ render_rtid_configs(routerid_vars) }}
- {% endif %}
-
- {% if vrf.as_notation is defined %}
- {% set as_vars = vrf.as_notation %}
- {{ render_as_notation_configs(as_vars) }}
- {% endif %}
-
- {% if vrf.enforce_first_as is defined %}
- {% set enforce_first_as_vars = vrf.enforce_first_as %}
- {{ render_enforce_first_as_configs(enforce_first_as_vars) }}
- {% endif %}
-
- {% if vrf.non_deterministic_med is defined %}
- {% set non_deter_med_vars = vrf.non_deterministic_med %}
- {{ render_non_deterministic_med_configs(non_deter_med_vars) }}
- {% endif %}
-
- {% if vrf.outbound_optimization is defined %}
- {% set out_opt_vars = vrf.outbound_optimization %}
- {{ render_outbound_optimization_configs(out_opt_vars) }}
- {% endif %}
-
-  {% if vrf.bfd_all_neighbors is defined and vrf.bfd_all_neighbors %}
- {% set bfd_all_neigh_vars = vrf.bfd_all_neighbors %}
- {{ render_bfd_all_neigh_configs(bfd_all_neigh_vars) }}
- {% endif %}
-
- {% if vrf.log_neighbor_changes is defined %}
- {% set log_neigh_change_vars = vrf.log_neighbor_changes %}
- {{ render_log_neigh_change_configs(log_neigh_change_vars) }}
- {% endif %}
-
- {% if vrf.maxpath_ebgp is defined %}
- {% set maxpath_ebgp_vars = vrf.maxpath_ebgp %}
- {{ render_maxpath_ebgp_configs(maxpath_ebgp_vars) }}
- {% endif %}
-
- {% if vrf.maxpath_ibgp is defined %}
- {% set maxpath_ibgp_vars = vrf.maxpath_ibgp %}
- {{ render_maxpath_ibgp_configs(maxpath_ibgp_vars) }}
- {% endif %}
-
- {% if vrf.graceful_restart is defined %}
- {% set graceful_restart_vars = vrf.graceful_restart %}
- {{ render_graceful_restart_configs(graceful_restart_vars) }}
- {% endif %}
-
- {% if vrf.always_compare_med is defined %}
- {% set always_compare_med_vars = vrf.always_compare_med %}
- {{ render_always_compare_med_configs(always_compare_med_vars) }}
- {% endif %}
-
- {% if vrf.default_loc_pref is defined %}
- {% set default_loc_pref_vars = vrf.default_loc_pref %}
- {{ render_default_loc_pref_configs(default_loc_pref_vars) }}
- {% endif %}
-
- {% if vrf.fast_ext_fallover is defined %}
- {% set fast_ext_fallover_vars = vrf.fast_ext_fallover %}
- {{ render_fast_ext_fallover_configs(fast_ext_fallover_vars) }}
- {% endif %}
-
- {% if vrf.route_reflector is defined and vrf.route_reflector %}
- {% set route_reflector_vars = vrf.route_reflector %}
- {{ render_route_reflector_client_configs(route_reflector_vars) }}
- {{ render_route_reflector_cluster_configs(route_reflector_vars) }}
- {% endif %}
-
- {% if vrf.best_path is defined and vrf.best_path %}
- {% set best_path_vars = vrf.best_path %}
- {{ render_best_path_as_configs(best_path_vars) }}
- {{ render_best_path_routerid_configs(best_path_vars) }}
-{{ render_best_path_med_configs(best_path_vars,indent_space) }}
- {% endif %}
-
- {% if vrf.address_family_ipv4 is defined and vrf.address_family_ipv4 %}
- {% set af_vars = vrf.address_family_ipv4 %}
- address-family ipv4 unicast
- {{ render_af_configs(af_vars) }}
- {{ render_dampening_configs(af_vars) }}
- {{ render_ibgp_redist_internal_configs(af_vars) }}
- {{ render_default_metric_configs(af_vars) }}
- {{ render_distance_bgp_configs(af_vars) }}
- {% endif %}
-
- {% if vrf.address_family_ipv6 is defined and vrf.address_family_ipv6 %}
- {% set af_vars = vrf.address_family_ipv6 %}
- address-family ipv6 unicast
- {{ render_af_configs(af_vars) }}
- {{ render_dampening_configs(af_vars) }}
- {{ render_ibgp_redist_internal_configs(af_vars) }}
- {{ render_default_metric_configs(af_vars) }}
- {{ render_distance_bgp_configs(af_vars) }}
- {% endif %}
-
- {% if vrf.ipv4_network is defined and vrf.ipv4_network %}
- {% set ipv4_network_vars = vrf.ipv4_network %}
- address-family ipv4 unicast
- {{ render_ipv4_network_configs(ipv4_network_vars) }}
- {% endif %}
-
- {% if vrf.ipv6_network is defined and vrf.ipv6_network %}
- {% set ipv6_network_vars = vrf.ipv6_network %}
- address-family ipv6 unicast
- {{ render_ipv6_network_configs(ipv6_network_vars) }}
- {% endif %}
-
- {% if vrf.redistribute is defined and vrf.redistribute %}
- {% set redistribute_vars = vrf.redistribute %}
-{{ render_redistribute_configs(redistribute_vars,indent_space) }}
- {% endif %}
-
- {% if vrf.neighbor is defined and vrf.neighbor %}
- {% set neigh_vars = vrf.neighbor %}
-{{ render_neigh_configs(neigh_vars,indent_space) }}
- {% endif %}
-
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml
deleted file mode 100644
index e556186d1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/main.os10.yaml
+++ /dev/null
@@ -1,384 +0,0 @@
----
-# vars file for dellemc.os10.os10_bgp,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- as_notation: asdot
- enforce_first_as: false
- non_deterministic_med: true
- outbound_optimization: true
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- client_to_client: false
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- distance_bgp:
- value: 3 4 6
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- address_family:
- - type: ipv4
- activate: false
- state: present
- max_prefix:
- count: 20
- threshold: 90
- warning: true
- state: present
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: present
- - subnet: 6.6.6.6/32
- limit: 3
- subnet_state: present
- - subnet: 23::/64
- limit:
- subnet_state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2-spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- adv_start: 100
- adv_start_state: present
- conn_retry_timer: 20
- remove_pri_as: present
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: true
- state: present
- max_prefix:
- count: 30
- threshold: 50
- state: present
- default_originate:
- route_map: aa
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: absent
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- sender_loop_detect: false
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- vrfs:
- - name: "test1"
- router_id: 70.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: false
- enforce_first_as: false
- non_deterministic_med: true
- outbound_optimization: true
- fast_ext_fallover: false
- always_compare_med: true
- default_loc_pref: 1000
- route_reflector:
- client_to_client: false
- cluster_id: 2000
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- distance_bgp:
- value: 3 4 6
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-worst
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- description: "template peer1"
- adv_interval: 50
- adv_start: 100
- adv_start_state: present
- ebgp_multihop: 20
- fall_over: present
- conn_retry_timer: 20
- remove_pri_as: present
- bfd: yes
- address_family:
- - type: ipv4
- state: present
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan10
- send_community:
- - type: extended
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.10.1
- name: peer1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- src_loopback: 0
- address_family:
- - type: ipv4
- activate: false
- distribute_list:
- in: dd
- in_state: present
- out: dd
- out_state: present
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: false
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- default_originate:
- route_map: aa
- state: present
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ebgp_peergroup: ebgp_pg
- ebgp_peergroup_state: present
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- activate: false
- sender_loop_detect: false
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- default_originate:
- route_map: dd
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: present
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: present
- - route_type: imported_bgp
- imported_bgp_vrf_name: test2
- route_map_name: dd
- address_type: ipv4
- state: present
- - route_type: ospf
- ospf_id: 12
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- as_notation: asdot
- state: present
- - name: "test2"
- router_id: 80.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- as_notation: asdot
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml
deleted file mode 100644
index fd5211f4d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_bgp
diff --git a/ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml
deleted file mode 100644
index de9999bd2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_bgp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_bgp
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE b/ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/README.md b/ansible_collections/dellemc/os10/roles/os10_copy_config/README.md
deleted file mode 100644
index eadefecb0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Copy-config role
-================
-
-This role pushes the backup running configuration to a Dell EMC PowerSwitch platform running Dell EMC SmartFabric OS10, merging the configuration in the template file with the running configuration of the device.
-
-The copy-config role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- No predefined variables are part of this role
-- Use *host_vars* or *group_vars* to define the variables referenced in the template file
-- Configuration file is host-specific
-- Copy the host-specific configuration to the respective file under the role's template directory in *<host_name>.j2* format, as sketched below
-- Variables and values are case-sensitive
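-
-A possible layout for a single host named *leaf1* (a sketch only; the same files appear in the example later in this README):
-
-    hosts                                    # inventory containing the leaf1 entry
-    host_vars/
-        leaf1                                # variables referenced by the template
-    roles/os10_copy_config/templates/
-        leaf1.j2                             # host-specific configuration for leaf1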
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_copy_config* role to push the configuration file to the device. It creates a *hosts* file with the switch details and corresponding variables, and a simple playbook that only references the *os10_copy_config* role. By including the role, you automatically get access to all of the tasks needed to push the configuration file.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
-
- # Defining this variable here applies it in the Jinja template below for this host
- os10_bgp:
- asn: 64801
-
-**Sample roles/os10_copy_config/templates/leaf1.j2**
-
- ! Leaf1 BGP profile on Dell OS10 switch
- snmp-server community public ro
- hash-algorithm ecmp crc
- !
- interface ethernet1/1/1:1
- no switchport
- ip address 100.1.1.2/24
- ipv6 address 2001:100:1:1::2/64
- mtu 9216
- no shutdown
- !
- interface ethernet1/1/9:1
- no switchport
- ip address 100.2.1.2/24
- ipv6 address 2001:100:2:1::2/64
- mtu 9216
- no shutdown
- !
- router bgp {{ os10_bgp.asn }}
- bestpath as-path multipath-relax
- bestpath med missing-as-worst
- router-id 100.0.2.1
- !
- address-family ipv4 unicast
- !
- address-family ipv6 unicast
- !
- neighbor 100.1.1.1
- remote-as 64901
- no shutdown
- !
- neighbor 100.2.1.1
- remote-as 64901
- no shutdown
- !
- neighbor 2001:100:1:1::1
- remote-as 64901
- no shutdown
- !
- address-family ipv4 unicast
- no activate
- exit
- !
- address-family ipv6 unicast
- activate
- exit
- !
- neighbor 2001:100:2:1::1
- remote-as 64901
- no shutdown
- !
- address-family ipv4 unicast
- no activate
- exit
- !
- address-family ipv6 unicast
- activate
- exit
- !
-
-**Simple playbook to setup to push configuration file into device — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_copy_config
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml
deleted file mode 100644
index de0edc0c2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_copy_config
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml
deleted file mode 100644
index e11a88057..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_copy_config
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml
deleted file mode 100644
index 166589390..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- This role pushes the backup running configuration to the device, and merges
- the configuration in the template file with the running configuration of the device.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml
deleted file mode 100644
index dd62a63ff..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# tasks file for dellemc.os10.os10_copy_config
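-# The task below renders the host-specific template <hostname>.j2 from the role's
-# templates directory and merges it into the device's running configuration; it runs
-# only for hosts whose ansible_network_os is dellemc.os10.os10.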
- - name: "Merge the config file to running configuration for OS10"
- os10_config:
- src: "{{ hostname }}.j2"
-   when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2 b/ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2
deleted file mode 100644
index b02686f5e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/templates/leaf1.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-! Version 10.3.0E
-! Last configuration change at March 09 21:47:35 2020
-!
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory
deleted file mode 100644
index 85a255f94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
----
-localhost
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml
deleted file mode 100644
index 6c7b80394..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os10.os10_copy_config
diff --git a/ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml
deleted file mode 100644
index 9f021ecf6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_copy_config/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_copy_config
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/LICENSE b/ansible_collections/dellemc/os10/roles/os10_dns/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/README.md b/ansible_collections/dellemc/os10/roles/os10_dns/README.md
deleted file mode 100644
index b65d7622a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/README.md
+++ /dev/null
@@ -1,125 +0,0 @@
-DNS role
-========
-
-This role facilitates the configuration of the domain name service (DNS). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The DNS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file (see the snippet after this list)
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
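-
-For example, host or group variables such as the following (a sketch; the values are illustrative) enable generation of the commands as a *.part* file under *build_dir*:
-
-    os10_cfg_generate: true
-    build_dir: ../temp/temp_os10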
-
-**os10_dns keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``name_server`` | list | Configures DNS (see ``name_server.*``) | os10 |
-| ``name_server.ip`` | list | Configures the name server IP | os10 |
-| ``name_server.vrf`` | list | Configures VRF for each IP | os10 |
-| ``name_server.state`` | string: absent,present\* | Deletes the name server IP if set to absent | os10 |
-| ``domain_list`` | list | Configures domain-list (see ``domain_list.*``) | os10 |
-| ``domain_list.name`` | list | Configures the domain-list name | os10 |
-| ``domain_list.vrf`` | list | Configures VRF for each domain-list name | os10 |
-| ``domain_list.state`` | string: absent,present\* | Deletes the domain-list if set to absent | os10 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_dns* role to completely set up the DNS server configuration. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_dns* role. By including the role, you automatically get access to all of the tasks to configure DNS.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_dns:
- domain_lookup: true
- name_server:
- - ip:
- - 3.1.1.1
- - 3.1.1.2
- vrf:
- - test
- - test1
- - ip:
- - 3.1.1.2
- vrf:
- - test1
- state: absent
- - ip:
- - 2.2.2.2
- - ip:
- - 3.3.2.2
- state: absent
- domain_list:
- - name:
- - dname7
- - dname8
- vrf:
- - test
- - test1
- - name:
- - dname7
- vrf:
- - test
- - test1
- state: absent
- - name:
- - dname3
- - dname4
- - name:
- - dname5
- - dname6
- state: absent
-
-> **NOTE**: The VRF must already exist; it can be configured using the *os10_vrf* role.
-
-**Simple playbook to setup DNS — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_dns
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
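-
-To also capture the generated CLI commands as a file, enable `os10_cfg_generate` for the run. A minimal sketch (passing the flag as an extra variable is one option among several):
-
- ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"
-
-The rendered commands are then written to *build_dir* as a `dns10_<hostname>.conf.part` file; the role still provisions the device in the same run.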
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml
deleted file mode 100644
index d826575e4..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_dns
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml
deleted file mode 100644
index a6cd5e697..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_dns
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml
deleted file mode 100644
index 1f0baa16f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_dns role facilitates the configuration of DNS attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml
deleted file mode 100644
index 417ebacf1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for Dellos10
-
- - name: "Generating DNS configuration for os10"
- template:
- src: os10_dns.j2
- dest: "{{ build_dir }}/dns10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning DNS configuration for os10"
- os10_config:
- src: os10_dns.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
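-
-# Note (illustrative, not part of the original file): both tasks render the
-# same os10_dns.j2 template. The first task only runs when os10_cfg_generate
-# evaluates to true and writes the rendered commands to
-# {{ build_dir }}/dns10_{{ hostname }}.conf.part; the second task runs for
-# every matching os10 host and pushes the rendered commands to the device
-# through the collection's os10_config module, so enabling os10_cfg_generate
-# does not skip provisioning.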
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2 b/ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2
deleted file mode 100644
index f381b3d0f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/templates/os10_dns.j2
+++ /dev/null
@@ -1,101 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-Purpose:
-Configure DNS commands for OS10 devices
-os10_dns:
- domain_lookup: true
- name_server:
- - ip:
- - 3.1.1.1
- - 3.1.1.2
- vrf:
- - test
- - test1
- - ip:
- - 3.1.1.2
- vrf:
- - test1
- state: absent
- - ip:
- - 2.2.2.2
- - ip:
- - 3.3.2.2
- state: absent
- domain_list:
- - name:
- - dname7
- - dname8
- vrf:
- - test
- - test1
- - name:
- - dname7
- vrf:
- - test
- - test1
- state: absent
- - name:
- - dname3
- - dname4
- - name:
- - dname5
- - dname6
- state: absent
-#####################################}
-{% if (os10_dns is defined and os10_dns) %}
- {% if (os10_dns.name_server is defined and os10_dns.name_server) %}
- {% for name_server in os10_dns.name_server %}
- {% set absent = "" %}
- {% if name_server.state is defined and name_server.state == "absent" %}
- {% set absent = "no " %}
- {% endif %}
-
- {% set vrf_name_list = name_server.vrf %}
- {% if (vrf_name_list is defined and vrf_name_list ) %}
- {% for vrf_name in vrf_name_list %}
- {% set ip_list = name_server.ip %}
- {% if (ip_list is defined and ip_list ) %}
- {% for ip_val in ip_list %}
- {{ absent }}ip name-server vrf {{ vrf_name }} {{ ip_val }}
- {% endfor %}
- {% elif name_server.state is defined and name_server.state == "absent"%}
- {{ absent }}ip name-server vrf {{ vrf_name }}
- {% endif %}
- {% endfor %}
- {% else %}
- {% set ip_list = name_server.ip %}
- {% if (ip_list is defined and ip_list ) %}
- {{ absent }}ip name-server {{ ip_list|join(' ') }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if (os10_dns.domain_list is defined and os10_dns.domain_list) %}
- {% for domain in os10_dns.domain_list %}
- {% set absent = "" %}
- {% if domain.state is defined and domain.state == "absent" %}
- {% set absent = "no " %}
- {% endif %}
-
- {% set vrf_name_list = domain.vrf %}
- {% if (vrf_name_list is defined and vrf_name_list ) %}
- {% for vrf_name in vrf_name_list %}
- {% set name_list = domain.name %}
- {% if (name_list is defined and name_list ) %}
- {% for name_val in name_list %}
- {{ absent }}ip domain-list vrf {{ vrf_name }} {{ name_val }}
- {% endfor %}
- {% elif domain.state is defined and domain.state == "absent"%}
- {{ absent }}ip domain-list vrf {{ vrf_name }}
- {% endif %}
- {% endfor %}
- {% else %}
- {% set name_list = domain.name %}
- {% if (name_list is defined and name_list ) %}
- {{ absent }}ip domain-list {{ name_list|join(' ') }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
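-{####################################
-Illustrative sketch (not part of the original template): the commands this
-template is expected to render for the sample variables in the Purpose block
-above. When both vrf and ip/name lists are supplied, one command is emitted
-per VRF/value combination; entries with state: absent are prefixed with "no".
-ip name-server vrf test 3.1.1.1
-ip name-server vrf test 3.1.1.2
-ip name-server vrf test1 3.1.1.1
-ip name-server vrf test1 3.1.1.2
-no ip name-server vrf test1 3.1.1.2
-ip name-server 2.2.2.2
-no ip name-server 3.3.2.2
-ip domain-list vrf test dname7
-ip domain-list vrf test dname8
-ip domain-list vrf test1 dname7
-ip domain-list vrf test1 dname8
-no ip domain-list vrf test dname7
-no ip domain-list vrf test1 dname7
-ip domain-list dname3 dname4
-no ip domain-list dname5 dname6
-#####################################}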
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml
deleted file mode 100644
index 6305318dc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/tests/main.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-# vars file for dellemc.os10.os10_dns,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_dns:
- domain_lookup: true
- name_server:
- - ip:
- - 3.1.1.1
- - 3.1.1.2
- vrf:
- - test
- - test1
- - ip:
- - 3.1.1.2
- vrf:
- - test1
- state: absent
- - ip:
- - 2.2.2.2
- - ip:
- - 3.3.2.2
- state: absent
- domain_list:
- - name:
- - dname7
- - dname8
- vrf:
- - test
- - test1
- - name:
- - dname7
- vrf:
- - test
- - test1
- state: absent
- - name:
- - dname3
- - dname4
- - name:
- - dname5
- - dname6
- state: absent
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml
deleted file mode 100644
index ab6aaca5a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_dns
diff --git a/ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml
deleted file mode 100644
index 199599560..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_dns/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_dns
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/README.md b/ansible_collections/dellemc/os10/roles/os10_ecmp/README.md
deleted file mode 100644
index 6932fdf6f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/README.md
+++ /dev/null
@@ -1,78 +0,0 @@
-ECMP role
-=========
-
-This role facilitates the configuration of equal cost multi-path (ECMP), and it supports the configuration of ECMP for IPv4. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The ECMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_ecmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``ecmp_group_max_paths`` | integer | Configures the maximum number of paths per ECMP group | os10 |
-| ``trigger_threshold`` | integer | Configures the link bundle utilization trigger threshold | os10 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_ecmp* role to configure ECMP for IPv4. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_ecmp* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_ecmp:
- ecmp_group_max_paths: 3
- trigger_threshold: 50
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_ecmp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml
deleted file mode 100644
index 406d1cfc4..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml
deleted file mode 100644
index 24ccf4de3..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml
deleted file mode 100644
index f6448d4c5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_ecmp role facilitates the configuration of ECMP group attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml
deleted file mode 100644
index 012d41195..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os10
-
- - name: "Generating ECMP configuration for os10"
- template:
- src: os10_ecmp.j2
- dest: "{{ build_dir }}/ecmp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning ECMP configuration for os10"
- os10_config:
- src: os10_ecmp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2 b/ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2
deleted file mode 100644
index 6a0b04dd0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/templates/os10_ecmp.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-Purpose:
-Configure ECMP commands for OS10 devices
-os10_ecmp:
- ecmp_group_max_paths: 3
- trigger_threshold: 50
-#####################################}
-{% if os10_ecmp is defined and os10_ecmp %}
- {% if os10_ecmp.ecmp_group_max_paths is defined %}
- {% if os10_ecmp.ecmp_group_max_paths %}
-ip ecmp-group maximum-paths {{ os10_ecmp.ecmp_group_max_paths }}
- {% else %}
-no ip ecmp-group maximum-paths
- {% endif %}
- {% endif %}
- {% if os10_ecmp.trigger_threshold is defined %}
- {% if os10_ecmp.trigger_threshold %}
-link-bundle-utilization trigger-threshold {{ os10_ecmp.trigger_threshold }}
- {% else %}
-no link-bundle-utilization trigger-threshold
- {% endif %}
- {% endif %}
-
-{% endif %}
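-{####################################
-Illustrative sketch (not part of the original template): with the sample
-variables above this template is expected to render the two commands below.
-Setting either key to an empty value would emit the corresponding "no" form
-instead.
-ip ecmp-group maximum-paths 3
-link-bundle-utilization trigger-threshold 50
-#####################################}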
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml
deleted file mode 100644
index ff00dfd4c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/main.os10.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# vars file for dellemc.os10.os10_ecmp,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_ecmp:
- ecmp_group_max_paths: 3
- trigger_threshold: 50
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml
deleted file mode 100644
index 2df95ee65..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml
deleted file mode 100644
index cfd6a141c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ecmp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md
deleted file mode 100644
index 0ff99bf2f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/README.md
+++ /dev/null
@@ -1,119 +0,0 @@
-os10_fabric_summary
-=====================================
-This role is used to retrieve the *show system* information of all devices in the fabric. It is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Fabric summary role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used |
-| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-- The *xmltodict* library must be installed to convert the show command XML output into dict format
-- To install the package, use the *pip install xmltodict* command
-
-Example playbook
-----------------
-
-This example uses the *os10_fabric_summary* role to retrieve the show system attributes from all devices in the fabric. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the *ansible_network_os* variable with the corresponding Dell EMC OS10 name.
-
-The *os10_fabric_summary* role has a simple playbook that only references the *os10_fabric_summary* role.
-
-**Sample hosts file**
-
- site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- [spine]
- site1-spine1
- site1-spine2
- site2-spine1
- site2-spine2
- [LeafAndSpineSwitch:children]
- spine
-
-**Sample host_vars/site1-spine1**
-
-
- cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
- os10_cli_user: xxxx
- os10_cli_pass: xxxx
- ansible_network_os: dellemc.os10.os10
-
-**Simple playbook to setup fabric summary — provision.yaml**
-
- ---
- - name: show system summary command
- hosts: localhost
- gather_facts: False
- connection: local
- roles:
- - os10_fabric_summary
-
-**Run**
-
- ansible-playbook -i hosts provision.yaml
-
-**Sample Output**
-
- "results": [
- {
- "device type": "S6010-ON",
- "host": "10.11.180.21",
- "hostname": "host3",
- "inv_name": "site1-spine1",
- "node-mac": "e4:f0:04:9b:e5:dc",
- "service-tag": "D33FXC2",
- "software-version": "10.4.9999EX"
- },
- {
- "device type": "S6010-ON",
- "host": "10.11.180.22",
- "hostname": "host22",
- "inv_name": "site1-spine2",
- "node-mac": "e4:f0:04:9b:eb:dc",
- "service-tag": "J33FXC2",
- "software-version": "10.4.9999EX"
- },
- {
- "device type": "S6010-ON",
- "host": "10.11.180.24",
- "hostname": "site2-spine1",
- "inv_name": "site2-spine1",
- "node-mac": "e4:f0:04:9b:ee:dc",
- "service-tag": "343FXC2",
- "software-version": "10.4.9999EX"
- },
- {
- "device type": "S6010-ON",
- "host": "10.11.180.23",
- "hostname": "site2-spine2",
- "inv_name": "site2-spine2",
- "node-mac": "e4:f0:04:9b:f1:dc",
- "service-tag": "543FXC2",
- "software-version": "10.4.9999EX"
- }
- ]
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
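The README above notes that this role depends on *xmltodict* to turn the XML output of `show system | display-xml` into a dict that the summary library can consume. As a rough sketch of that conversion only — the XML fragment, keys, and helper values below are illustrative placeholders, not output captured from an OS10 device or code taken from the role's library:

    # Hypothetical example of the xmltodict conversion the role relies on;
    # the real "show system | display-xml" payload is larger and platform-specific.
    import xmltodict

    sample_xml = """
    <rpc-reply>
      <data>
        <system>
          <node>
            <hostname>site1-spine1</hostname>
            <node-mac>e4:f0:04:9b:e5:dc</node-mac>
          </node>
        </system>
      </data>
    </rpc-reply>
    """

    parsed = xmltodict.parse(sample_xml)                 # nested dict keyed by tag names
    node = parsed["rpc-reply"]["data"]["system"]["node"]
    print(node["hostname"], node["node-mac"])            # site1-spine1 e4:f0:04:9b:e5:dc
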
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml
deleted file mode 100644
index 428d79f6d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- role_name: os10_fabric_summary
- author: Dell EMC Networking Engineering
- description: This role provides the system network information of all the switches in the fabric running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml
deleted file mode 100644
index 784d66428..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tasks/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- name: "Get Dell EMC OS10 Show system summary"
- os10_command:
- commands: ['show system | display-xml']
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_system
-- name: "set fact to form database"
- set_fact:
- output: "{{ output|default([])+ [{'inv_name': item.item, 'host': item.invocation.module_args.provider.host, 'stdout_show_system': item.stdout}] }}"
- loop: "{{ show_system.results }}"
-- name: "debug the output of system summary DB"
- debug: var=output
-- name: "show system network call to lib "
- show_system_network_summary:
- cli_responses: "{{ output }}"
- output_type: "{{ output_method if output_method is defined else 'json' }}"
- register: show_system_network_summary
-- name: "debug the output of system summary DB"
- debug: var=show_system_network_summary
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1
deleted file mode 100644
index 36a99cdb1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine1
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
-
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2
deleted file mode 100644
index 36a99cdb1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site1-spine2
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
-
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1
deleted file mode 100644
index 36a99cdb1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine1
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
-
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2 b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2
deleted file mode 100644
index 36a99cdb1..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/host_vars/site2-spine2
+++ /dev/null
@@ -1,12 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
-
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml
deleted file mode 100644
index ff511df50..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/inventory.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-
-[spine]
-site1-spine1
-site1-spine2
-site2-spine1
-site2-spine2
-
-[LeafAndSpineSwitch:children]
-spine
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml
deleted file mode 100644
index 1f450079a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/main.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml
deleted file mode 100644
index e865c7903..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_fabric_summary/tests/test.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: setup for os10 fabric summary
- hosts: localhost
- gather_facts: False
- connection: local
- roles:
- - dellemc.os10.os10_fabric_summary
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md
deleted file mode 100644
index dd98aa956..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/README.md
+++ /dev/null
@@ -1,152 +0,0 @@
-ACL flow-based monitor role
-===========================
-
-This role facilitates configuring ACL flow-based monitoring attributes. Flow-based mirroring is a mirroring session in which traffic matching specified policies is mirrored to a destination port. Port-based mirroring maintains a database that contains all monitoring sessions (including port monitor sessions). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The ACL flow-based role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_flow_monitor` (dictionary) with session ID key (in *session <ID>* format; 1 to 18)
-- Variables and values are case-sensitive
-
-**session ID keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``session_type`` | string: local\*,rspan-source,erspan-source | Configures the monitoring session type | os10 |
-| ``description`` | string | Configures the monitor session description | os10 |
-| ``port_match`` | list | Configures a list of interfaces with source or destination location | os10 |
-| ``port_match.interface_name`` | string | Configures the interface | os10 |
-| ``port_match.location`` | string: source,destination | Configures the source/destination of an interface | os10 |
-| ``port_match.state`` | string: absent,present\* | Deletes the interface if set to absent | os10 |
-| ``flow_based`` | boolean | Enables flow-based monitoring | os10 |
-| ``shutdown`` | string: up,down\* | Enables/disables the monitoring session | os10 |
-| ``state`` | string: absent,present\* | Deletes the monitoring session corresponding to the session ID if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_flow_monitor* role to configure session monitoring. It creates a *hosts* file with the switch details and corresponding variables. The hosts file defines the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false.
-The example writes a simple playbook that references the *os10_flow_monitor* and *os10_acl* roles.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
- os10_flow_monitor:
- session 1:
- session_type: local
- description: "Description goes here"
- port_match:
- - interface_name: ethernet 1/1/4
- location: source
- state: present
- - interface_name: ethernet 1/1/3
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
- session 2:
- session_type: local
- description: "Description of session goes here"
- port_match:
- - interface_name: ethernet 1/1/6
- location: source
- state: present
- - interface_name: ethernet 1/1/7
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
- session 3:
- state: absent
- os10_acl:
- - name: testflow
- type: ipv4
- description: testflow description
- extended: true
- entries:
- - number: 5
- permit: true
- protocol: icmp
- source: any
- destination: any
- other_options: capture session 1 count
- state: present
- - number: 10
- permit: true
- protocol: ip
- source: 102.1.1.0/24
- destination: any
- other_options: capture session 1 count byte
- state: present
- - number: 15
- permit: false
- protocol: udp
- source: any
- destination: any
- other_options: capture session 2 count byte
- state: present
- - number: 20
- permit: false
- protocol: tcp
- source: any
- destination: any
- other_options: capture session 2 count byte
- state: present
- stage_ingress:
- - name: ethernet 1/1/1
- state: present
-
-> **NOTE**: The destination port should not be an L2/L3 port; L2/L3 ports can be configured using the *os10_interface* role.
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_flow_monitor
- - dellemc.os10.os10_acl
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml
deleted file mode 100644
index 3cc17642e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_flow_monitor
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml
deleted file mode 100644
index 91b1038ec..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_flow_monitor
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml
deleted file mode 100644
index c81fad541..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_flow_monitor role facilitates the configuration of ACL flow-based monitor attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - os10
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml
deleted file mode 100644
index b5bf0bc3c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for Dellos10
- - name: "Generating Flow monitor configuration for os10"
- template:
- src: os10_flow_monitor.j2
- dest: "{{ build_dir }}/flow_monitor10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning Flow monitor configuration for os10"
- os10_config:
- src: os10_flow_monitor.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
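
For context on the two tasks above: the provisioning task runs whenever `ansible_network_os` is `dellemc.os10.os10`, while the template task additionally writes the rendered commands to *build_dir* as `flow_monitor10_<hostname>.conf.part` when `os10_cfg_generate` evaluates to true. A minimal sketch of enabling that generation path at run time, assuming the *hosts* file and *leaf.yaml* playbook from the README example above, could be:

    # hypothetical invocation; os10_cfg_generate defaults to false
    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"

The `build_dir` and `hostname` values are expected to come from host variables, as in the sample *host_vars/leaf1* shown earlier.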
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2 b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2
deleted file mode 100644
index 535c61809..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/templates/os10_flow_monitor.j2
+++ /dev/null
@@ -1,86 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{##########################################
-Purpose:
-Configure Flow monitor commands for os10 Devices
-os10_flow_monitor:
- session 1:
- session_type: local
- description: "Description goes here"
- port_match:
- - interface_name: ethernet 1/1/4
- location: source
- state: present
- - interface_name: ethernet 1/1/3
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
- session 2:
- session_type: local
- description: "Description of session goes here"
- port_match:
- - interface_name: ethernet 1/1/6
- location: source
- state: present
- - interface_name: ethernet 1/1/7
- location: destination
- state: present
- flow_based: false
- shutdown: up
- state: present
- session 3:
- state: absent
-#########################################}
-{% if os10_flow_monitor is defined and os10_flow_monitor %}
-{% for key in os10_flow_monitor.keys() %}
-{% set session_id = key.split(" ") %}
-{% set session_vars = os10_flow_monitor[key] %}
-{% set session_type = "" %}
-
-{% if session_vars.session_type is defined and session_vars.session_type != "local" %}
-{% set session_type = "type " + session_vars.session_type %}
-{% endif %}
-
- {% if session_vars.state is defined and session_vars.state == "absent" %}
-no monitor session {{ session_id[1] }}
- {% else %}
-monitor session {{ session_id[1] }} {{ session_type }}
-
- {% if session_vars.description is defined and session_vars.description %}
- description "{{ session_vars.description }}"
- {% else %}
- no description
- {% endif %}
-
- {% if session_vars.port_match is defined and session_vars.port_match %}
- {% for match_vars in session_vars.port_match %}
- {% set negate = "" %}
- {% if match_vars["state"] is defined and match_vars["state"] == "absent" %}
- {% set negate = "no " %}
- {% endif %}
- {% set location = "source" %}
- {% if match_vars["location"] is defined and match_vars["location"] == "destination" %}
- {% set location = "destination" %}
- {% endif %}
- {{ negate }}{{ location }} interface {{ match_vars["interface_name"] }}
- {% endfor %}
- {% endif %}
-
- {% if session_vars.shutdown is defined and session_vars.shutdown == "up" %}
- no shut
- {% else %}
- shut
- {% endif %}
-
-{% if session_vars.flow_based is defined %}
- {% if session_vars.flow_based %}
- flow-based enable
- {% else %}
- no flow-based enable
- {% endif %}
-{% endif %}
-
-{% endif %}
-{% endfor %}
-{% endif %}
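
A minimal sketch of what this template renders, assuming the sample variables from its header comment (whitespace approximate): session 1 enables flow-based monitoring, session 2 would end with `no flow-based enable` instead, and session 3 is removed because its state is absent.

    monitor session 1
     description "Description goes here"
     source interface ethernet 1/1/4
     destination interface ethernet 1/1/3
     no shut
     flow-based enable
    no monitor session 3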
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml
deleted file mode 100644
index 750932c85..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/main.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# vars file for dellemc.os10.os10_flow_monitor,
-# below gives an example configuration
-# Sample variables for OS10 device
-os10_flow_monitor:
- session 1:
- session_type: local
- description: "Description goes here"
- port_match:
- - interface_name: ethernet 1/1/4
- location: source
- state: present
- - interface_name: ethernet 1/1/3
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
- session 2:
- session_type: local
- description: "Description of session goes here"
- port_match:
- - interface_name: ethernet 1/1/6
- location: source
- state: present
- - interface_name: ethernet 1/1/7
- location: destination
- state: present
- flow_based: false
- shutdown: up
- state: present
- session 3:
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml
deleted file mode 100644
index 44a56b7ec..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_flow_monitor
diff --git a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml
deleted file mode 100644
index 0943cd2c0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_flow_monitor/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_flow_monitor
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md
deleted file mode 100644
index 9ae8f731c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/README.md
+++ /dev/null
@@ -1,73 +0,0 @@
-Image upgrade role
-===================================
-
-This role facilitates upgrades or installation of a software image. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Image upgrade role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_image_upgrade keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``operation_type`` | string: cancel,install | Specifies the type of image operation | os10 |
-| ``software_image_url`` | string | Configures the URL path to the image file | os10 |
-| ``software_version`` | string | Specifies the software version of the image file | os10 |
-| ``number_of_retries`` | int | Configures the number of retries to check the status of the image install process | os10 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_image_upgrade* role to upgrade or install a software image. It creates a *hosts* file with the switch details, a corresponding *host_vars* file, and a simple playbook that references the *os10_image_upgrade* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- os10_image_upgrade:
- operation_type: install
- software_image_url: tftp://10.16.148.8/PKGS_OS10-Enterprise-10.2.9999E.5790-installer-x86_64.bin
- software_version: 10.2.9999E
- number_of_retries: 50
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_image_upgrade
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml
deleted file mode 100644
index 809f7a436..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_image_upgrade
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml
deleted file mode 100644
index 7bfc6bc73..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_image_upgrade
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml
deleted file mode 100644
index b35a5382e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_image_upgrade role facilitates installing or upgrading the software image for OS10 switches
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml
deleted file mode 100644
index ee2d557d0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tasks/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
- - block:
- - name: "Process image {{ os10_image_upgrade.operation_type }} operation"
- vars:
- command_postfix: "{{ os10_image_upgrade.operation_type }} {{ '' if os10_image_upgrade.operation_type == 'cancel' else os10_image_upgrade.software_image_url }}"
- os10_command:
- commands:
- - command: "image {{ command_postfix }}"
- prompt: "yes/no]:"
- answer: "yes"
- register: result
- - name: "Get image {{ os10_image_upgrade.operation_type }} operation status"
- os10_command:
- commands: "show image status"
- register: image_status
- - name: "Validate image status"
- fail: msg="Image {{ os10_image_upgrade.operation_type }} operation Failed"
-       when: image_status.stdout.0.find("Failed") > -1
- - name: "Wait for image {{ os10_image_upgrade.operation_type }} operation"
- os10_command:
- commands: "show image status"
- register: result
-       until: result.stdout.0.find("In progress") < 0
- retries: "{{ os10_image_upgrade.number_of_retries }}"
- delay: 15
- - name: "Wait for image {{ os10_image_upgrade.operation_type }} operation"
- os10_command:
- commands: "show image status"
- register: image_status
- - name: "Validate software version"
- fail: msg="Image {{ os10_image_upgrade.operation_type }} operation Failed"
-       when: image_status.stdout.0.find("Failed") > -1
- - name: "Image {{ os10_image_upgrade.operation_type }} status"
- debug: msg="Image {{ os10_image_upgrade.operation_type }} successful"
- when: image_status.stdout.0.find("Failed") <= -1
- when:
- - os10_image_upgrade.operation_type == "install"
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory
deleted file mode 100644
index 85a255f94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
----
-localhost
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml
deleted file mode 100644
index 66507dc75..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/main.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# vars file for dellemc.os10.os10_image_upgrade,
-# below gives an example configuration
-
-os10_image_upgrade:
- operation_type: install
- software_image_url: http://10.16.127.7//tftpboot/NGOS/FMB-ar-rel_10.5.1-release/AmazonInstallers/last_good/PKGS_OS10-Enterprise-10.5.1.0.124stretch-installer-x86_64.bin
- software_version: 10.5.1.0
- number_of_retries: 50
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml
deleted file mode 100644
index a2ed11615..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os10.os10_image_upgrade
diff --git a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml
deleted file mode 100644
index a9fa11547..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_image_upgrade/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_image_upgrade, below gives an example configuration
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/LICENSE b/ansible_collections/dellemc/os10/roles/os10_interface/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/README.md b/ansible_collections/dellemc/os10/roles/os10_interface/README.md
deleted file mode 100644
index bbb4f8ee6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/README.md
+++ /dev/null
@@ -1,178 +0,0 @@
-Interface role
-==============
-
-This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra, and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Interface role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable setting to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_interface` (dictionary) holds a dictionary keyed by the interface name; the interface name can correspond to any valid OS interface with a unique interface identifier name
-- For physical interfaces, the interface name must be in *<interfacename> <tuple>* format; for logical interfaces, the interface must be in *<logical_interfacename> <id>* format; physical interface name can be *ethernet 1/1/32*
-- For interface ranges, the interface name must be in *range <interface_type> <node/slot/port[:subport]-node/slot/port[:subport]>* format; *range ethernet 1/1/1-1/1/4*
-- Logical interface names can be *vlan 1* or *port-channel 1*
-- Variables and values are case-sensitive
-
-> **NOTE**: Only define supported variables for the interface type, and do not define the *switchport* variable for a logical interface.
-
-**interface name keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``desc`` | string | Configures a single line interface description | os10 |
-| ``portmode`` | string | Configures port-mode according to the device type | os10 |
-| ``switchport`` | boolean: true,false\* | Configures an interface in L2 mode | os10 |
-| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os10 |
-| ``mtu`` | integer | Configures the MTU size for L2 and L3 interfaces (1280 to 65535) | os10 |
-| ``fanout`` | string: dual, single; string: 10g-4x, 40g-1x, 25g-4x, 100g-1x, 50g-2x (os10) | Configures the breakout (fanout) mode of the interface | os10 |
-| ``suppress_ra`` | string: present,absent | Suppresses IPv6 router advertisements if set to present | os10 |
-| ``ip_type_dynamic`` | boolean: true,false | Configures the IP address via DHCP if set to true (*ip_and_mask* is ignored if set to true) | os10 |
-| ``ipv6_type_dynamic`` | boolean: true,false | Configures an IPv6 address via DHCP if set to true (*ipv6_and_mask* is ignored if set to true) | os10 |
-| ``ipv6_autoconfig`` | boolean: true,false | Configures stateless configuration of IPv6 addresses if set to true (*ipv6_and_mask* is ignored if set to true) | os10 |
-| ``vrf`` | string | Configures the specified VRF to be associated to the interface | os10 |
-| ``min_ra`` | string | Configures RA minimum interval time period | os10 |
-| ``max_ra`` | string | Configures RA maximum interval time period | os10 |
-| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 |
-| ``ipv6_and_mask`` | string | Configures a specified IPv6 address to the interface | os10 |
-| ``virtual_gateway_ip`` | string | Configures an anycast gateway IP address for a VxLAN virtual network as well as VLAN interfaces | os10 |
-| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces | os10 |
-| ``state_ipv6`` | string: absent,present\* | Deletes the IPv6 address if set to absent | os10 |
-| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os10 |
-| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os10 |
-| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os10 |
-| ``flowcontrol`` | dictionary | Configures the flowcontrol attribute (see ``flowcontrol.*``) | os10 |
-| ``flowcontrol.mode`` | string: receive,transmit | Configures the flowcontrol mode | os10 |
-| ``flowcontrol.enable`` | string: on,off | Turns the flowcontrol mode on or off | os10 |
-| ``flowcontrol.state`` | string: absent,present\* | Deletes the flowcontrol if set to absent | os10 |
-| ``ipv6_bgp_unnum`` | dictionary | Configures the IPv6 BGP unnum attributes (see ``ipv6_bgp_unnum.*``) below | os10 |
-| ``ipv6_bgp_unnum.state`` | string: absent,present\* | Disables auto discovery of BGP unnumbered peer if set to absent | os10 |
-| ``ipv6_bgp_unnum.peergroup_type`` | string: ebgp,ibgp | Specifies the type of template to inherit from | os10 |
-| ``stp_rpvst_default_behaviour`` | boolean: false,true | Configures the RPVST default behavior of BPDUs when set to true (the default) | os10 |
-
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_interface* role to set up description, MTU, admin status, port mode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os10_interface* role.
-
-**Sample hosts file**
-
- leaf3 ansible_host= <ip_address>
-
-**Sample host_vars/leaf3**
-
- hostname: "leaf3"
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_interface:
- ethernet 1/1/32:
- desc: "Connected to Core 2"
- mtu: 2500
- stp_rpvst_default_behaviour: False
- portmode:
- admin: up
- switchport: False
- ip_and_mask:
- ip_type_dynamic: True
- ipv6_type_dynamic: True
- ethernet 1/1/12:
- desc: "ipv6 auto config"
- switchport: False
- mtu: 2500
- admin: up
- ipv6_autoconfig: True
- ethernet 1/1/14:
- fanout: 10g-4x
- ethernet 1/1/13:
- desc: "set ipv6 address"
- switchport: False
- admin: up
- ipv6_and_mask: 2001:4898:5809:faa2::10/126
- state_ipv6: present
- ethernet 1/1/1:
- desc: "Connected to Leaf1"
- portmode: "trunk"
- switchport: True
- suppress_ra: present
- admin: up
- stp_rpvst_default_behaviour: False
- ethernet 1/1/3:
- desc: site2-spine2
- ip_and_mask: 10.9.0.4/31
- mtu: 9216
- switchport: False
- admin: up
- flowcontrol:
- mode: "receive"
- enable: "on"
- state: "present"
-
- vlan 100:
- ip_and_mask:
- ipv6_and_mask: 2001:4898:5808:ffaf::1/64
- state_ipv6: present
- ip_helper:
- - ip: 10.0.0.33
- state: present
- admin: up
- range ethernet 1/1/1-1/1/32:
- mtu: 2500
- port-channel 10:
- admin: up
- switchport: False
- suppress_ra:
- stp_rpvst_default_behaviour: True
- ipv6_bgp_unnum:
- state: present
- peergroup_type: ebgp
- vlan 10:
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- admin: up
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf3
- roles:
- - dellemc.os10.os10_interface
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml
deleted file mode 100644
index 6f2146327..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_interface
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml
deleted file mode 100644
index 72e658748..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_interface
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml
deleted file mode 100644
index 8f0bfd3ba..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_interface role facilitates the configuration of interface attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml
deleted file mode 100644
index c8656c510..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating interface configuration for os10"
- template:
- src: os10_interface.j2
- dest: "{{ build_dir }}/intf10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning interface configuration for os10"
- os10_config:
- src: os10_interface.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2 b/ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2
deleted file mode 100644
index c4dc61b70..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/templates/os10_interface.j2
+++ /dev/null
@@ -1,258 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#####################################################
-Purpose:
-Configure interface commands for os10 Devices.
-os10_interface:
- ethernet 1/1/31:
- desc: "OS10 intf"
- portmode: trunk
- mtu: 2000
- switchport: False
- admin: up
- ip_type_dynamic: True
- ip_and_mask: "192.168.11.1/24"
- virtual_gateway_ip: "172.17.17.1"
- suppress_ra: present
- ipv6_autoconfig: True
- ipv6_and_mask: 2001:4898:5808:ffa2::5/126
- state_ipv6: present
- ip_helper:
- - ip: 10.0.0.36
- state: present
- flowcontrol:
- mode: "receive"
- enable: "on"
- state: "present"
- ethernet 1/1/3:
- fanout: 10g-4x
- range ethernet 1/1/1-1/1/4:
- switchport: True
- admin: down
- stp_rpvst_default_behaviour: False
- virtual-network 100:
- vrf: "test"
- ip_and_mask: "15.1.1.1/24"
- virtual_gateway_ip: "15.1.1.254"
- admin: up
- port-channel 10:
- admin: up
- switchport: False
- suppress_ra:
- ipv6_bgp_unnum:
- state: present
- peergroup_type: ebgp
- port-channel 20:
- admin: up
- stp_rpvst_default_behaviour: True
- vlan 10:
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- admin: up
-
-#####################################################}
-{% if os10_interface is defined and os10_interface %}
-{% for key in os10_interface.keys() %}
- {% set intf_vars = os10_interface[key] %}
- {% set port = key.split(" ") %}
- {% set interface_key = "" %}
- {% if intf_vars.fanout is defined %}
- {% if intf_vars.fanout %}
-interface breakout {{ port[1] }} map {{ intf_vars.fanout }}
- {% else %}
-no interface breakout {{ port[1] }}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% for key in os10_interface.keys() %}
- {% set intf_vars = os10_interface[key] %}
- {% set port = key.split(" ") %}
- {% set interface_key = "" %}
- {% if (intf_vars.fanout is defined and not intf_vars.fanout) or (intf_vars.fanout is not defined) %}
- {% if key.startswith('range')%}
- {% set interface_key = port[0] + " " + port[1] + port[2] %}
- {% else %}
- {% set interface_key = port[0] + port[1] %}
- {% endif %}
-interface {{ interface_key }}
- {% if intf_vars.desc is defined %}
- {% if intf_vars.desc %}
- {% if intf_vars.desc|wordcount > 1 %}
- description "{{ intf_vars.desc }}"
- {% else %}
- description {{ intf_vars.desc }}
- {% endif %}
- {% else %}
- no description
- {% endif %}
- {% endif %}
-
-
-
- {% if intf_vars.switchport is defined %}
- {% if intf_vars.switchport %}
- {% if intf_vars.portmode is defined and intf_vars.portmode %}
- switchport mode {{ intf_vars.portmode }}
- {% endif %}
- {% else %}
- no switchport
- {% endif %}
- {% else %}
- {% if intf_vars.portmode is defined %}
- {% if intf_vars.portmode %}
- switchport mode {{ intf_vars.portmode }}
- {% else %}
- no switchport
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.mtu is defined %}
- {% if intf_vars.mtu %}
- mtu {{ intf_vars.mtu }}
- {% else %}
- no mtu
- {% endif %}
- {% endif %}
-
- {% if intf_vars.ip_type_dynamic is defined %}
- {% if intf_vars.ip_type_dynamic %}
- ip address dhcp
- {% else %}
- no ip address
- {% endif %}
- {% else %}
- {% if intf_vars.vrf is defined %}
- {% if intf_vars.vrf %}
- ip vrf forwarding {{ intf_vars.vrf }}
- {% else %}
- no ip address vrf
- {% endif %}
- {% endif %}
- {% if intf_vars.ip_and_mask is defined %}
- {% if intf_vars.ip_and_mask %}
- ip address {{ intf_vars.ip_and_mask }}
- {% else %}
- no ip address
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.virtual_gateway_ip is defined %}
- {% if intf_vars.virtual_gateway_ip %}
- ip virtual-router address {{ intf_vars.virtual_gateway_ip }}
- {% else %}
- no ip virtual-router address
- {% endif %}
- {% endif %}
-
- {% if intf_vars.virtual_gateway_ipv6 is defined %}
- {% if intf_vars.virtual_gateway_ipv6 %}
- ipv6 virtual-router address {{ intf_vars.virtual_gateway_ipv6 }}
- {% else %}
- no ipv6 virtual-router address
- {% endif %}
- {% endif %}
-
- {% if intf_vars.suppress_ra is defined %}
- {% if intf_vars.suppress_ra == "present" %}
- no ipv6 nd send-ra
- {% else %}
- ipv6 nd send-ra
- {% endif %}
- {% endif %}
-
- {% if intf_vars.stp_rpvst_default_behaviour is defined %}
- {% if intf_vars.stp_rpvst_default_behaviour %}
- spanning-tree rapid-pvst default-behavior
- {% else %}
- no spanning-tree rapid-pvst default-behavior
- {% endif %}
- {% endif %}
-
- {% if intf_vars.ipv6_autoconfig is defined %}
- {% if intf_vars.ipv6_autoconfig %}
- ipv6 address autoconfig
- {% else %}
- no ipv6 address
- {% endif %}
- {% elif intf_vars.ipv6_type_dynamic is defined %}
- {% if intf_vars.ipv6_type_dynamic %}
- ipv6 address dhcp
- {% else %}
- no ipv6 address
- {% endif %}
- {% else %}
- {% if intf_vars.ipv6_and_mask is defined %}
- {% if intf_vars.ipv6_and_mask %}
- {% if intf_vars.state_ipv6 is defined and intf_vars.state_ipv6 == "absent" %}
- no ipv6 address {{ intf_vars.ipv6_and_mask }}
- {% else %}
- ipv6 address {{ intf_vars.ipv6_and_mask }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if intf_vars.max_ra is defined %}
- {% if intf_vars.max_ra %}
- ipv6 nd max-ra-interval {{ intf_vars.max_ra }}
- {% else %}
- no ipv6 nd max-ra-interval
- {% endif %}
- {% endif %}
- {% if intf_vars.min_ra is defined %}
- {% if intf_vars.min_ra %}
- ipv6 nd min-ra-interval {{ intf_vars.min_ra }}
- {% else %}
- no ipv6 nd min-ra-interval
- {% endif %}
- {% endif %}
- {% if intf_vars.ip_helper is defined and intf_vars.ip_helper %}
- {% for helper in intf_vars.ip_helper %}
- {% if helper.ip is defined and helper.ip %}
- {% if helper.state is defined and helper.state == "absent" %}
- no ip helper-address {{ helper.ip }}
- {% else %}
- ip helper-address {{ helper.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if intf_vars.flowcontrol is defined and intf_vars.flowcontrol %}
- {% if intf_vars.flowcontrol.mode is defined %}
- {% if intf_vars.flowcontrol.mode %}
- {% if intf_vars.flowcontrol.state is defined and intf_vars.flowcontrol.state == "absent" %}
- no flowcontrol {{ intf_vars.flowcontrol.mode }}
- {% else %}
- {% if intf_vars.flowcontrol.enable is defined %}
- {% if intf_vars.flowcontrol.enable == "on" %}
- flowcontrol {{ intf_vars.flowcontrol.mode }} on
- {% else %}
- flowcontrol {{ intf_vars.flowcontrol.mode }} off
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.admin is defined %}
- {% if intf_vars.admin == "up" %}
- no shutdown
- {% elif intf_vars.admin == "down" %}
- shutdown
- {% endif %}
- {% endif %}
- {% if intf_vars.ipv6_bgp_unnum is defined and intf_vars.ipv6_bgp_unnum %}
- {% if intf_vars.ipv6_bgp_unnum.state == "absent" %}
- no ipv6 bgp unnumbered
- {% elif intf_vars.ipv6_bgp_unnum.state == "present" and intf_vars.ipv6_bgp_unnum.peergroup_type == "ebgp" %}
- ipv6 bgp unnumbered ebgp-template
- {% elif intf_vars.ipv6_bgp_unnum.state == "present" and intf_vars.ipv6_bgp_unnum.peergroup_type == "ibgp" %}
- ipv6 bgp unnumbered ibgp-template
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml
deleted file mode 100644
index d77b7fc68..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/tests/main.os10.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
----
-# vars file for dellemc.os10.os10_interface
-# Sample variables for OS10 device
-os10_interface:
- ethernet 1/1/32:
- desc: "Connected to Core 2"
- mtu: 2500
- portmode:
- admin: up
- switchport: False
- ip_and_mask:
- ip_type_dynamic: True
- ipv6_type_dynamic: True
- ethernet 1/1/12:
- desc: "ipv6 auto config"
- switchport: False
- mtu: 2500
- admin: up
- ipv6_autoconfig: True
- ethernet 1/1/14:
- fanout: 10g-4x
- ethernet 1/1/13:
- desc: "set ipv6 address"
- switchport: False
- admin: up
- ipv6_and_mask: 2001:4898:5809:faa2::10/126
- state_ipv6: present
- ethernet 1/1/1:
- desc: "Connected to Leaf1"
- portmode: "trunk"
- switchport: True
- suppress_ra: present
- admin: up
- stp_rpvst_default_behaviour: False
- ethernet 1/1/3:
- desc: site2-spine2
- ip_and_mask: 10.9.0.4/31
- mtu: 9216
- switchport: False
- admin: up
- flowcontrol:
- mode: "receive"
- enable: "on"
- state: "present"
- stp_rpvst_default_behaviour: True
- vlan 100:
- ip_and_mask:
- ipv6_and_mask: 2001:4898:5808:ffaf::1/64
- state_ipv6: present
- ip_helper:
- - ip: 10.0.0.33
- state: present
- admin: up
- range ethernet 1/1/1-1/1/32:
- mtu: 2500
- admin: up
- switchport: False
- port-channel 10:
- admin: up
- switchport: False
- suppress_ra:
- ipv6_bgp_unnum:
- state: present
- peergroup_type: ebgp
- port-channel 20:
- admin: up
- stp_rpvst_default_behaviour: False
- vlan 10:
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- admin: up
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml
deleted file mode 100644
index 5b1ac0946..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_interface
diff --git a/ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml
deleted file mode 100644
index 247ea1577..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_interface/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_interface
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/LICENSE b/ansible_collections/dellemc/os10/roles/os10_lag/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/README.md b/ansible_collections/dellemc/os10/roles/os10_lag/README.md
deleted file mode 100644
index eb679dcff..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/README.md
+++ /dev/null
@@ -1,103 +0,0 @@
-LAG role
-========
-
-This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports configuring a LAG as static or dynamic and setting the minimum number of required links. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The LAG role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- `os10_lag` (dictionary) contains the hostname (dictionary); this object drives the tasks in this role
-- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po <ID>` format (1 to 128)
-- Variables and values are case-sensitive
-
-**port-channel ID keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os10 |
-| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 32) | os10 |
-| ``max_bundle_size`` | integer | Configures the maximum bundle size for the port channel | os10 |
-| ``lacp_system_priority`` | integer | Configures the LACP system-priority value | os10 |
-| ``lacp_fallback_enable`` | boolean | Configures LACP fallback | os10 |
-| ``channel_members`` | list | Specifies the list of port members to be associated with the port-channel (see ``channel_members.*``) | os10 |
-| ``channel_members.port`` | string | Specifies valid interface names to be configured as port-channel members | os10 |
-| ``channel_members.mode`` | string: active,passive,on | Configures mode of channel members | os10 |
-| ``channel_members.port_priority`` | integer | Configures port priority on devices for channel members | os10 |
-| ``channel_members.lacp_rate_fast`` | boolean | Configures the LACP rate as fast if set to true | os10 |
-| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port-channel ID if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_lag* role to set up the port-channel ID and configure LAG attributes such as the type, minimum links, and channel members. Channel members can be configured for the port-channel in either static or dynamic mode. You can also delete the LAG using the port-channel ID, or delete the members associated with it; a deletion sketch follows the sample variables below. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_lag* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority: 2
- channel_members:
- - port: ethernet 1/1/31
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: present
-
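-**Sample host_vars/leaf1 (LAG deletion sketch)**
-
-To remove the LAG instead, a minimal variant of the variables above, assuming the same *Po 12* port-channel, sets `state` to absent for that key:
-
-    os10_lag:
-      Po 12:
-        state: absent
-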
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_lag
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml
deleted file mode 100644
index 6eaa54eac..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_lag
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml
deleted file mode 100644
index 06b4bef88..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_lag
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml
deleted file mode 100644
index 6fcd3c68c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_lag role facilitates the configuration of LAG attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml
deleted file mode 100644
index e103552f6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating LAG configuration for os10"
- template:
- src: os10_lag.j2
- dest: "{{ build_dir }}/lag10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning LAG configuration for os10"
- os10_config:
- src: os10_lag.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2 b/ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2
deleted file mode 100644
index 722ff5ffe..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/templates/os10_lag.j2
+++ /dev/null
@@ -1,89 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure LAG commands for os10 Devices.
-os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority: 2
- channel_members:
- - port: ethernet 1/1/31
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: present
-################################}
-{% if os10_lag is defined and os10_lag %}
-{% for key in os10_lag.keys() %}
-{% set channel_id = key.split(" ") %}
-{% set lag_vars = os10_lag[key] %}
-
- {% if lag_vars.lacp_system_priority is defined %}
- {% if lag_vars.lacp_system_priority %}
-lacp system-priority {{ lag_vars.lacp_system_priority }}
- {% else %}
-no lacp system-priority
- {% endif %}
- {% endif %}
-
- {% if lag_vars.state is defined and lag_vars.state == "absent" %}
-no interface port-channel {{ channel_id[1] }}
- {% else %}
-interface port-channel{{ channel_id[1] }}
- {% if lag_vars.min_links is defined %}
- {% if lag_vars.min_links %}
- minimum-links {{ lag_vars.min_links }}
- {% else %}
- no minimum-links
- {% endif %}
- {% endif %}
- {% if lag_vars.max_bundle_size is defined %}
- {% if lag_vars.max_bundle_size %}
- lacp max-bundle {{ lag_vars.max_bundle_size }}
- {% else %}
- no lacp max-bundle
- {% endif %}
- {% endif %}
- {% if lag_vars.lacp_fallback_enable is defined and lag_vars.lacp_fallback_enable %}
- lacp fallback enable
- {% endif %}
- {% if lag_vars.channel_members is defined %}
- {% for ports in lag_vars.channel_members %}
- {% if ports.port is defined and ports.port %}
-interface {{ ports.port }}
- {% if lag_vars.type is defined and lag_vars.type == "static" %}
- {% if ports.mode is defined and ports.mode == "on" %}
- channel-group {{ channel_id[1] }} mode on
- {% else %}
- no channel-group
- {% endif %}
- {% elif lag_vars.type is defined and lag_vars.type == "dynamic" %}
- {% if ports.mode is defined and (ports.mode == "active" or ports.mode == "passive") %}
- channel-group {{ channel_id[1] }} mode {{ ports.mode }}
- {% else %}
- no channel-group
- {% endif %}
- {% endif %}
- {% if ports.lacp_rate_fast is defined %}
- {% if ports.lacp_rate_fast %}
- lacp rate fast
- {% else %}
- no lacp rate fast
- {% endif %}
- {% endif %}
- {% if ports.port_priority is defined %}
- {% if ports.port_priority %}
- lacp port-priority {{ ports.port_priority }}
- {% else %}
- no lacp port-priority
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml
deleted file mode 100644
index 525189817..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/tests/main.os10.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# vars file for dellemc.os10.os10_lag
-# Sample variables for OS10 device
-os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority: 2
- channel_members:
- - port: ethernet 1/1/31
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml
deleted file mode 100644
index 6c130a0e0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_lag
diff --git a/ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml
deleted file mode 100644
index 5b1cd5b18..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lag/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_lag
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/README.md b/ansible_collections/dellemc/os10/roles/os10_lldp/README.md
deleted file mode 100644
index 0c08af4dc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/README.md
+++ /dev/null
@@ -1,149 +0,0 @@
-LLDP role
-=========
-
-This role facilitates the configuration of Link Layer Discovery Protocol (LLDP) attributes at the global and interface levels. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at both the global and interface levels. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The LLDP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable, which can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_lldp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``enable`` | boolean | Enables or disables LLDP at a global level | os10 |
-| ``multiplier`` | integer | Configures the global LLDP multiplier (2 to 10) | os10 |
-| ``reinit`` | integer | Configures the reinit value (1 to 10) | os10 |
-| ``timer`` | integer | Configures the timer value (5 to 254) | os10 |
-| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os10 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement (see ``med_tlv.*``) | os10 |
-| ``med.fast_start_repeat_count`` | integer | Configures the MED fast-start repeat count value (1 to 10) | os10 |
-| ``med.application`` | list | Configures global MED TLVs advertisement for an application (see ``application.*``) | os10 |
-| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os10 |
-| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement (1 to 4094) | os10 |
-| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement (0 to 7) | os10 |
-| ``application.code_point_value`` | integer | Configures differentiated services code point values for MED TLVs advertisement (0 to 63) | os10 |
-| ``application.vlan_type`` | string: tag, untag | Configures the VLAN type for the application MED TLVs advertisement | os10 |
-| ``application.network_policy_id`` | integer | Configures network policy ID for the application MED TLVs advertisement | os10 |
-| ``application.state`` | string: present\*,absent | Deletes the application if set to absent | os10 |
-| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os10 |
-| ``local_interface.<interface name>`` | dictionary | Configures LLDP at the interface level (see ``<interface name>.*``) | os10 |
-| ``<interface name>.mode`` | string: rx,tx | Configures the LLDP transmit/receive mode at the interface level | os10 |
-| ``<interface name>.mode_state`` | string: absent,present | Removes the interface-level transmit/receive configuration if set to absent | os10 |
-| ``<interface name>.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os10 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med_tlv.*``) | os10 |
-| ``med.enable`` | boolean | Enables interface-level MED capabilities | os10 |
-| ``med.tlv`` | string | Configures MED TLV advertisement at the interface level | os10 |
-| ``med.tlv_state`` | string: present\*,absent | Deletes the interface-level MED configuration if set to absent | os10 |
-| ``med.application`` | list | Configures MED TLVs advertisement for the application at the interface level (see ``application.*``) | os10 |
-| ``application.network_policy_id`` | integer | Configures the *network_policy_id* for the application of MED | os10 |
-| ``application.state`` | string: present\*,absent | Deletes the associated network policy ID for the application if set to absent.| os10 |
-| ``advertise.tlv`` | list | Configures TLV advertisement at the interface level (see `<interface_name>.tlv.*`) | os10 |
-| ``tlv.name`` | string: basic-tlv,dcbxp,dcbxp-appln,dot1-tlv,dot3-tlv | Configures advertisement of the specified TLV name at the interface level | os10 |
-| ``tlv.value`` | string | Specifies the TLV value corresponding to the TLV name, as a string | os10 |
-| ``tlv.state`` | string: present\*,absent | Deletes the interface level TLVs advertisement if set to absent | os10 |
-
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_lldp* role to configure LLDP. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, the variable is set to false. The example then writes a simple playbook that references only the *os10_lldp* role.
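-
-A minimal sketch of the generation-related variables described above — this only illustrates where they can be placed (host_vars is one of several valid options) and the directory path is arbitrary:
-
-    # host_vars/leaf1 (illustrative sketch)
-    os10_cfg_generate: true        # also write the generated commands to a .part file
-    build_dir: ../temp/temp_os10   # directory that receives the generated .part file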
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_lldp:
- enable: false
- multiplier: 3
- reinit: 2
- timer: 5
- advertise:
- med:
- fast_start_repeat_count: 4
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: absent
- local_interface:
- ethernet 1/1/1:
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: inventory
- tlv_state: present
- application:
- - network_policy_id: 4
- state: present
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_lldp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
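-
-Assuming the underlying os10 modules honor Ansible's check mode, the same playbook can be previewed as a dry run before applying it (the extra `-v` prints the returned command list, if any):
-
-    ansible-playbook -i hosts leaf.yaml --check -v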
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml
deleted file mode 100644
index 464b4d96e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_lldp
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml
deleted file mode 100644
index f49343b1f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_lldp
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml
deleted file mode 100644
index 7d843eed2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
-    The os10_lldp role facilitates the configuration of Link Layer Discovery Protocol (LLDP) attributes in devices
- running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml
deleted file mode 100644
index fc86a9d47..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for Dellos10
- - name: "Generating LLDP configuration for os10"
- template:
- src: os10_lldp.j2
- dest: "{{ build_dir }}/lldp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning LLDP configuration for os10"
- os10_config:
- src: os10_lldp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2 b/ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2
deleted file mode 100644
index 6d362e21d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/templates/os10_lldp.j2
+++ /dev/null
@@ -1,195 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{###################################################
-Purpose:
-Configure LLDP commands for os10 Devices.
-
-os10_lldp:
- enable: false
- multiplier: 3
- reinit: 2
- timer: 5
- advertise:
- med:
- fast_start_repeat_count: 4
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: absent
- local_interface:
- ethernet 1/1/1:
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv : inventory
- tlv_state: present
- application:
- - network_policy_id: 4
- state: present
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
-
-{###############################################################################################}
-{% if os10_lldp is defined and os10_lldp %}
-{% for key,value in os10_lldp.items() %}
- {% if key == "enable" %}
- {% if value %}
-lldp enable
- {% else %}
-no lldp enable
- {% endif %}
- {% elif key == "reinit" %}
- {% if value %}
-lldp reinit {{ value }}
- {% else %}
-no lldp reinit
- {% endif %}
- {% elif key == "multiplier" %}
- {% if value %}
-lldp holdtime-multiplier {{ value }}
- {% else %}
-no lldp holdtime-multiplier
- {% endif %}
- {% elif key == "timer" %}
- {% if value %}
-lldp timer {{ value }}
- {% else %}
-no lldp timer
- {% endif %}
- {% elif key == "advertise" %}
- {% if value %}
- {% for ke,valu in value.items() %}
- {% if ke == "med" %}
- {% if valu %}
- {% for med,val in valu.items() %}
- {% if med == "fast_start_repeat_count" %}
- {% if val %}
-lldp med fast-start-repeat-count {{ val }}
- {% else %}
-no lldp med fast-start-repeat-count
- {% endif %}
- {% elif med == "application" %}
- {% if val %}
- {% for item in val %}
- {% if item.network_policy_id is defined and item.network_policy_id %}
- {% if item.state is defined and item.state == "absent" %}
-no lldp med network-policy {{ item.network_policy_id }}
- {% else %}
- {% if item.name is defined and item.name %}
- {% if item.vlan_id is defined and item.vlan_id %}
- {% if item.vlan_type is defined and item.vlan_type %}
- {% if item.l2_priority is defined and item.l2_priority %}
- {% if item.code_point_value is defined and item.code_point_value %}
-lldp med network-policy {{ item.network_policy_id }} app {{ item.name }} vlan {{ item.vlan_id }} vlan-type {{ item.vlan_type }} priority {{ item.l2_priority }} dscp {{ item.code_point_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
-{% if os10_lldp is defined and os10_lldp %}
-{% for key in os10_lldp.keys() %}
-{% set lldp_vars = os10_lldp[key] %}
-{% if key == "local_interface" %}
- {% for intf in lldp_vars.keys() %}
- {% set intf_vars = lldp_vars[intf] %}
-interface {{ intf }}
- {% if intf_vars.mode is defined and intf_vars.mode %}
- {% if intf_vars.mode_state is defined and intf_vars.mode_state == "absent" %}
- {% if intf_vars.mode == "rx" %}
- no lldp receive
- {% elif intf_vars.mode == "tx" %}
- no lldp transmit
- {% endif %}
- {% else %}
- {% if intf_vars.mode == "rx" %}
- lldp receive
- {% elif intf_vars.mode == "tx" %}
- lldp transmit
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.advertise is defined and intf_vars.advertise %}
- {% if intf_vars.advertise.med is defined and intf_vars.advertise.med %}
- {% if intf_vars.advertise.med.enable is defined %}
- {% if intf_vars.advertise.med.enable %}
- lldp med enable
- {% else %}
- lldp med disable
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise.med.tlv is defined and intf_vars.advertise.med.tlv %}
- {% if intf_vars.advertise.med.tlv_state is defined and intf_vars.advertise.med.tlv_state == "absent" %}
- no lldp med tlv-select {{ intf_vars.advertise.med.tlv }}
- {% else %}
- lldp med tlv-select {{ intf_vars.advertise.med.tlv }}
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise.med.application is defined and intf_vars.advertise.med.application %}
- {% for item in intf_vars.advertise.med.application %}
- {% if item.network_policy_id is defined and item.network_policy_id %}
- {% if item.state is defined and item.state == "absent" %}
- lldp med network-policy remove {{ item.network_policy_id }}
- {% else %}
- lldp med network-policy add {{ item.network_policy_id }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise.tlv is defined and intf_vars.advertise.tlv %}
- {% for it in intf_vars.advertise.tlv %}
- {% if it.name is defined and it.name %}
- {% if it.state is defined and it.state == "absent" %}
- {% if it.value is defined and it.value %}
- no lldp tlv-select {{ it.name }} {{ it.value }}
- {% else %}
- no lldp tlv-select {{ it.name }}
- {% endif %}
- {% else %}
- {% if it.value is defined and it.value %}
- lldp tlv-select {{ it.name }} {{ it.value }}
- {% else %}
- lldp tlv-select {{ it.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml
deleted file mode 100644
index f07408ed7..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/main.os10.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-# vars file for dellemc.os10.os10_lldp,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_lldp:
- enable: false
- multiplier: 3
- reinit: 2
- timer: 5
- advertise:
- med:
- fast_start_repeat_count: 4
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: absent
- local_interface:
- ethernet 1/1/1:
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: inventory
- tlv_state: present
- application:
- - network_policy_id: 4
- state: present
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml
deleted file mode 100644
index e928c00f2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_lldp
diff --git a/ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml
deleted file mode 100644
index 8802ce769..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_lldp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_lldp
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/LICENSE b/ansible_collections/dellemc/os10/roles/os10_logging/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/README.md b/ansible_collections/dellemc/os10/roles/os10_logging/README.md
deleted file mode 100644
index c8a2dbf23..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/README.md
+++ /dev/null
@@ -1,97 +0,0 @@
-Logging role
-============
-
-This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Logging role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_logging keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``logging`` | list | Configures the logging server (see ``logging.*``) | os10 |
-| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os10 |
-| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os10 |
-| ``console`` | dictionary | Configures logging to the console (see ``console.*``) | os10 |
-| ``console.enable`` | boolean | Enables/disables logging to the console | os10 |
-| ``console.severity`` | string | Configures the minimum severity level for logging to the console | os10 |
-| ``log_file`` | dictionary | Configures logging to a log file (see ``log_file.*``) | os10 |
-| ``log_file.enable`` | boolean | Enables/disables logging to a log file | os10 |
-| ``log_file.severity`` | string | Configures the minimum severity level for logging to a log file | os10 |
-| ``source_interface`` | string | Configures the source interface for logging | os10 |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
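-
-As a minimal sketch of how these keys combine (the server address and interface name below are placeholders, not values taken from this collection), the following variable block deletes one logging server, enables console logging, and negates the console severity by leaving it empty:
-
-    os10_logging:
-      logging:
-        - ip: 10.1.1.1                     # hypothetical logging server; state absent deletes it
-          state: absent
-      console:
-        enable: True                       # enables logging to the console
-        severity:                          # empty value negates the console severity configuration
-      source_interface: "ethernet1/1/10"   # hypothetical source interface for logging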
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_logging:
- logging:
- - ip: 1.1.1.1
- state: absent
- console:
- enable: True
- severity: log-err
- log_file:
- enable: True
- severity: log-err
- source_interface: "ethernet1/1/30"
-
-**Simple playbook to set up logging — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_logging
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml
deleted file mode 100644
index 2fbccfcde..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_logging
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml
deleted file mode 100644
index b79ed93fe..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_logging
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml
deleted file mode 100644
index a9b06cab6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_logging role facilitates the configuration of logging attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml
deleted file mode 100644
index 91ce9e7bb..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating logging configuration for os10"
- template:
- src: os10_logging.j2
- dest: "{{ build_dir }}/logging10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning logging configuration for os10"
- os10_config:
- src: os10_logging.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2 b/ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2
deleted file mode 100644
index 442376b54..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/templates/os10_logging.j2
+++ /dev/null
@@ -1,67 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure logging commands for os10 Devices
-os10_logging:
- logging:
- - ip: 1.1.1.1
- state: present
- console:
- enable: True
- severity: log-err
- log_file:
- enable: True
- severity: log-err
- source_interface: "ethernet1/1/30"
-###############################################}
-{% if os10_logging is defined and os10_logging %}
- {% for key,value in os10_logging.items() %}
- {% if key == "logging" %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.state is defined and item.state == "absent" %}
-no logging server {{ item.ip }}
- {% else %}
-logging server {{ item.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key == "log_file" %}
- {% if value.enable is defined %}
- {% if value.enable %}
-logging log-file enable
- {% else %}
-logging log-file disable
- {% endif %}
- {% endif %}
- {% if value.severity is defined %}
- {% if value.severity %}
-logging log-file severity {{ value.severity }}
- {% else %}
-no logging log-file severity
- {% endif %}
- {% endif %}
- {% elif key == "console" %}
- {% if value.enable is defined %}
- {% if value.enable %}
-logging console enable
- {% else %}
-logging console disable
- {% endif %}
- {% endif %}
- {% if value.severity is defined %}
- {% if value.severity %}
-logging console severity {{ value.severity }}
- {% else %}
-no logging console severity
- {% endif %}
- {% endif %}
- {% elif key == "source_interface" %}
- {% if value %}
-logging source-interface {{ value }}
- {% else %}
-no logging source-interface
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml
deleted file mode 100644
index d32792f7d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.16.148.72 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml
deleted file mode 100644
index c9255b23e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/tests/main.os10.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# vars file for dellemc.os10.os10_logging,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_logging:
- logging:
- - ip: 1.1.1.1
- state: absent
- console:
- enable: True
- severity: log-err
- log_file:
- enable: True
- severity: log-err
- source_interface: "ethernet1/1/30"
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml
deleted file mode 100644
index cb66b3a8c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_logging
diff --git a/ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml
deleted file mode 100644
index 736ff3b8f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_logging/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_logging
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE b/ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/README.md b/ansible_collections/dellemc/os10/roles/os10_network_validation/README.md
deleted file mode 100644
index e9014c42b..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/README.md
+++ /dev/null
@@ -1,304 +0,0 @@
-Network validation role
-=========================
-
-This role is used to perform network validation. It validates wiring connections, BGP neighbors, MTU between neighbors, and VLT pairing. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10. The Network validation role requires an SSH connection for connectivity to a Dell EMC OS10 device. You can use any of the built-in OS connection variables.
-
-- **Wiring validation** — Based on LLDP neighbor establishment, the intended neighbors defined by the user in _group_var/all_ are compared with the actual LLDP neighbors; a report is generated if there is any mismatch with the intended neighbors (see the sketch after this list)
-
-- **BGP validation** — Based on BGP neighbor state establishment, a report is generated if a BGP neighbor is not in the established state
-
-- **MTU validation** — Based on the interface MTU, a report is generated if there is an MTU mismatch between LLDP neighbors
-
-- **VLT validation** — Based on the VLT information, a report is generated if the backup VLT link is down or not present
-
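-As a minimal sketch of the intended-topology input model assumed by these checks (the switch and port names below are placeholders; fuller samples appear under *Sample group_var/all* later in this document), wiring validation compares entries such as the following against the LLDP neighbors discovered on the devices:
-
-    intended_neighbors:
-      - source_switch: site1-spine1       # inventory name of the switch originating the link
-        source_port: ethernet1/1/1        # hypothetical source port
-        dest_port: ethernet1/1/1          # expected remote port
-        dest_switch: site1-spine2         # expected LLDP neighbor
-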
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- Variables and values are case-sensitive
-
-**wiring_validation keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``intended_neighbors`` | list | Defines topology details planned | os10 |
-| ``source_switch`` | string | Defines the source switch inventory name planned | os10 |
-| ``source_port`` | string | Defines the source port planned | os10 |
-| ``dest_switch`` | string | Defines the destination switch inventory name planned | os10 |
-| ``dest_port`` | string | Defines the destination port planned | os10 |
-
-**bgp_validation keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``intended_bgp_neighbors`` | list | Defines topology details planned | os10 |
-| ``source_switch`` | string | Defines the source switch inventory name planned | os10 |
-
-**vlt_validation keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``intended_vlt_pairs`` | list | Defines topology details planned | os10 |
-| ``primary`` | string | Defines the primary role of switch inventory name planned | os10 |
-| ``secondary`` | string | Defines the secondary role of switch inventory name planned | os10 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible _group_vars_ or _host_vars_ directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if the value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; defaults to 22 |
-| ``os10_cli_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the default value is used |
-| ``os10_cli_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the _become_ method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use, if required, to enter privileged mode on the remote device; if `ansible_become` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-- The _xmltodict_ library should be installed to convert show command output from XML into dictionary format
-- To install the package, use the `pip install xmltodict` command, as sketched after this list
-- The *os10_fabric_summary* role must be included to query system network summary information
-
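-A minimal sketch of satisfying the _xmltodict_ dependency from the control node with Ansible itself (this play is illustrative only; running `pip install xmltodict` directly on the control node works equally well):
-
-    ---
-    - name: install xmltodict on the control node
-      hosts: localhost
-      connection: local
-      gather_facts: no
-      tasks:
-        - name: ensure xmltodict is available for parsing XML show command output
-          pip:
-            name: xmltodict
-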
-Example playbook
-----------------
-
-This example uses the *os10_network_validation* role to perform network validation. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-
-**Sample hosts file**
-
- site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine1 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- site2-spine2 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
- [spine]
- site1-spine1
- site1-spine2
- site2-spine1
- site2-spine2
- [LeafAndSpineSwitch:children]
- spine
-
-
-**Sample host_vars/site1-spine1**
-
- cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
-
- os10_cli_user: xxxx
- os10_cli_pass: xxxx
- ansible_network_os: dellemc.os10.os10
-
-
-#### Sample ``group_var/all``
-
-**Sample input for wiring validation**
-
-
- intended_neighbors:
- - source_switch: site1-spine2
- source_port: ethernet1/1/5
- dest_port: ethernet1/1/29
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/6
- dest_port: ethernet1/1/30
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/7
- dest_port: ethernet1/1/31
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/8
- dest_port: ethernet1/1/32
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/9
- dest_port: ethernet1/1/21
- dest_switch: site1-spine1
- - source_switch: site1-spine2
- source_port: ethernet1/1/7
- dest_port: ethernet1/1/29
- dest_switch: site1-spine3
-
-**Sample input for BGP validation**
-
- intended_bgp_neighbors:
- - source_switch: site1-spine1
- neighbor_ip: ["10.11.0.1","10.9.0.1","10.9.0.3","10.9.0.5","1.1.1.1"]
- - source_switch: site1-spine2
- neighbor_ip: ["10.11.0.0","10.9.0.9","10.9.0.11","10.9.0.15"]
-
-**Sample input for VLT validation**
-
- intended_vlt_pairs:
- - primary: site1-spine1
- secondary: site2-spine2
- - primary: site2-spine1
- secondary: site2-spine2
-
-
-#### Simple playbook to set up network validation
-
-**Sample playbook of ``validation.yaml`` to run complete validation**
-
- ---
- - name: setup network validation
- hosts: localhost
- gather_facts: no
- connection: local
- roles:
- - os10_network_validation
-
-**Sample playbook to run wiring validation**
-
- ---
- - name: setup wiring validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: wiring_validation.yaml
-
-**Sample playbook to run BGP validation**
-
- ---
- - name: setup bgp validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: bgp_validation.yaml
-
-**Sample playbook to run VLT validation**
-
- ---
- - name: setup vlt validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: vlt_validation.yaml
-
-**Sample playbook to run MTU validation**
-
- ---
- - name: setup mtu validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: mtu_validation.yaml
-
-
-**Run**
-
-Execute the playbook and examine the results.
-
- ansible-playbook -i inventory.yaml validation.yaml
-
-**Sample output of wiring validation**
-
- "results": [
- {
- "dest_port": "ethernet1/1/1",
- "dest_switch": "site2-spine2",
- "error_type": "link-missing",
- "reason": "link is not found for source switch: site2-spine1,port: ethernet1/1/1",
- "source_port": "ethernet1/1/1",
- "source_switch": "site2-spine1"
- },
- {
- "dest_port": "ethernet1/1/2",
- "dest_switch": "site2-spine1",
- "error_type": "link-mismatch",
- "reason": "Destination switch is not an expected value, expected switch: site2-spine1,port: ethernet1/1/2; actual switch: site1-spine2(svc-tag:J33FXC2, node_mac:e4:f0:04:9b:eb:dc), port: ethernet1/1/1",
- "source_port": "ethernet1/1/1",
- "source_switch": "site1-spine1"
- }
- ]
-
-**Sample output of BGP validation**
-
- "results": [
- {
- "bgp_neighbor": "10.9.0.1",
- "bgp_state": "idle",
- "error_type": "remote_port_down",
- "possible_reason": "remote port site2-spine1 ethernet1/1/2 is down",
- "source_switch": "site1-spine1"
- },
- {
- "bgp_neighbor": "-",
- "bgp_state": "idle",
- "error_type": "not_an_intended_neighbor",
- "possible_reason": "neighbor 10.9.0.7 is not an intended, please add this neighbor in the intended_bgp_neighbors",
- "source_switch": "site1-spine1"
- },
- {
- "bgp_neighbor": "1.1.1.1",
- "error_type": "config_missing",
- "possible_reason": "neighbor config missing",
- "source_switch": "site1-spine1"
- },
- {
- "bgp_neighbor": "10.9.0.9",
- "bgp_state": "idle",
- "error_type": "remote_port_down",
- "possible_reason": "remote port site2-spine1 ethernet1/1/3 is down",
- "source_switch": "site1-spine2"
- }
- ]
-
-**Sample output of VLT validation**
-
- "results": [
- {
- "error_type": "secondary_mismatch",
- "intended_primary": "site1-spine1",
- "intended_secondary": "site2-spine2",
- "possible_reason": "config mismatch as site2-spine2 is expected, but the actual secondary is site1-spine2 ",
- "secondary": "site1-spine2"
- },
- {
- "error_type": "peer_missing",
- "intended_primary": "site2-spine1",
- "intended_secondary": "site2-spine2",
- "possible_reason": "peer info is not configured or peer interface is down"
- }
- ]
-
-**Sample output of MTU validation**
-
- "msg": {
- "results": "There is no MTU mistmatch between neighbors"
- }
-
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml
deleted file mode 100644
index b01fd4b1f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/meta/main.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- role_name: os10_network_validation
- author: Dell EMC Networking Engineering
-  description: The os10_network_validation role facilitates network validation on devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
-
- dependencies:
- - role: os10_fabric_summary
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml
deleted file mode 100644
index a289b50cf..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/bgp_validation.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- name: "Get Dell EMC OS10 Show ip bgp summary"
- os10_command:
- commands:
- - command: "show ip bgp summary | display-xml"
- - command: "show ip interface brief | display-xml"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_bgp
-- name: "set fact to form bgp database"
- set_fact:
- output_bgp: "{{ output_bgp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_bgp': item.stdout.0, 'stdout_show_ip': item.stdout.1}] }}"
- loop: "{{ show_bgp.results }}"
-- name: call lib to convert bgp info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.stdout_show_bgp }}"
- with_items:
- - "{{ output_bgp }}"
- register: show_bgp_list
-- name: call lib to convert ip interface info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.stdout_show_ip }}"
- with_items:
- - "{{ output_bgp }}"
- register: show_ip_intf_list
-- name: call lib for bgp validation
- bgp_validate:
- show_ip_bgp: "{{ show_bgp_list.results }}"
- show_ip_intf_brief: "{{ show_ip_intf_list.results }}"
- bgp_neighbors: "{{ intended_bgp_neighbors }}"
- register: bgp_validation_output
-- name: "debug bgp database"
- debug: var=bgp_validation_output.msg.results
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml
deleted file mode 100644
index c81545b80..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/main.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: "Validate the wiring info"
- import_tasks: wiring_validation.yaml
-- name: "Validate the BGP info"
- import_tasks: bgp_validation.yaml
-- name: "Validate the VLT info"
- import_tasks: vlt_validation.yaml
-- name: "Validate the MTU info for lldp neigbors"
- import_tasks: mtu_validation.yaml
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml
deleted file mode 100644
index fbc58538e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/mtu_validation.yaml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: "Get Dell EMC OS10 MTU mismatch info"
- os10_command:
- commands:
- - command: "show lldp neighbors"
- - command: "show ip interface brief | display-xml"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_output
-- name: "set fact to form database"
- set_fact:
- output_mtu: "{{ output_mtu|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_lldp': item.stdout.0, 'stdout_show_ip': item.stdout.1 }] }}"
- loop: "{{ show_output.results }}"
-- name: "debug the output database"
- debug: var=output_mtu
-- name: call lib to convert ip interface info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.stdout_show_ip }}"
- with_items: "{{ output_mtu }}"
- register: show_ip_intf_list
-- name: "Get Dell EMC OS10 Show system"
- import_role:
- name: os10_fabric_summary
- register: show_system_network_summary
-- name: "call lib to process"
- mtu_validate:
- show_lldp_neighbors_list: "{{ output_mtu }}"
- show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
- show_ip_intf_brief: "{{ show_ip_intf_list.results }}"
- register: mtu_validation
-- name: "debug mtu validation result"
- debug: var=mtu_validation.msg.results
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml
deleted file mode 100644
index 1a673e190..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/vlt_validation.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- name: "Get Dell EMC OS10 Show run vlt"
- os10_command:
- commands:
- - command: "show running-configuration vlt | grep vlt-domain"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_run_vlt
-- name: "set fact to form show vlt database"
- set_fact:
- output_vlt: "{{ output_vlt|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_vlt': item.stdout.0}] }}"
- loop: "{{ show_run_vlt.results }}"
-- name: "debug output_vlt"
- debug: var=output_vlt
-- name: "Get Dell EMC OS10 Show vlt info"
- os10_command:
- commands:
- - command: "show vlt {{ item.stdout_show_vlt.split()[1] }} | display-xml"
- provider: "{{ hostvars[item.inv_name].cli }}"
- with_items: "{{ output_vlt }}"
- register: show_vlt
-- name: "set fact to form vlt database"
- set_fact:
- vlt_out: "{{ vlt_out|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'show_vlt_stdout': item.stdout.0}] }}"
- loop: "{{ show_vlt.results }}"
- register: vlt_output
-- name: call lib to convert vlt info from xml to dict format
- base_xml_to_dict:
- cli_responses: "{{ item.show_vlt_stdout }}"
- with_items:
- - "{{ vlt_out }}"
- register: vlt_dict_output
-- name: "Get Dell EMC OS10 Show system"
- import_role:
- name: os10_fabric_summary
- register: show_system_network_summary
-- name: call lib to process
- vlt_validate:
- show_vlt: "{{ vlt_dict_output.results }}"
- show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
- intended_vlt_pairs: "{{ intended_vlt_pairs }}"
- register: show_vlt_info
-- name: "debug vlt validation result"
- debug: var=show_vlt_info.msg.results
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml
deleted file mode 100644
index d89ac18c6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tasks/wiring_validation.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-- name: "Get Dell EMC OS10 wiring info"
- os10_command:
- commands:
- - command: "show lldp neighbors"
- provider: "{{ hostvars[item].cli }}"
- with_items: "{{ groups['all'] }}"
- register: show_lldp
-- name: "set facts to form lldp db"
- set_fact:
- output_lldp: "{{ output_lldp|default([])+ [{'host': item.invocation.module_args.provider.host, 'inv_name': item.item, 'stdout_show_lldp': item.stdout}] }}"
- loop: "{{ show_lldp.results }}"
-- name: "Get Dell EMC OS10 Show system"
- import_role:
- name: os10_fabric_summary
- register: show_system_network_summary
-- name: call lib to process
- wiring_validate:
- show_lldp_neighbors_list: "{{ output_lldp }}"
- show_system_network_summary: "{{ show_system_network_summary.msg.results }}"
- planned_neighbors: "{{ intended_neighbors }}"
- register: wiring_validation
-- name: "debug the wiring validation results"
- debug: var=wiring_validation.msg.results
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all
deleted file mode 100644
index 01c4856ec..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/group_vars/all
+++ /dev/null
@@ -1,30 +0,0 @@
-#wiring_validation input
-intended_neighbors:
- - source_switch: site1-spine1
- source_port: ethernet1/1/1
- dest_port: ethernet1/1/1
- dest_switch: site1-spine2
- - source_switch: site2-spine1
- source_port: ethernet1/1/1
- dest_port: ethernet1/1/1
- dest_switch: site2-spine2
- - source_switch: site1-spine1
- source_port: ethernet1/1/1
- dest_port: ethernet1/1/2
- dest_switch: site2-spine1
- - source_switch: site1-spine1
- source_port: ethernet1/1/2
- dest_port: ethernet1/1/2
- dest_switch: site2-spine2
-#bgp_validation input
-intended_bgp_neighbors:
- - source_switch: site1-spine1
- neighbor_ip: ["10.11.0.1", "10.9.0.1", "10.9.0.3", "10.9.0.5", "1.1.1.1"]
- - source_switch: site1-spine2
- neighbor_ip: ["10.11.0.0", "10.9.0.9", "10.9.0.11", "10.9.0.15"]
-#vlt_validation input
-intended_vlt_pairs:
- - primary: site1-spine1
- secondary: site1-spine2
- - primary: site2-spine1
- secondary: site2-spine2
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1
deleted file mode 100644
index 1f450079a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine1
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2
deleted file mode 100644
index 1f450079a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site1-spine2
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1
deleted file mode 100644
index 1f450079a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine1
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2 b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2
deleted file mode 100644
index 1f450079a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/host_vars/site2-spine2
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml
deleted file mode 100644
index d18389471..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/inventory.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-site1-spine1 ansible_host=10.11.180.21 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-site1-spine2 ansible_host=10.11.180.22 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-site2-spine1 ansible_host=10.11.180.24 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-site2-spine2 ansible_host=10.11.180.23 os10_cli_user=admin os10_cli_pass=admin ansible_network_os=dellemc.os10.os10
-
-[spine]
-site1-spine1
-site1-spine2
-site2-spine1
-site2-spine2
-
-[LeafAndSpineSwitch:children]
-spine
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml
deleted file mode 100644
index 1f450079a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/main.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-cli:
- host: "{{ ansible_host }}"
- username: "{{ os10_cli_user | default('admin') }}"
- password: "{{ os10_cli_pass | default('admin') }}"
- timeout: 300
-
-os10_cli_user: xxxx
-os10_cli_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml
deleted file mode 100644
index aff21dfed..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_network_validation/tests/test.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-# Sample playbook to validate network validation role
-- name: setup network validation
- hosts: localhost
- gather_facts: False
- connection: local
- roles:
- - dellemc.os10.os10_network_validation
-
-# Sample playbook to validate wiring validation
-- name: setup play for wiring validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: wiring_validation.yaml
-
-# Sample playbook to validate bgp validation
-- name: setup playbook to validate bgp validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: bgp_validation.yaml
-
-# Sample playbook to validate vlt validation
-- name: setup playbook to validate vlt validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: vlt_validation.yaml
-
-# Sample playbook to validate mtu validation
-- name: setup playbook to validate mtu validation
- hosts: localhost
- gather_facts: False
- connection: local
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_network_validation
- tasks_from: mtu_validation.yaml
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/README.md b/ansible_collections/dellemc/os10/roles/os10_ntp/README.md
deleted file mode 100644
index 17e879c6b..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/README.md
+++ /dev/null
@@ -1,124 +0,0 @@
-NTP role
-========
-
-This role facilitates the configuration of network time protocol (NTP) attributes. It specifically enables configuration of NTP server, NTP source, authentication, and broadcast service. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The NTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_ntp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``server`` | list | Configures the NTP server (see ``server.*``) | os10 |
-| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os10 |
-| ``server.key`` | integer | Configures the peer authentication key for the NTP server | os10 |
-| ``server.prefer`` | boolean | Configures the peer preference | os10 |
-| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os10 |
-| ``source`` | string | Configures the interface for the source address | os10 |
-| ``master`` | integer | Configures the local clock to act as the server | os10 |
-| ``authenticate`` | boolean | Configures authentication of time sources | os10 |
-| ``authentication_key`` | list | Configures authentication key for trusted time sources (see ``authentication_key.*``) | os10 |
-| ``authentication_key.key_num`` | integer | Configures authentication key number | os10 |
-| ``authentication_key.key_string_type`` | integer: 0,9 | Configures hidden authentication key string if the value is 9, and configures unencrypted authentication key string if the value is 0 | os10 |
-| ``authentication_key.key_string`` | string | Configures the authentication key string | os10 |
-| ``authentication_key.type`` | string: md5,sha1,sha2-256 | Configures the authentication type | os10 |
-| ``authentication_key.state`` | string: absent,present\* | Deletes the authentication key if set to absent | os10 |
-| ``trusted_key`` | list | Configures key numbers for trusted time sources (see ``trusted_key.*``) | os10 |
-| ``trusted_key.key_num`` | integer | Configures the key number | os10 |
-| ``trusted_key.state`` | string: absent,present\* | Deletes the trusted key if set to absent | os10 |
-| ``intf`` | dictionary | Configures NTP on the interface (see ``intf.*``) | os10 |
-| ``intf.<interface name>`` | dictionary | Configures NTP on the interface (see ``<interface name>.*``) | os10 |
-| ``<interface name>.disable`` | boolean | Configures NTP disable on the interface | os10 |
-| ``<interface name>.broadcast`` | boolean | Configures NTP broadcast client service on the interface | os10 |
-| ``vrf`` | dictionary | Enables NTP on VRF (see ``vrf.*``) | os10 |
-| ``vrf.name`` | string | Name of the VRF to enable NTP | os10 |
-| ``vrf.state`` | string: absent,present\* | Disables NTP on the VRF if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_ntp* role to set the NTP server, source IP, authentication, and broadcast service. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When the `os10_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default it is set to false. The example writes a simple playbook that only references the *os10_ntp* role.
-
-By including the role, you automatically get access to all of the tasks to configure NTP attributes. The sample *host_vars* is for os10.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_ntp:
- source: ethernet 1/1/2
- master: 5
- authenticate: true
- authentication_key:
- - key_num: 123
- key_string_type: 9
- key_string: test
- type: md5
- state: present
- trusted_key:
- - key_num: 1323
- state: present
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: present
- intf:
- ethernet 1/1/2:
- disable: true
- broadcast: true
- vrf:
- name: red
- state: present
-
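-The same structure also drives removal; as a minimal sketch reusing the values above, an entry is deleted by setting `state: absent` on it (only the identifying fields are used by the underlying template):
-
-    os10_ntp:
-      server:
-        - ip: 2.2.2.2
-          state: absent
-      trusted_key:
-        - key_num: 1323
-          state: absent
-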
-**Simple playbook to set up NTP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_ntp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
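-
-To additionally write the generated commands to a .part file under *build_dir* (see the `os10_cfg_generate` note above), the variable can be enabled at run time; the following is a sketch that relies only on the standard `--extra-vars` flag:
-
-    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"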
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml
deleted file mode 100644
index 7d2d8eee2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_ntp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml
deleted file mode 100644
index 965f50b51..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_ntp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml
deleted file mode 100644
index 3befe0cd3..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_ntp role facilitates the configuration of NTP attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml
deleted file mode 100644
index 202e5601c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/tasks/main.yml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-# tasks file for os10
- - name: "Generating NTP configuration for os10"
- template:
- src: os10_ntp.j2
- dest: "{{ build_dir }}/ntp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning NTP configuration for os10"
- os10_config:
- src: os10_ntp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
-
- - name: "Generating NTP VRF configuration for os10"
- lineinfile:
- path: "{{ build_dir }}/ntp10_{{ hostname }}.conf.part"
- line: "{{ lookup('template', 'os10_ntp_vrf.j2') }}"
- insertafter: EOF
- when: >
- (ansible_network_os is defined and
- ansible_network_os == "dellemc.os10.os10" and
- ((os10_cfg_generate | default('False')) | bool) and
- os10_ntp.vrf is defined and
- os10_ntp.vrf)
-
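-# The prompt/answer pair below auto-confirms the "Do you want to continue"
-# prompt that the device can raise when the NTP VRF binding is changed.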
- - name: "Provisioning NTP VRF configuration for os10"
- os10_config:
- lines:
- - command: "{{ lookup('template', 'os10_ntp_vrf.j2') }}"
- prompt: "Do you want to continue"
- answer: "yes"
- when: >
- (ansible_network_os is defined and
- ansible_network_os == "dellemc.os10.os10" and
- os10_ntp.vrf is defined and
- os10_ntp.vrf)
-# notify: save config os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2 b/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2
deleted file mode 100644
index 7524c935e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp.j2
+++ /dev/null
@@ -1,125 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure NTP commands for os10 Devices
-os10_ntp:
- source: ethernet 1/1/2
- master: 5
- authenticate: true
- vrf:
- name: red
- authentication_key:
- - key_num: 123
- key_string_type: 9
- key_string: test
- type: md5
- state: present
- trusted_key:
- - key_num: 1323
- state: present
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: present
- intf:
- ethernet 1/1/2:
- disable: true
- broadcast: true
-###############################################}
-{% if os10_ntp is defined and os10_ntp %}
- {% if os10_ntp.source is defined %}
- {% if os10_ntp.source %}
-ntp source {{ os10_ntp.source }}
- {% else %}
-no ntp source
- {% endif %}
- {% endif %}
- {% if os10_ntp.master is defined %}
- {% if os10_ntp.master %}
-ntp master {{ os10_ntp.master }}
- {% else %}
-no ntp master
- {% endif %}
- {% endif %}
- {% if os10_ntp.authenticate is defined %}
- {% if os10_ntp.authenticate %}
-ntp authenticate
- {% else %}
-no ntp authenticate
- {% endif %}
- {% endif %}
- {% if os10_ntp.server is defined and os10_ntp.server %}
- {% for item in os10_ntp.server %}
- {% if item.ip is defined and item.ip %}
- {% if item.state is defined and item.state == "absent" %}
-no ntp server {{ item.ip }}
- {% else %}
- {% if item.key is defined and item.key %}
- {% if item.prefer is defined and item.prefer %}
-ntp server {{ item.ip }} key {{ item.key }} prefer
- {% else %}
-ntp server {{ item.ip }} key {{ item.key }}
- {% endif %}
- {% else %}
- {% if item.prefer is defined and item.prefer %}
-ntp server {{ item.ip }} prefer
- {% else %}
-ntp server {{ item.ip }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os10_ntp.authentication_key is defined and os10_ntp.authentication_key %}
- {% for item in os10_ntp.authentication_key %}
- {% if item.key_num is defined and item.key_num %}
- {% if item.state is defined and item.state == "absent" %}
-no ntp authentication-key {{ item.key_num }}
- {% else %}
- {% if item.key_string_type is defined and item.key_string_type >= 0 %}
- {% if item.key_string is defined and item.key_string %}
- {% if item.type is defined and item.type %}
- {% set auth_type = item.type %}
- {% else %}
- {% set auth_type = 'md5' %}
- {% endif%}
-ntp authentication-key {{ item.key_num }} {{ auth_type }} {{ item.key_string_type }} {{ item.key_string }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os10_ntp.trusted_key is defined and os10_ntp.trusted_key %}
- {% for item in os10_ntp.trusted_key %}
- {% if item.key_num is defined and item.key_num %}
- {% if item.state is defined and item.state == "absent" %}
-no ntp trusted-key {{ item.key_num }}
- {% else %}
-ntp trusted-key {{ item.key_num }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os10_ntp.intf is defined and os10_ntp.intf %}
- {% for key in os10_ntp.intf.keys() %}
-interface {{ key }}
- {% if os10_ntp.intf[key].disable is defined %}
- {% if os10_ntp.intf[key].disable %}
- ntp disable
- {% else %}
- no ntp disable
- {% endif %}
- {% endif %}
- {% if os10_ntp.intf[key].broadcast is defined %}
- {% if os10_ntp.intf[key].broadcast %}
- ntp broadcast client
- {% else %}
- no ntp broadcast client
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2 b/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2
deleted file mode 100644
index d2e12a397..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/templates/os10_ntp_vrf.j2
+++ /dev/null
@@ -1,18 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure NTP VRF for os10 Devices
-os10_ntp:
- vrf:
- name: red
- state: present
-###############################################}
-{% if os10_ntp is defined and os10_ntp %}
- {% if os10_ntp.vrf is defined and os10_ntp.vrf.name is defined %}
- {% if os10_ntp.vrf.state is defined and os10_ntp.vrf.state == "absent" %}
-no ntp enable vrf {{ os10_ntp.vrf.name }}
- {% else %}
-ntp enable vrf {{ os10_ntp.vrf.name }}
- {% endif%}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory
deleted file mode 100644
index 85a255f94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
----
-localhost
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml
deleted file mode 100644
index cc2a9b09a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/main.os10.yaml
+++ /dev/null
@@ -1,25 +0,0 @@
----
-# vars file for dellemc.os10.os10_ntp,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_ntp:
- source: ethernet 1/1/2
- master: 5
- authenticate: true
- authentication_key:
- - key_num: 123
- key_string_type: 0
- key_string: test
- state: present
- trusted_key:
- - key_num: 1323
- state: present
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: present
- intf:
- ethernet 1/1/2:
- disable: true
- broadcast: true
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml
deleted file mode 100644
index dce69c7b5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os10.os10_ntp
diff --git a/ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml
deleted file mode 100644
index e90d53c06..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_ntp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_ntp
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE b/ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md b/ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md
deleted file mode 100644
index dce141e8f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/README.md
+++ /dev/null
@@ -1,104 +0,0 @@
-Prefix-list role
-================
-
-This role facilitates the configuration of a prefix-list. It supports the configuration of IPv4 and IPv6 prefix-lists with permit or deny rules. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The prefix-list role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take the `dellemc.os10.os10` value
-- If `os10_cfg_generate` is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_prefix_list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4,ipv6 | Configures an L3 (IPv4/IPv6) prefix-list | os10 |
-| ``name`` | string (required) | Configures the prefix-list name | os10 |
-| ``description`` | string | Configures the prefix-list description | os10 |
-| ``entries`` | list | Configures rules in the prefix-list (see ``entries.*``) | os10 |
-| ``entries.number`` | int (required) | Specifies the sequence number of the prefix-list rule | os10 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true, and specifies to reject packets if set to false | os10 |
-| ``entries.net_num`` | string (required) | Specifies the network number | os10 |
-| ``entries.mask`` | string (required) | Specifies the mask | os10 |
-| ``entries.condition_list`` | list | Configures conditions to filter packets (see ``condition_list.*``)| os10 |
-| ``condition_list.condition`` | list | Specifies the condition to filter packets from the source address | os10 |
-| ``condition_list.prelen`` | string (required) | Specifies the allowed prefix length | os10 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the prefix-list if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the prefix-list if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_prefix_list* role to configure prefix-lists for both IPv4 and IPv6. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os10_prefix_list* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
-
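-A minimal IPv6 variant follows the same shape; the sketch below uses an illustrative documentation prefix and lengths rather than values taken from the role:
-
-    os10_prefix_list:
-      - name: testpl6
-        type: ipv6
-        description: pl6
-        entries:
-          - number: 10
-            permit: true
-            net_num: 2001:db8::
-            mask: 32
-            condition_list:
-              - condition: ge
-                prelen: 48
-            state: present
-        state: present
-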
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_prefix_list
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml
deleted file mode 100644
index 8d2180296..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_prefix_list
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml
deleted file mode 100644
index 72b64726c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_prefix_list
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info b/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info
deleted file mode 100644
index ccddfc422..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/.galaxy_install_info
+++ /dev/null
@@ -1 +0,0 @@
-{install_date: 'Fri Mar 10 15:35:29 2017', version: v1.0.1}
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml
deleted file mode 100644
index 287ff507a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/meta/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_prefix_list role facilitates the configuration of prefix list attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml
deleted file mode 100644
index 449b80ba6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating prefix list configuration for os10"
- template:
- src: os10_prefix_list.j2
- dest: "{{ build_dir }}/prefixlist10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning prefix list configuration for os10"
- os10_config:
- src: os10_prefix_list.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
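The deleted tasks file above drives two code paths: a template render into *build_dir* (gated by `os10_cfg_generate`) and a push of the same rendered template to the device through `os10_config`. As a rough, hedged sketch of the first path, the host variables below (placeholder values, not role defaults) would make the generating task write the rendered prefix-list configuration to a .conf.part file; the provisioning task still runs whenever `ansible_network_os` is `dellemc.os10.os10`.

    ---
    # Hedged sketch: placeholder values that enable the template-generation task.
    # The rendered output lands in <build_dir>/prefixlist10_<hostname>.conf.part.
    os10_cfg_generate: true
    build_dir: ../temp/temp_os10
    hostname: leaf1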
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2 b/ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2
deleted file mode 100644
index fe598331d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/templates/os10_prefix_list.j2
+++ /dev/null
@@ -1,95 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-Purpose:
-Configure pl on OS10 devices
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
-#####################################}
-{% if (os10_prefix_list is defined and os10_prefix_list) %}
- {% for val in os10_prefix_list %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.type is defined and val.type == "ipv4" %}
-no ip prefix-list {{ val.name }}
- {% elif val.type is defined and val.type == "ipv6" %}
-no ipv6 prefix-list {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.type is defined and val.type == "ipv4" %}
- {% set ip = "ip" %}
- {% elif val.type is defined and val.type == "ipv6" %}
- {% set ip = "ipv6" %}
- {% endif %}
- {% if val.description is defined %}
- {% if val.description %}
-{{ ip }} prefix-list {{ val.name }} description {{ val.description }}
- {% else %}
-no {{ ip }} prefix-list {{ val.name }} description {{ val.description }}
- {% endif %}
- {% endif %}
- {% if val.entries is defined and val.entries %}
- {% for rule in val.entries %}
- {% if rule.number is defined and rule.number %}
- {% if rule.state is defined %}
- {% if rule.state == "absent" %}
-no {{ ip }} prefix-list {{ val.name }} seq {{ rule.number }}
- {% else %}
- {% if rule.permit is defined %}
- {% if rule.permit %}
- {% set is_permit = "permit" %}
- {% else %}
- {% set is_permit = "deny" %}
- {% endif %}
- {% endif %}
- {% if rule.net_num is defined and rule.net_num %}
- {% if rule.mask is defined %}
- {% if rule.mask or rule.mask == 0 %}
- {% if rule.condition_list is defined and rule.condition_list %}
- {% set condition_string = [' '] %}
- {% set item = "" %}
- {% if rule.condition_list | length > 1 %}
- {% for condition in rule.condition_list %}
- {% if rule.condition_list[0].condition == "ge" and rule.condition_list[1].condition == "le" %}
- {% set item = condition_string[0] + condition.condition + ' ' + condition.prelen|string + ' ' %}
- {% endif %}
- {% if condition_string.insert(0,item) %} {% endif %}
- {% endfor %}
- {% else %}
- {% for condition in rule.condition_list %}
- {% if rule.condition_list[0].condition == "ge" or rule.condition_list[0].condition == "le" %}
- {% set item = condition_string[0] + condition.condition + ' ' + condition.prelen|string + ' '
-%}
- {% endif %}
- {% if condition_string.insert(0,item) %} {% endif %}
- {% endfor %}
- {% endif %}
-{{ ip }} prefix-list {{ val.name }} seq {{ rule.number }} {{ is_permit }} {{ rule.net_num }}/{{ rule.mask }}{{ condition_string[0] }}
- {% else %}
-{{ ip }} prefix-list {{ val.name }} seq {{ rule.number}} {{ is_permit }} {{ rule.net_num }}/{{ rule.mask }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml
deleted file mode 100644
index 3e8250fdd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/main.os10.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# vars file for dellemc.os10.os10_prefix_list,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml
deleted file mode 100644
index 46fb739ae..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_prefix_list
diff --git a/ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml
deleted file mode 100644
index bcff7f3fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_prefix_list/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_prefix_list
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/LICENSE b/ansible_collections/dellemc/os10/roles/os10_qos/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/README.md b/ansible_collections/dellemc/os10/roles/os10_qos/README.md
deleted file mode 100644
index 584159707..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/README.md
+++ /dev/null
@@ -1,90 +0,0 @@
-QoS role
-========
-
-This role facilitates the configuration of quality of service (QoS) attributes such as policy-maps and class-maps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The QoS role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_qos keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``policy_map`` | list | Configures the policy-map (see ``policy_map.*``) | os10 |
-| ``policy_map.name`` | string (required) | Configures the policy-map name | os10 |
-| ``policy_map.type`` | string: qos\*, application, control-plane, network-qos, queuing in os10 | Configures the policy-map type | os10 |
-| ``policy_map.state`` | string: present\*,absent | Deletes the policy-map if set to absent | os10 |
-| ``class_map`` | list | Configures the class-map (see ``class_map.*``) | os10 |
-| ``class_map.name`` | string (required) | Configures the class-map name | os10 |
-| ``class_map.type`` | string: qos\*,application,control-plane,network-qos,queuing | Configures the class-map type | os10 |
-| ``class_map.state`` | string: present\*,absent | Deletes the class-map if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_qos* role to configure policy-maps and class-maps. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example also includes a simple playbook that only references the *os10_qos* role. By including the role, you automatically get access to all of the tasks needed to configure QoS features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_qos:
- policy_map:
- - name: testpolicy
- type: qos
- state: present
- class_map:
- - name: testclass
- type: application
- state: present
-
-**Simple playbook to setup qos — leaf.yaml**
-
- - hosts: leaf1
- roles:
-         - dellemc.os10.os10_qos
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
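To round out the deleted README above, the variable block below is a hedged sketch (not taken from the role documentation) of the `state: absent` behaviour described in the key table; applying these variables with the same playbook would remove the policy-map and class-map created in the example.

    # Illustrative only: absent state removes the objects created in the example above.
    os10_qos:
      policy_map:
        - name: testpolicy
          type: qos
          state: absent
      class_map:
        - name: testclass
          type: application
          state: absent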
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml
deleted file mode 100644
index 447b43293..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# defaults file for os10_qos
-match_type:
- match_all: match-all
- match_any: match-any
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml
deleted file mode 100644
index 43fd82c70..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for os10_qos
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml
deleted file mode 100644
index 0b07e1d92..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_qos role facilitates the configuration of qos attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml
deleted file mode 100644
index 6921f69a2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for Dellos10
- - name: "Provisioning Qos configuration for os10"
- os10_config:
- src: os10_qos.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
-
- - name: "Generating Qos configuration for os10"
- template:
- src: os10_qos.j2
- dest: "{{ build_dir }}/qos10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2 b/ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2
deleted file mode 100644
index a94c54158..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/templates/os10_qos.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#####################################################
-Purpose:
-Configure qos commands for os10 Devices.
-os10_qos:
- policy_map:
- - name: testpolicy
- type: qos
- state: present
- class_map:
- - name: testclass
- type: application
- state: present
-#####################################################}
-{% if os10_qos is defined and os10_qos %}
-{% for key in os10_qos.keys() %}
- {% if key =="policy_map" %}
- {% for vars in os10_qos[key] %}
- {% if vars.name is defined and vars.name %}
- {% if vars.state is defined and vars.state == "absent" %}
-no policy-map {{ vars.name }}
- {% else %}
- {% if vars.type is defined and vars.type %}
-policy-map type {{ vars.type }} {{ vars.name }}
- {% else %}
-policy-map type qos {{ vars.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key =="class_map" %}
- {% for vars in os10_qos[key] %}
- {% if vars.name is defined and vars.name %}
- {% if vars.state is defined and vars.state == "absent" %}
-no class-map {{ vars.name }}
- {% else %}
- {% if vars.type is defined and vars.type %}
-class-map type {{ vars.type }} {{ vars.name }}
- {% else %}
-class-map type qos {{ vars.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory
deleted file mode 100644
index 85a255f94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
----
-localhost
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml
deleted file mode 100644
index 191d94cec..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/tests/main.os10.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# Sample variables for OS10 device
-os10_qos:
- policy_map:
- - name: testpolicy
- type: qos
- state: present
- class_map:
-    - name: testclass
- type: qos
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml
deleted file mode 100644
index 4107ee811..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - os10_qos
diff --git a/ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml
deleted file mode 100644
index ecee178ee..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_qos/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for os10_qos
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE b/ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/README.md b/ansible_collections/dellemc/os10/roles/os10_raguard/README.md
deleted file mode 100644
index abf7cf4af..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/README.md
+++ /dev/null
@@ -1,126 +0,0 @@
-IPv6 RA Guard role
-===================
-
-This role facilitates the configuration of IPv6 RA Guard attributes. It specifically enables configuration of IPv6 RA Guard feature enable/disable, IPv6 RA Guard policy definition and policy parameter configuration, and attachment of IPv6 RA Guard policy to an interface. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The IPv6 RA Guard role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_raguard keys**
-
-
-| Key | Type | Description |Support |
-|--------------------------------------|-------------------------|----------------------------------------------------------|---------|
-| ``enable`` | boolean | Enables IPv6 RA-Guard feature | os10 |
-| ``policy`` | list | Configures the IPv6 RA Guard policy (see ``policy.*``) | os10 |
-| ``policy.state`` | string: absent/present\*| Deletes the policy if set to absent | os10 |
-| ``policy.policy_name``               | string (required)       | Configures the IPv6 RA Guard policy name                   | os10 |
-| ``policy.device_role.value`` | string (required) | Configures the device role for a policy | os10 |
-| ``policy.device_role.state`` | string: absent,present\*| Deletes the device role if set to absent | os10 |
-| ``policy.managed_config_flag.value`` | string | Configures the managed config flag param for a policy | os10 |
-| ``policy.managed_config_flag.state`` | string: absent,present\*| Deletes the managed config flag if set to absent | os10 |
-| ``policy.other_config_flag.value`` | string | Configures the other config flag param for a policy | os10 |
-| ``policy.other_config_flag.state`` | string: absent,present\*| Deletes the other config flag if set to absent | os10 |
-| ``policy.mtu.value`` | integer | Configures the MTU param for a policy | os10 |
-| ``policy.mtu.state`` | string: absent,present\*| Deletes the MTU if set to absent | os10 |
-| ``policy.reachable_time.value`` | integer | Configures the reachable time param for a policy | os10 |
-| ``policy.reachable_time.state`` | string: absent,present\*| Deletes the reachable time if set to absent | os10 |
-| ``policy.retrans_timer.value`` | integer | Configures the retransmit timer param for a policy | os10 |
-| ``policy.retrans_timer.state`` | string: absent,present\*| Deletes the retransmit timer if set to absent | os10 |
-| ``policy.router_lifetime.value`` | integer | Configures the router lifetime param for a policy | os10 |
-| ``policy.router_lifetime.state`` | string: absent,present\*| Deletes the router lifetime if set to absent | os10 |
-| ``policy.router_preference.value`` | string | Configures the router preference param for a policy | os10 |
-| ``policy.router_preference.state`` | string: absent,present\*| Deletes the router preference if set to absent | os10 |
-| ``policy.match`` | list | Configures the prefix/ACL/MAC list param for a policy | os10 |
-| ``policy.match.type`` | string | Configures the prefix/ACL/MAC type for a policy | os10 |
-| ``policy.match.name`` | string | Configures the prefix/ACL/MAC name for a policy | os10 |
-| ``policy.match.state`` | string: absent,present\*| Deletes the prefix/ACL/MAC if set to absent | os10 |
-| ``intf``                             | dictionary              | Configures IPv6 RA Guard on the interface (see ``intf.*``) | os10 |
-| ``intf.<interface name>``            | dictionary              | Configures RA Guard on the interface (see ``<interface name>.*``)| os10 |
-| ``<interface name>.policy_name``     | string                  | Configures the RA Guard policy name to be attached to the interface | os10 |
-| ``<interface name>.vlan``            | string                  | Configures the VLAN to which the policy is attached on the interface| os10|
-| ``<interface name>.state``           | string: absent,present\*| Detaches the policy from the interface if set to absent | os10|
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_raguard* role to configure the IPv6 RA Guard feature enable/disable, IPv6 RA Guard policy definition and policy parameter configuration, and attachment of an IPv6 RA Guard policy to an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false. The example writes a simple playbook that only references the *os10_raguard* role. By including the role, you automatically get access to all of the tasks to configure IPv6 RA Guard attributes.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- host: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_raguard:
- enable: true
- policy:
- - policy_name: test
- device_role:
- value: router
- state: present
- managed_config_flag:
- value: "on"
- state: present
- mtu:
- value: 1280
- state: present
- match:
- - type: prefix_list
- name: test_prefix
- state: present
- state: present
- intf:
- ethernet 1/1/2:
- policy_name: test
- vlan: 10
- state: present
-
-**Simple playbook to setup IPv6 RA Guard — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_raguard
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml
deleted file mode 100644
index 57e6cf6b0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_raguard
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml
deleted file mode 100644
index 5b69a7974..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_raguard
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml
deleted file mode 100644
index 1093615db..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_raguard role facilitates the configuration of IPv6 RA Guard attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml
deleted file mode 100644
index f2ccf55a2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for dellemc.os10.os10_raguard
-# tasks file for os10
- - name: "Generating IPv6 RA Guard configuration for os10"
- template:
- src: os10_raguard.j2
- dest: "{{ build_dir }}/raguard10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning IPv6 RA Guard configuration for os10"
- os10_config:
- src: os10_raguard.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2 b/ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2
deleted file mode 100644
index 7abc27d08..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/templates/os10_raguard.j2
+++ /dev/null
@@ -1,174 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure IPv6 RA Guard commands for OS10 Devices
-os10_raguard:
- enable: true
- policy:
- - policy_name: test
- device_role:
- value: router
- state: present
- managed_config_flag:
- value: "on"
- state: present
- other_config_flag:
- value: "on"
- state: present
- mtu:
- value: 1280
- state: present
- reachable_time:
- value: 100
- state: present
- retrans_timer:
- value: 100
- state: present
- router_lifetime:
- value: 10
- state: present
- router_preference:
- value: high
- state: present
- match:
- - type: prefix_list
- name: test_prefix
- state: present
- - type: access_list
- name: test_access
- state: present
- - type: mac_list
- name: test_mac
- state: present
- state: present
- intf:
- ethernet 1/1/2:
- policy_name: test
- vlan: 10
- state: present
- ethernet 1/1/3:
- policy_name: test
- vlan: all
- state: present
- ethernet 1/1/4:
- policy_name: test
- vlan: 10,11,12,15
- state: present
-###############################################}
-{% if os10_raguard is defined and os10_raguard %}
- {% if os10_raguard.enable is defined %}
- {% if os10_raguard.enable %}
-ipv6 nd ra-guard enable
- {% else %}
-no ipv6 nd ra-guard enable
- {% endif %}
- {% endif %}
- {% if os10_raguard.policy is defined and os10_raguard.policy %}
- {% for item in os10_raguard.policy %}
- {% if item.policy_name is defined and item.policy_name %}
- {% if item.state is defined and item.state == "absent" %}
-no ipv6 nd ra-guard policy {{ item.policy_name }}
- {% else %}
-ipv6 nd ra-guard policy {{ item.policy_name }}
- {% if item.device_role is defined and item.device_role %}
- {% if item.device_role.state is defined and item.device_role.state == "absent" %}
-no device-role {{ item.device_role.value }}
- {% else %}
-device-role {{ item.device_role.value }}
- {% endif %}
- {% endif %}
- {% if item.device_role is defined and item.device_role.value == "router" %}
- {% if item.managed_config_flag is defined and item.managed_config_flag %}
- {% if item.managed_config_flag.state is defined and item.managed_config_flag.state == "absent" %}
-no managed-config-flag {{ item.managed_config_flag.value }}
- {% else %}
-managed-config-flag {{ item.managed_config_flag.value }}
- {% endif %}
- {% endif %}
- {% if item.other_config_flag is defined and item.other_config_flag %}
- {% if item.other_config_flag.state is defined and item.other_config_flag.state == "absent" %}
-no other-config-flag {{ item.other_config_flag.value }}
- {% else %}
-other-config-flag {{ item.other_config_flag.value }}
- {% endif %}
- {% endif %}
- {% if item.mtu is defined and item.mtu %}
- {% if item.mtu.state is defined and item.mtu.state == "absent" %}
-no mtu {{ item.mtu.value }}
- {% else %}
-mtu {{ item.mtu.value }}
- {% endif %}
- {% endif %}
- {% if item.reachable_time is defined and item.reachable_time %}
- {% if item.reachable_time.state is defined and item.reachable_time.state == "absent" %}
-no reachable-time {{ item.reachable_time.value }}
- {% else %}
-reachable-time {{ item.reachable_time.value }}
- {% endif %}
- {% endif %}
- {% if item.retrans_timer is defined and item.retrans_timer %}
- {% if item.retrans_timer.state is defined and item.retrans_timer.state == "absent" %}
-no retrans-timer {{ item.retrans_timer.value }}
- {% else %}
-retrans-timer {{ item.retrans_timer.value }}
- {% endif %}
- {% endif %}
- {% if item.router_lifetime is defined and item.router_lifetime %}
- {% if item.router_lifetime.state is defined and item.router_lifetime.state == "absent" %}
-no router-lifetime {{ item.router_lifetime.value }}
- {% else %}
-router-lifetime {{ item.router_lifetime.value }}
- {% endif %}
- {% endif %}
- {% if item.router_preference is defined and item.router_preference %}
- {% if item.router_preference.state is defined and item.router_preference.state == "absent" %}
-no router-preference maximum
- {% else %}
-router-preference maximum {{ item.router_preference.value }}
- {% endif %}
- {% endif %}
- {% if item.match is defined and item.match %}
- {% for item1 in item.match %}
- {% if item1.type is defined %}
- {% if item1.type == "prefix_list" %}
- {% if item1.state is defined and item1.state == "absent" %}
-no match ra ipv6-prefix-list {{ item1.name }}
- {% else %}
-match ra ipv6-prefix-list {{ item1.name }}
- {% endif %}
- {% endif %}
- {% if item1.type == "access_list" %}
- {% if item1.state is defined and item1.state == "absent" %}
-no match ra ipv6-access-list {{ item1.name }}
- {% else %}
-match ra ipv6-access-list {{ item1.name }}
- {% endif %}
- {% endif %}
- {% if item1.type == "mac_list" %}
- {% if item1.state is defined and item1.state == "absent" %}
-no match ra mac-access-list {{ item1.name }}
- {% else %}
-match ra mac-access-list {{ item1.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os10_raguard.intf is defined and os10_raguard.intf %}
- {% for key in os10_raguard.intf.keys() %}
-interface {{ key }}
- {% if os10_raguard.intf[key].policy_name is defined and os10_raguard.intf[key].policy_name %}
- {% if os10_raguard.intf[key].state is defined and os10_raguard.intf[key].state == "absent" %}
-no ipv6 nd ra-guard attach-policy {{ os10_raguard.intf[key].policy_name }} vlan {{ os10_raguard.intf[key].vlan }}
- {% else %}
-ipv6 nd ra-guard attach-policy {{ os10_raguard.intf[key].policy_name }} vlan {{ os10_raguard.intf[key].vlan }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml
deleted file mode 100644
index 85a255f94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/inventory.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-localhost
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml
deleted file mode 100644
index 3d1548c7c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/main.os10.yaml
+++ /dev/null
@@ -1,56 +0,0 @@
----
-# vars file for dellemc.os10.os10_raguard,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_raguard:
- enable: true
- policy:
- - policy_name: test
- device_role:
- value: router
- state: present
- managed_config_flag:
- value: "on"
- state: present
- other_config_flag:
- value: "on"
- state: present
- mtu:
- value: 1280
- state: present
- reachable_time:
- value: 100
- state: present
- retrans_timer:
- value: 100
- state: present
- router_lifetime:
- value: 10
- state: present
- router_preference:
- value: high
- state: present
- match:
- - type: prefix_list
- name: test_prefix
- state: present
- - type: access_list
- name: test_access
- state: present
- - type: mac_list
- name: test_mac
- state: present
- state: present
- intf:
- ethernet 1/1/2:
- policy_name: test
- vlan: 10
- state: present
- ethernet 1/1/3:
- policy_name: test
- vlan: all
- state: present
- ethernet 1/1/4:
- policy_name: test
- vlan: 10,11,12,15
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml
deleted file mode 100644
index 7ae62a2c8..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os10.os10_raguard
diff --git a/ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml
deleted file mode 100644
index 172b49cf2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_raguard/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_raguard
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE b/ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/README.md b/ansible_collections/dellemc/os10/roles/os10_route_map/README.md
deleted file mode 100644
index 1160ca48c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/README.md
+++ /dev/null
@@ -1,190 +0,0 @@
-Route-map role
-==============
-
-This role facilitates the configuration of route-map attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The route-map role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_route_map keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``route_map`` | list | Configures the route-map (see ``route_map.*``) | os10 |
-| ``route_map.name`` | string (required) | Configures the route-map name | os10 |
-| ``route_map.permit`` | boolean | Configures permit/deny set operations | os10 |
-| ``route_map.seq_num`` | integer | Configures the sequence number | os10 |
-| ``route_map.continue`` | integer | Configures the next sequence number | os10 |
-| ``route_map.set`` | dictionary | Configures route-map to set values in the destination routing protocol (see ``set.*``) | os10 |
-| ``set.local_pref`` | integer | Configures the BGP local preference path attribute | os10 |
-| ``set.metric`` | string | Configures a specific value to add or subtract from the existing metric value ("+ <value>", "- <value>", <value> format) | os10 |
-| ``set.metric_type`` | string: internal,type-1,type-2 | Configures the metric type for the destination routing protocol | os10 |
-| ``set.origin`` | string: igp,egp,incomplete | Configures the BGP origin attribute | os10 |
-| ``set.weight`` | integer | Configures the weight for the BGP route | os10 |
-| ``set.comm_list`` | dictionary | Configures the BGP community list (see ``comm_list.*``) | os10 |
-| ``comm_list.add`` | string | Adds the community attribute of a BGP update | os10 |
-| ``comm_list.delete`` | string | Deletes a community attribute of a BGP update | os10 |
-| ``set.community`` | string | Configures the community attribute for a BGP route update | os10 |
-| ``set.extcomm_list`` | dictionary | Configures the BGP extcommunity list (see ``extcomm_list.*``) | os10 |
-| ``extcomm_list.add`` | string | Adds an extended community attribute of a BGP update | os10 |
-| ``extcomm_list.delete`` | string | Deletes the extended community attribute of a BGP update | os10 |
-| ``set.extcommunity`` | string | Configures the extended community attribute for a BGP route update | os10 |
-| ``set.next_hop`` | list | Configures the next-hop address (see ``next_hop.*``) | os10 |
-| ``next_hop.type`` | string: ip,ipv6 | Configures the type of the next-hop address | os10 |
-| ``next_hop.address`` | string | Configures the next-hop address | os10 |
-| ``next_hop.track_id`` | integer | Configures the object track ID | os10 |
-| ``next_hop.state`` | string: present\*,absent | Deletes the next-hop address if set to absent | os10 |
-| ``route_map.match`` | list | Configures the route-map to match values from the route table (see ``match.*``) | os10 |
-| ``match.ip_type`` | string (required): ipv4,ipv6 | Configures the IPv4/IPv6 address to match | os10 |
-| ``match.access_group`` | string | Configures the access-group or list to match | os10 |
-| ``match.source_protocol_ebgp`` | string | Configures the source protocol to eBGP to match | os10 |
-| ``match.source_protocol_ibgp`` | string | Configures the source protocol to iBGP to match | os10 |
-| ``match.source_protocol_evpn`` | string | Configures the source protocol to EVPN to match | os10 |
-| ``match.source_protocol_static`` | string | Configures the source protocol to static to match | os10 |
-| ``match.source_protocol_connected`` | string | Configures the source protocol to connected to match | os10 |
-| ``match.source_protocol_ospf`` | string | Configures the source protocol to OSPF to match | os10 |
-| ``match.prefix_list`` | string | Configures the IP prefix-list to match against | os10 |
-| ``route_map.state`` | string, choices: present\*,absent | Deletes the route-map if set to absent | os10 |
-| ``as_path`` | list | Configures the BGP AS path filter (see ``as_path.*``) | os10 |
-| ``as_path.access_list`` | string (required) | Configures the access-list name | os10 |
-| ``as_path.permit`` | boolean (required) | Configures an AS path to accept or reject | os10 |
-| ``as_path.regex``| string (required) | Configures a regular expression | os10 |
-| ``as_path.state`` | string: absent,present\* | Deletes the BGP as path filter if set to absent | os10 |
-| ``community_list`` | list | Configures a community list entry (see ``community_list.*``) | os10 |
-| ``community_list.type`` | string (required): standard,expanded | Configures the type of community-list entry | os10 |
-| ``community_list.name`` | string (required) | Configures the name of community-list entry | os10 |
-| ``community_list.permit`` | boolean (required) | Configures the community to accept or reject | os10 |
-| ``community_list.regex`` | string (required) | Configures the regular expression for an expanded community list; mutually exclusive with *community_list.community* | os10 |
-| ``community_list.community`` | string (required) | Configures a well-known community or community number for standard community list; mutually exclusive with *community_list.regex* | os10 |
-| ``community_list.state`` | string: absent,present\* | Deletes the community list entry if set to absent | os10 |
-| ``extcommunity_list`` | list | Configures extcommunity-list entry (see ``extcommunity_list.*``) | os10 |
-| ``extcommunity_list.type`` | string (required): standard,expanded | Configures the type of extcommunity-list entry | os10 |
-| ``extcommunity_list.name`` | string (required) | Configures the name of extcommunity-list entry | os10 |
-| ``extcommunity_list.permit`` | boolean (required) | Configures the extcommunity to accept or reject | os10 |
-| ``extcommunity_list.regex`` | string (required) | Configures the regular expression for an expanded extcommunity list; mutually exclusive with *extcommunity_list.community* | os10 |
-| ``extcommunity_list.community`` | string (required) | Configures the extended community for a standard extcommunity list; mutually exclusive with *extcommunity_list.regex* | os10 |
-| ``extcommunity_list.state`` | string: absent,present\* | Deletes the extcommunity-list entry if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
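-For example, a minimal host-variable snippet (the route-map name is a placeholder) would delete an existing route-map rather than configure it by setting ``route_map.state`` to absent, as described in the table above:
-
-    os10_route_map:
-      route_map:
-        - name: test
-          state: absent
-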
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
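-For example, a minimal *group_vars* entry (values are placeholders) that enables privilege escalation using these connection variables might look like this:
-
-    ansible_become: yes
-    ansible_become_method: enable
-    ansible_become_pass: xxxxx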
-
-Example playbook
-----------------
-
-This example uses the *os10_route_map* role to configure route-map, as-path, community-list, and extcommunity-list attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. Write a simple playbook that only references the *os10_route_map* role. By including the role, you automatically get access to all of the tasks to configure route-map features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: present
- community_list:
- - type: expanded
- name: qq
- permit: true
- regex: aaa
- state: present
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: present
- extcommunity_list:
- - type: expanded
- name: qq
- permit: true
- regex: aaa
- state: present
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: present
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- source_protocol_ebgp: present
- source_protocol_ibgp: present
- source_protocol_evpn: present
- source_protocol_static: present
- source_protocol_ospf: present
- source_protocol_connected: present
- set:
- local_pref: 1200
- metric_type: internal
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: present
-
-**Simple playbook to set up route-map - leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_route_map
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
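-
-To also generate the configuration as a .part file (assuming the inventory and playbook names used above, with `build_dir` defined as in the sample host_vars), `os10_cfg_generate` can be passed as an extra variable:
-
-    ansible-playbook -i hosts leaf.yaml -e os10_cfg_generate=true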
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml
deleted file mode 100644
index a78d55b8f..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_route_map
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml
deleted file mode 100644
index ef0b45319..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_route_map
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml
deleted file mode 100644
index b73ddcef3..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_route_map role facilitates the configuration of route map attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - os10
- - dellemc
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml
deleted file mode 100644
index 62c94bdd5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Provisioning route-map configuration for os10"
- os10_config:
- src: os10_route_map.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
-
- - name: "Generating route map configuration for os10"
- template:
- src: os10_route_map.j2
- dest: "{{ build_dir }}/routemap10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2 b/ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2
deleted file mode 100644
index bfca3d177..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/templates/os10_route_map.j2
+++ /dev/null
@@ -1,348 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#####################################################
-Purpose:
-Configure route-map commands for os10 Devices.
-os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: present
- community_list:
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: present
- extcommunity_list:
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: present
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- source_protocol_ebgp: "present"
- source_protocol_ibgp: "present"
- source_protocol_evpn: "present"
- source_protocol_ospf: "present"
- source_protocol_static: "present"
- source_protocol_connected: "present"
- set:
- local_pref: 1200
- metric_type: internal
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: present
-#####################################################}
-{% if os10_route_map is defined and os10_route_map %}
-{% for key in os10_route_map.keys() %}
- {% if key == "route_map" %}
- {% for vars in os10_route_map[key] %}
- {% if vars.name is defined and vars.name %}
- {% if vars.state is defined and vars.state == "absent" %}
-no route-map {{ vars.name }}
- {% else %}
- {% if vars.permit is defined and vars.permit %}
- {% if vars.seq_num is defined and vars.seq_num %}
-route-map {{ vars.name }} permit {{ vars.seq_num }}
- {% else %}
-route-map {{ vars.name }} permit 10
- {% endif %}
- {% elif vars.permit is defined and not vars.permit %}
- {% if vars.seq_num is defined and vars.seq_num %}
-route-map {{ vars.name }} deny {{ vars.seq_num }}
- {% else %}
-route-map {{ vars.name }} deny 10
- {% endif %}
- {% else %}
- {% if vars.seq_num is defined and vars.seq_num %}
-route-map {{ vars.name }} permit {{ vars.seq_num }}
- {% else %}
-route-map {{ vars.name }} permit 10
- {% endif %}
- {% endif %}
- {% if vars.set is defined and vars.set %}
- {% if vars.set.local_pref is defined %}
- {% if vars.set.local_pref %}
- set local-preference {{ vars.set.local_pref }}
- {% else %}
- no set local-preference
- {% endif %}
- {% endif %}
- {% if vars.set.metric_type is defined %}
- {% if vars.set.metric_type %}
- set metric-type {{ vars.set.metric_type }}
- {% else %}
- no set metric-type internal
- {% endif %}
- {% endif %}
- {% if vars.set.metric is defined %}
- {% if vars.set.metric %}
- set metric {{ vars.set.metric }}
- {% else %}
- no set metric
- {% endif %}
- {% endif %}
- {% if vars.set.origin is defined %}
- {% if vars.set.origin %}
- set origin {{ vars.set.origin }}
- {% else %}
- no set origin
- {% endif %}
- {% endif %}
- {% if vars.set.community is defined %}
- {% if vars.set.community %}
- set community {{ vars.set.community }}
- {% else %}
- no set community internet
- {% endif %}
- {% endif %}
- {% if vars.set.extcommunity is defined %}
- {% if vars.set.extcommunity %}
- set extcommunity rt {{ vars.set.extcommunity }}
- {% else %}
- no set extcommunity rt 11:33
- {% endif %}
- {% endif %}
- {% if vars.set.weight is defined %}
- {% if vars.set.weight %}
- set weight {{ vars.set.weight }}
- {% else %}
- no set weight
- {% endif %}
- {% endif %}
- {% if vars.set.comm_list is defined and vars.set.comm_list %}
- {% if vars.set.comm_list.add is defined and vars.set.comm_list.add %}
- set comm-list {{ vars.set.comm_list.add }} add
- {% else %}
- no set comm-list aa add
- {% endif %}
- {% if vars.set.comm_list.delete is defined and vars.set.comm_list.delete %}
- set comm-list {{ vars.set.comm_list.delete }} delete
- {% else %}
- no set comm-list aa delete
- {% endif %}
- {% endif %}
- {% if vars.set.extcomm_list is defined and vars.set.extcomm_list %}
- {% if vars.set.extcomm_list.add is defined and vars.set.extcomm_list.add %}
- set extcomm-list {{ vars.set.extcomm_list.add }} add
- {% else %}
- no set extcomm-list aa add
- {% endif %}
- {% if vars.set.extcomm_list.delete is defined and vars.set.extcomm_list.delete %}
- set extcomm-list {{ vars.set.extcomm_list.delete }} delete
- {% else %}
- no set extcomm-list aa delete
- {% endif %}
- {% endif %}
- {% if vars.set.next_hop is defined and vars.set.next_hop %}
- {% for item in vars.set.next_hop %}
- {% if item.type is defined and item.type %}
- {% if item.address is defined and item.address %}
- {% if item.state is defined and item.state=="absent" %}
- no set {{ item.type }} next-hop {{ item.address }}
- {% else %}
- {% if item.track_id is defined and item.track_id %}
- set {{ item.type }} next-hop {{ item.address }} track-id {{ item.track_id }}
- {% else %}
- set {{ item.type }} next-hop {{ item.address }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if vars.continue is defined %}
- {% if vars.continue %}
- continue {{ vars.continue }}
- {% else %}
- no continue
- {% endif %}
- {% endif %}
- {% if vars.match is defined and vars.match %}
- {% for match in vars.match %}
- {% if match.ip_type is defined and match.ip_type %}
- {% if match.ip_type =="ipv4" %}
- {% set ip = "ip" %}
- {% else %}
- {% set ip = "ipv6" %}
- {% endif %}
- {% if match.access_group is defined %}
- {% if match.access_group %}
- match {{ ip }} address {{ match.access_group }}
- {% else %}
- no match {{ ip }} address a
- {% endif %}
- {% endif %}
- {% if match.prefix_list is defined %}
- {% if match.prefix_list %}
- match {{ ip }} address prefix-list {{ match.prefix_list }}
- {% else %}
- no match {{ ip }} address prefix-list a
- {% endif %}
- {% endif %}
- {% endif %}
- {% if match.source_protocol_ebgp is defined and match.source_protocol_ebgp %}
- {% if match.source_protocol_ebgp == "present" %}
- match source-protocol bgp ebgp
- {% endif %}
- {% if match.source_protocol_ebgp == "absent" %}
- no match source-protocol bgp ebgp
- {% endif %}
- {% endif %}
- {% if match.source_protocol_ibgp is defined and match.source_protocol_ibgp %}
- {% if match.source_protocol_ibgp == "present" %}
- match source-protocol bgp ibgp
- {% endif %}
- {% if match.source_protocol_ibgp == "absent" %}
- no match source-protocol bgp ibgp
- {% endif %}
- {% endif %}
- {% if match.source_protocol_evpn is defined and match.source_protocol_evpn %}
- {% if match.source_protocol_evpn == "present" %}
- match source-protocol bgp evpn
- {% endif %}
- {% if match.source_protocol_evpn == "absent" %}
- no match source-protocol bgp evpn
- {% endif %}
- {% endif %}
- {% if match.source_protocol_ospf is defined and match.source_protocol_ospf %}
- {% if match.source_protocol_ospf == "present" %}
- match source-protocol ospf
- {% endif %}
- {% if match.source_protocol_ospf == "absent" %}
- no match source-protocol ospf
- {% endif %}
- {% endif %}
- {% if match.source_protocol_static is defined and match.source_protocol_static %}
- {% if match.source_protocol_static == "present" %}
- match source-protocol static
- {% endif %}
- {% if match.source_protocol_static == "absent" %}
- no match source-protocol static
- {% endif %}
- {% endif %}
- {% if match.source_protocol_connected is defined and match.source_protocol_connected %}
- {% if match.source_protocol_connected == "present" %}
- match source-protocol connected
- {% endif %}
- {% if match.source_protocol_connected == "absent" %}
- no match source-protocol connected
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key == "as_path" %}
- {% for item in os10_route_map[key] %}
- {% if item.access_list is defined and item.access_list %}
- {% if item.permit is defined %}
- {% if item.permit %}
- {% set filter = "permit" %}
- {% else %}
- {% set filter = "deny" %}
- {% endif %}
- {% if item.regex is defined and item.regex %}
- {% if item.state is defined and item.state == "absent" %}
-no ip as-path access-list {{ item.access_list }} {{ filter }} {{ item.regex }}
- {% else %}
-ip as-path access-list {{ item.access_list }} {{ filter }} {{ item.regex }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if item.state is defined and item.state == "absent" %}
-no ip as-path access-list {{ item.access_list }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key == "community_list" %}
- {% for item in os10_route_map[key] %}
- {% if item.type is defined and item.type %}
- {% if item.name is defined and item.name %}
- {% if item.permit is defined %}
- {% if item.permit %}
- {% set filter = "permit" %}
- {% else %}
- {% set filter = "deny" %}
- {% endif %}
- {% if item.regex is defined and item.regex %}
- {% if item.state is defined and item.state == "absent" %}
-no ip community-list standard {{ item.name }} {{ filter }} {{ item.regex }}
- {% else %}
-ip community-list standard {{ item.name }} {{ filter }} {{ item.regex }}
- {% endif %}
- {% elif item.community is defined and item.community %}
- {% if item.state is defined and item.state == "absent" %}
-no ip community-list standard {{ item.name }} {{ filter }} {{ item.community }}
- {% else %}
-ip community-list {{ item.type }} {{ item.name }} {{ filter }} {{ item.community }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if item.state is defined and item.state == "absent" %}
-no ip community-list standard {{ item.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key == "extcommunity_list" %}
- {% for item in os10_route_map[key] %}
- {% if item.type is defined and item.type %}
- {% if item.name is defined and item.name %}
- {% if item.permit is defined %}
- {% if item.permit %}
- {% set filter = "permit" %}
- {% else %}
- {% set filter = "deny" %}
- {% endif %}
- {% if item.regex is defined and item.regex %}
- {% if item.state is defined and item.state == "absent" %}
-no ip extcommunity-list standard {{ item.name }} {{ filter }} {{ item.regex }}
- {% else %}
-ip extcommunity-list standard {{ item.name }} {{ filter }} {{ item.regex }}
- {% endif %}
- {% elif item.community is defined and item.community %}
- {% if item.state is defined and item.state == "absent" %}
-no ip extcommunity-list standard {{ item.name }} {{ filter }} {{ item.community }}
- {% else %}
-ip extcommunity-list {{ item.type }} {{ item.name }} {{ filter }} {{ item.community }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if item.state is defined and item.state == "absent" %}
-no ip extcommunity-list standard {{ item.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory
deleted file mode 100644
index 85a255f94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/inventory
+++ /dev/null
@@ -1,2 +0,0 @@
----
-localhost
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml
deleted file mode 100644
index e791b2951..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/main.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-# Sample variables for OS10 device
-os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: present
- community_list:
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: present
- extcommunity_list:
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: present
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- source_protocol_ebgp: present
- source_protocol_ibgp: present
- source_protocol_evpn: present
- source_protocol_static: present
- source_protocol_ospf: present
- source_protocol_connected: present
- set:
- local_pref: 1200
- metric_type: internal
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml
deleted file mode 100644
index 4302a12d0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os10.os10_route_map
diff --git a/ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml
deleted file mode 100644
index ff4a48ac4..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_route_map/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_route_map
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/README.md b/ansible_collections/dellemc/os10/roles/os10_snmp/README.md
deleted file mode 100644
index a875a2340..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/README.md
+++ /dev/null
@@ -1,269 +0,0 @@
-SNMP role
-=========
-
-This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, groups, communities, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The SNMP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
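-A minimal sketch (hypothetical values) of the two negation styles described above:
-
-    os10_snmp:
-      snmp_location:            # empty value removes the configured location
-      snmp_traps:
-        - name: all
-          state: absent         # state absent removes the previously enabled traps
-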
-**os10_snmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``snmp_contact`` | string | Configures SNMP contact information | os10 |
-| ``snmp_location`` | string | Configures SNMP location information | os10 |
-| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os10 |
-| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os10 |
-| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os10 |
-| ``snmp_community.access_list`` | dictionary | Configures ACL for the community (see ``snmp_community.access_list.*``) | os10 |
-| ``snmp_community.access_list.name`` | string | Specifies the name of the ACL for the community | os10 |
-| ``snmp_community.access_list.state`` | string: absent,present\* | Deletes the ACL from the community if set to absent | os10 |
-| ``snmp_community.state`` | string: absent,present\* | Deletes the SNMP community information if set to absent | os10 |
-| ``snmp_engine_id`` | string | Configures SNMP local EngineID | os10 |
-| ``snmp_remote_engine_id`` | list | Configures SNMP remote engine information (see ``snmp_remote_engine_id.*``) | os10 |
-| ``snmp_remote_engine_id.host`` | string | Configures the IP address of the SNMP remote engine | os10 |
-| ``snmp_remote_engine_id.engine_id`` | string | Configures the EngineID of the SNMP remote engine | os10 |
-| ``snmp_remote_engine_id.udpport`` | string | Configures the UDP port of the SNMP remote engine | os10 |
-| ``snmp_remote_engine_id.state`` | string: absent,present\* | Deletes the SNMP remote engine information if set to absent | os10 |
-| ``snmp_group`` | list | Configures the SNMP group information (see ``snmp_group.*``) | os10 |
-| ``snmp_group.name`` | string | Configures the name of the SNMP group | os10 |
-| ``snmp_group.version`` | string: 1,2c,3 | Configures the version of the SNMP group | os10 |
-| ``snmp_group.security_level`` | string: auth,noauth,priv | Configures the security level of SNMP group for version 3 | os10 |
-| ``snmp_group.access_list`` | dictionary | Configures the access list of the SNMP group (see ``snmp_group.access_list.*``)| os10 |
-| ``snmp_group.access_list.name`` | string | Specifies the name of the access list for the SNMP group with version 1 or 2c | os10 |
-| ``snmp_group.access_list.state`` | string: absent,present\* | Deletes the access list from the SNMP group if set to absent | os10 |
-| ``snmp_group.read_view`` | dictionary | Configures the read view of the SNMP group (see ``snmp_group.read_view.*``) | os10 |
-| ``snmp_group.read_view.name`` | string | Specifies the name of the read view for the SNMP group | os10 |
-| ``snmp_group.read_view.state`` | string: absent,present\* | Deletes the read view from the SNMP group if set to absent | os10 |
-| ``snmp_group.write_view`` | dictionary | Configures the write view of the SNMP group (see ``snmp_group.write_view``) | os10 |
-| ``snmp_group.write_view.name`` | string | Specifies the name of the write view for the SNMP group | os10 |
-| ``snmp_group.write_view.state`` | string: absent,present\* | Deletes the write view from the SNMP group if set to absent | os10 |
-| ``snmp_group.notify_view`` | dictionary | Configures the notify view of the SNMP group (see ``snmp_group.notify_view.*``) | os10 |
-| ``snmp_group.notify_view.name`` | string | Specifies the name of the notify view for the SNMP group | os10 |
-| ``snmp_group.notify_view.state`` | string: absent,present\* | Deletes the notify view from the SNMP group if set to absent | os10 |
-| ``snmp_group.state`` | string: absent,present\* | Deletes the SNMP group if set to absent | os10 |
-| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os10 |
-| ``snmp_host.ip`` | string | Configures the IP address of the SNMP trap host | os10 |
-| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host for version 1 or 2c | os10 |
-| ``snmp_host.udpport`` | string | Configures the UDP port number of the SNMP trap host (0 to 65535) | os10 |
-| ``snmp_host.version`` | string: 1,2c,3 (required) | Specifies the SNMP version of the host (1 or 2c or 3 in os10) | os10 |
-| ``snmp_host.security_level`` | string: auth,noauth,priv | Configures the security level of the SNMP host for version 3 | os10 |
-| ``snmp_host.security_name`` | string | Configures the security name of the SNMP host for version 3 | os10 |
-| ``snmp_host.notification_type`` | string: traps,informs | Configures the notification type of the SNMP host | os10 |
-| ``snmp_host.trap_categories`` | dictionary | Enables or disables different trap categories for the SNMP host (see ``snmp_host.trap_categories.*``) | os10 |
-| ``snmp_host.trap_categories.dom`` | boolean: true,false | Enables or disables dom category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.entity`` | boolean: true,false | Enables or disables entity category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.envmon`` | boolean: true,false | Enables or disables envmon category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.lldp`` | boolean: true,false | Enables or disables lldp category traps for the SNMP host | os10 |
-| ``snmp_host.trap_categories.snmp`` | boolean: true,false | Enables or disables snmp category traps for the SNMP host | os10 |
-| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os10 |
-| ``snmp_source_interface`` | string | Configures the source interface for SNMP | os10 |
-| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os10 |
-| ``snmp_traps.name`` | string | Enables SNMP traps | os10 |
-| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os10 |
-| ``snmp_user`` | list | Configures the SNMP user information (see ``snmp_user.*``) | os10 |
-| ``snmp_user.name`` | string | Specifies the name of the SNMP user | os10 |
-| ``snmp_user.group_name`` | string | Specifies the group of the SNMP user | os10 |
-| ``snmp_user.version`` | string: 1,2c,3 | Configures the version for the SNMP user | os10 |
-| ``snmp_user.access_list`` | string | Configures the access list for the SNMP user with version 1 or 2c | os10 |
-| ``snmp_user.authentication`` | dictionary | Configures the authentication information for the SNMP user with version 3 (see ``snmp_user.authentication.*``) | os10 |
-| ``snmp_user.authentication.localized`` | boolean: true,false | Configures the password to be in localized key format or not | os10 |
-| ``snmp_user.authentication.algorithm`` | string: md5, sha | Configures the authentication algorithm for the SNMP user | os10 |
-| ``snmp_user.authentication.password`` | string | Configures the authentication password for the SNMP user; if localized is true, it should be a hexadecimal string prefixed with 0x and quoted | os10 |
-| ``snmp_user.authentication.encryption`` | dictionary | Configures the encryption parameters for the SNMP user | os10 |
-| ``snmp_user.authentication.encryption.algorithm`` | string: aes,des | Configures the encryption algorithm for the SNMP user | os10 |
-| ``snmp_user.authentication.encryption.password`` | string | Configures the encryption password for the SNMP user; if localized is true, it should be a hexadecimal string prefixed with 0x and quoted | os10 |
-| ``snmp_user.remote`` | dictionary | Configures the remote SNMP entity the user belongs to (see ``snmp_user.remote.*``) | os10 |
-| ``snmp_user.remote.ip`` | string | Configures the IP address of the remote entity for the SNMP user | os10 |
-| ``snmp_user.remote.udpport`` | string | Configures the UDP port of the remote entity for the SNMP user | os10 |
-| ``snmp_user.state`` | string: absent,present\* | Deletes the SNMP user if set to absent | os10 |
-| ``snmp_view`` | list | Configures SNMPv3 view information (see ``snmp_view.*``) | os10 |
-| ``snmp_view.name`` | string | Configures the SNMP view name (up to 20 characters) | os10 |
-| ``snmp_view.oid_subtree`` | integer | Configures the SNMP view for the OID subtree | os10 |
-| ``snmp_view.include`` | boolean: true,false | Specifies if the MIB family should be included or excluded from the view | os10 |
-| ``snmp_view.state`` | string: absent,present\* | Deletes the SNMP view if set to absent | os10 |
-| ``snmp_vrf`` | string | Configures the VRF for SNMP | os10 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, this variable is set to false. The example writes a simple playbook that only references the *os10_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features.
-
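-A minimal sketch (hypothetical values, placed in *host_vars* or *group_vars*) of enabling offline command generation with these variables:
-
-    os10_cfg_generate: true
-    build_dir: ../temp/temp_os10
-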
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_snmp:
- snmp_contact: test
- snmp_location: Chennai
- snmp_source_interface: loopback 10
- snmp_vrf: test
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- state: present
- snmp_engine_id: 123456789
- snmp_remote_engine_id:
- - host: 1.1.1.1
- engine_id: '0xab'
- udpport: 162
- state: present
- snmp_traps:
- - name: all
- state: present
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: absent
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- trap_categories:
- dom: true
- lldp: true
- state: present
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: true
- snmp: true
- state: present
- snmp_group:
- - name: group_1
- version: "2c"
- state: present
- access_list:
- name: test_acl
- state: present
- read_view:
- name: view_1
- state: present
- write_view:
- name: view_2
- state: present
- notify_view:
- name: view_3
- state: present
- - name: group_2
- version: 3
- security_level: priv
- state: present
- read_view:
- name: view_1
- state: absent
- notify_view:
- name: view_3
- state: present
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: present
- - name: user_2
- group_name: group_1
- version: 3
- remote:
- ip: 1.1.1.1
- udpport: 200
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: present
- - name: user_3
- group_name: group_1
- version: 2c
- state: present
- - name: user_4
- group_name: group_1
- version: 3
- state: present
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: present
-
-**Simple playbook to setup SNMP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_snmp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml
deleted file mode 100644
index 81a11877c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_snmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml
deleted file mode 100644
index 1a8a31424..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_snmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml
deleted file mode 100644
index efbf4e408..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_snmp role facilitates the configuration of snmp attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml
deleted file mode 100644
index 7ed03578d..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating SNMP configuration for os10"
- template:
- src: os10_snmp.j2
- dest: "{{ build_dir }}/snmp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning SNMP configuration for os10"
- os10_config:
- src: os10_snmp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2 b/ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2
deleted file mode 100644
index f95817096..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/templates/os10_snmp.j2
+++ /dev/null
@@ -1,441 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure snmp commands for os10 Devices
-os10_snmp:
- snmp_contact: test
- snmp_location: Chennai
- snmp_vrf: test
- snmp_source_interface: loopback 10
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- state: present
- snmp_traps:
- - name: all
- state: present
- snmp_engine_id: 123456789
- snmp_remote_engine_id:
- - host: 1.1.1.1
- engine_id: '0xab'
- udpport: 162
- state: present
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: absent
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- state: present
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: true
- snmp: true
- state: present
- snmp_group:
- - name: group_1
- version: "2c"
- state: present
- access_list:
- name: test_acl
- state: present
- read_view:
- name: view_1
- state: present
- write_view:
- name: view_2
- state: present
- notify_view:
- name: view_3
- state: present
- - name: group_2
- version: 3
- security_level: priv
- state: present
- read_view:
- name: view_1
- state: absent
- notify_view:
- name: view_3
- state: present
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: present
- - name: user_2
- group_name: group_1
- version: 3
- remote:
- ip: 1.1.1.1
- udpport: 200
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: present
- - name: user_3
- group_name: group_1
- version: 2c
- state: present
- - name: user_4
- group_name: group_1
- version: 3
- state: present
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: present
-
-###############################################}
-{% if os10_snmp is defined and os10_snmp %}
- {% if os10_snmp.snmp_community is defined %}
- {% set value = os10_snmp.snmp_community %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.access_mode is defined and item.access_mode %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server community {{ item.name }} {{ item.access_mode }}
- {% else %}
- {% if item.access_list is defined and item.access_list and item.access_list.name is defined and item.access_list.name %}
- {% if item.access_list.state is defined and item.access_list.state == "absent" %}
-no snmp-server community {{ item.name }} {{ item.access_mode }} acl {{ item.access_list.name }}
- {% else %}
-snmp-server community {{ item.name }} {{ item.access_mode }} acl {{ item.access_list.name }}
- {% endif %}
- {% else %}
-snmp-server community {{ item.name }} {{ item.access_mode }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if os10_snmp.snmp_contact is defined %}
- {% set value = os10_snmp.snmp_contact %}
- {% if value %}
-snmp-server contact {{ value }}
- {% else %}
-no snmp-server contact
- {% endif %}
- {% endif %}
-
- {% if os10_snmp.snmp_engine_id is defined %}
- {% set value = os10_snmp.snmp_engine_id %}
- {% if value %}
-snmp-server engineID local {{ value }}
- {% else %}
-no snmp-server engineID local
- {% endif %}
- {% endif %}
-
- {# Remove users before removing remote engine #}
- {% if os10_snmp.snmp_user is defined and os10_snmp.snmp_user %}
- {% set value = os10_snmp.snmp_user %}
- {% for item in value %}
- {% if item.name is defined and item.name and item.group_name is defined and item.group_name %}
- {% if item.state is defined and item.state == "absent" %}
- {% set user_remote_option = "" %}
- {% if item.remote is defined and item.remote %}
- {% if item.remote.ip is defined and item.remote.ip %}
- {% if item.remote.udpport is defined and item.remote.udpport %}
- {% set user_remote_option = " remote " + item.remote.ip + " udp-port " + item.remote.udpport|string %}
- {% endif %}
- {% endif %}
- {% endif %}
-no snmp-server user {{ item.name }} {{ item.group_name }}{{ user_remote_option }} {{ item.version }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if os10_snmp.snmp_remote_engine_id is defined %}
- {% set value = os10_snmp.snmp_remote_engine_id %}
- {% for item in value %}
- {% if item.host is defined and item.host %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.udpport is defined and item.udpport %}
-no snmp-server engineID remote {{ item.host }} udp-port {{ item.udpport }}
- {% else %}
-no snmp-server engineID remote {{ item.host }}
- {% endif %}
- {% else %}
- {% if item.engine_id is defined and item.engine_id %}
- {% if item.udpport is defined and item.udpport %}
-snmp-server engineID remote {{ item.host }} udp-port {{ item.udpport }} {{ item.engine_id }}
- {% else %}
-snmp-server engineID remote {{ item.host }} udp-port 162 {{ item.engine_id }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if os10_snmp.snmp_traps is defined %}
- {% set value = os10_snmp.snmp_traps %}
- {% if value %}
- {% for val in value %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.name == "all" %}
- {% set trap_list = ['snmp authentication','snmp linkDown','snmp linkUp','envmon fan','envmon power-supply','envmon temperature'] %}
- {% for name in trap_list %}
-no snmp-server enable traps {{ name }}
- {% endfor %}
- {% else %}
-no snmp-server enable traps {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.name == "all" %}
- {% set trap_list = ['snmp authentication','snmp linkDown','snmp linkUp','envmon fan','envmon power-supply','envmon temperature'] %}
- {% for name in trap_list %}
-snmp-server enable traps {{ name }}
- {% endfor %}
- {% else %}
-snmp-server enable traps {{ val.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if os10_snmp.snmp_group is defined and os10_snmp.snmp_group %}
- {% set value = os10_snmp.snmp_group %}
- {% for item in value %}
- {% if item.name is defined and item.name and item.version is defined and item.version %}
- {% set group_value = item.name + " " + item.version|string %}
- {% if item.security_level is defined and item.security_level %}
- {% if item.version|string != "1" and item.version|string != "2c" %}
- {% set group_value = group_value + " " + item.security_level %}
- {% endif %}
- {% endif %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server group {{ group_value }}
- {% else %}
- {% set group_options = [] %}
- {% if item.version|string == "1" or item.version|string == "2c" %}
- {% if item.access_list is defined and item.access_list and item.access_list.name is defined and item.access_list.name %}
- {% if item.access_list.state is defined and item.access_list.state == "absent" %}
-no snmp-server group {{ group_value }} access {{ item.access_list.name }}
- {% else %}
- {{ group_options.append("access "+item.access_list.name) }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if item.notify_view is defined and item.notify_view and item.notify_view.name is defined and item.notify_view.name %}
- {% if item.notify_view.state is defined and item.notify_view.state == "absent" %}
-no snmp-server group {{ group_value }} notify {{ item.notify_view.name }}
- {% else %}
- {{ group_options.append("notify "+item.notify_view.name)}}
- {% endif %}
- {% endif %}
- {% if item.read_view is defined and item.read_view and item.read_view.name is defined and item.read_view.name %}
- {% if item.read_view.state is defined and item.read_view.state == "absent" %}
-no snmp-server group {{ group_value }} read {{ item.read_view.name }}
- {% else %}
- {{ group_options.append("read "+item.read_view.name) }}
- {% endif %}
- {% endif %}
- {% if item.write_view is defined and item.write_view and item.write_view.name is defined and item.write_view.name %}
- {% if item.write_view.state is defined and item.write_view.state == "absent" %}
-no snmp-server group {{ group_value }} write {{ item.write_view.name }}
- {% else %}
- {{ group_options.append("write "+item.write_view.name)}}
- {% endif %}
- {% endif %}
-snmp-server group {{ group_value }} {{ group_options|join(" ") }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if os10_snmp.snmp_host is defined and os10_snmp.snmp_host %}
- {% set value = os10_snmp.snmp_host %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% set host_port_option = "" %}
- {% if item.udpport is defined and item.udpport %}
- {% set host_port_option = " udp-port " + item.udpport|string %}
- {% endif %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server host {{ item.ip }}{{ host_port_option }}
- {% else %}
- {% set host_notif_type = "traps" %}
- {% if item.notification_type is defined and item.notification_type %}
- {% set host_notif_type = item.notification_type %}
- {% endif %}
- {% if item.version is defined and item.version %}
- {% set host_version = "" %}
- {% if item.version|string == "1" or item.version|string == "2c" %}
- {% if item.communitystring is defined and item.communitystring %}
- {% set host_version = item.version|string + " " + item.communitystring %}
- {% endif %}
- {% elif item.security_level is defined and item.security_level %}
- {% if item.security_name is defined and item.security_name %}
- {% set host_version = item.version|string + " " + item.security_level + " " + item.security_name %}
- {% endif %}
- {% endif %}
- {% set host_trap_categories = [] %}
- {% if item.trap_categories is defined and item.trap_categories %}
- {% for cat_key, cat_value in item.trap_categories.items() %}
- {% if cat_value %}
- {% if cat_key == "dom" %}
- {{ host_trap_categories.append("dom")}}
- {% elif cat_key == "entity" %}
- {{ host_trap_categories.append("entity") }}
- {% elif cat_key == "envmon" %}
- {{ host_trap_categories.append("envmon") }}
- {% elif cat_key == "lldp" %}
- {{ host_trap_categories.append("lldp") }}
- {% elif cat_key == "snmp" %}
- {{ host_trap_categories.append("snmp") }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if host_version %}
-snmp-server host {{ item.ip }} {{ host_notif_type }} version {{ host_version }}{{ host_port_option }} {{ host_trap_categories|join(" ") }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if os10_snmp.snmp_location is defined %}
- {% set value = os10_snmp.snmp_location %}
- {% if value %}
-snmp-server location {{ value }}
- {% else %}
-no snmp-server location
- {% endif %}
- {% endif %}
-
- {% if os10_snmp.snmp_source_interface is defined %}
- {% set value = os10_snmp.snmp_source_interface %}
- {% if value %}
-snmp-server source-interface {{ value.split() | join() }}
- {% else %}
-no snmp-server source-interface
- {% endif %}
- {% endif %}
-
- {% if os10_snmp.snmp_user is defined and os10_snmp.snmp_user %}
- {% set value = os10_snmp.snmp_user %}
- {% for item in value %}
- {% if item.name is defined and item.name and item.group_name is defined and item.group_name %}
- {% if item.version is defined and item.version %}
- {% if item.state is defined and item.state == "absent" %}
-            {# snmp user removal is handled above, before the remote engine ID is removed #}
- {% else %}
- {% set user_remote_option = "" %}
- {% if item.remote is defined and item.remote %}
- {% if item.remote.ip is defined and item.remote.ip %}
- {% if item.remote.udpport is defined and item.remote.udpport %}
- {% set user_remote_option = " remote " + item.remote.ip + " udp-port " + item.remote.udpport|string %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if item.version|string == "1" or item.version|string == "2c" %}
- {% set user_acl_option = "" %}
- {% if item.access_list is defined and item.access_list %}
- {% set user_acl_option = "access " + item.access_list %}
- {% endif %}
-snmp-server user {{ item.name }} {{ item.group_name }}{{ user_remote_option }} {{ item.version }} {{ user_acl_option }}
- {% else %}
- {% set user_auth_option = "" %}
- {% if item.authentication is defined and item.authentication %}
- {% if item.authentication.localized is defined and item.authentication.localized %}
- {% set user_auth_option = " localized" %}
- {% endif %}
- {% if item.authentication.algorithm is defined and item.authentication.algorithm %}
- {% if item.authentication.password is defined and item.authentication.password %}
- {% set user_auth_option = user_auth_option + " auth " + item.authentication.algorithm + " " + item.authentication.password %}
- {% if item.authentication.encryption is defined and item.authentication.encryption %}
- {% if item.authentication.encryption.algorithm is defined and item.authentication.encryption.algorithm %}
- {% if item.authentication.encryption.password is defined and item.authentication.encryption.password %}
- {% set user_auth_option = user_auth_option + " priv " + item.authentication.encryption.algorithm + " " + item.authentication.encryption.password %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-snmp-server user {{ item.name }} {{ item.group_name }}{{ user_remote_option }} {{ item.version }}{{ user_auth_option }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if os10_snmp.snmp_view is defined %}
- {% set value = os10_snmp.snmp_view %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.oid_subtree is defined and item.oid_subtree %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server view {{ item.name }} {{ item.oid_subtree }}
- {% else %}
- {% if item.include is defined %}
- {% if item.include %}
-snmp-server view {{ item.name }} {{ item.oid_subtree }} included
- {% elif not item.include %}
-snmp-server view {{ item.name }} {{ item.oid_subtree }} excluded
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if os10_snmp.snmp_vrf is defined %}
- {% set value = os10_snmp.snmp_vrf %}
- {% if value %}
-snmp-server vrf {{ value }}
- {% else %}
-no snmp-server vrf default
- {% endif %}
- {% endif %}
-
-{% endif %}
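As an illustration of how these role templates map variables to device CLI (these lines are not part of the removed file): with the `snmp_community`, `snmp_contact`, and `snmp_location` values from the sample block in the template's header comment above, a template of this shape would render output along these lines.

    snmp-server community public ro acl test_acl
    snmp-server contact test
    snmp-server location Chennai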
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml
deleted file mode 100644
index bafcc2101..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/main.os10.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-# vars file for dellemc.os10.os10_snmp,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_snmp:
- snmp_contact: test
- snmp_location: Chennai
- snmp_community:
- - name: public
- access_mode: ro
- state: present
- snmp_traps:
- - name: all
- state: present
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: false
- state: absent
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml
deleted file mode 100644
index 6b4b4e7e2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_snmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml
deleted file mode 100644
index 407dad8ed..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_snmp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_snmp
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/LICENSE b/ansible_collections/dellemc/os10/roles/os10_system/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/README.md b/ansible_collections/dellemc/os10/roles/os10_system/README.md
deleted file mode 100644
index 119138afc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/README.md
+++ /dev/null
@@ -1,126 +0,0 @@
-System role
-===========
-
-This role facilitates the configuration of global system attributes. It specifically enables configuration of hostname and hashing algorithm. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The System role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_system keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``hostname`` | string | Configures a hostname on the device (no negate command) | os10 |
-| ``hardware_forwarding`` | string: scaled-l2,scaled-l3-routes,scaled-l3-hosts | Configures hardware forwarding mode | os10 |
-| ``hash_algo`` | dictionary | Configures hash algorithm commands (see ``hash_algo.*``) | os10 |
-| ``hash_algo.algo`` | list | Configures hashing algorithm (see ``algo.*``) | os10 |
-| ``algo.name`` | string (required) | Configures the name of the hashing algorithm | os10 |
-| ``algo.mode`` | string (required) | Configures the hashing algorithm mode | os10 |
-| ``algo.state`` | string: absent,present\* | Deletes the hashing algorithm if set to absent | os10 |
-| ``load_balance`` | dictionary | Configures the global traffic load balance (see ``load_balance.*``) | os10 |
-| ``load_balance.ingress_port`` | boolean: true,false | Specifies whether to use the source port ID for the hashing algorithm | os10 |
-| ``load_balance.ip_selection`` | list | Configures IPv4 key fields to use in hashing algorithm (see ``ip_selection.*``) | os10 |
-| ``ip_selection.field`` | string | Configures IPv4 key fields to use in hashing algorithm | os10 |
-| ``ip_selection.state`` | string: absent,present\* | Deletes the IPv4 key fields if set to absent | os10 |
-| ``load_balance.ipv6_selection`` | list | Configures IPv6 key fields to use in hashing algorithm (see ``ipv6_selection.*``) | os10 |
-| ``ipv6_selection.field`` | string | Configures IPv6 key fields to use in hashing algorithm | os10 |
-| ``ipv6_selection.state`` | string: absent,present\* | Deletes the IPv6 key fields if set to absent | os10 |
-| ``load_balance.mac_selection`` | list | Configures MAC key fields to use in hashing algorithm (see ``mac_selection.*``) | os10 |
-| ``mac_selection.field`` | string | Configures MAC key fields to use in hashing algorithm | os10 |
-| ``mac_selection.state`` | string: absent,present\* | Deletes the MAC key fields if set to absent | os10 |
-| ``load_balance.tcp_udp_selection`` | list | Configures TCP UDP ports for load balancing configurations (see ``tcp_udp_selection.*``) | os10 |
-| ``tcp_udp_selection.field`` | string | Configures TCP UDP port fields to use in hashing algorithm | os10 |
-| ``tcp_udp_selection.state`` | string: absent,present\* | Deletes the TCP UDP ports if set to absent | os10 |
-| ``min_ra`` | string | Configures global RA minimum interval value, applicable to all interfaces across VRFs | os10 |
-| ``max_ra`` | string | Configures global RA maximum interval value, applicable to all interfaces across VRFs | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_system* role to configure the hostname, hardware forwarding mode, hash algorithm, load-balancing key fields, and router advertisement (RA) intervals. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, this variable is set to false. This example writes a simple playbook that only references the *os10_system* role. By including the role, you automatically get access to all of the tasks that configure system features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_system:
- hostname: os10
- hardware_forwarding: scaled-l3-hosts
- hash_algo:
- algo:
- - name: lag
- mode: crc
- state: present
- - name: ecmp
- mode: xor
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: source-ip
- state: present
- ipv6_selection:
- - field: source-ip
- state: present
- mac_selection:
- - field: source-mac
- state: present
- tcp_udp_selection:
- - field: l4-source-port
- state: present
- max_ra: 15
- min_ra: 10
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_system
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
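-
-To also render the generated configuration commands into a *.part* file under *build_dir*, as described above, pass `os10_cfg_generate` as an extra variable on the same run. This is only a sketch using the sample *hosts* inventory and *leaf.yaml* playbook from this example:
-
-    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"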
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml
deleted file mode 100644
index 559240559..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_system
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml
deleted file mode 100644
index b79131294..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_system
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml
deleted file mode 100644
index 588850b60..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_system role facilitates the configuration of system attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml
deleted file mode 100644
index 5b61c8628..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
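-# Note: the first task only renders the configuration into a .part file under
-# {{ build_dir }} when os10_cfg_generate evaluates to true; the second task
-# always pushes the rendered os10_system.j2 template to the device via os10_config.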
- - name: "Generating system configuration for os10"
- template:
- src: os10_system.j2
- dest: "{{ build_dir }}/system10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning system configuration for os10"
- os10_config:
- src: os10_system.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2 b/ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2
deleted file mode 100644
index 95edc2eb3..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/templates/os10_system.j2
+++ /dev/null
@@ -1,130 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-
-Purpose:
-Configure system commands for os10 Devices
-
-os10_system:
- hostname: os10
- hardware_forwarding: scaled-l3-routes
- hash_algo:
- algo:
- - name: lag
- mode: crc
- state: present
- - name: ecmp
- mode: xor
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: source-ip
- state: present
- ipv6_selection:
- - field: source-ip
- state: present
- mac_selection:
- - field: source-mac
- state: present
- tcp_udp_selection:
- - field: l4-source-port
- state: present
- max_ra: 15
- min_ra: 10
-
-###############################################}
-{% if os10_system is defined and os10_system %}
-{% if os10_system.hostname is defined and os10_system.hostname %}
-hostname {{ os10_system.hostname }}
-{% endif %}
-{% if os10_system.max_ra is defined %}
- {% if os10_system.max_ra %}
-ipv6 nd max-ra-interval {{ os10_system.max_ra }}
- {% else %}
-no ipv6 nd max-ra-interval
- {% endif %}
-{% endif %}
-{% if os10_system.min_ra is defined %}
- {% if os10_system.min_ra %}
-ipv6 nd min-ra-interval {{ os10_system.min_ra }}
- {% else %}
-no ipv6 nd min-ra-interval
- {% endif %}
-{% endif %}
-{% for key,value in os10_system.items() %}
- {% if key == "hardware_forwarding" %}
- {% if value %}
-hardware forwarding-table mode {{ value }}
- {% else %}
-no hardware forwarding-table mode
- {% endif %}
- {% elif key == "hash_algo" and value %}
- {% if value.algo is defined and value.algo %}
- {% for item in value.algo %}
- {% if item.name is defined and item.name %}
- {% if item.mode is defined and item.mode %}
- {% if item.state is defined and item.state == "absent" %}
-no hash-algorithm {{ item.name }} {{ item.mode }}
- {% else %}
-hash-algorithm {{ item.name }} {{ item.mode }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% elif key == "load_balance" and value %}
- {% if value.ingress_port is defined %}
- {% if value.ingress_port %}
-load-balancing ingress-port enable
- {% else %}
-no load-balancing ingress-port enable
- {% endif %}
- {% endif %}
- {% if value.ip_selection is defined and value.ip_selection %}
- {% for listitem in value.ip_selection %}
- {% if listitem.field is defined and listitem.field %}
- {% if listitem.state is defined and listitem.state == "absent" %}
-no load-balancing ip-selection {{ listitem.field }}
- {% else %}
-load-balancing ip-selection {{ listitem.field }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if value.ipv6_selection is defined and value.ipv6_selection %}
- {% for listitem in value.ipv6_selection %}
- {% if listitem.field is defined and listitem.field %}
- {% if listitem.state is defined and listitem.state == "absent" %}
-no load-balancing ipv6-selection {{ listitem.field }}
- {% else %}
-load-balancing ipv6-selection {{ listitem.field }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if value.mac_selection is defined and value.mac_selection %}
- {% for listitem in value.mac_selection %}
- {% if listitem.field is defined and listitem.field %}
- {% if listitem.state is defined and listitem.state == "absent" %}
-no load-balancing mac-selection {{ listitem.field }}
- {% else %}
-load-balancing mac-selection {{ listitem.field }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if value.tcp_udp_selection is defined and value.tcp_udp_selection %}
- {% for listitem in value.tcp_udp_selection %}
- {% if listitem.field is defined and listitem.field %}
- {% if listitem.state is defined and listitem.state == "absent" %}
-no load-balancing tcp-udp-selection {{ listitem.field }}
- {% else %}
-load-balancing tcp-udp-selection {{ listitem.field }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml
deleted file mode 100644
index ea4bc20f5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/tests/main.os10.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# vars file for dellemc.os10.os10_system,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_system:
- hostname: os10
- hardware_forwarding: scaled-l3-hosts
- hash_algo:
- algo:
- - name: lag
- mode: crc
- state: present
- - name: ecmp
- mode: xor
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: source-ip
- state: present
- ipv6_selection:
- - field: source-ip
- state: present
- mac_selection:
- - field: source-mac
- state: present
- tcp_udp_selection:
- - field: l4-source-port
- state: present
- max_ra: 15
- min_ra: 10
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml
deleted file mode 100644
index 8674f0973..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_system
diff --git a/ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml
deleted file mode 100644
index 4a69de595..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_system/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_system,
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/LICENSE b/ansible_collections/dellemc/os10/roles/os10_template/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/README.md b/ansible_collections/dellemc/os10/roles/os10_template/README.md
deleted file mode 100644
index d7faf0132..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/README.md
+++ /dev/null
@@ -1,75 +0,0 @@
-Template role
-==============
-
-This role provides access to structured data from show commands by using the TextFSM parsing engine. TextFSM is a template-based state machine: it takes the raw string output from the CLI of network devices, runs it through a TextFSM template, and returns structured text in the form of a Python dictionary. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Template role is highly customizable, and it works with separate template definitions that contain variables and rules with regular expressions (see the sketch below). This approach makes it straightforward to parse any text-based CLI output from network devices. The Template role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
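-The role's template definitions follow the standard TextFSM layout. In the rough sketch below (not shipped with the role; the field names and regular expressions are placeholders), each `Value` line declares a captured field, and the rule under the `Start` state emits one structured record for every matching line of CLI output:
-
-    # Values declare the fields to capture; Start is the initial state of the
-    # state machine, and '-> Record' emits the captured Values as one record.
-    Value INTERFACE (\S+)
-    Value STATUS (up|down)
-
-    Start
-      ^${INTERFACE}\s+${STATUS} -> Record
-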
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- Variables and values are case-sensitive
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_template* role to parse any text-based CLI output. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name. All the supported CLI commands are imported as tasks in *tasks/main.yml*.
-
-For the *os10_template* role plugins to be used, you may need to specify the actual path of the role in the *ansible.cfg* file.
-
-**Sample ansible.cfg**
-
- action_plugins = ../../plugins/modules/
-
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address> ansible_network_os=dellemc.os10.os10 ansible_ssh_user=xxxxx ansible_ssh_pass=xxxxx
-
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_template
-
-**Example playbook to run specific show command — leaf.yaml**
-
-
- ---
- - name: PARSE SHOW IP INTERFACE BRIEF
- hosts: leaf1
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
-
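-Each of the imported task files registers its output and passes a `name` (for example *ip_interface_facts* in `show_ip_interface_brief.yaml`) to `textfsm_parser`. Assuming the parser publishes the parsed records as a fact under that name, a follow-up task in the same play can display or iterate over the structured data; the task below is only an illustrative sketch, not part of the role:
-
-    - debug:
-        var: ip_interface_facts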
-
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml
deleted file mode 100644
index 4df1a6b48..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- role_name: os10_template
- author: Dell EMC Networking Engineering
- description: The os10_template role facilitates providing structured output from CLI commands on devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml
deleted file mode 100644
index 9b41a6c26..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_ip_interface_brief.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_port-channel_summary.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_lldp_neighbors.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_ip_vrf.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_ip_bgp_summary.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_vlan.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_vlt_err_disabled_ports.yaml
-
- - name: os10 dellemc.os10.os10_template test
- import_tasks: show_spanning_tree_compatibility_mode.yaml
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml
deleted file mode 100644
index d2ee2ac7e..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_bgp_summary.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW IP BGP SUMMARY
- os10_command:
- commands:
- - show ip bgp summary
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_ip_bgp_summary
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_ip_bgp_summary.template') }}"
- content: "{{ output.stdout[0] }}"
- name: bgp_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml
deleted file mode 100644
index f29967750..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_interface_brief.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW_IP_INTERFACE_BRIEF
- os10_command:
- commands:
- - show ip interface brief
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_ip_interface_brief
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_ip_interface_brief.template') }}"
- content: " {{ output.stdout[0] }}"
- name: ip_interface_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml
deleted file mode 100644
index 616416edc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_ip_vrf.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW IP VRF
- os10_command:
- commands:
- - show ip vrf
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_ip_vrf
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_ip_vrf.template') }}"
- content: "{{ output.stdout[0] }}"
- name: vrf_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml
deleted file mode 100644
index 1fedfe4ba..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_lldp_neighbors.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW LLDP NEIGHBORS
- os10_command:
- commands:
- - show lldp neighbors
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_lldp_neighbors
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_lldp_neighbors.template') }}"
- content: "{{ output.stdout[0] }}"
- name: lldp_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml
deleted file mode 100644
index 2d26c14e3..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_port-channel_summary.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW PORT-CHANNEL SUMMARY
- os10_command:
- commands:
- - show port-channel summary
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_port-channel_summary
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_port-channel_summary.template') }}"
- content: "{{ output.stdout[0] }}"
- name: port_channel_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml
deleted file mode 100644
index a3c69524a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_spanning_tree_compatibility_mode.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW SPANNING TREE COMPATIBILITY MODE
- os10_command:
- commands:
- - command: show spanning-tree compatibility-mode
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_spanning_tree_compatibility_mode
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_spanning_tree_compatibility_mode.template') }}"
- content: "{{ output.stdout[0] }}"
- name: spanning_tree_comp_mode_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml
deleted file mode 100644
index ee3c988e2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlan.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
- - name: CAPTURE SHOW VLAN
- os10_command:
- commands:
- - show vlan
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_vlan
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_vlan.template') }}"
- content: "{{ output.stdout[0] }}"
- name: vlan_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml
deleted file mode 100644
index 0e0f8b3d0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/show_vlt_err_disabled_ports.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
- - name: CAPTURE SHOW VLT ERR DISABLED PORTS
- os10_command:
- commands: ['show vlt all error-disabled-ports']
- register: output
-
- - name: DISPLAY THE OUTPUT
- debug: var=output.stdout
-
- - name: INSTALL TEXTFSM
- import_tasks: textfsm.yaml
-
- - name: PARSE THE OUTPUT for {{ ansible_network_os }} show_vlt_err_dis_ports
- textfsm_parser:
- src: "{{ lookup('file', './templates/os10_show_vlt_err_disabled_ports.template') }}"
- content: "{{ output.stdout[0] }}"
- name: vlt_err_dis_facts
- register: result
- vars:
- - ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml
deleted file mode 100644
index 99394b44c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tasks/textfsm.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-#Install Textfsm
- - pip:
- name: textfsm
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template
deleted file mode 100644
index 52ddc2898..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_bgp_summary.template
+++ /dev/null
@@ -1,16 +0,0 @@
-Value Filldown RouterId (\d+\.\d+\.\d+\.\d+)
-Value Filldown LocalAs (\d+)
-Value Filldown BFD (enabled)
-Value Neighbor (\S+)
-Value AS (\d+)
-Value MsgRcvd (\d+)
-Value MsgSent (\d+)
-Value Status (\S+)
-Value State (\S+)
-
-Start
- ^BGP router identifier ${RouterId} local AS number ${LocalAs}
- ^Global BFD is ${BFD}
- ^(?!Neighbor)${Neighbor}\s+${AS}\s+${MsgRcvd}\s+${MsgSent}\s+${Status}\s+${State} -> Record
-
-EOF
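The os10_template tasks above capture ``show`` command output and hand it to a TextFSM template such as this one through the collection's textfsm_parser module. A minimal standalone sketch of the same parse, assuming only the textfsm Python package (the sample CLI output is invented for illustration):

import io
import textfsm

# Template text copied from the removed os10_show_ip_bgp_summary.template file.
TEMPLATE = r"""Value Filldown RouterId (\d+\.\d+\.\d+\.\d+)
Value Filldown LocalAs (\d+)
Value Filldown BFD (enabled)
Value Neighbor (\S+)
Value AS (\d+)
Value MsgRcvd (\d+)
Value MsgSent (\d+)
Value Status (\S+)
Value State (\S+)

Start
 ^BGP router identifier ${RouterId} local AS number ${LocalAs}
 ^Global BFD is ${BFD}
 ^(?!Neighbor)${Neighbor}\s+${AS}\s+${MsgRcvd}\s+${MsgSent}\s+${Status}\s+${State} -> Record

EOF
"""

# Invented sample of "show ip bgp summary" output, shaped to match the template.
SAMPLE_OUTPUT = """\
BGP router identifier 10.1.1.1 local AS number 65001
Global BFD is enabled
Neighbor        AS      MsgRcvd  MsgSent  Up/Down   State/Pfx
10.0.0.2        65002   120      118      00:05:12  Estab
10.0.0.6        65003   98       101      00:04:40  Estab
"""

fsm = textfsm.TextFSM(io.StringIO(TEMPLATE))
rows = fsm.ParseText(SAMPLE_OUTPUT)
for row in rows:
    # Filldown values (RouterId, LocalAs, BFD) repeat on every recorded row.
    print(dict(zip(fsm.header, row)))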
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template
deleted file mode 100644
index 20d017175..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_interface_brief.template
+++ /dev/null
@@ -1,9 +0,0 @@
-Value INTERFACE_NAME (\S+\s\S+)
-Value IP_ADDRESS (\S+)
-Value OK (YES|NO)
-Value METHOD (DHCP|manual|unset)
-Value STATUS (up|down|admin down)
-Value PROTOCOL (up|down|admin down)
-
-Start
- ^${INTERFACE_NAME}\s+${IP_ADDRESS}\s+${OK}\s+${METHOD}\s+${STATUS}\s+${PROTOCOL} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template
deleted file mode 100644
index c7e4d7541..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_ip_vrf.template
+++ /dev/null
@@ -1,7 +0,0 @@
-Value VRFName (\S+)
-Value Interfaces (\S+)
-
-Start
- ^(?!VRF-Name)${VRFName}\s+${Interfaces} -> Record
- ^(?!VRF-Name)${VRFName} -> Record
- ^\s+${Interfaces} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template
deleted file mode 100644
index 3c9353138..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_lldp_neighbors.template
+++ /dev/null
@@ -1,7 +0,0 @@
-Value LocPortID (\S+)
-Value RemHostName (\S+)
-Value RemPortId (\S+)
-Value RemChassisId ([a-fA-F0-9:]{17})
-
-Start
- ^${LocPortID}\s+${RemHostName}\s+${RemPortId}\s+${RemChassisId} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template
deleted file mode 100644
index 1e77b92e7..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_port-channel_summary.template
+++ /dev/null
@@ -1,9 +0,0 @@
-Value GROUP (\d+)
-Value PORT_CHANNEL (\S+\s+[(D)|(U)]+)
-Value TYPE ([Eth|Gig|ten|For]+)
-Value PROTOCOL ([DYNAMIC|STATIC]+)
-Value List MEMBER_PORTS (\s.*)
-
-Start
- ^${GROUP}\s+${PORT_CHANNEL}\s+${TYPE}\s+${PROTOCOL}\s+${MEMBER_PORTS} -> Record
- ^${GROUP}\s+${PORT_CHANNEL}\s+${TYPE}\s+${PROTOCOL} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template
deleted file mode 100644
index bf365e33b..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_spanning_tree_compatibility_mode.template
+++ /dev/null
@@ -1,6 +0,0 @@
-Value Interface_name ([a-zA-Z\-]+\s*[\d\/\:]+)
-Value Instance (VLAN\s+(\d+))
-Value Compatibility_mode (\S+\s*)
-
-Start
- ^${Interface_name}\s+${Instance}\s+${Compatibility_mode} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template
deleted file mode 100644
index f71e95734..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlan.template
+++ /dev/null
@@ -1,12 +0,0 @@
-Value Codes (\*|\@|\M|\R|\s)
-Value NUM (\d+)
-Value Status (Active|Inactive)
-Value Description (\S+|\s+)
-Value Q (A|T)
-Value Ports (\S+)
-
-Start
- ^${Codes}\s+${NUM}\s+${Status}\s+${Description}\s+${Q}\s+${Ports} -> Record
- ^${Codes}\s+${NUM}\s+${Status}\s+${Description} -> Record
- ^${Codes}\s+${NUM}\s+${Status} -> Record
- ^\s+${Q}\s+${Ports} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template b/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template
deleted file mode 100644
index 340b7a2bb..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/templates/os10_show_vlt_err_disabled_ports.template
+++ /dev/null
@@ -1,5 +0,0 @@
-Value VLT_PORT_CHANNEL_ID (\d+)
-Value PORT_CHANNEL (\S+\s*)
-
-Start
- ^${VLT_PORT_CHANNEL_ID}\s+${PORT_CHANNEL} -> Record
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all b/ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all
deleted file mode 100644
index 902b33015..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tests/group_vars/all
+++ /dev/null
@@ -1,3 +0,0 @@
-ansible_ssh_user: xxxx
-ansible_ssh_pass: xxxx
-ansible_network_os: dellemc.os10.os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml
deleted file mode 100644
index a76e08176..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[LeafAndSpineSwitch:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml
deleted file mode 100644
index ca1c43cca..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tests/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: LeafAndSpineSwitch
- connection: network_cli
- roles:
- - dellemc.os10.os10_template
diff --git a/ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml
deleted file mode 100644
index 6d49466df..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_template/tests/test.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- hosts: LeafAndSpineSwitch
- connection: network_cli
- collections:
- - dellemc.os10
- tasks:
- - import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE b/ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/README.md b/ansible_collections/dellemc/os10/roles/os10_uplink/README.md
deleted file mode 100644
index 8ffeb0e71..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/README.md
+++ /dev/null
@@ -1,109 +0,0 @@
-Uplink role
-===========
-
-This role facilitates the configuration of uplink failure detection feature attributes. It specifically enables configuration of the association between upstream and downstream interfaces, known as an uplink-state group. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Uplink role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_uplink keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``uplink_state_group`` | list | Configures the uplink state group (see ``uplink_state_group.*``) | os10 |
-| ``uplink_state_group.id`` | integer | Configures the uplink state group instance | os10 |
-| ``uplink_state_group.enable`` | boolean: True,False | Enables the uplink state group instance | os10 |
-| ``uplink_state_group.defer_time`` | integer | Configures the defer timer for the uplink state group | os10 |
-| ``uplink_state_group.uplink_type`` | list | Configures the upstream and downstream attribute (see ``uplink_type.*``) | os10 |
-| ``uplink_type.type`` | string: upstream,downstream | Configures the uplink type | os10 |
-| ``uplink_type.intf`` | string | Configures the uplink interface | os10 |
-| ``uplink_type.state`` | string: absent,present\* | Removes the uplink stream if set to absent | os10 |
-| ``uplink_state_group.downstream`` | dictionary | Configures downstream information for the uplink state group (see ``downstream.*``) | os10 |
-| ``downstream.disable_links`` | integer | Configures the number of downstream links to be disabled; the string 'all' disables all downstream links | os10 |
-| ``downstream.auto_recover`` | boolean: True,False | Enables or disables auto recover for downstream interfaces | os10 |
-| ``uplink_state_group.state`` | string: absent,present\* | Removes the uplink state group instance if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
-********************
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_uplink* role to configure the uplink state group instance along with its upstream and downstream interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_uplink* role. By including the role, you automatically get access to all of the tasks to configure uplink features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "present"
- - type: "downstream"
- intf: "ethernet1/1/2-1/1/5"
- state: "present"
- state: "present"
- downstream:
- disable_links: all
- auto_recover: false
- defer_time: 50
- - id: 2
- enable: True
- state: "present"
-
-> **NOTE**: Interfaces should be created using the *os10_interface* role.
-
-**Simple playbook to setup uplink — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_uplink
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml
deleted file mode 100644
index 441d767ec..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_uplink
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml
deleted file mode 100644
index 7abb00129..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_uplink
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml
deleted file mode 100644
index 0bc561964..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- role_name: os10_uplink
- author: Dell EMC Networking Engineering
- description: The os10_uplink role facilitates the configuration of uplink attributes in devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml
deleted file mode 100644
index 6500ea3cd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating uplink configuration for os10"
- template:
- src: os10_uplink.j2
- dest: "{{ build_dir }}/uplink10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning uplink configuration for os10"
- os10_config:
- src: os10_uplink.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2 b/ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2
deleted file mode 100644
index 64a237d81..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/templates/os10_uplink.j2
+++ /dev/null
@@ -1,102 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-#Purpose:
-Configure uplink commands for os10 Devices
-
-os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "present"
- - type: "downstream"
- intf: "ethernet1/1/2-1/1/5"
- state: "present"
- downstream:
- disable_links: all
- auto_recover: false
- defer_time: 50
- state: "present"
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "present"
- - type: "downstream"
- intf: "ethernet1/1/2-1/1/5"
- state: "present"
- downstream:
- disable_links: 10
- auto_recover: false
- state: "present"
-
-###############################################}
-{% if os10_uplink is defined and os10_uplink %}
- {% if os10_uplink.uplink_state_group is defined and os10_uplink.uplink_state_group %}
- {% for uplink_val in os10_uplink.uplink_state_group %}
- {% if uplink_val.id is defined %}
- {% if uplink_val.state is defined and uplink_val.state == "absent" %}
-no uplink-state-group {{ uplink_val.id }}
- {% else %}
-uplink-state-group {{ uplink_val.id }}
- {% if uplink_val.enable is defined %}
- {% if uplink_val.enable == True %}
- enable
- {% else %}
- no enable
- {% endif %}
- {% endif %}
- {% if uplink_val.downstream is defined and uplink_val.downstream %}
- {% if uplink_val.downstream.auto_recover is defined %}
- {% if uplink_val.downstream.auto_recover %}
- downstream auto-recover
- {% else %}
- no downstream auto-recover
- {% endif %}
- {% endif %}
- {% if uplink_val.downstream.disable_links is defined %}
- {% if uplink_val.downstream.disable_links %}
- downstream disable-links {{ uplink_val.downstream.disable_links }}
- {% else %}
- no downstream disable-links
- {% endif %}
- {% endif %}
- {% endif %}
- {% if uplink_val.uplink_type is defined and uplink_val.uplink_type %}
- {% for uplink in uplink_val.uplink_type %}
- {% if uplink.type is defined and uplink.type %}
- {% if uplink.state is defined and uplink.state == "absent" %}
- {% if uplink.intf is defined and uplink.intf %}
- {% if uplink.type == "downstream" %}
- no downstream {{ uplink.intf }}
- {% elif uplink.type == "upstream" %}
- no upstream {{ uplink.intf }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if uplink.intf is defined and uplink.intf %}
- {% if uplink.type == "downstream" %}
- downstream {{ uplink.intf }}
- {% elif uplink.type == "upstream" %}
- upstream {{ uplink.intf }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if uplink_val.defer_time is defined %}
- {% if uplink_val.defer_time %}
- defer-time {{ uplink_val.defer_time }}
- {% else %}
- no defer-time
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
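For orientation, the deleted os10_uplink.j2 template above turns each `uplink_state_group` entry into OS10 `uplink-state-group` CLI commands. The sketch below pairs the first sample entry from the template's header comment with the commands the template logic would emit; the rendered lines are inferred from the template, not captured from a device, and `defer_time` is shown at the group level because that is where the template reads it.

    # Sample vars (taken from the template's header comment):
    os10_uplink:
      uplink_state_group:
        - id: 1
          enable: True
          uplink_type:
            - type: "upstream"
              intf: "port-channel1"
              state: "present"
            - type: "downstream"
              intf: "ethernet1/1/2-1/1/5"
              state: "present"
          downstream:
            disable_links: all
            auto_recover: false
          defer_time: 50
          state: "present"
    # The template would render roughly:
    #   uplink-state-group 1
    #    enable
    #    no downstream auto-recover
    #    downstream disable-links all
    #    upstream port-channel1
    #    downstream ethernet1/1/2-1/1/5
    #    defer-time 50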
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml
deleted file mode 100644
index 90afe0088..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/main.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# vars file for dellemc.os10.os10_uplink,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "present"
- - type: "downstream"
- intf: "ethernet1/1/2-1/1/5"
- state: "present"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml
deleted file mode 100644
index 92c99613a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_uplink
diff --git a/ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml
deleted file mode 100644
index a376eeb13..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_uplink/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_uplink
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/LICENSE b/ansible_collections/dellemc/os10/roles/os10_users/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/README.md b/ansible_collections/dellemc/os10/roles/os10_users/README.md
deleted file mode 100644
index 09d55f1dd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-Users role
-==========
-
-This role facilitates the configuration of global system user attributes, and it supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The Users role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- The role is abstracted using the `ansible_network_os` variable, which can take `dellemc.os10.os10` as its value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_users list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``username`` | string (required) | Configures the username, which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os10 |
-| ``password`` | string | Configures the password set for the username; password length must be at least eight characters | os10 |
-| ``role`` | string | Configures the role assigned to the user | os10 |
-| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_users* role to configure global system user attributes. It creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands in a file. The example then writes a simple playbook that only references the *os10_users* role. By including the role, you automatically get access to all of the tasks that configure user features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_users:
- - username: test
- password: a1a2a3a4!@#$
- role: sysadmin
- state: present
- - username: u1
- password: a1a2a3a4!@#$
- role: netadmin
- state: present
-
-**Simple playbook to setup users — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_users
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
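The deleted README notes that setting a user's `state` to `absent` deletes the account. A minimal sketch of that case, reusing the `u1` user from the sample above (the rendered command is inferred from the role's os10_users.j2 template, not from device output):

    os10_users:
      - username: u1
        state: absent
    # os10_users.j2 would render: no username u1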
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml
deleted file mode 100644
index 668eeface..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_users
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml
deleted file mode 100644
index e73b341b6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_users
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml
deleted file mode 100644
index c73b755ea..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_users role facilitates the configuration of user attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml
deleted file mode 100644
index eb870a132..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating users configuration for os10"
- template:
- src: os10_users.j2
- dest: "{{ build_dir }}/users10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning users configuration for os10"
- os10_config:
- src: os10_users.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2 b/ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2
deleted file mode 100644
index 080f6a590..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/templates/os10_users.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure users commands for os10 Devices
-os10_users:
- - username: test
- password: test
- role: sysadmin
- state: present
-###############################################}
-{% if os10_users is defined and os10_users %}
- {% for item in os10_users %}
- {% if item.username is defined and item.username %}
- {% if item.state is defined and item.state == "absent" %}
-no username {{ item.username }}
- {% else %}
- {% if item.password is defined and item.password %}
- {% if item.role is defined and item.role %}
-username {{ item.username }} password {{ item.password }} role {{ item.role }}
- {% else %}
-username {{ item.username }} password {{ item.password }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
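Like the uplink template, the deleted os10_users.j2 above emits one command per list entry. A minimal sketch pairing the template's own sample entry with the line it would render (inferred from the template logic, not device output):

    os10_users:
      - username: test
        password: test
        role: sysadmin
        state: present
    # renders roughly: username test password test role sysadmin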
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml
deleted file mode 100644
index 0bbc06333..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/tests/main.os10.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-# vars file for dellemc.os10.os10_users,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_users:
- - username: test
- password: a1a2a3a4!@#$
- role: sysadmin
- state: present
- - username: u1
- password: a1a2a3a4!@#$
- role: netadmin
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml
deleted file mode 100644
index 268124512..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_users
diff --git a/ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml
deleted file mode 100644
index 49177b5b0..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_users/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_users
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/README.md b/ansible_collections/dellemc/os10/roles/os10_vlan/README.md
deleted file mode 100644
index 71a7adf9a..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-VLAN role
-=========
-
-This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration
-- `os10_vlan` (dictionary) holds the VLAN ID keys and the `default_vlan_id` key; a minimal example follows the key tables below
-- Each VLAN ID key must be in the format "vlan <ID>", where <ID> is 1 to 4094
-- Variables and values are case-sensitive
-
-**os10_vlan**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``default_vlan_id`` | integer | Configures the vlan-id as the default VLAN for an existing VLAN | os10 |
-
-**VLAN ID keys**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``description`` | string | Configures a single line description for the VLAN | os10 |
-| ``tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os10 |
-| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os10 |
-| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os10 |
-| ``untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os10 |
-| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os10 |
-| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os10 |
-| ``state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os10 |
-| ``virtual_gateway_ip`` | string | Configures an anycast gateway IPv4 address for VLAN interfaces| os10 |
-| ``virtual_gateway_ipv6`` | string | Configures an anycast gateway IPv6 address for VLAN interfaces| os10 |
-| ``ip_and_mask`` | string | Configures the specified IP address to the interface | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
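A minimal sketch of the key layout described above (illustrative VLAN IDs and port names only): `state: absent` on a member removes just that tagged or untagged association, while `state: absent` on the VLAN key deletes the VLAN itself.

    os10_vlan:
      default_vlan_id: 1
      vlan 100:
        description: "Blue"
        tagged_members:
          - port: ethernet 1/1/32
            state: absent
        state: present
      vlan 200:
        state: absent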
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-## Example playbook
-
-This example uses the *os10_vlan* role to set up the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN with the ID or delete the members associated with it. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the role generates the configuration commands as a *.part* file in the *build_dir* path. By default, this variable is set to false. Write a simple playbook that only references the *os10_vlan* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_vlan:
- default_vlan_id: 2
- vlan 100:
- description: "Blue"
- tagged_members:
- - port: ethernet 1/1/32
- state: present
- - port: ethernet 1/1/31
- state: present
- untagged_members:
- - port: ethernet 1/1/30
- state: present
- - port: ethernet 1/1/29
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
- vlan 10:
- description: "vlan with anycast GW"
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- state: "present"
-
-> **NOTE**: Interfaces should be created using the *os10_interface* role.
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vlan
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
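For example, to have the role also write the generated commands to a *.part* file under *build_dir*, the `os10_cfg_generate` flag can be enabled at run time; this uses the standard Ansible extra-vars option and is only an illustrative invocation:

    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"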
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml
deleted file mode 100644
index 7510d5943..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_vlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml
deleted file mode 100644
index acd669929..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_vlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml
deleted file mode 100644
index 03e7a180c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_vlan role facilitates the configuration of VLAN attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml
deleted file mode 100644
index f4d69bc94..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating VLAN configuration for os10"
- template:
- src: os10_vlan.j2
- dest: "{{ build_dir }}/vlan10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning VLAN configuration for os10"
- os10_config:
- src: os10_vlan.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2 b/ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2
deleted file mode 100644
index f07f10b33..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/templates/os10_vlan.j2
+++ /dev/null
@@ -1,129 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{##########################################
-Purpose:
-Configure VLAN Interface commands for os10 Devices
-os10_vlan:
- default_vlan_id : 4
- vlan 100:
- description: "red"
- tagged_members:
- - port: ethernet 1/1/32
- state: present
- - port: ethernet 1/1/31
- state: absent
- untagged_members:
- - port: ethernet 1/1/30
- state: present
- - port: ethernet 1/1/29
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
- vlan 10:
- description: "vlan with anycast GW"
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- state: "present"
-
-#########################################}
-{% if os10_vlan is defined and os10_vlan %}
-{% for key,value in os10_vlan.items() %}
- {% if key == "default_vlan_id" %}
- {% if value %}
-default vlan-id {{ value }}
- {% else %}
-no default vlan-id
- {% endif %}
- {% else %}
-
- {% set vlan_id = key.split(" ") %}
- {% set vlan_vars = os10_vlan[key] %}
-
- {% if vlan_vars.state is defined and vlan_vars.state == "absent" %}
-no interface vlan{{ vlan_id[1] }}
- {% else %}
-interface vlan{{ vlan_id[1] }}
- {% if vlan_vars.description is defined %}
- {% if vlan_vars.description %}
- {% if vlan_vars.description|wordcount > 1 %}
- description "{{ vlan_vars.description }}"
- {% else %}
- description {{ vlan_vars.description }}
- {% endif %}
- {% else %}
- no description
- {% endif %}
- {% endif %}
-
- {% if vlan_vars.ip_and_mask is defined %}
- {% if vlan_vars.ip_and_mask %}
- ip address {{ vlan_vars.ip_and_mask }}
- {% else %}
- no ip address
- {% endif %}
- {% endif %}
-
- {% if vlan_vars.virtual_gateway_ip is defined %}
- {% if vlan_vars.virtual_gateway_ip %}
- ip virtual-router address {{ vlan_vars.virtual_gateway_ip }}
- {% else %}
- no ip virtual-router address
- {% endif %}
- {% endif %}
-
- {% if vlan_vars.virtual_gateway_ipv6 is defined %}
- {% if vlan_vars.virtual_gateway_ipv6 %}
- ipv6 virtual-router address {{ vlan_vars.virtual_gateway_ipv6 }}
- {% else %}
- no ipv6 virtual-router address
- {% endif %}
- {% endif %}
-
- {# Keep member configs in the end as it switches to member interface context #}
- {% if vlan_vars.untagged_members is defined %}
- {% for ports in vlan_vars.untagged_members %}
- {% if ports.port is defined and ports.port %}
- {% if 'range' in ports.port %}
-interface {{ ports.port }}
- {% else %}
-interface {{ ports.port.split() | join() }}
- {% endif %}
- {% if ports.state is defined and ports.state == "absent" %}
- no switchport access vlan
- {% else %}
- switchport access vlan {{ vlan_id[1] }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if vlan_vars.tagged_members is defined %}
- {% for ports in vlan_vars.tagged_members %}
- {% if ports.port is defined and ports.port %}
- {% if 'range' in ports.port %}
-interface {{ ports.port }}
- {% else %}
-interface {{ ports.port.split() | join() }}
- {% endif %}
- {% if ports.state is defined and ports.state == "absent" %}
- no switchport trunk allowed vlan {{ vlan_id[1] }}
- {% else %}
- switchport mode trunk
- switchport trunk allowed vlan {{ vlan_id[1] }}
- {% endif %}
- {% if ports.access_vlan is defined and ports.access_vlan == "false" %}
- no switchport access vlan
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml
deleted file mode 100644
index 78e247389..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/main.os10.yaml
+++ /dev/null
@@ -1,31 +0,0 @@
----
-# vars file for dellemc.os10.os10_vlan,
-# below gives an example configuration
-# Sample variables for OS10 device
-os10_vlan:
- default_vlan_id: 2
- vlan 100:
- description: "Blue"
- tagged_members:
- - port: ethernet 1/1/32
- state: present
- - port: ethernet 1/1/31
- state: present
- untagged_members:
- - port: ethernet 1/1/30
- state: present
- - port: ethernet 1/1/29
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
- vlan 10:
- description: "vlan with anycast GW"
- ip_and_mask: "10.1.1.1/24"
- virtual_gateway_ip: "10.1.1.254"
- virtual_gateway_ipv6: "10:1:1::254"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml
deleted file mode 100644
index 13442eff6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_vlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml
deleted file mode 100644
index c856f3024..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlan/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_vlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/README.md b/ansible_collections/dellemc/os10/roles/os10_vlt/README.md
deleted file mode 100644
index 85ed917a2..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-VLT role
-========
-
-This role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VLT role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os10_vlt keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``domain`` | integer (required) | Configures the VLT domain identification number (1 to 1000) | os10 |
-| ``backup_destination`` | string | Configures an IPv4 address for the VLT backup link (A.B.C.D format or X:X:X:X::X format) | os10 |
-| ``destination_type`` | string | Configures the backup destination based on this destination type (IPv4 or IPv6)| os10 |
-| ``backup_destination_vrf`` | string | Configures the virtual routing and forwarding (VRF) instance through which the backup destination IP is reachable (*vrfname* must be present) | os10 |
-| ``discovery_intf`` | string | Configures the discovery interface for the VLT domain (range of interfaces)| os10 |
-| ``discovery_intf_state`` | string: absent,present | Deletes the discovery interfaces for the VLT domain if set to absent | os10 |
-| ``peer_routing`` | boolean | Configures VLT peer routing | os10 |
-| ``priority`` | integer (default:32768) | Configures VLT priority | os10 |
-| ``vlt_mac`` | string | Configures the VLT MAC address | os10 |
-| ``vlt_peers`` | dictionary | Contains objects to configure the VLT peer port-channel (see ``vlt_peers.*``) | os10 |
-| ``vlt_peers.<portchannelid>`` | dictionary | Configures the VLT peer port-channel (`Po <portchannelid> value`) | os10 |
-| ``vlt_peers.<portchannelid>.peer_lag`` | integer | Configures the port-channel ID of the VLT peer lag | os10 |
-| ``state`` | string: absent,present | Deletes the VLT instance if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
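-
-As an illustrative sketch of the negation behaviour described above (not part of the shipped role documentation; the values are sample placeholders), an empty value for a key generates the corresponding "no" form of the command, while `state: absent` removes the VLT domain as a whole:
-
-    os10_vlt:
-      domain: 1
-      peer_routing:          # empty value negates the setting ("no peer-routing")
-      vlt_mac:               # empty value negates the setting ("no vlt-mac")
-      state: present         # change to absent to generate "no vlt-domain 1"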
-
-Connection variables
---------------------
-
-Ansible Dell EMC network OS roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-The *os10_vlt* role is built on modules included in the core Ansible code. These modules were added in Ansible version 2.2.0.
-
-Example playbook
-----------------
-
-This example uses the *os10_vlt* role to set up a VLT domain. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example also includes a simple playbook that only references the *os10_vlt* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
- os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- backup_destination_vrf:
- discovery_intf: 1/1/12
- discovery_intf_state: present
- peer_routing: True
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: present
-
-> **NOTE**: Discovery interface must not be in switchport mode and can be configured using the *os10_interface* role.
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vlt
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
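-
-As a hypothetical variation (not part of the original example), `os10_cfg_generate` can also be supplied at run time as an extra variable, so the role additionally writes the generated commands to the *build_dir* path defined in the host variables:
-
-    ansible-playbook -i hosts leaf.yaml -e "os10_cfg_generate=true"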
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml
deleted file mode 100644
index daa359938..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_vlt
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml
deleted file mode 100644
index 910f1fa87..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_vlt
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml
deleted file mode 100644
index c3164f7dc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_vlt role facilitates the configuration of VLT attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml
deleted file mode 100644
index 63fa380a8..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating VLT configuration for os10"
- template:
- src: os10_vlt.j2
- dest: "{{ build_dir }}/vlt10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning VLT configuration for os10"
- os10_config:
- src: os10_vlt.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2 b/ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2
deleted file mode 100644
index 4915ff718..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/templates/os10_vlt.j2
+++ /dev/null
@@ -1,108 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure VLT commands for os10 devices.
-os10_vlt:
- domain: 1
- backup_destination: "192.168.1.1"
- destination_type: "ipv4"
- backup_destination_vrf: "management"
- discovery_intf: 1/1/12
- discovery_intf_state: present
- peer_routing: True
- priority: 1
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: present
-################################}
-{% if os10_vlt is defined and os10_vlt %}
- {% if os10_vlt.domain is defined and os10_vlt.domain %}
- {% if os10_vlt.state is defined and os10_vlt.state == 'absent' %}
-no vlt-domain {{ os10_vlt.domain }}
- {% else %}
-vlt-domain {{ os10_vlt.domain }}
- {% if os10_vlt.backup_destination is defined %}
- {% if os10_vlt.backup_destination %}
- {% if os10_vlt.destination_type is defined %}
- {% if os10_vlt.destination_type == "ipv6" %}
- backup destination ipv6 {{ os10_vlt.backup_destination }}
- {% elif os10_vlt.destination_type == "ipv4" %}
- {% if os10_vlt.backup_destination_vrf is defined and os10_vlt.backup_destination_vrf %}
- backup destination {{ os10_vlt.backup_destination }} vrf {{ os10_vlt.backup_destination_vrf }}
- {% else %}
- backup destination {{ os10_vlt.backup_destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- no backup destination
- {% endif %}
- {% endif %}
- {% if os10_vlt.discovery_intf_state is defined and os10_vlt.discovery_intf_state == "absent" %}
- {% if os10_vlt.discovery_intf is defined and os10_vlt.discovery_intf %}
- no discovery-interface ethernet{{ os10_vlt.discovery_intf }}
- {% endif %}
- {% else %}
- {% if os10_vlt.discovery_intf is defined and os10_vlt.discovery_intf %}
- discovery-interface ethernet{{ os10_vlt.discovery_intf }}
- {% endif %}
- {% endif %}
- {% if os10_vlt.backup_destination is defined %}
- {% if os10_vlt.backup_destination %}
- {% if os10_vlt.destination_type is defined %}
- {% if os10_vlt.destination_type == 'ipv6' %}
- backup destination ipv6 {{ os10_vlt.backup_destination }}
- {% elif os10_vlt.destination_type == 'ipv4' %}
- {% if os10_vlt.backup_destination_vrf is defined and os10_vlt.backup_destination_vrf %}
- backup destination {{ os10_vlt.backup_destination }} vrf {{ os10_vlt.backup_destination_vrf }}
- {% else %}
- backup destination {{ os10_vlt.backup_destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- no backup destination
- {% endif %}
- {% endif %}
- {% if os10_vlt.priority is defined %}
- {% if os10_vlt.priority %}
- primary-priority {{ os10_vlt.priority }}
- {% else %}
- no primary-priority
- {% endif %}
- {% endif %}
- {% if os10_vlt.peer_routing is defined %}
- {% if os10_vlt.peer_routing %}
- peer-routing
- {% else %}
- no peer-routing
- {% endif %}
- {% endif %}
- {% if os10_vlt.vlt_mac is defined %}
- {% if os10_vlt.vlt_mac %}
- vlt-mac {{ os10_vlt.vlt_mac }}
- {% else %}
- no vlt-mac
- {% endif %}
- {% endif %}
-
- {% endif %}
- {% endif %}
- {% if os10_vlt.vlt_peers is defined and os10_vlt.vlt_peers %}
- {% for key in os10_vlt.vlt_peers.keys() %}
- {% set channel_id = key.split(" ") %}
- {% set peer_vars = os10_vlt.vlt_peers[key] %}
-interface port-channel{{ channel_id[1] }}
- {% if peer_vars.peer_lag is defined %}
- {% if peer_vars.peer_lag %}
- vlt-port-channel {{ peer_vars.peer_lag}}
- {% else %}
- no vlt-port-channel
- {% endif %}
- {% endif %}
-
- {% endfor %}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml
deleted file mode 100644
index 1c15f1594..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/main.os10.yaml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# vars file for dellemc.os10.os10_vlt,
-# below gives an example configuration
-# Sample variables for OS10 device
-os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- backup_destination_vrf:
- discovery_intf: 1/1/12
- discovery_intf_state: present
- peer_routing: True
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: present
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml
deleted file mode 100644
index 8f950f01c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_vlt
diff --git a/ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml
deleted file mode 100644
index aee0f95b4..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vlt/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_vlt
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/README.md b/ansible_collections/dellemc/os10/roles/os10_vrf/README.md
deleted file mode 100644
index 464efc5bd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/README.md
+++ /dev/null
@@ -1,143 +0,0 @@
-VRF role
-========
-
-This role facilitates the configuration of basic virtual routing and forwarding (VRF) settings, which allow a physical router to be partitioned into multiple virtual routers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VRF role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the variable `ansible_network_os`, which takes `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (both conventions are shown in the sketch after this list)
-- Variables and values are case-sensitive
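-
-For illustration, here is a minimal sketch (with hypothetical VRF names and values) of these conventions; `state: absent` removes a VRF instance entirely, while an empty `upd_src_ip_loopback_id` negates that particular setting:
-
-    os10_vrf:
-      vrfdetails:
-        - vrf_name: "blue"              # hypothetical VRF to delete
-          state: "absent"               # state absent removes the VRF instance
-        - vrf_name: "red"
-          state: "present"
-          upd_src_ip_loopback_id: ""    # empty value negates the update-source-ip setting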
-
-**os10_vrf keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrfdetails`` | list | Configures the list of VRF instances (see ``vrfdetails.*``) | os10 |
-| ``vrfdetails.vrf_name`` | string | Specifies the VRF instance name (default is management) | os10 |
-| ``vrfdetails.state`` | string | Deletes the VRF instance name if set to absent | os10 |
-| ``vrfdetails.ip_route_import`` | string | Configures VRF IP subcommands | os10 |
-| ``ip_route_import.community_value`` | string | Configures the route community value | os10 |
-| ``ip_route_import.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ip_route_import.state`` | string | Deletes the IP configuration if set to absent | os10 |
-| ``vrfdetails.ip_route_export`` | string | Configures VRF IP subcommands | os10 |
-| ``ip_route_export.community_value`` | string | Configures the route community value | os10 |
-| ``ip_route_export.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ip_route_export.state`` | string | Deletes the IP config if set to absent | os10 |
-| ``vrfdetails.ipv6_route_import`` | string | Configures VRF IPv6 subcommands | os10 |
-| ``ipv6_route_import.community_value`` | string | Configures the route community value | os10 |
-| ``ipv6_route_import.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ipv6_route_import.state`` | string | Deletes the IP config if set to absent | os10 |
-| ``vrfdetails.ipv6_route_export`` | string | Configures VRF IPv6 subcommands | os10 |
-| ``ipv6_route_export.community_value`` | string | Configures the route community value | os10 |
-| ``ipv6_route_export.route_map_value`` | string | Configures the route-map value | os10 |
-| ``ipv6_route_export.state`` | string | Deletes the IP config if set to absent | os10 |
-| ``vrfdetails.map_ip_interface`` | list | Specifies a list of valid interface names | os10 |
-| ``map_ip_interface.intf_id`` | string | Specifies a valid interface name | os10 |
-| ``map_ip_interface.state`` | string | Deletes VRF association in the interface if set to absent | os10 |
-| ``upd_src_ip_loopback_id`` | string | Configures the source IP for any leaked route in the VRF from the provided loopback ID; delete if empty string | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-The *os10_vrf* role is built on modules included in the core Ansible code. These modules were added in Ansible version 2.2.0.
-
-Example playbook
-----------------
-
-This example uses the *os10_vrf* role to set up a VRF and associate it with an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example then uses a simple playbook that references the *os10_vrf* role.
-*upd_src_ip_loopback_id* depends on the interface being associated with the VRF, so the *os10_vrf* role needs to be invoked twice with different input dictionaries: one to create the VRF and one to set *upd_src_ip_loopback_id*.
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
- os10_vrf:
- vrfdetails:
- - vrf_name: "os10vrf"
- state: "present"
- ip_route_import:
- community_value: "10:20"
- state: "present"
- route_map_value: "test4"
- ip_route_export:
- community_value: "30:40"
- state: "present"
- route_map_value: "test3"
- ipv6_route_import:
- community_value: "40:50"
- state: "absent"
- route_map_value: "test2"
- ipv6_route_export:
- community_value: "60:70"
- state: "absent"
- route_map_value: "test2"
- map_ip_interface:
-      - intf_id: "loopback11"
-        state: "present"
-
- os_vrf_upd_src_loopback:
- vrfdetails:
- - vrf_name: "os10vrf"
- state: "present"
- upd_src_ip_loopback_id: 11
-
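-For reference, the sample variables above would cause the role template to generate OS10 CLI configuration roughly as follows (a sketch derived from the role's Jinja2 template; exact formatting on a device may differ):
-
-    ! first run, using os10_vrf
-    ip vrf os10vrf
-     ip route-import 10:20 route-map test4
-     no ipv6 route-import 40:50
-     ip route-export 30:40 route-map test3
-     no ipv6 route-export 60:70
-     exit
-    interface loopback11
-     ip vrf forwarding os10vrf
-
-    ! second run, with os10_vrf set to os_vrf_upd_src_loopback
-    ip vrf os10vrf
-     update-source-ip loopback11
-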
-**Simple playbook to set up system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vrf
-
-**Simple playbook with `upd_src_ip_loopback_id` — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vrf
- - hosts: leaf1
- vars:
- os10_vrf: "{{ os_vrf_upd_src_loopback }}"
- roles:
- - dellemc.os10.os10_vrf
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml
deleted file mode 100644
index e00abdd92..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_vrf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml
deleted file mode 100644
index 7bd70e180..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_vrf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml
deleted file mode 100644
index db8f619dc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_vrf role facilitates the configuration of VRF attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml
deleted file mode 100644
index ef6515796..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
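-# Note: the first task below only renders the VRF configuration to a local
-# "<build_dir>/vrf10_<hostname>.conf.part" file when os10_cfg_generate is true;
-# the second task applies the rendered template to the device through os10_config.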
- - name: "Generating vrf configuration for os10"
- template:
- src: os10_vrf.j2
- dest: "{{ build_dir }}/vrf10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning VRF configuration for os10"
- os10_config:
- src: os10_vrf.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2 b/ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2
deleted file mode 100644
index e77f6c14b..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/templates/os10_vrf.j2
+++ /dev/null
@@ -1,122 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure VRF on os10 Devices.
-os10_vrf:
- vrfdetails:
-    - vrf_name: "os10vrf"
- state: "present"
- ip_route_import:
- community_value: 10:15
- state: "absent"
- route_map_value: test1
- ip_route_export:
-      community_value: 20:30
- state: "present"
- route_map_value: test2
- ipv6_route_import:
- community_value: 10:15
- state: "present"
- route_map_value: test3
- ipv6_route_export:
-      community_value: 20:30
-      state: "present"
-      route_map_value: test4
-    map_ip_interface:
-      - intf_id: loopback6
-        state: "present"
-    upd_src_ip_loopback_id: 5
-    - vrf_name: "os10vrf1"
- state: "absent"
-################################}
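-{# Rendering flow (summary): for each entry in os10_vrf.vrfdetails the template
-   emits "ip vrf <name>" (or "no ip vrf <name>" when state is absent), followed
-   by the optional ip/ipv6 route-import and route-export commands, the
-   interface-to-VRF mappings from map_ip_interface, and the update-source-ip
-   loopback setting (negated when the loopback ID is empty). #}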
-{% if (os10_vrf is defined and os10_vrf) %}
-{% if os10_vrf.vrfdetails is defined %}
- {% for vrf in os10_vrf.vrfdetails %}
- {% if vrf.vrf_name is defined %}
- {% if vrf.vrf_name %}
- {% if vrf.state is defined and vrf.state == 'absent' %}
-no ip vrf {{ vrf.vrf_name }}
- {% else %}
-ip vrf {{ vrf.vrf_name }}
- {% if vrf.ip_route_import is defined and vrf.ip_route_import %}
- {% set route_vars = vrf.ip_route_import %}
- {% if route_vars.community_value is defined and route_vars.community_value %}
- {% if route_vars.state == 'present' %}
- {% if route_vars.route_map_value is defined and route_vars.route_map_value %}
- ip route-import {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }}
- {% else %}
- ip route-import {{ route_vars.community_value }}
- {% endif %}
- {% else %}
- no ip route-import {{ route_vars.community_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if vrf.ipv6_route_import is defined and vrf.ipv6_route_import %}
- {% set route_vars = vrf.ipv6_route_import %}
- {% if route_vars.community_value is defined and route_vars.community_value %}
- {% if route_vars.state == 'present' %}
- {% if route_vars.route_map_value is defined and route_vars.route_map_value %}
- ipv6 route-import {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }}
- {% else %}
- ipv6 route-import {{ route_vars.community_value }}
- {% endif %}
- {% else %}
- no ipv6 route-import {{ route_vars.community_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if vrf.ip_route_export is defined and vrf.ip_route_export %}
- {% set route_vars = vrf.ip_route_export %}
- {% if route_vars.community_value is defined and route_vars.community_value %}
- {% if route_vars.state == 'present' %}
- {% if route_vars.route_map_value is defined and route_vars.route_map_value %}
- ip route-export {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }}
- {% else %}
- ip route-export {{ route_vars.community_value }}
- {% endif %}
- {% else %}
- no ip route-export {{ route_vars.community_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if vrf.ipv6_route_export is defined and vrf.ipv6_route_export %}
- {% set route_vars = vrf.ipv6_route_export %}
- {% if route_vars.community_value is defined and route_vars.community_value %}
- {% if route_vars.state == 'present' %}
- {% if route_vars.route_map_value is defined and route_vars.route_map_value %}
- ipv6 route-export {{ route_vars.community_value }} route-map {{ route_vars.route_map_value }}
- {% else %}
- ipv6 route-export {{ route_vars.community_value }}
- {% endif %}
- {% else %}
- no ipv6 route-export {{ route_vars.community_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if vrf.map_ip_interface is defined and vrf.map_ip_interface %}
- exit
- {% for map_ip_interface in vrf.map_ip_interface %}
- {% if map_ip_interface.intf_id is defined and map_ip_interface.intf_id %}
-interface {{ map_ip_interface.intf_id }}
- {% if map_ip_interface.state is defined and map_ip_interface.state == "absent" %}
- no ip vrf forwarding
- {% else %}
- ip vrf forwarding {{ vrf.vrf_name }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if vrf.upd_src_ip_loopback_id is defined %}
- {% if vrf.upd_src_ip_loopback_id %}
- update-source-ip loopback{{ vrf.upd_src_ip_loopback_id}}
- {% else %}
- no update-source-ip loopback
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml
deleted file mode 100644
index b8a265d85..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/main.os10.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# vars file for dellemc.os10.os10_vrf,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_vrf:
- vrfdetails:
- - vrf_name: "os10vrf"
- state: "present"
- ip_route_import:
- community_value: "10:20"
- state: "present"
- route_map_value: "test1"
- ip_route_export:
- community_value: "30:40"
- state: "present"
- route_map_value: "test2"
- ipv6_route_import:
- community_value: "40:50"
- state: "absent"
- route_map_value: "test3"
- ipv6_route_export:
- community_value: "60:70"
- state: "absent"
- route_map_value: "test4"
- map_ip_interface:
- - intf_id: "loopback11"
- state: "present"
-
-os_vrf_upd_src_loopback:
- vrfdetails:
- - vrf_name: "os10vrf"
- state: "present"
- upd_src_ip_loopback_id: 11
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml
deleted file mode 100644
index 6093a28fc..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_vrf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml
deleted file mode 100644
index 5ed35d638..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrf/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars for dellemc.os10.os10_vrf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/README.md b/ansible_collections/dellemc/os10/roles/os10_vrrp/README.md
deleted file mode 100644
index 299166bff..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/README.md
+++ /dev/null
@@ -1,139 +0,0 @@
-VRRP role
-=========
-
-This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports creating VRRP groups on interfaces and setting the VRRP group attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VRRP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os10_vrrp` (dictionary) holds a dictionary keyed by the interface name
-- The interface name can correspond to any valid OS10 interface with a unique interface identifier name
-- Physical interface names must be in *<interfacename> <tuple>* format (for example *fortyGigE 1/1*)
-- Variables and values are case-sensitive
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrrp`` | dictionary | Configures VRRP commands (see ``vrrp.*``) | os10 |
-| ``version`` | integer | Configures the VRRP version | os10 |
-| ``vrrp_active_active_mode`` | boolean | Configures VRRP active-active mode (applicable to VLAN interfaces) | os10 |
-| ``delay_reload`` | integer | Configures the minimum delay timer applied after boot (0 to 900) | os10 |
-| ``vrrp_group`` | list | Configures VRRP group commands (see ``vrrp_group.*``) | os10 |
-| ``vrrp_group.type`` | string: ipv6,ipv4 | Specifies the type of the VRRP group | os10 |
-| ``vrrp_group.group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os10 |
-| ``vrrp_group.virtual_address`` | list | Configures virtual addresses for the VRRP group (see ``virtual_address.*``) | os10 |
-| ``virtual_address.ip`` | string | Configures a virtual ip address (A.B.C.D format) | os10 |
-| ``virtual_address.state`` | string: present\*,absent | Configures/unconfigures a virtual-address (A.B.C.D format) | os10 |
-| ``vrrp_group.preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os10 |
-| ``vrrp_group.priority`` |integer | Configures priority for the VRRP group (1 to 255; default 100) | os10 |
-| ``vrrp_group.adv_interval_centisecs`` | integer | Configures the advertisement interval for the VRRP group in centiseconds (25 to 4075; default 100) and in multiple of 25; centisecs gets converted into seconds in version 2 | os10 |
-| ``vrrp_group.track_interface`` | list | Configures the track interface of the VRRP group (see ``track.*``) | os10 |
-| ``track_interface.resource_id`` | integer | Configures the object tracking resource ID of the VRRP group; mutually exclusive with *track.interface* | os10 |
-| ``track_interface.interface`` | string | Configures the track interface of the VRRP group (<interface name> <interface number> format) | os10 |
-| ``track_interface.priority_cost`` | integer | Configures the priority cost for track interface of the VRRP group (1 to 254; default 10) | os10 |
-| ``track_interface.state`` | string: present\*,absent | Deletes the specific track interface from the VRRP group if set to absent | os10 |
-| ``vrrp_group.track_interface.state`` | string: present\*,absent | Deletes all track interfaces from the VRRP group if set to absent | os10 |
-| ``vrrp_group.state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent | os10 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_vrrp* role to configure VRRP commands on the interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example then writes a simple playbook that only references the *os10_vrrp* role.
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
- os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- ethernet1/1/1:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- adv_interval_centisecs: 200
- state: present
- vlan100:
- vrrp_active_active_mode: true
-
-> **NOTE**: Interface VRRP cannot coexist with L2 modes; the interface mode can be configured using the *os10_interface* role.
-
-**Simple playbook to set up system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vrrp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/defaults/main.yml
deleted file mode 100644
index 089bfddc5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_vrrp
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/handlers/main.yml
deleted file mode 100644
index aef7df3b5..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_vrrp
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/meta/main.yml
deleted file mode 100644
index 38560f941..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os10_vrrp role facilitates the configuration of Virtual Router Redundancy Protocol (VRRP) attributes in
- devices running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - dellemc
- - emc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml
deleted file mode 100644
index 3d8a1a6cf..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating VRRP configuration for os10"
- template:
- src: os10_vrrp.j2
- dest: "{{ build_dir }}/vrrp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning VRRP configuration for os10"
- os10_config:
- src: os10_vrrp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2 b/ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2
deleted file mode 100644
index a1c75b6a6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/templates/os10_vrrp.j2
+++ /dev/null
@@ -1,154 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{##########################################
-Purpose:
-Configure VRRP commands for os10 Devices
-os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- ethernet1/1/1:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- adv_interval_centisecs: 200
- state: present
- vlan100:
- vrrp_active_active_mode: True
-#########################################}
-{% if os10_vrrp is defined and os10_vrrp %}
-{% for key,value in os10_vrrp.items() %}
-{% if key == "vrrp" %}
- {% if value.delay_reload is defined %}
- {% if value.delay_reload >=0 %}
-vrrp delay reload {{ value.delay_reload }}
- {% else %}
-vrrp delay reload {{ value.delay_reload }}
- {% endif %}
- {% endif %}
- {% if value.version is defined %}
- {% if value.version %}
-vrrp version {{ value.version }}
- {% else %}
-no vrrp version
- {% endif %}
- {% endif %}
-{% else %}
-interface {{ key }}
- {% if value %}
- {% if key.startswith("vlan") %}
- {% if value.vrrp_active_active_mode is defined and value.vrrp_active_active_mode %}
- vrrp mode active-active
- {% else %}
- no vrrp mode active-active
- {% endif %}
- {% endif %}
- {% if value.vrrp_group is defined and value.vrrp_group %}
- {% for group in value.vrrp_group %}
- {% if group.group_id is defined and group.group_id %}
- {% if group.state is defined and group.state == "absent" %}
- {% if group.type is defined and group.type == "ipv6" %}
- no vrrp-ipv6-group {{ group.group_id }}
- {% else %}
- no vrrp-group {{ group.group_id }}
- {% endif %}
- {% else %}
- {% if group.type is defined and group.type == "ipv6" %}
- vrrp-ipv6-group {{ group.group_id }}
- {% else %}
- vrrp-group {{ group.group_id }}
- {% endif %}
- {% if group.adv_interval_centisecs is defined %}
- {% if group.adv_interval_centisecs %}
- advertise-interval centisecs {{ group.adv_interval_centisecs }}
- {% else %}
- no advertise-interval centisecs
- {% endif %}
- {% endif %}
- {% if group.adv_interval_secs is defined %}
- {% if group.adv_interval_secs %}
- advertise-interval secs {{ group.adv_interval_secs }}
- {% else %}
- no advertise-interval secs
- {% endif %}
- {% endif %}
- {% if group.track_interface is defined and group.track_interface %}
- {% for track_item in group.track_interface %}
- {% if track_item.state is defined and track_item.state == "absent" %}
- {% if track_item.resource_id is defined and track_item.resource_id %}
- no track {{ track_item.resource_id }}
- {% endif %}
- {% else %}
- {% if track_item.resource_id is defined and track_item.resource_id %}
- {% if track_item.priority_cost is defined and track_item.priority_cost %}
- track {{ track_item.resource_id }} priority-cost {{ track_item.priority_cost }}
- {% else %}
- track {{ track_item.resource_id }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if group.virtual_address is defined and group.virtual_address %}
- {% for virtual_interface in group.virtual_address %}
- {% if virtual_interface.state is defined and virtual_interface.state == "absent" %}
- {% if virtual_interface.ip is defined and virtual_interface.ip %}
- no virtual-address {{ virtual_interface.ip }}
- {% endif %}
- {% else %}
- {% if virtual_interface.ip is defined and virtual_interface.ip %}
- virtual-address {{ virtual_interface.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if group.preempt is defined %}
- {% if group.preempt %}
- preempt
- {% else %}
- no preempt
- {% endif %}
- {% endif %}
- {% if group.priority is defined %}
- {% if group.priority %}
- priority {{ group.priority }}
- {% else %}
- no priority
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml
deleted file mode 100644
index 974751700..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/main.os10.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-# vars file for dellemc.os10.os10_vrrp
-# below gives an example configuration
-# Sample variables for OS10 device
-os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- ethernet1/1/1:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- adv_interval_centisecs: 200
- state: present
- vlan100:
- vrrp_active_active_mode: true
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml
deleted file mode 100644
index 2ed5ab8ef..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_vrrp
diff --git a/ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml
deleted file mode 100644
index aa78d6774..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vrrp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_vrrp
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE b/ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/README.md b/ansible_collections/dellemc/os10/roles/os10_vxlan/README.md
deleted file mode 100644
index 09b23bb36..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/README.md
+++ /dev/null
@@ -1,259 +0,0 @@
-VxLAN role
-========
-
-This role facilitates the configuration of virtual extensible LAN (VxLAN) attributes. It supports the configuration of virtual networks, Ethernet virtual private network (EVPN), and network virtualization edge (NVE). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The VxLAN role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the brief sketch after this list)
-- Variables and values are case-sensitive
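-
-As a brief sketch of this negation behavior (hypothetical values; the key names are documented in the table below), the following clears the anycast gateway MAC by leaving its value empty and removes a previously configured loopback interface by setting its state to absent:
-
-    os10_vxlan:
-      anycast_gateway_mac:
-      loopback:
-        loopback_id: 10
-        state: "absent"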
-
-**os10_vxlan keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``anycast_gateway_mac`` | string | Configures an anycast gateway MAC address for a VxLAN virtual network | os10 |
-| ``loopback`` | dictionary | Configures the loopback interface (see ``loopback.*``) | os10 |
-| ``loopback.loopback_id`` | integer | Configures the loopback interface number (0 to 16383) | os10 |
-| ``loopback.description`` | string | Configures the interface description | os10 |
-| ``loopback.ip_address`` | string | Configures the IP address | os10 |
-| ``loopback.state`` | string: absent,present\* | Removes loopback interface if set to absent | os10 |
-| ``nve`` | dictionary | Configures network virtualization edge (see ``nve.*``) | os10 |
-| ``nve.source_interface`` | integer | Configures source loopback interface | os10 |
-| ``nve.controller`` | dictionary | Configures controller; supports only one controller connection at a time (see ``controller.*``) | os10 |
-| ``controller.name`` | string: NSX, ovsdb | Configures the NVE controller | os10 |
-| ``controller.max_backoff`` | integer | Configures max_backoff value (setting an empty value negates the corresponding configuration) | os10 |
-| ``controller.control_cfg`` | list | Configures the controller IP and port (see ``control_cfg.*``) | os10 |
-| ``control_cfg.ip_addr`` | string | Configures the controller IP | os10 |
-| ``control_cfg.port`` | integer | Configures the controller port | os10 |
-| ``control_cfg.state`` | string: absent,present\* | Removes the controller IP and port configuration if set to absent | os10 |
-| ``controller.state`` | string: absent,present\* | Removes the controller if set to absent | os10 |
-| ``nve.state`` | string: absent,present\* | Removes the NVE if set to absent | os10 |
-| ``evpn`` | dictionary | Enables EVPN in control plane (see ``evpn.*``) | os10 |
-| ``evpn.autoevi`` | boolean: True, False | Configures auto-EVI; no further manual configuration is allowed in auto-EVI mode | os10 |
-| ``evpn.rmac`` | string | Configures router MAC address | os10 |
-| ``evpn.evi`` | list | Configures EVPN instance (see ``evi.*``)| os10 |
-| ``evpn.dis_rt_asn`` | boolean | Enables/disables AS number usage in route target | os10 |
-| ``evpn.vrf`` | list | Enables VRF for EVPN | os10 |
-| ``vrf.name`` | string | Configures VRF name | os10 |
-| ``vrf.state`` | string(present,absent) | Configures/removes VRF for EVPN | os10 |
-| ``vrf.vni`` | integer | Configures VNI for the VRF | os10 |
-| ``vrf.rd`` | string | Configures RD for the VRF | os10 |
-| ``vrf.route_target`` | list | Enables route targets for the VRF | os10 |
-| ``route_target.type`` | string (manual, auto) | Configures the route target type | os10 |
-| ``route_target.asn_value`` | string | Configure AS number | os10 |
-| ``route_target.state`` | string (present,absent) | Configures/unconfigures the route target | os10 |
-| ``route_target.route_target_type`` | string | Configures the route target type | os10 |
-| ``vrf.adv_ipv4`` | list | Enables IPv4 advertisement for the VRF | os10 |
-| ``adv_ipv4.type`` | string | Configures IPv4 advertisement type | os10 |
-| ``adv_ipv4.rmap_name`` | string | Configures route-map for advertisement | os10 |
-| ``adv_ipv4.unconfig`` | boolean | Removes the IPv4 advertisement configuration if set to true | os10 |
-| ``evi.id`` | integer | Configures the EVPN instance ID (1 to 65535) | os10 |
-| ``evi.rd`` | string | Configures the route distinguisher | os10 |
-| ``evi.vni`` | dictionary | Configures VNI value (see ``vni.*``) | os10 |
-| ``vni.id`` | integer | Configures VNI value; configure the same VNI value configured for the VxLAN virtual network | os10 |
-| ``vni.state`` | string: absent,present\* | Removes the VNI if set to absent | os10 |
-| ``evi.route_target`` | list | Configures route target (see ``route_target.*``) | os10 |
-| ``route_target.type`` | string: manual,auto | Configures the route target (auto mode auto-configures an import and export value for EVPN routes) | os10 |
-| ``route_target.asn_value`` | string | Configures the route target ASN value | os10 |
-| ``route_target.route_target_type`` | string: import,export,both | Configures the route target type | os10 |
-| ``route_target.state`` | string: absent,present\* | Removes the route target if set to absent | os10 |
-| ``evi.state`` | string: absent,present\* | Removes EVPN instance ID if set to absent | os10 |
-| ``evpn.state`` | string: absent,present\* | Removes the EVPN configuration if set to absent | os10 |
-| ``virtual_network`` | dictionary | Configures the virtual network attributes (see ``virtual_network.*``) | os10 |
-| ``virtual_network.untagged_vlan`` | integer | Configures the reserved untagged VLAN ID (1 to 4093) | os10 |
-| ``virtual_network.virtual_net`` | list | Configures the virtual network attributes for VxLAN tunneling (see ``virtual_net.*``) | os10 |
-| ``virtual_net.id`` | integer | Configures a virtual network (virtual-network ID, from 1 to 65535) | os10 |
-| ``virtual_net.description`` | string | Configures the description for virtual network | os10 |
-| ``virtual_net.vlt_vlan_id`` | integer | Configures the VLTi VLAN ID | os10 |
-| ``virtual_net.member_interface`` | list | Configures the trunk member interface attributes to the virtual network (see ``member_interface.*``) | os10 |
-| ``member_interface.ifname`` | string | Configures interface name to provision the virtual network member interface | os10 |
-| ``member_interface.type`` | string: tagged,untagged | Configures the type to provision the virtual network member interface | os10 |
-| ``member_interface.vlanid`` | integer | Configures the VLAN ID to provision the virtual network member interface | os10 |
-| ``member_interface.state`` | string: absent,present\* | Removes the virtual network member interface if set to absent | os10 |
-| ``virtual_net.vxlan_vni`` | dictionary | Configures the VxLAN attributes to virtual network (see ``vxlan_vni.*``) | os10 |
-| ``vxlan_vni.id`` | integer | Configures the VxLAN ID to a virtual network | os10 |
-| ``vxlan_vni.remote_endpoint`` | list | Configures the IP address of a remote tunnel endpoint in a VxLAN network (see ``remote_endpoint.*``) | os10 |
-| ``remote_endpoint.ip`` | string | Configures the IP address of a remote tunnel endpoint (1.1.1.1) | os10 |
-| ``remote_endpoint.state`` | string: absent,present\* | Removes the remote tunnel endpoint in a VxLAN network if set to absent | os10 |
-| ``vxlan_vni.state`` | string: absent,present\* | Removes the VxLAN ID if set to absent | os10 |
-| ``virtual_net.state`` | string: absent,present\* | Removes a virtual network if set to absent | os10 |
-| ``vlan_association`` | list | Configures the VLAN association with virtual network (see ``vlan_association.*``) | os10 |
-| ``vlan_association.vlan_id`` | integer | Specifies the VLAN ID | os10 |
-| ``vlan_association.virtual_net`` | integer | Specifies the virtual network ID to be associated with the VLAN | os10 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_vxlan* role to configure the VxLAN network, the source IP address of the VxLAN tunnel endpoint, and virtual networks. It creates a *hosts* file with the switch details, and a *host_vars* file with the connection variables and the corresponding role variables. The *hosts* file should define the `ansible_network_os` variable with the corresponding Dell EMC OS10 name.
-
-When `os10_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os10_vxlan* role.
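-
-As a minimal sketch (values are placeholders), configuration generation can be enabled in *host_vars* with:
-
-    os10_cfg_generate: true
-    build_dir: ../temp/os10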
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/os10
-
- os10_vxlan:
- anycast_gateway_mac: "00:22:33:44:55:66"
- loopback:
- loopback_id: 10
- description: "HARDWARE_VXLAN"
- ip_address: "10.8.0.1/32"
- state: "present"
- nve:
- source_interface: 10
- controller:
- name: "ovsdb"
- max_backoff: 2000
- control_cfg:
- - ip_addr: "1.2.3.4"
- port: 30
- state: "present"
- state: "present"
- state: "present"
- evpn:
- autoevi: False
- evi:
- - id: 111
- rd: "auto"
- vni:
- id: 111
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "111:111"
- route_target_type: "both"
- state: "present"
- - type: "manual"
- asn_value: "11:11"
- route_target_type: "export"
- state: "present"
- state: "present"
- - id: 222
- rd: "2.2.2.2:222"
- vni:
- id: 222
- state: "present"
- route_target:
- - type: "auto"
- asn_value:
- route_target_type:
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- - name: "blue"
- state: "absent"
-        rmac: "00:11:11:11:11:11"
- dis_rt_asn: "true"
- state: "present"
- virtual_network:
- untagged_vlan: 1001
- virtual_net:
- - id: 111
- description: "NSX_Cluster_VNI_111"
- vlt_vlan_id: 11
- member_interface:
- - ifname: "ethernet 1/1/15"
- type: "tagged"
- vlanid: 15
- state: "present"
- - ifname: "port-channel 12"
- type: "tagged"
- vlanid: 11
- state: "present"
- vxlan_vni:
- id: 111
- remote_endpoint:
- - ip: "1.1.1.1"
- state: "present"
- - ip: "11.11.11.11"
- state: "present"
- - ip: "111.111.111.111"
- state: "present"
- state: "present"
- state: "present"
- - id: 222
- description: "NSX_Cluster_VNI_222"
- vlt_vlan_id: 22
- member_interface:
- - ifname: "ethernet 1/1/16"
- type: "tagged"
- vlanid: 16
- state: "present"
- vxlan_vni:
- id: 222
- remote_endpoint:
- - ip: "2.2.2.2"
- state: "present"
- - ip: "22.22.22.22"
- state: "present"
- state: "present"
- state: "present"
- vlan_association:
-      - vlan_id: 111
- virtual_net: 111
-
-> **NOTE**: Member interfaces should be in switchport trunk mode, which can be configured using the *os10_interface* role.
-
-**Simple playbook to configure VxLAN — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os10.os10_vxlan
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
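-
-If `os10_cfg_generate` is set to true, the generated commands can also be reviewed offline; based on the template task in *tasks/main.yml*, the output for this example would be written to a file such as the following (path shown for illustration):
-
-    cat ../temp/os10/vxlan10_leaf1.conf.part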
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml
deleted file mode 100644
index adeae5500..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_vxlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml
deleted file mode 100644
index a6b6bc571..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_vxlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml
deleted file mode 100644
index 87908f3ce..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/meta/main.yml
+++ /dev/null
@@ -1,20 +0,0 @@
-# Copyright (c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- role_name: os10_vxlan
- author: Dell EMC Networking Engineering
- description: >
-    The os10_vxlan role facilitates the configuration of NVE, EVPN, and virtual network attributes in devices
- running Dell EMC SmartFabric OS10.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml
deleted file mode 100644
index f90be2d76..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating vxlan configuration for os10"
- template:
- src: os10_vxlan.j2
- dest: "{{ build_dir }}/vxlan10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning vxlan configuration for os10"
- os10_config:
- src: os10_vxlan.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2 b/ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2
deleted file mode 100644
index b56da068c..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/templates/os10_vxlan.j2
+++ /dev/null
@@ -1,434 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-#Purpose:
-Configure VXLAN commands for os10 Devices
-os10_vxlan:
- anycast_gateway_mac: "00:22:33:44:55:66"
- loopback:
- loopback_id: 10
- description: "HARDWARE_VXLAN"
- ip_address: "10.8.0.1/32"
- state: "present"
- nve:
- source_interface: 10
- controller:
- name: "ovsdb"
- max_backoff: 2000
- control_cfg:
- - ip_addr: "1.2.3.4"
- port: 30
- state: "present"
- state: "present"
- state: "present"
- evpn:
- autoevi: False
- evi:
- - id: 111
- rd: "auto"
- vni:
- id: 111
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "111:111"
- route_target_type: "both"
- state: "present"
- - type: "manual"
- asn_value: "11:11"
- route_target_type: "export"
- state: "present"
- state: "present"
- - id: 222
- rd: "2.2.2.2:222"
- vni:
- id: 222
- state: "present"
- route_target:
- - type: "auto"
- asn_value:
- route_target_type:
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- - name: "blue"
- state: "absent"
- rmac: 00:11:11:11:11:11
- dis_rt_asn: "true"
- state: "present"
- virtual_network:
- untagged_vlan: 1001
- virtual_net:
- - id: 111
- description: "NSX_Cluster_VNI_111"
- vlt_vlan_id: 11
- member_interface:
- - ifname: "ethernet 1/1/15"
- type: "tagged"
- vlanid: 15
- state: "present"
- - ifname: "port-channel 12"
- type: "tagged"
- vlanid: 11
- state: "present"
- vxlan_vni:
- id: 111
- remote_endpoint:
- - ip: "1.1.1.1"
- state: "present"
- - ip: "11.11.11.11"
- state: "present"
- - ip: "111.111.111.111"
- state: "present"
- state: "present"
- state: "present"
- - id: 222
- description: "NSX_Cluster_VNI_222"
- vlt_vlan_id: 22
- member_interface:
- - ifname: "ethernet 1/1/16"
- type: "tagged"
- vlanid: 16
- state: "present"
- vxlan_vni:
- id: 222
- remote_endpoint:
- - ip: "2.2.2.2"
- state: "present"
- - ip: "22.22.22.22"
- state: "present"
- state: "present"
- state: "present"
- vlan_association:
-    - vlan_id: 111
- virtual_net: 111
-###############################################}
-{% if os10_vxlan is defined and os10_vxlan %}
- {% if os10_vxlan.anycast_gateway_mac is defined %}
- {% if os10_vxlan.anycast_gateway_mac %}
-ip virtual-router mac-address {{ os10_vxlan.anycast_gateway_mac }}
- {% else %}
-no ip virtual-router mac-address
- {% endif %}
- {% endif %}
- {% if os10_vxlan.loopback is defined %}
- {% set loopback = os10_vxlan.loopback %}
- {% if loopback.state is defined and loopback.state == "absent" %}
- {% if loopback.loopback_id is defined and loopback.loopback_id %}
-no interface loopback {{ loopback.loopback_id }}
- {% endif %}
- {% else %}
- {% if loopback.loopback_id is defined and loopback.loopback_id %}
-interface loopback {{ loopback.loopback_id }}
- {% endif %}
- {% if loopback.description is defined %}
- {% if loopback.description %}
- description {{ loopback.description }}
- {% else %}
- no description {{ loopback.description }}
- {% endif %}
- {% endif %}
- {% if loopback.ip_address is defined %}
- {% if loopback.ip_address %}
- ip address {{ loopback.ip_address }}
- {% else %}
- no ip address
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if os10_vxlan.nve is defined and os10_vxlan.nve %}
- {% set nve = os10_vxlan.nve %}
- {% if nve.state is defined and nve.state == "absent" %}
-no nve
- {% else %}
-nve
- {% if nve.source_interface is defined %}
- {% if nve.source_interface >= 0 %}
- source-interface loopback{{ nve.source_interface }}
- {% else %}
- no source-interface
- {% endif %}
- {% endif %}
- {% if nve.controller is defined %}
- {% set controller = os10_vxlan.nve.controller %}
- {% if controller.state is defined and controller.state == "absent" %}
- no controller
- {% else %}
- {% if controller.name is defined and controller.name %}
- controller {{ controller.name }}
- {% endif %}
- {% if controller.max_backoff is defined %}
- {% if controller.max_backoff %}
- max-backoff {{ controller.max_backoff }}
- {% else %}
- no max-backoff
- {% endif %}
- {% endif %}
- {% if controller.control_cfg is defined and controller.control_cfg %}
- {% for ctrl_cfg in controller.control_cfg %}
- {% if ctrl_cfg.ip_addr is defined and ctrl_cfg.ip_addr %}
- {% if ctrl_cfg.port is defined and ctrl_cfg.port %}
- {% if ctrl_cfg.state is defined and ctrl_cfg.state == "absent" %}
- {% if controller.name == "ovsdb" %}
- no ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }} ssl
- {% else %}
- no ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }}
- {% endif %}
- {% else %}
- {% if controller.name == "ovsdb" %}
- ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }} ssl
- {% else %}
- ip {{ ctrl_cfg.ip_addr }} port {{ ctrl_cfg.port }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if os10_vxlan.evpn is defined and os10_vxlan.evpn %}
- {% set evpn = os10_vxlan.evpn %}
- {% if evpn.state is defined and evpn.state == "absent" %}
-no evpn
- {% else %}
-evpn
- {% if evpn.rmac is defined %}
- {% if evpn.rmac %}
- router-mac {{ evpn.rmac }}
- {% else %}
- no router-mac
- {% endif %}
- {% endif %}
- {% if evpn.dis_rt_asn is defined and evpn.dis_rt_asn == "true" %}
- disable-rt-asn
- {% else %}
- no disable-rt-asn
- {% endif %}
- {% if evpn.evi is defined and evpn.evi %}
- {% for evi in evpn.evi %}
- {% if evi.id is defined and evi.id %}
- {% if evi.state is defined and evi.state == "absent" %}
- no evi {{ evi.id }}
- {% else %}
- evi {{ evi.id }}
- {% if evi.vni is defined and evi.vni %}
- {% if evi.vni.id is defined and evi.vni.id %}
- {% if evi.vni.state is defined and evi.vni.state == "absent" %}
- no vni {{ evi.vni.id }}
- {% else %}
- vni {{ evi.vni.id }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if evi.rd is defined %}
- {% if evi.rd %}
- rd {{ evi.rd }}
- {% else %}
- no rd
- {% endif %}
- {% endif %}
- {% if evi.route_target is defined and evi.route_target %}
- {% for rt in evi.route_target %}
- {% if rt.type is defined and rt.type == "manual" %}
- {% if rt.asn_value is defined and rt.asn_value %}
- {% if rt.state is defined and rt.state == "absent" %}
- no route-target {{ rt.asn_value }}
- {% else %}
- {% if rt.route_target_type is defined and rt.route_target_type %}
- route-target {{ rt.asn_value }} {{ rt.route_target_type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif rt.type is defined and rt.type == "auto" %}
- {% if rt.state is defined and rt.state == "absent" %}
- no route-target auto
- {% else %}
- route-target auto
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if evpn.vrf is defined %}
- {% for evpn_vrf in evpn.vrf %}
- {% if evpn_vrf.state is defined and evpn_vrf.state == "absent" and evpn_vrf.name %}
- no vrf {{ evpn_vrf.name }}
- {% elif evpn_vrf.name %}
- vrf {{ evpn_vrf.name }}
- {% if evpn_vrf.vni is defined %}
- {% if evpn_vrf.vni %}
- vni {{ evpn_vrf.vni }}
- {% else %}
- no vni
- {% endif %}
- {% endif %}
- {% if evpn_vrf.rd is defined %}
- {% if evpn_vrf.rd %}
- rd {{ evpn_vrf.rd }}
- {% else %}
- no rd
- {% endif %}
- {% endif %}
- {% if evpn_vrf.route_target is defined and evpn_vrf.route_target %}
- {% for rt in evpn_vrf.route_target %}
- {% if rt.type is defined and rt.type == "manual" %}
- {% if rt.asn_value is defined and rt.asn_value %}
- {% if rt.state is defined and rt.state == "absent" %}
- no route-target {{ rt.asn_value }}
- {% else %}
- {% if rt.route_target_type is defined and rt.route_target_type %}
- route-target {{ rt.asn_value }} {{ rt.route_target_type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif rt.type is defined and rt.type == "auto" %}
- {% if rt.state is defined and rt.state == "absent" %}
- no route-target auto
- {% else %}
- route-target auto
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if evpn_vrf.adv_ipv4 is defined and evpn_vrf.adv_ipv4 %}
- {% for rt in evpn_vrf.adv_ipv4 %}
- {% if rt.unconfig is defined and rt.unconfig == "true" and rt.type is defined and rt.rmap_name is defined %}
- no advertise ipv4 {{ rt.type }} route-map {{ rt.rmap_name }}
- {% elif rt.type is defined and rt.rmap_name is defined %}
- advertise ipv4 {{ rt.type }} route-map {{ rt.rmap_name }}
- {% elif rt.unconfig is defined and rt.unconfig == "true" and rt.type is defined %}
- no advertise ipv4 {{ rt.type }}
- {% elif rt.type is defined %}
- advertise ipv4 {{ rt.type }}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if evpn.autoevi is defined %}
- {% if evpn.autoevi == True %}
- auto-evi
- {% else %}
- no auto-evi
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if os10_vxlan.virtual_network is defined and os10_vxlan.virtual_network %}
- {% set vir_net = os10_vxlan.virtual_network %}
- {% if vir_net.untagged_vlan is defined %}
- {% if vir_net.untagged_vlan %}
-virtual-network untagged-vlan {{ vir_net.untagged_vlan }}
- {% else %}
-no virtual-network untagged-vlan
- {% endif %}
- {% endif %}
- {% if vir_net.virtual_net is defined and vir_net.virtual_net %}
- {% for v_net in vir_net.virtual_net %}
- {% if v_net.id is defined and v_net.id %}
- {% if v_net.state is defined and v_net.state == "absent" %}
-no interface virtual-network {{ v_net.id }}
-no virtual-network {{ v_net.id }}
- {% else %}
-virtual-network {{ v_net.id }}
- {% if v_net.description is defined %}
- {% if v_net.description %}
- description {{ v_net.description }}
- {% else %}
- no description
- {% endif %}
- {% endif %}
- {% if v_net.vlt_vlan_id is defined %}
- {% if v_net.vlt_vlan_id %}
- vlti-vlan {{ v_net.vlt_vlan_id }}
- {% else %}
- no vlti-vlan
- {% endif %}
- {% endif %}
- {% if v_net.member_interface is defined and v_net.member_interface %}
- {% for member_intf in v_net.member_interface %}
- {% if member_intf.ifname is defined and member_intf.ifname %}
- {% if member_intf.type is defined %}
- {% if member_intf.type == "tagged" %}
- {% if member_intf.vlanid is defined and member_intf.vlanid %}
- {% if member_intf.state is defined and member_intf.state == "absent" %}
- no member-interface {{ member_intf.ifname }} vlan-tag {{ member_intf.vlanid }}
- {% else %}
- member-interface {{ member_intf.ifname }} vlan-tag {{ member_intf.vlanid }}
- {% endif %}
- {% endif %}
- {% elif member_intf.type == "untagged" %}
- {% if member_intf.state is defined and member_intf.state == "absent" %}
- no member-interface {{ member_intf.ifname }} untagged
- {% else %}
- member-interface {{ member_intf.ifname }} untagged
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if v_net.vxlan_vni is defined and v_net.vxlan_vni %}
- {% set vxlan_vni = v_net.vxlan_vni %}
- {% if vxlan_vni.id is defined and vxlan_vni.id %}
- {% if vxlan_vni.state is defined and vxlan_vni.state == "absent" %}
- no vxlan-vni {{ vxlan_vni.id }}
- {% else %}
- vxlan-vni {{ vxlan_vni.id }}
- {% if vxlan_vni.remote_endpoint is defined and vxlan_vni.remote_endpoint %}
- {% for remote_endpt in vxlan_vni.remote_endpoint %}
- {% if remote_endpt.ip is defined and remote_endpt.ip %}
- {% if remote_endpt.state is defined and remote_endpt.state == "absent" %}
- no remote-vtep {{ remote_endpt.ip }}
- {% else %}
- remote-vtep {{ remote_endpt.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if os10_vxlan.vlan_association is defined and os10_vxlan.vlan_association %}
- {% for vlan in os10_vxlan.vlan_association %}
- {% if vlan.vlan_id is defined and vlan.vlan_id %}
-interface vlan{{ vlan.vlan_id }}
- {% if vlan.virtual_net is defined %}
- {% if vlan.virtual_net %}
- virtual-network {{ vlan.virtual_net }}
- {% else %}
- no virtual-network
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml
deleted file mode 100644
index d326e6355..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/main.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
----
-os10_vxlan:
- anycast_gateway_mac: "00:22:33:44:55:66"
- loopback:
- loopback_id: 10
- description: "HARDWARE_VXLAN"
- ip_address: "10.8.0.1/32"
- state: "present"
- nve:
- source_interface: 10
- controller:
- name: "ovsdb"
- max_backoff: 2000
- control_cfg:
- - ip_addr: "1.2.3.4"
- port: 30
- state: "present"
- state: "present"
- state: "present"
- evpn:
- autoevi: False
- evi:
- - id: 111
- rd: "auto"
- vni:
- id: 111
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "111:111"
- route_target_type: "both"
- state: "present"
- - type: "manual"
- asn_value: "11:11"
- route_target_type: "export"
- state: "present"
- state: "present"
- - id: 222
- rd: "2.2.2.2:222"
- vni:
- id: 222
- state: "present"
- route_target:
- - type: "auto"
- asn_value:
- route_target_type:
- state: "present"
- state: "present"
- vrf:
- - name: "test"
- vni: 1000
- adv_ipv4:
- - type: "connected"
- state: "present"
- - type: "bgp"
- state: "present"
- route_target:
- - type: "manual"
- asn_value: "65530:65534"
- route_target_type: "both"
- state: "present"
- - name: "blue"
- state: "absent"
-    rmac: "00:11:11:11:11:11"
- dis_rt_asn: "true"
- state: "present"
- virtual_network:
- untagged_vlan: 1001
- virtual_net:
- - id: 111
- description: "NSX_Cluster_VNI_111"
- vlt_vlan_id: 11
- member_interface:
- - ifname: "ethernet 1/1/15"
- type: "tagged"
- vlanid: 15
- state: "present"
- - ifname: "port-channel 12"
- type: "tagged"
- vlanid: 11
- state: "present"
- vxlan_vni:
- id: 111
- remote_endpoint:
- - ip: "1.1.1.1"
- state: "present"
- - ip: "11.11.11.11"
- state: "present"
- - ip: "111.111.111.111"
- state: "present"
- state: "present"
- state: "present"
- - id: 222
- description: "NSX_Cluster_VNI_222"
- vlt_vlan_id: 22
- member_interface:
- - ifname: "ethernet 1/1/16"
- type: "tagged"
- vlanid: 16
- state: "present"
- vxlan_vni:
- id: 222
- remote_endpoint:
- - ip: "2.2.2.2"
- state: "present"
- - ip: "22.22.22.22"
- state: "present"
- state: "present"
- state: "present"
- vlan_association:
-    - vlan_id: 111
- virtual_net: 111
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml
deleted file mode 100644
index f5e4a6c11..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os10host
- connection: network_cli
- roles:
- - dellemc.os10.os10_vxlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml
deleted file mode 100644
index 0373f0aab..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_vxlan/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_vxlan
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE b/ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/README.md b/ansible_collections/dellemc/os10/roles/os10_xstp/README.md
deleted file mode 100644
index 0dd919b27..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/README.md
+++ /dev/null
@@ -1,196 +0,0 @@
-# xSTP role
-
-This role facilitates the configuration of xSTP attributes. It supports multiple versions of the spanning-tree protocol (STP): rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LANs (VLANs) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC SmartFabric OS10.
-
-The xSTP role requires an SSH connection for connectivity to a Dell EMC SmartFabric OS10 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os10.os10` as the value
-- If `os10_cfg_generate` is set to true, the role generates the configuration commands in a file
-- `os10_xstp` (dictionary) contains the hostname (dictionary)
-- Hostname is the value of the *hostname* variable that corresponds to the name of the device
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the short snippet after this list)
-- Variables and values are case-sensitive
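-
-As a minimal illustration (a sketch for this README, not one of the role's shipped samples), an empty value for a setting generates the corresponding `no` command, while a non-empty value configures it:
-
-    os10_xstp:
-      type: rstp
-      rstp:
-        max_age: 6      # a value generates "spanning-tree rstp max-age 6"
-        hello_time:     # an empty value generates "no spanning-tree rstp hello-time"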
-
-**hostname keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|----------------------|
-| ``type`` | string (required) | Configures the spanning-tree mode; the supported modes vary by device and include RSTP, rapid-PVST, and MST | os10 |
-| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in the type variable | os10 |
-| ``mac_flush_timer`` | integer | Configures the mac_flush_timer value (0 to 500) | os10 |
-| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os10 |
-| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os10 |
-| ``rstp.max_age`` | integer | Configures the max_age timer for RSTP (6 to 40) | os10 |
-| ``rstp.hello_time`` | integer | Configures the hello-time for RSTP (1 to 10) | os10 |
-| ``rstp.forward_time`` | integer | Configures the forward-time for RSTP (4 to 30) | os10 |
-| ``rstp.force_version`` | string: stp | Configures the force version for the BPDUs transmitted by RSTP | os10 |
-| ``rstp.mac_flush_threshold`` | integer | Configures the MAC flush threshold for RSTP (1 to 65535) | os10 |
-| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os10 |
-| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os10 |
-| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os10 |
-| ``vlan.max_age`` | integer | Configures the max_age timer for a VLAN (6 to 40) | os10 |
-| ``vlan.hello_time`` | integer | Configures the hello-time for a VLAN (1 to 10) | os10 |
-| ``vlan.forward_time`` | integer | Configures the forward-time for a VLAN (4 to 30) | os10 |
-| ``vlan.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated VLAN range_or_id | os10 |
-| ``vlan.mac_flush_threshold`` | integer | Configures the MAC flush threshold for a VLAN (1 to 65535) | os10 |
-| ``vlan.root`` | string: primary,secondary | Designates the primary or secondary root for the associated VLAN range_or_id; mutually exclusive with *vlan.bridge_priority* | os10 |
-| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os10 |
-| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os10 |
-| ``mstp.max_age`` | integer | Configures the max_age timer for MSTP (6 to 40) | os10 |
-| ``mstp.max_hops`` | integer | Configures the max-hops for MSTP (6 to 40) | os10 |
-| ``mstp.hello_time`` | integer | Configures the hello-time for MSTP (1 to 10) | os10 |
-| ``mstp.forward_time`` | integer | Configures the forward-time for MSTP (4 to 30) | os10 |
-| ``mstp.force_version`` | string: stp,rstp | Configures the force-version for the BPDUs transmitted by MSTP | os10 |
-| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os10 |
-| ``mstp_instances.number_or_range`` | integer | Configures the multiple spanning-tree instance number| os10 |
-| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os10 |
-| ``mstp_instances.enable`` | boolean: true,false | Enables/disables spanning-tree for the associated MSTP instance | os10 |
-| ``mstp_instances.mac_flush_threshold`` | integer | Configures the MAC flush-threshold for an MSTP instance (1 to 65535) | os10 |
-| ``mstp_instances.root`` | string: primary,secondary | Designates the primary or secondary root for the associated MSTP instance; mutually exclusive with *mstp_instances.bridge_priority* | os10 |
-| ``mstp.mst_config`` | dictionary | Configures multiple spanning-tree (see ``mstp.mst_config.*``) | os10 |
-| ``mst_config.name`` | string | Configures the name which is specified for the MSTP | os10 |
-| ``mst_config.revision`` | integer | Configures the revision number for MSTP | os10 |
-| ``mst_config.cfg_list`` | list | Configures the multiple spanning-tree list (see ``mst_config.cfg_list.*``) | os10 |
-| ``cfg_list.number`` | integer | Specifies the MSTP instance number | os10 |
-| ``cfg_list.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to an instance number | os10 |
-| ``cfg_list.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os10 |
-| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os10 |
-| ``intf <interface name>``| dictionary | Configures the interface name (see ``intf.<interface name>.*``) | os10 |
-| ``intf.<interface name>.edge_port`` | boolean: true,false | Configures the EdgePort as dynamic if set to true | os10 |
-| ``intf.<interface name>.bpdu_filter``| boolean: true,false | Enables/disables bpdufilter at the interface | os10 |
-| ``intf.<interface name>.bpdu_guard``| boolean: true,false | Enables/disables bpduguard at the interface | os10 |
-| ``intf.<interface name>.guard``| string: loop,root,none | Configures guard on the interface | os10 |
-| ``intf.<interface name>.enable`` | boolean: true,false | Enables/disables spanning-tree at the interface level | os10 |
-| ``intf.<interface name>.link_type``| string: auto,point-to-point,shared | Configures the link type at the interface | os10 |
-| ``intf.<interface name>.rstp`` | dictionary | Configures the RSTP interface name (see ``intf.<interface name>.rstp.*``) | os10 |
-| ``rstp.priority``| integer | Configures the RSTP priority value at the interface | os10 |
-| ``rstp.cost`` | integer | Configures the RSTP cost value at the interface | os10 |
-| ``intf.<interface name>.msti`` | list | Configures the MSTi interface name (see ``intf.<interface name>.msti``) | os10 |
-| ``msti.instance_number`` | integer or range | Specifies the MSTP instance number or range | os10 |
-| ``msti.priority`` | integer | Specifies the priority value to be configured at the interface | os10 |
-| ``msti.cost`` | integer | Specifies the cost value to be configured at the interface | os10 |
-| ``intf.<interface name>.vlan`` | list | Configures the VLAN interface name (see ``intf.<interface name>.vlan``) | os10 |
-| ``vlan.range_or_id`` | integer or range | Specifies the VLAN ID or range | os10 |
-| ``vlan.priority`` | integer | Specifies the priority value to be configured at the interface | os10 |
-| ``vlan.cost`` | integer | Specifies the cost value to be configured at the interface | os10 |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os10, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-
-Example playbook
-----------------
-
-This example uses the *os10_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path.
-
-The example also includes a simple playbook that only references the *os10_xstp* role. By including the role, you automatically get access to all of the tasks needed to configure xSTP.
-
-**Sample hosts file**
-
-    spine1 ansible_host=<ip_address>
-
-**Sample host_vars/spine1**
-
- hostname: spine1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os10.os10
- build_dir: ../temp/temp_os10
-
-**Sample vars/main.yml**
-
- os10_xstp:
- type: rstp
- enable: true
- path_cost: true
- mac_flush_timer: 4
- rstp:
- max_age: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- bridge_priority: 4096
- mac_flush_threshold: 5
- pvst:
- vlan:
- - range_or_id: 10
- max_age: 6
- enable: true
- hello_time: 7
- forward_time: 7
- bridge_priority: 4096
- mac_flush_threshold: 9
- mstp:
- max_age: 6
- max_hops: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- mstp_instances:
- - number_or_range: 1
- enable: true
- mac_flush_threshold: 9
- bridge_priority: 4096
- mst_config:
- name: cfg1
- revision: 5
- cfg_list:
- - number: 1
- vlans: 10,12
- vlans_state: present
- intf:
- ethernet 1/1/8:
- edge_port: true
- bpdu_filter: true
- bpdu_guard: true
- guard: loop
- enable: true
- link_type: point-to-point
- msti:
- - instance_number: 1
- priority: 32
- cost: 1
- rstp:
- priority: 32
- cost: 7
- vlan:
- - range_or_id: 6
- priority: 16
- cost: 8
-
-
-**Simple playbook to setup system — spine.yml**
-
- - hosts: spine
- roles:
- - dellemc.os10.os10_xstp
-
-**Run**
-
- ansible-playbook -i hosts spine.yml
-
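-To also write the generated commands to a file under *build_dir*, the `os10_cfg_generate` variable (described under Role variables) can be passed on the command line; one possible invocation:
-
-    ansible-playbook -i hosts spine.yml -e os10_cfg_generate=true
-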
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml
deleted file mode 100644
index daaf0f8fa..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os10.os10_xstp
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml
deleted file mode 100644
index 645522da6..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os10.os10_xstp
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml
deleted file mode 100644
index 8a63b5009..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os10_xstp role facilitates the configuration of STP attributes in devices running Dell EMC SmartFabric OS10.
- company: Dell Technologies
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os10
-
- galaxy_tags:
- - networking
- - dell
- - dellemc
- - emc
- - os10
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml
deleted file mode 100644
index e14eb0f76..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os10
- - name: "Generating xSTP configuration for os10"
- template:
- src: os10_xstp.j2
- dest: "{{ build_dir }}/xstp10_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10") and ((os10_cfg_generate | default('False')) | bool)
-# notify: save config os10
- register: generate_output
-
- - name: "Provisioning xSTP configuration for os10"
- os10_config:
- src: os10_xstp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os10.os10")
-# notify: save config os10
- register: output
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2 b/ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2
deleted file mode 100644
index dc7456baf..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/templates/os10_xstp.j2
+++ /dev/null
@@ -1,398 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{###############################################
-PURPOSE: Configure xSTP commands for OS10 devices
-os10_xstp:
- type: rstp
- enable: true
- mac_flush_timer: 4
- rstp:
- max_age: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- bridge_priority: 4096
- mac_flush_threshold: 5
- pvst:
- vlan:
- - range_or_id: 10
- max_age: 6
- enable: true
- hello_time: 7
- forward_time: 7
- bridge_priority: 4096
- mac_flush_threshold: 9
- mstp:
- max_age: 6
- max_hops: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- mstp_instances:
- - number_or_range: 1
- enable: true
- mac_flush_threshold: 9
- bridge_priority: 4096
- mst_config:
- name: cfg1
- revision: 5
- cfg_list:
- - number: 1
- vlans: 10,12
- vlans_state: present
- intf:
- ethernet 1/1/8:
- edge_port: true
- bpdu_filter: true
- bpdu_guard: true
- guard: loop
- enable: true
- link_type: point-to-point
- msti:
- - instance_number: 1
- priority: 32
- cost: 1
- rstp:
- priority: 32
- cost: 7
- vlan:
- - range_or_id: 6
- priority: 16
- cost: 8
-#################################################}
-{% if os10_xstp is defined and os10_xstp %}
-{% set xstp_vars = os10_xstp %}
-{% if xstp_vars.type is defined %}
- {% if xstp_vars.type %}
-spanning-tree mode {{ xstp_vars.type }}
- {% else %}
-no spanning-tree mode r
- {% endif %}
-{% endif %}
-{% if xstp_vars.enable is defined %}
- {% if xstp_vars.enable %}
-no spanning-tree disable
- {% else %}
-spanning-tree disable
- {% endif %}
-{% endif %}
-{% if xstp_vars.mac_flush_timer is defined %}
- {% if xstp_vars.mac_flush_timer == 0 or xstp_vars.mac_flush_timer %}
-spanning-tree mac-flush-timer {{ xstp_vars.mac_flush_timer }}
- {% else %}
-no spanning-tree mac-flush-timer
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.rstp is defined and xstp_vars.rstp %}
- {% set val = xstp_vars.rstp %}
- {% if val.bridge_priority is defined %}
- {% if val.bridge_priority == 0 or val.bridge_priority %}
-spanning-tree rstp priority {{ val.bridge_priority }}
- {% else %}
-no spanning-tree rstp priority
- {% endif %}
- {% endif %}
- {% if val.forward_time is defined %}
- {% if val.forward_time %}
-spanning-tree rstp forward-time {{ val.forward_time }}
- {% else %}
-no spanning-tree rstp forward-time
- {% endif %}
- {% endif %}
- {% if val.hello_time is defined %}
- {% if val.hello_time %}
-spanning-tree rstp hello-time {{ val.hello_time }}
- {% else %}
-no spanning-tree rstp hello-time
- {% endif %}
- {% endif %}
- {% if val.max_age is defined %}
- {% if val.max_age %}
-spanning-tree rstp max-age {{ val.max_age }}
- {% else %}
-no spanning-tree rstp max-age
- {% endif %}
- {% endif %}
- {% if val.mac_flush_threshold is defined %}
- {% if val.mac_flush_threshold %}
-spanning-tree rstp mac-flush-threshold {{ val.mac_flush_threshold }}
- {% else %}
-no spanning-tree rstp mac-flush-threshold
- {% endif %}
- {% endif %}
- {% if val.force_version is defined %}
- {% if val.force_version %}
-spanning-tree rstp force-version {{ val.force_version }}
- {% else %}
-no spanning-tree rstp force-version
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.pvst is defined and xstp_vars.pvst %}
- {% set val = xstp_vars.pvst %}
- {% if val.vlan is defined and val.vlan %}
- {% for vlan in val.vlan %}
- {% if vlan.range_or_id is defined and vlan.range_or_id %}
- {% if vlan.bridge_priority is defined %}
- {% if vlan.bridge_priority == 0 or vlan.bridge_priority %}
-spanning-tree vlan {{ vlan.range_or_id }} priority {{ vlan.bridge_priority }}
- {% else %}
-no spanning-tree vlan {{ vlan.range_or_id }} priority
- {% endif %}
- {% endif %}
- {% if vlan.enable is defined %}
- {% if vlan.enable %}
-no spanning-tree vlan {{ vlan.range_or_id }} disable
- {% else %}
-spanning-tree vlan {{ vlan.range_or_id }} disable
- {% endif %}
- {% endif %}
-
- {% if vlan.forward_time is defined %}
- {% if vlan.forward_time %}
-spanning-tree vlan {{ vlan.range_or_id }} forward-time {{ vlan.forward_time }}
- {% else %}
-no spanning-tree vlan {{ vlan.range_or_id }} forward-time
- {% endif %}
- {% endif %}
- {% if vlan.hello_time is defined %}
- {% if vlan.hello_time %}
-spanning-tree vlan {{ vlan.range_or_id }} hello-time {{ vlan.hello_time }}
- {% else %}
-no spanning-tree vlan {{ vlan.range_or_id }} hello-time
- {% endif %}
- {% endif %}
- {% if vlan.max_age is defined %}
- {% if vlan.max_age %}
-spanning-tree vlan {{ vlan.range_or_id }} max-age {{ vlan.max_age }}
- {% else %}
-no spanning-tree vlan {{ vlan.range_or_id }} max-age
- {% endif %}
- {% endif %}
- {% if vlan.mac_flush_threshold is defined %}
- {% if vlan.mac_flush_threshold %}
-spanning-tree vlan {{ vlan.range_or_id }} mac-flush-threshold {{ vlan.mac_flush_threshold }}
- {% else %}
-no spanning-tree vlan {{ vlan.range_or_id }} mac-flush-threshold
- {% endif %}
- {% endif %}
- {% if vlan.root is defined %}
- {% if vlan.root %}
-spanning-tree vlan {{ vlan.range_or_id }} root {{ vlan.root }}
- {% else %}
-no spanning-tree vlan {{ vlan.range_or_id }} root p
- {% endif %}
- {% endif %}
-
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
-{% if xstp_vars.mstp is defined and xstp_vars.mstp %}
- {% set val = xstp_vars.mstp %}
- {% if val.forward_time is defined %}
- {% if val.forward_time %}
-spanning-tree mst forward-time {{ val.forward_time }}
- {% else %}
-no spanning-tree mst forward-time
- {% endif %}
- {% endif %}
- {% if val.hello_time is defined %}
- {% if val.hello_time %}
-spanning-tree mst hello-time {{ val.hello_time }}
- {% else %}
-no spanning-tree mst hello-time
- {% endif %}
- {% endif %}
- {% if val.max_age is defined %}
- {% if val.max_age %}
-spanning-tree mst max-age {{ val.max_age }}
- {% else %}
-no spanning-tree mst max-age
- {% endif %}
- {% endif %}
- {% if val.max_hops is defined %}
- {% if val.max_hops %}
-spanning-tree mst max-hops {{ val.max_hops }}
- {% else %}
-no spanning-tree mst max-hops
- {% endif %}
- {% endif %}
- {% if val.force_version is defined %}
- {% if val.force_version %}
-spanning-tree mst force-version {{ val.force_version }}
- {% else %}
-no spanning-tree mst force-version
- {% endif %}
- {% endif %}
-
- {% if val.mstp_instances is defined and val.mstp_instances %}
- {% for instance in val.mstp_instances %}
- {% if instance.number_or_range is defined and instance.number_or_range %}
- {% if instance.bridge_priority is defined %}
- {% if instance.bridge_priority ==0 or instance.bridge_priority %}
-spanning-tree mst {{ instance.number_or_range }} priority {{ instance.bridge_priority }}
- {% else %}
-no spanning-tree mst {{ instance.number_or_range }} priority
- {% endif %}
- {% endif %}
- {% if instance.enable is defined %}
- {% if instance.enable %}
-no spanning-tree mst {{ instance.number_or_range }} disable
- {% else %}
-spanning-tree mst {{ instance.number_or_range }} disable
- {% endif %}
- {% endif %}
- {% if instance.mac_flush_threshold is defined %}
- {% if instance.mac_flush_threshold %}
-spanning-tree mst {{ instance.number_or_range }} mac-flush-threshold {{ instance.mac_flush_threshold }}
- {% else %}
-no spanning-tree mst {{ instance.number_or_range }} mac-flush-threshold
- {% endif %}
- {% endif %}
- {% if instance.root is defined %}
- {% if instance.root %}
-spanning-tree mst {{ instance.number_or_range }} root {{ instance.root }}
- {% else %}
-no spanning-tree mst {{ instance.number_or_range }} root p
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if val.mst_config is defined and val.mst_config %}
-spanning-tree mst configuration
- {% if val.mst_config.name is defined %}
- {% if val.mst_config.name %}
- name {{ val.mst_config.name }}
- {% else %}
- no name
- {% endif %}
- {% endif %}
- {% if val.mst_config.revision is defined %}
- {% if val.mst_config.revision %}
- revision {{ val.mst_config.revision }}
- {% else %}
- no revision
- {% endif %}
- {% endif %}
- {% for instance in val.mst_config.cfg_list %}
- {% if instance.number is defined and instance.number %}
- {% if instance.vlans is defined and instance.vlans %}
- {% if instance.vlans_state is defined and instance.vlans_state == "absent" %}
- no instance {{ instance.number }} vlan {{ instance.vlans }}
- {% else %}
- instance {{ instance.number }} vlan {{ instance.vlans }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
-{% if xstp_vars.intf is defined and xstp_vars.intf %}
- {% for intr in xstp_vars.intf.keys() %}
- {% set intf_vars = xstp_vars.intf[intr] %}
-interface {{ intr }}
- {% if intf_vars.edge_port is defined %}
- {% if not intf_vars.edge_port %}
- no spanning-tree port type edge
- {% else %}
- spanning-tree port type edge
- {% endif %}
- {% endif %}
- {% if intf_vars.bpdu_filter is defined %}
- {% if intf_vars.bpdu_filter %}
- spanning-tree bpdufilter enable
- {% else %}
- spanning-tree bpdufilter disable
- {% endif %}
- {% endif %}
- {% if intf_vars.bpdu_guard is defined %}
- {% if intf_vars.bpdu_guard %}
- spanning-tree bpduguard enable
- {% else %}
- spanning-tree bpduguard disable
- {% endif %}
- {% endif %}
- {% if intf_vars.guard is defined %}
- {% if intf_vars.guard %}
- spanning-tree guard {{ intf_vars.guard }}
- {% else %}
- spanning-tree guard none
- {% endif %}
- {% endif %}
- {% if intf_vars.enable is defined %}
- {% if intf_vars.enable %}
- no spanning-tree disable
- {% else %}
- spanning-tree disable
- {% endif %}
- {% endif %}
- {% if intf_vars.link_type is defined %}
- {% if intf_vars.link_type %}
- spanning-tree link-type {{ intf_vars.link_type }}
- {% else %}
- no spanning-tree link-type
- {% endif %}
- {% endif %}
- {% if intf_vars.rstp is defined and intf_vars.rstp %}
- {% if intf_vars.rstp.priority is defined %}
- {% if intf_vars.rstp.priority %}
- spanning-tree rstp priority {{ intf_vars.rstp.priority }}
- {% else %}
- no spanning-tree rstp priority
- {% endif %}
- {% endif %}
- {% if intf_vars.rstp.cost is defined %}
- {% if intf_vars.rstp.cost %}
- spanning-tree rstp cost {{ intf_vars.rstp.cost }}
- {% else %}
- no spanning-tree rstp cost
- {% endif %}
- {% endif %}
- {% endif %}
- {% if intf_vars.msti is defined and intf_vars.msti %}
- {% for inst in intf_vars.msti %}
- {% if inst.instance_number is defined and inst.instance_number==0 or inst.instance_number %}
- {% if inst.priority is defined %}
- {% if inst.priority %}
- spanning-tree msti {{ inst.instance_number }} priority {{ inst.priority }}
- {% else %}
- no spanning-tree msti {{ inst.instance_number }} priority 1
- {% endif %}
- {% endif %}
- {% if inst.cost is defined %}
- {% if inst.cost %}
- spanning-tree msti {{ inst.instance_number }} cost {{ inst.cost }}
- {% else %}
- no spanning-tree msti {{ inst.instance_number }} cost 1
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if intf_vars.vlan is defined and intf_vars.vlan %}
- {% for inst in intf_vars.vlan %}
- {% if inst.range_or_id is defined and inst.range_or_id %}
- {% if inst.priority is defined %}
- {% if inst.priority %}
- spanning-tree vlan {{ inst.range_or_id }} priority {{ inst.priority }}
- {% else %}
- no spanning-tree vlan {{ inst.range_or_id }} priority 1
- {% endif %}
- {% endif %}
- {% if inst.cost is defined %}
- {% if inst.cost %}
- spanning-tree vlan {{ inst.range_or_id }} cost {{ inst.cost }}
- {% else %}
- no spanning-tree vlan {{ inst.range_or_id }} cost 1
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endfor %}
-{% endif %}
-
-{% endif %}
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml
deleted file mode 100644
index b1ff63e66..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/inventory.yaml
+++ /dev/null
@@ -1,22 +0,0 @@
----
-spine1 ansible_host=10.11.182.25 ansible_network_os="dellemc.os10.os10"
-spine2 ansible_host=10.16.151.220 ansible_network_os="dellemc.os10.os10"
-
-leaf1 ansible_host=10.11.182.23 ansible_network_os="dellemc.os10.os10"
-leaf2 ansible_host=10.11.182.16 ansible_network_os="dellemc.os10.os10"
-leaf3 ansible_host=10.11.182.17 ansible_network_os="dellemc.os10.os10"
-leaf4 ansible_host=10.16.148.79 ansible_network_os="dellemc.os10.os10"
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml
deleted file mode 100644
index 7b55af458..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/main.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-# vars file for dellemc.os10.os10_xstp,
-# below gives a sample configuration
-# Sample variables for OS10 device
-os10_xstp:
- type: rstp
- enable: true
- path_cost: true
- mac_flush_timer: 4
- rstp:
- max_age: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- bridge_priority: 4096
- mac_flush_threshold: 5
- pvst:
- vlan:
- - range_or_id: 10
- max_age: 6
- enable: true
- hello_time: 7
- forward_time: 7
- bridge_priority: 4096
- mac_flush_threshold: 9
- mstp:
- max_age: 6
- max_hops: 6
- hello_time: 7
- forward_time: 7
- force_version: stp
- mstp_instances:
- - number_or_range: 1
- enable: true
- mac_flush_threshold: 9
- bridge_priority: 4096
- mst_config:
- name: cfg1
- revision: 5
- cfg_list:
- - number: 1
- vlans: 10,12
- vlans_state: present
- intf:
- ethernet 1/1/8:
- edge_port: true
- bpdu_filter: true
- bpdu_guard: true
- guard: loop
- enable: true
- link_type: point-to-point
- msti:
- - instance_number: 1
- priority: 32
- cost: 1
- rstp:
- priority: 32
- cost: 7
- vlan:
- - range_or_id: 6
- priority: 16
- cost: 8
-
-# Sample variables for OS10 devices to configure root in pvst and mst
-#---
-#os10_xstp:
-# pvst:
-# vlan:
-# - range_or_id: 10
-# root: primary
-# mstp:
-# mstp_instances:
-# - number_or_range: 1
-# root: secondary
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml b/ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml
deleted file mode 100644
index 438212cab..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os10.os10_xstp
diff --git a/ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml b/ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml
deleted file mode 100644
index b3eb721a4..000000000
--- a/ansible_collections/dellemc/os10/roles/os10_xstp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os10.os10_xstp
diff --git a/ansible_collections/dellemc/os10/tests/integration/target-prefixes.network b/ansible_collections/dellemc/os10/tests/integration/target-prefixes.network
deleted file mode 100644
index 69b59b3fb..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/target-prefixes.network
+++ /dev/null
@@ -1 +0,0 @@
-os10
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2
deleted file mode 100644
index aa8ad40ac..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/01_aaa_configure.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-os10_aaa:
- tacacs_server:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "aaaa"
- auth_port: 3
- state: present
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: present
- re_authenticate: false
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2
deleted file mode 100644
index 89728ab97..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/02_aaa_update.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-os10_aaa:
- radius_server:
- retransmit: 6
- timeout: 9
- host:
- - ip: 10.10.10.10
- key: 0
- value: "abc"
- auth_port: 3
- state: present
- tacacs_server:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "aaaa"
- auth_port: 3
- state: present
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: present
- re_authenticate: false
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2
deleted file mode 100644
index 1950e4993..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/03_aaa_empty_string.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-os10_aaa:
- radius_server:
- retransmit:
- timeout:
- host:
- - ip: 10.10.10.10
- key:
- value: "abc"
- auth_port:
- state: present
- tacacs_server:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "aaaa"
- auth_port: 3
- state: present
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: "absent"
- re_authenticate: false
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2
deleted file mode 100644
index b7a86a2db..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/04_aaa_absent.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-os10_aaa:
- radius_server:
- retransmit: 6
- timeout: 9
- host:
- - ip: 10.10.10.10
- key: 0
- value: "abc"
- auth_port: 3
- state: absent
- tacacs_server:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "aaaa"
- auth_port: 3
- state: absent
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: "absent"
- re_authenticate: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2
deleted file mode 100644
index 8beb9cc1b..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/05_aaa_stateless.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-os10_aaa:
- radius_server:
- retransmit: 6
- timeout: 9
- host:
- - ip: 10.10.10.10
- key: 0
- value: "abc"
- auth_port: 3
- tacacs_server:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "aaaa"
- auth_port: 3
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- re_authenticate: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2
deleted file mode 100644
index d9fa0fe4c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/templates/aaa_basic/steps/teardown.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-os10_aaa:
- radius_server:
- retransmit:
- timeout:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "abc"
- auth_port: 3
- state: absent
- tacacs_server:
- host:
- - ip: 10.10.10.10
- key: 0
- value: "aaaa"
- auth_port: 3
- state: absent
- aaa_authentication:
- login:
- - console: true
- type: group radius group tacacs+ local
- state: "absent"
- re_authenticate:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml
deleted file mode 100644
index 3e7e9546f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_aaa_role/vars/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-test_roles:
- - dellemc.os10.os10_aaa
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2
deleted file mode 100644
index f3a5d7b80..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/01_acl_config.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-idempotent: true
-
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl
- remark:
- - description: 1
- number: 2
- state: present
- entries:
- - number: 14
- permit: true
- protocol: tcp
- source: any
- src_condition: neq 6
- destination: any
- dest_condition: eq 4
- other_options: count
- state: present
- lineterminal:
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2
deleted file mode 100644
index db65b97f9..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/02_acl_stage.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-idempotent: false
-
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl
- stage_ingress:
- - name: "{{ os10_interface_1 }}"
- state: present
- - name: "{{ os10_interface_2 }}"
- state: present
- stage_egress:
- - name: "{{ os10_interface_3 }}"
- state: present
- lineterminal:
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2
deleted file mode 100644
index 0d9f1834d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/03_acl_update.j2
+++ /dev/null
@@ -1,31 +0,0 @@
-idempotent: false
-
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl ssh
- remark:
- - description: acl remark
- number: 3
- state: present
- entries:
- - number: 15
- permit: false
- protocol: udp
- source: any
- src_condition: gt 4
- destination: any
- dest_condition: lt 5
- other_options: fragment
- state: present
- stage_ingress:
- - name: "{{ os10_interface_1 }}"
- state: present
- - name: "{{ os10_interface_2 }}"
- state: present
- stage_egress:
- - name: "{{ os10_interface_3 }}"
- state: present
- lineterminal:
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2
deleted file mode 100644
index afa89bbf9..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/04_acl_suboptions_absent.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-idempotent: false
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl ssh
- remark:
- - description: acl remark
- number: 3
- state: absent
- entries:
- - number: 15
- permit: false
- protocol: udp
- source: any
- src_condition: gt 4
- destination: any
- dest_condition: lt 5
- other_options: fragment
- state: absent
- stage_ingress:
- - name: "{{ os10_interface_1 }}"
- state: absent
- - name: "{{ os10_interface_2 }}"
- state: absent
- stage_egress:
- - name: "{{ os10_interface_3 }}"
- state: absent
- lineterminal:
- state: absent
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2
deleted file mode 100644
index 518758d15..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/05_acl_suboptions_stateless.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-idempotent: false
-
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl ssh
- remark:
- - description: acl remark
- number: 3
- entries:
- - number: 14
- permit: true
- protocol: tcp
- source: any
- src_condition: neq 6
- destination: any
- dest_condition: eq 4
- other_options: count
- - number: 15
- permit: false
- protocol: udp
- source: any
- src_condition: gt 4
- destination: any
- dest_condition: lt 5
- other_options: fragment
- stage_ingress:
- - name: "{{ os10_interface_1 }}"
- - name: "{{ os10_interface_2 }}"
- stage_egress:
- - name: "{{ os10_interface_3 }}"
- lineterminal:
- state:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2
deleted file mode 100644
index 9bce3bf00..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/06_acl_absent.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-idempotent: false
-
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl ssh
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2
deleted file mode 100644
index 9381300e6..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/07_acl_stateless.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-idempotent: false
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl ssh
- remark:
- - description: acl remark
- number: 3
- entries:
- - number: 14
- permit: true
- protocol: tcp
- source: any
- src_condition: neq 6
- destination: any
- dest_condition: eq 4
- other_options: count
- stage_ingress:
- - name: "{{ os10_interface_1 }}"
- - name: "{{ os10_interface_2 }}"
- stage_egress:
- - name: "{{ os10_interface_3 }}"
- lineterminal:
- state:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2
deleted file mode 100644
index 2fcc4b18a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/templates/acl_basic/steps/teardown.j2
+++ /dev/null
@@ -1,29 +0,0 @@
-os10_acl:
- - name: ssh
- type: ipv4
- description: acl ssh
- remark:
- - description: acl remark
- number: 3
- state: absent
- entries:
- - number: 15
- permit: false
- protocol: udp
- source: any
- src_condition: gt 4
- destination: any
- dest_condition: lt 5
- other_options: fragment
- state: absent
- stage_ingress:
- - name: "{{ os10_interface_1 }}"
- state: absent
- - name: "{{ os10_interface_2 }}"
- state: absent
- stage_egress:
- - name: "{{ os10_interface_3 }}"
- state: absent
- lineterminal:
- state: absent
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml
deleted file mode 100644
index b40350ab3..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_acl_role/vars/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-test_roles:
- - dellemc.os10.os10_acl
-
-idempotent_roles:
- - dellemc.os10.os10_acl
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2
deleted file mode 100644
index 947bca963..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/01_bgp_default_vrf_config.j2
+++ /dev/null
@@ -1,191 +0,0 @@
-idempotent: false
-os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- fast_ext_fallover: true
- always_compare_med: true
- default_loc_pref: 1000
- as_notation: asdot
- enforce_first_as: true
- non_deterministic_med: true
- outbound_optimization: true
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- client_to_client: true
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- distance_bgp:
- value: 3 4 6
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- address_family:
- - type: ipv4
- activate: true
- state: present
- max_prefix:
- count: 20
- threshold: 90
- warning: true
- state: present
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: present
- - subnet: 20::/64
- limit: 4
- subnet_state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- adv_start: 100
- adv_start_state: present
- conn_retry_timer: 20
- remove_pri_as: present
- address_family:
- - type: ipv4
- activate: true
- state: present
- max_prefix:
- count: 10
- threshold: 40
- warning: true
- state: present
- default_originate:
- route_map: aa
- state: present
- distribute_list:
- in: XX
- in_state: present
- out: YY
- out_state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- sender_loop_detect: true
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2
deleted file mode 100644
index 0e4f173c9..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/02_bgp_default_vrf_unconfig.j2
+++ /dev/null
@@ -1,185 +0,0 @@
-idempotent: false
-os10_bgp:
- asn: 12
- router_id:
- maxpath_ibgp:
- maxpath_ebgp:
- graceful_restart: false
- log_neighbor_changes: false
- fast_ext_fallover: false
- always_compare_med: false
- default_loc_pref:
- as_notation: asdot
- enforce_first_as: false
- non_deterministic_med: false
- outbound_optimization: false
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: absent
- route_reflector:
- client_to_client: false
- cluster_id:
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: absent
- summary_only: false
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: absent
- ibgp_redist_internal:
- state: absent
- default_metric:
- distance_bgp:
- value: 3 4 6
- state: absent
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: absent
- summary_only: false
- ibgp_redist_internal:
- state: absent
- best_path:
- as_path: ignore
- as_path_state: absent
- ignore_router_id: false
- med:
- - attribute: confed
- state: absent
- neighbor:
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: absent
- address_family:
- - type: l2vpn
- activate: false
- state: absent
- admin: up
- state: absent
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: absent
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: absent
- password: bgppassword
- route_reflector_client: false
- adv_start: 100
- adv_start_state: absent
- conn_retry_timer: 20
- remove_pri_as: absent
- address_family:
- - type: ipv4
- activate: false
- state: absent
- max_prefix:
- count: 10
- threshold: 40
- warning: false
- state: absent
- default_originate:
- route_map: aa
- state: absent
- distribute_list:
- in: XX
- in_state: absent
- out: YY
- out_state: absent
- send_community:
- - type: standard
- state: absent
- state: absent
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: false
- allow_as_in: 5
- next_hop_self: false
- soft_reconf: false
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: absent
- state: absent
- state: absent
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: absent
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: absent
- sender_loop_detect: false
- password: bgppassword
- address_family:
- - type: ipv4
- activate: false
- sender_loop_detect: false
- state: absent
- allow_as_in: 5
- next_hop_self: false
- soft_reconf: false
- - type: l2vpn
- activate: false
- sender_loop_detect: false
- state: absent
- send_community:
- - type: standard
- state: absent
- admin: up
- state: absent
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: absent
- address_family:
- - type: ipv4
- activate: false
- state: absent
- max_prefix:
- count: 20
- threshold: 90
- warning: false
- state: absent
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: absent
- - subnet: 20::/64
- limit: 4
- subnet_state: absent
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: absent
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: absent
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: absent
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2
deleted file mode 100644
index 1ec5c308a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/03_bgp_non_default_vrf_config.j2
+++ /dev/null
@@ -1,153 +0,0 @@
-idempotent: false
-os10_bgp:
- asn: 12
- vrfs:
- - name: "GREEN"
- router_id: 50.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- fast_ext_fallover: true
- always_compare_med: true
- default_loc_pref: 1000
- route_reflector:
- client_to_client: true
- cluster_id: 1
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-worst
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan10
- description: "U_site2 vlan"
- send_community:
- - type: extended
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.20.1
- name: peer1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- address_family:
- - type: ipv4
- activate: true
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.15.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: present
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- state: present
- - name: "BLUE"
- router_id: 6.6.6.6
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2
deleted file mode 100644
index 69d90adda..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/04_bgp_non_default_vrf_unconfig.j2
+++ /dev/null
@@ -1,147 +0,0 @@
-idempotent: false
-os10_bgp:
- asn: 12
- vrfs:
- - name: "GREEN"
- router_id:
- maxpath_ibgp:
- maxpath_ebgp:
- graceful_restart: false
- log_neighbor_changes: false
- fast_ext_fallover: false
- always_compare_med: false
- default_loc_pref: 1000
- route_reflector:
- client_to_client: false
- cluster_id:
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: absent
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: absent
- ibgp_redist_internal:
- state: absent
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: absent
- summary_only: false
- ibgp_redist_internal:
- state: absent
- best_path:
- as_path: ignore
- as_path_state: absent
- ignore_router_id: false
- med:
- - attribute: confed
- state: absent
- - attribute: missing-as-worst
- state: absent
- neighbor:
- - type: ipv4
- interface: vlan10
- description: "U_site2 vlan"
- send_community:
- - type: extended
- state: absent
- admin: up
- state: absent
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.20.1
- name: peer1
- peergroup: peer1
- peergroup_state: absent
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: absent
- route_reflector_client: false
- address_family:
- - type: ipv4
- activate: false
- state: absent
- send_community:
- - type: standard
- state: absent
- state: absent
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.15.3
- address_family:
- - type: ipv4
- activate: false
- allow_as_in: 5
- next_hop_self: false
- soft_reconf: false
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: absent
- state: absent
- state: absent
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: absent
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: absent
- address_family:
- - type: ipv4
- activate: false
- sender_loop_detect: false
- state: absent
- allow_as_in: 5
- next_hop_self: false
- soft_reconf: false
- send_community:
- - type: standard
- state: absent
- admin: up
- state: absent
- - name: peer1
- type: peergroup
- bfd: yes
- state: absent
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: absent
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: absent
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: absent
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: absent
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: absent
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: absent
- state: present
- - name: "BLUE"
- router_id:
- state: absent
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2
deleted file mode 100644
index cd530af4a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/05_bgp_default_non_default_vrf_config.j2
+++ /dev/null
@@ -1,314 +0,0 @@
-idempotent: true
-os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- always_compare_med: true
- default_loc_pref: 1000
- as_notation: asdot
- non_deterministic_med: true
- outbound_optimization: true
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- distance_bgp:
- value: 3 4 6
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- address_family:
- - type: ipv4
- state: present
- max_prefix:
- count: 20
- threshold: 90
- warning: true
- state: present
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: present
- - subnet: 20::/64
- limit: 4
- subnet_state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- state: present
- admin: up
- state: present
- - type: ipv4
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- adv_start: 100
- adv_start_state: present
- conn_retry_timer: 20
- remove_pri_as: present
- address_family:
- - type: ipv4
- state: present
- max_prefix:
- count: 10
- threshold: 40
- warning: true
- state: present
- default_originate:
- route_map: aa
- state: present
- distribute_list:
- in: XX
- in_state: present
- out: YY
- out_state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- vrfs:
- - name: "GREEN"
- router_id: 50.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- always_compare_med: true
- default_loc_pref: 1000
- route_reflector:
- cluster_id: 1
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-worst
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan10
- description: U_site2 vlan
- send_community:
- - type: extended
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.20.1
- name: peer1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- address_family:
- - type: ipv4
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.15.3
- address_family:
- - type: ipv4
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: present
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2
deleted file mode 100644
index 1fa12ee20..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/06_bgp_default_non_default_vrf_unconfig.j2
+++ /dev/null
@@ -1,313 +0,0 @@
-idempotent: false
-os10_bgp:
- asn: 12
- router_id: 90.1.1.4
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- log_neighbor_changes: true
- always_compare_med: true
- default_loc_pref: 1000
- as_notation: asdot
- non_deterministic_med: true
- outbound_optimization: true
- confederation:
- identifier: 25
- peers: 23 24
- peers_state: present
- route_reflector:
- cluster_id: 4294967295
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- summary_only: true
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- default_metric: 10
- distance_bgp:
- value: 3 4 6
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- address_family:
- - type: ipv4
- state: present
- max_prefix:
- count: 20
- threshold: 90
- warning: true
- state: present
- listen:
- - subnet: 4.4.4.4/32
- limit: 4
- subnet_state: present
- - subnet: 20::/64
- limit: 4
- subnet_state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan20
- send_community:
- - type: extended
- state: present
- address_family:
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2-spine1"
- remote_asn: 11
- ip: 192.168.10.1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- password: bgppassword
- route_reflector_client: true
- adv_start: 100
- adv_start_state: present
- conn_retry_timer: 20
- remove_pri_as: present
- address_family:
- - type: ipv4
- activate: true
- state: present
- max_prefix:
- count: 10
- threshold: 40
- warning: true
- state: present
- default_originate:
- route_map: aa
- state: present
- distribute_list:
- in: XX
- in_state: present
- out: YY
- out_state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.12.3
- address_family:
- - type: ipv4
- activate: true
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- sender_loop_detect: true
- password: bgppassword
- address_family:
- - type: ipv4
- activate: true
- sender_loop_detect: true
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- - type: l2vpn
- activate: true
- state: present
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- vrfs:
- - name: "GREEN"
- router_id: 50.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- always_compare_med: true
- default_loc_pref: 1000
- route_reflector:
- cluster_id: 1
- address_family_ipv4:
- aggregate_address:
- - ip_and_mask: 1.1.1.1/16
- state: present
- dampening:
- value: 15 750 2000 60
- route_map: qq
- state: present
- ibgp_redist_internal:
- state: present
- address_family_ipv6:
- aggregate_address:
- - ip_and_mask: 2001:4898:5808:ffa0::/126
- state: present
- summary_only: true
- ibgp_redist_internal:
- state: present
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-worst
- state: present
- neighbor:
- - name: peer1
- type: peergroup
- bfd: yes
- state: present
- - name: ebgp_pg
- type: peergroup
- bfd: yes
- state: present
- - name: ibgp_pg
- type: peergroup
- weight: 10
- state: present
- - type: ipv4
- interface: vlan10
- description: U_site2 vlan
- send_community:
- - type: extended
- state: present
- admin: up
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 11
- ip: 192.168.20.1
- name: peer1
- peergroup: peer1
- peergroup_state: present
- peergroup_type: ibgp
- adv_interval: 40
- fall_over: present
- route_reflector_client: true
- address_family:
- - type: ipv4
- state: present
- send_community:
- - type: standard
- state: present
- state: present
- - type: ipv4
- description: "U_site2 spine1"
- remote_asn: 13
- local_as: 10
- weight: 10
- ip: 192.168.15.3
- address_family:
- - type: ipv4
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- add_path: both 3
- route_map:
- - name: qq
- filter: in
- state: present
- state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- state: present
- - type: ipv6
- description: "U_site2-spine1-Auto Discovered peers"
- auto_peer: unnumbered-auto
- ibgp_peergroup: ibgp_pg
- ibgp_peergroup_state: present
- address_family:
- - type: ipv4
- state: present
- allow_as_in: 5
- next_hop_self: true
- soft_reconf: true
- send_community:
- - type: standard
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- address_type: ipv4
- state: present
- - route_type: connected
- route_map_name: bb
- address_type: ipv4
- state: present
- - route_type: l2vpn
- route_map_name: cc
- address_type: ipv4
- state: present
- bfd_all_neighbors:
- interval: 200
- min_rx: 200
- multiplier: 3
- role: active
- state: present
- state: present
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2
deleted file mode 100644
index ccf217d39..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/setup.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-os10_vrf:
- vrfdetails:
- - vrf_name: GREEN
- state: present
- - vrf_name: BLUE
- state: present
-os10_vlan:
- vlan 10:
- description: "red"
- state: present
- vlan 20:
- description: "yellow"
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2
deleted file mode 100644
index 1c0d524e2..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/templates/bgp_vrf/steps/teardown.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-os10_vrf:
- vrfdetails:
- - vrf_name: GREEN
- state: absent
- - vrf_name: BLUE
- state: absent
-os10_vlan:
- vlan 10:
- description: "red"
- state: absent
- vlan 20:
- description: "yellow"
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml
deleted file mode 100644
index 631238919..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_bgp_role/vars/main.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vrf
- - dellemc.os10.os10_vlan
- - dellemc.os10.os10_bgp
-
-idempotent_roles:
- - dellemc.os10.os10_bgp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml
deleted file mode 100644
index 8c11e106f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/cli.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact:
- test_items: "{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ test_case_to_run }}"
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: test_case_to_run
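The driver removed above relies on the long-deprecated `include:` action together with `with_items`. For reference, the same loop can be written with `include_tasks` and `loop`; the following is a minimal sketch of an equivalent (not part of the original collection), assuming the same `testcase` variable and role layout:

---
# Sketch: modern equivalent of the removed cli.yaml test-case driver.
- name: collect all cli test cases
  ansible.builtin.find:
    paths: "{{ role_path }}/tests/cli"
    patterns: "{{ testcase }}.yaml"
  register: test_cases

- name: run each test case
  ansible.builtin.include_tasks: "{{ test_case_to_run }}"
  loop: "{{ test_cases.files | map(attribute='path') | list }}"
  loop_control:
    loop_var: test_case_to_run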
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml
deleted file mode 100644
index 415c99d8b..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] }
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml
deleted file mode 100644
index 879a3d3f0..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/bad_operator.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- debug: msg="START cli/bad_operator.yaml"
-
-- name: test bad operator
- os10_command:
- commands:
- - show version
- - show interface ethernet 1/1/1
- wait_for:
- - "result[0] contains 'Description : blah'"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- debug: msg="END cli/bad_operator.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml
deleted file mode 100644
index 1aa705820..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/contains.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- debug: msg="START cli/contains.yaml"
-
-- name: test contains operator
- os10_command:
- commands:
- - show version
- - show interface ethernet 1/1/1
- wait_for:
- - "result[0] contains OS10 Enterprise"
- - "result[1] contains Ethernet "
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- debug: msg="END cli/contains.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml
deleted file mode 100644
index 0eff61708..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/invalid.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- debug: msg="START cli/invalid.yaml"
-
-- name: run invalid command
- os10_command:
- commands: ['show foo']
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "'Error: Unrecognized command' in result.msg"
-
-- name: run commands that include invalid command
- os10_command:
- commands:
- - show version
- - show foo
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "'Error: Unrecognized command' in result.msg"
-
-- debug: msg="END cli/invalid.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml
deleted file mode 100644
index 0c85c0f85..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/output.yaml
+++ /dev/null
@@ -1,27 +0,0 @@
----
-- debug: msg="START cli/output.yaml"
-
-- name: get output for single command
- os10_command:
- commands: ['show version']
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- name: get output for multiple commands
- os10_command:
- commands:
- - show version
- - show interface Eth 1/1/1
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
- - "result.stdout | length == 2"
-
-- debug: msg="END cli/output.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml
deleted file mode 100644
index 65e5a82ec..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_command/tests/cli/timeout.yaml
+++ /dev/null
@@ -1,18 +0,0 @@
----
-- debug: msg="START cli/timeout.yaml"
-
-- name: test bad condition
- os10_command:
- commands:
- - show version
- wait_for:
- - "result[0] contains bad_value_string"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- debug: msg="END cli/timeout.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml
deleted file mode 100644
index d675462dd..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/cli.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ test_case_to_run }}"
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: test_case_to_run
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml
deleted file mode 100644
index 415c99d8b..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] }
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml
deleted file mode 100644
index d376e6ebf..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- debug: msg="START cli/sublevel.yaml"
-
-- name: setup test
- os10_config:
- lines:
- - 'no ip access-list test'
- match: none
-
-- name: configure sub level command
- os10_config:
- lines: ['seq 5 permit ip any any count byte']
- parents: ['ip access-list test']
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'ip access-list test' in result.updates"
- - "'seq 5 permit ip any any count byte' in result.updates"
-
-- name: configure sub level command idempotent check
- os10_config:
- lines: ['seq 5 permit ip any any count byte']
- parents: ['ip access-list test']
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os10_config:
- lines:
- - 'no ip access-list test'
- match: none
-
-- debug: msg="END cli/sublevel.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml
deleted file mode 100644
index ad598f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_block.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
----
-- debug: msg="START cli/sublevel_block.yaml"
-
-- name: setup
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- parents: ['router bgp 10']
- before: ['no router bgp']
- after: ['exit']
- match: none
-
-- name: configure sub level command using block replace
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- parents: ['router bgp 10']
- replace: block
- after: ['exit']
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'router bgp 10' in result.updates"
- - "'neighbor 1.1.1.1' in result.updates"
- - "'neighbor 1.1.1.2' in result.updates"
- - "'neighbor 1.1.1.3' in result.updates"
- - "'neighbor 1.1.1.4' in result.updates"
-
-- name: check sub level command using block replace
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- parents: ['router bgp 10']
- replace: block
- after: ['exit']
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os10_config:
- lines:
- - no router bgp
- match: none
-
-- debug: msg="END cli/sublevel_block.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml
deleted file mode 100644
index 0093e4c70..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_exact.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- debug: msg="START cli/sublevel_exact.yaml"
-
-- name: setup
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- - neighbor 1.1.1.5
- parents: ['router bgp 10']
- before: ['no router bgp']
- after: ['exit']
- match: none
-
-- name: configure sub level command using exact match
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- parents: ['router bgp 10']
- after: ['exit']
- match: exact
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'router bgp 10' in result.updates"
- - "'neighbor 1.1.1.1' in result.updates"
- - "'neighbor 1.1.1.2' in result.updates"
- - "'neighbor 1.1.1.3' in result.updates"
- - "'neighbor 1.1.1.4' in result.updates"
- - "'neighbor 1.1.1.5' not in result.updates"
-
-- name: check sub level command using exact match
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- - neighbor 1.1.1.5
- parents: ['router bgp 10']
- after: ['exit']
- match: exact
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os10_config:
- lines:
- - no router bgp
- match: none
-
-- debug: msg="END cli/sublevel_exact.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml
deleted file mode 100644
index 388653404..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/sublevel_strict.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-- debug: msg="START cli/sublevel_strict.yaml"
-
-- name: setup
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- - neighbor 1.1.1.5
- parents: ['router bgp 10']
- before: ['no router bgp']
- after: ['exit']
- match: none
-
-- name: configure sub level command using strict match
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.2
- - neighbor 1.1.1.3
- - neighbor 1.1.1.4
- parents: ['router bgp 10']
- match: strict
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: check sub level command using strict match
- os10_config:
- lines:
- - neighbor 1.1.1.1
- - neighbor 1.1.1.3
- - neighbor 1.1.1.2
- parents: ['router bgp 10']
- after: ['exit']
- match: strict
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'router bgp 10' in result.updates"
- - "'neighbor 1.1.1.1' not in result.updates"
- - "'neighbor 1.1.1.2' in result.updates"
- - "'neighbor 1.1.1.3' in result.updates"
- - "'neighbor 1.1.1.4' not in result.updates"
- - "'neighbor 1.1.1.5' not in result.updates"
-
-- name: teardown
- os10_config:
- lines:
- - no router bgp
- match: none
-
-- debug: msg="END cli/sublevel_strict.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml
deleted file mode 100644
index c90037b1e..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-- debug: msg="START cli/toplevel.yaml"
-
-- name: setup
- os10_config:
- lines: ['hostname {{ inventory_hostname_short }}']
- match: none
-
-- name: configure top level command
- os10_config:
- lines: ['hostname foo']
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
-
-- name: configure top level command idempotent check
- os10_config:
- lines: ['hostname foo']
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os10_config:
- lines: ['hostname {{ inventory_hostname_short }}']
- match: none
-
-- debug: msg="END cli/toplevel.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml
deleted file mode 100644
index 7a50790f2..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_after.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- debug: msg="START cli/toplevel_after.yaml"
-
-- name: setup
- os10_config:
- lines:
- - "snmp-server contact ansible"
- - "hostname {{ inventory_hostname_short }}"
- match: none
-
-- name: configure top level command with after
- os10_config:
- lines: ['hostname foo']
- after: ['snmp-server contact bar']
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
- - "'snmp-server contact bar' in result.updates"
-
-- name: configure top level command with after idempotent check
- os10_config:
- lines: ['hostname foo']
- after: ['snmp-server contact foo']
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os10_config:
- lines:
- - "no snmp-server contact"
- - "hostname {{ inventory_hostname_short }}"
- match: none
-
-- debug: msg="END cli/toplevel_after.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml
deleted file mode 100644
index 3af72fa8f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_before.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- debug: msg="START cli/toplevel_before.yaml"
-
-- name: setup
- os10_config:
- lines:
- - "snmp-server contact ansible"
- - "hostname {{ inventory_hostname_short }}"
- match: none
-
-- name: configure top level command with before
- os10_config:
- lines: ['hostname foo']
- before: ['snmp-server contact bar']
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
- - "'snmp-server contact bar' in result.updates"
-
-- name: configure top level command with before idempotent check
- os10_config:
- lines: ['hostname foo']
- before: ['snmp-server contact foo']
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os10_config:
- lines:
- - "no snmp-server contact"
- - "hostname {{ inventory_hostname_short }}"
- match: none
-
-- debug: msg="END cli/toplevel_before.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml
deleted file mode 100644
index 14ee21c37..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_config/tests/cli/toplevel_nonidempotent.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- debug: msg="START cli/toplevel_nonidempotent.yaml"
-
-- name: setup
- os10_config:
- lines: ['hostname {{ inventory_hostname_short }}']
- match: none
-
-- name: configure top level command
- os10_config:
- lines: ['hostname foo']
- match: strict
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
-
-- name: configure top level command idempotent check
- os10_config:
- lines: ['hostname foo']
- match: strict
- register: result
-
-- assert:
- that:
- - "result.changed == true"
-
-- name: teardown
- os10_config:
- lines: ['hostname {{ inventory_hostname_short }}']
- match: none
-
-- debug: msg="END cli/toplevel_nonidempotent.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
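The wrapper removed above re-includes the role under test and fails when `output.changed` is true; the `output` variable is assumed to be registered by a task inside that role, which is not visible in this file. Where no such registration exists, the same idempotency assertion can be made directly against a registered module result; a minimal sketch, not part of the removed harness:

---
# Sketch: assert idempotency by applying the same configuration twice.
- name: apply the configuration a second time
  dellemc.os10.os10_config:
    lines: ['hostname foo']
  register: second_run

- name: verify the second run reports no change
  ansible.builtin.assert:
    that:
      - not second_run.changed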
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2
deleted file mode 100644
index 52d49ff2c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/01_ecmp_configure.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-idempotent: true
-os10_ecmp:
- ecmp_group_max_paths: 3
- trigger_threshold: 50
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2
deleted file mode 100644
index 6c0bb03fd..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/02_ecmp_update.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-idempotent: true
-os10_ecmp:
- ecmp_group_max_paths: 29
- trigger_threshold: 86
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2
deleted file mode 100644
index 0187b2c1c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/03_ecmp_unconfig.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-idempotent: false
-os10_ecmp:
- ecmp_group_max_paths:
- trigger_threshold:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2
deleted file mode 100644
index d2cb816ae..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/templates/ecmp_basic/steps/teardown.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-os10_ecmp:
- ecmp_group_max_paths:
- trigger_threshold:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml
deleted file mode 100644
index 1ee0bd401..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ecmp_role/vars/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-test_roles:
- - dellemc.os10.os10_ecmp
-
-idempotent_roles:
- - dellemc.os10.os10_ecmp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml
deleted file mode 100644
index 8c11e106f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/cli.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact:
- test_items: "{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ test_case_to_run }}"
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: test_case_to_run
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml
deleted file mode 100644
index 415c99d8b..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] }
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml
deleted file mode 100644
index d68efadcb..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_facts/tests/cli/facts.yaml
+++ /dev/null
@@ -1,45 +0,0 @@
----
-- debug: msg="START cli/facts.yaml"
-
-- name: test all facts
- os10_facts:
- gather_subset:
- - all
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts is defined"
- - "result.ansible_facts.ansible_net_interfaces is defined"
- - "result.ansible_facts.ansible_net_memfree_mb is defined"
- - "result.ansible_facts.ansible_net_model is defined"
- - "result.ansible_facts.ansible_net_servicetag is defined"
- - "result.ansible_facts.ansible_net_version is defined"
-
-- name: test all facts except hardware
- os10_facts:
- gather_subset:
- - "!hardware"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts.ansible_net_interfaces is defined"
- - "result.ansible_facts.ansible_net_memfree_mb is not defined"
-
-- name: test interface facts
- os10_facts:
- gather_subset:
- - interfaces
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts.ansible_net_interfaces is defined"
- - "result.ansible_facts.ansible_net_memfree_mb is not defined"
-
-
-- debug: msg="END cli/facts.yaml"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2
deleted file mode 100644
index fb1845836..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/01_flow_monitor_configure.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-os10_flow_monitor:
- session 1:
- session_type: local
- description: "Discription goes here"
- port_match:
- - interface_name: {{ os10_interface_1 }}
- location: source
- state: present
- - interface_name: {{ os10_interface_2 }}
- location: destination
- state: present
- flow_based: true
- shutdown: up
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2
deleted file mode 100644
index 2f20dd25d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/02_flow_monitory_update.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_flow_monitor:
- session 1:
- session_type: local
- description: "session1 desc"
- port_match:
- - interface_name: {{ os10_interface_1 }}
- location: source
- state: present
- - interface_name: {{ os10_interface_2 }}
- location: destination
- state: present
- flow_based: true
- shutdown: down
- state: present
- session 2:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2
deleted file mode 100644
index 00225e2fb..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/03_flow_monitor_options_default.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_flow_monitor:
- session 1:
- session_type: local
- description:
- port_match:
- - interface_name: {{ os10_interface_1 }}
- location:
- state: present
- - interface_name: {{ os10_interface_2 }}
- location: destination
- state: present
- flow_based:
- shutdown:
- state: present
- session 2:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2
deleted file mode 100644
index c7eeeae16..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/setup.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-os10_interface:
- {{ os10_interface_2 }}:
- switchport: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2
deleted file mode 100644
index 94690f2a9..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/templates/flow_monitor_basic/steps/teardown.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-test_roles:
- - dellemc.os10.os10_flow_monitor
- - dellemc.os10.os10_interface
-os10_interface:
- {{ os10_interface_2 }}:
- switchport: true
- portmode: access
-os10_flow_monitor:
- session 1:
- session_type: local
- description: "Discription goes here"
- port_match:
- - interface_name: {{ os10_interface_1 }}
- location: source
- state: absent
- - interface_name: {{ os10_interface_2 }}
- location: destination
- state: absent
- flow_based: true
- shutdown: up
- state: absent
- session 2:
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml
deleted file mode 100644
index 8183f2827..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_flow_monitor_role/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-test_roles:
- - dellemc.os10.os10_interface
- - dellemc.os10.os10_flow_monitor
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2
deleted file mode 100644
index bb4364d61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/01_interface_config.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-os10_interface:
- {{ os10_interface_1 }}:
- desc: "Connected to Core 2"
- mtu: 2500
- admin: up
- switchport: False
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2
deleted file mode 100644
index 725cbdedc..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/02_interface_dynamic_ip.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-os10_interface:
- {{ os10_interface_1 }}:
- desc: "DHCP Ip"
- mtu: 2500
- admin: up
- switchport: False
- ip_type_dynamic: True
- ipv6_type_dynamic: True
- {{ os10_interface_2 }}:
- desc: "IPV6 Auto config"
- mtu: 2000
- admin: up
- switchport: False
- ipv6_autoconfig: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2
deleted file mode 100644
index 74273ab0d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/03_interface_static_ip.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_interface:
- {{ os10_interface_1 }}:
- desc: "Static Ip"
- mtu: 2500
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 10.9.0.4/31
- ipv6_and_mask: 2001:4898:5809:faa2::10/126
- ipv6_autoconfig: false
- ip_type_dynamic: false
- ipv6_type_dynamic: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2
deleted file mode 100644
index c77200ea0..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/04_interface_flowcontrol.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-os10_interface:
- {{ os10_interface_1 }}:
- desc: "Static Ip"
- mtu: 2500
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 10.9.0.4/31
- ipv6_and_mask: 2001:4898:5809:faa2::10/126
- ipv6_autoconfig: false
- flowcontrol:
- mode: "receive"
- enable: "on"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2
deleted file mode 100644
index a38709cc0..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/05_interface_switchport.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-os10_interface:
- {{ os10_interface_3 }}:
- desc: "Switch port"
- mtu: 3000
- portmode: trunk
- admin: up
- switchport: true
- {{ os10_interface_1 }}:
- ip_and_mask:
- ipv6_and_mask:
- ipv6_autoconfig: false
- state_ipv6: absent
- {{ os10_interface_2 }}:
- ip_and_mask:
- ipv6_and_mask:
- ipv6_autoconfig: false
- state_ipv6: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2
deleted file mode 100644
index ea4414de7..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/06_interface_vlan.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vlan
- - dellemc.os10.os10_interface
-
-os10_vlan:
- vlan 10:
- state: present
-
-os10_interface:
- vlan 10:
- ip_and_mask:
- ipv6_and_mask: 2001:4898:5808:ffaf::1/64
- state_ipv6: present
- ip_helper:
- - ip: 10.0.0.33
- state: present
- admin: up
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2
deleted file mode 100644
index 310dcd39d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/07_interface_range.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-os10_interface:
- range ethernet {{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }},{{ os10_interface_3.split()[1] }}:
- mtu: 2500
- admin: up
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2
deleted file mode 100644
index e51374b70..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/templates/interface_basic/steps/teardown.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vlan
- - dellemc.os10.os10_interface
-
-os10_vlan:
- vlan 10:
- state: absent
-
-os10_interface:
- {{ os10_interface_1 }}:
- desc:
- mtu:
- portmode: access
- admin: down
- switchport: true
- ip_and_mask:
- ipv6_and_mask:
- ip_type_dynamic: false
- ipv6_type_dynamic: false
- ipv6_autoconfig: false
- flowcontrol:
- mode: receive
- enable: off
- state: absent
- {{ os10_interface_2 }}:
- desc:
- mtu:
- portmode: access
- admin: down
- switchport: true
- ip_and_mask:
- ipv6_and_mask:
- ip_type_dynamic: false
- ipv6_type_dynamic: false
- ipv6_autoconfig: false
- flowcontrol:
- mode: receive
- enable: off
- state: absent
- {{ os10_interface_3 }}:
- desc:
- mtu:
- portmode: access
- admin: down
- switchport: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml
deleted file mode 100644
index ff3aa0eba..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_interface_role/vars/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-test_roles:
- - dellemc.os10.os10_interface
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2
deleted file mode 100644
index fc74977a4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/01_lag_configure.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority: 2
- channel_members:
- - port: {{ os10_interface_1 }}
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2
deleted file mode 100644
index e8c2338ec..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/02_lag_update.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_lag:
- Po 12:
- type: dynamic
- min_links: 32
- max_bundle_size: 32
- lacp_system_priority: 5
- channel_members:
- - port: {{ os10_interface_1 }}
- mode: passive
- port_priority: 4
- lacp_rate_fast: false
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2
deleted file mode 100644
index 03d18fd2c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/03_lag_options_reset.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_lag:
- Po 12:
- type: dynamic
- min_links:
- max_bundle_size:
- lacp_system_priority:
- channel_members:
- - port: {{ os10_interface_1 }}
- mode:
- port_priority:
- lacp_rate_fast:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2
deleted file mode 100644
index c9e7b820f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/04_lag_absent.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority: 2
- channel_members:
- - port: {{ os10_interface_1 }}
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2
deleted file mode 100644
index 09b6c3ea6..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/templates/lag_basic/steps/teardown.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_lag:
- Po 12:
- type: dynamic
- min_links: 2
- max_bundle_size: 2
- lacp_system_priority:
- channel_members:
- - port: {{ os10_interface_1 }}
- mode: "active"
- port_priority: 3
- lacp_rate_fast: true
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml
deleted file mode 100644
index 4b1840fa4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lag_role/vars/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-test_roles:
- - dellemc.os10.os10_lag
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2
deleted file mode 100644
index aa8066948..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/01_lldp_configure.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-os10_lldp:
- enable: true
- multiplier: 3
- reinit: 2
- timer: 5
- advertise:
- med:
- fast_start_repeat_count: 4
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: present
- local_interface:
- "{{ os10_interface_1 }}":
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: inventory
- tlv_state: present
- application:
- - network_policy_id: 4
- state: present
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2
deleted file mode 100644
index 8b0272abc..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/02_lldp_update.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-os10_lldp:
- enable: true
- multiplier: 10
- reinit: 10
- timer: 15
- advertise:
- med:
- fast_start_repeat_count: 10
- application:
- - name: guest-voice
- network_policy_id: 1
- vlan_id: 5
- vlan_type: untag
- l2_priority: 7
- code_point_value: 20
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: present
- local_interface:
- "{{ os10_interface_1 }}":
- mode: transmit
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: network-policy
- tlv_state: present
- application:
- - network_policy_id: 5
- state: present
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2
deleted file mode 100644
index 7ded6af6c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/03_lldp_options_absent.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-os10_lldp:
- enable: false
- multiplier: 3
- reinit: 2
- timer: 5
- advertise:
- med:
- fast_start_repeat_count: 4
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: present
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: absent
- local_interface:
- "{{ os10_interface_1 }}":
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: inventory
- tlv_state: present
- application:
- - network_policy_id: 4
- state: absent
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2
deleted file mode 100644
index 9f372d8ac..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/templates/lldp_basic/steps/teardown.j2
+++ /dev/null
@@ -1,46 +0,0 @@
-os10_lldp:
- enable: true
- multiplier:
- reinit:
- timer:
- advertise:
- med:
- fast_start_repeat_count:
- application:
- - name: guest-voice
- network_policy_id: 0
- vlan_id: 2
- vlan_type: tag
- l2_priority: 3
- code_point_value: 4
- state: absent
- - name: voice
- network_policy_id: 1
- vlan_id: 3
- vlan_type: untag
- l2_priority: 3
- code_point_value: 4
- state: absent
- local_interface:
- "{{ os10_interface_1 }}":
- mode: rx
- mode_state: present
- advertise:
- med:
- enable: true
- tlv: inventory
- tlv_state: absent
- application:
- - network_policy_id: 4
- state: absent
- - network_policy_id: 5
- state: absent
- tlv:
- - name: basic-tlv
- value: management-address port-description
- state: present
- - name: dcbxp-appln
- value: iscsi
- state: present
- - name: dcbxp
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml
deleted file mode 100644
index 031311ec5..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_lldp_role/vars/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-test_roles:
- - dellemc.os10.os10_lldp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2
deleted file mode 100644
index 75a47735d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/01_logging_enable.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-idempotent: true
-os10_logging:
- console:
- severity: log-err
- log_file:
- severity: log-err
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2
deleted file mode 100644
index ea10cb8e4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/02_logging_update.j2
+++ /dev/null
@@ -1,6 +0,0 @@
-idempotent: true
-os10_logging:
- console:
- severity: log-debug
- log_file:
- severity: log-debug
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2
deleted file mode 100644
index f34f7ced4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/03_logging_disable.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-idempotent: true
-os10_logging:
- console:
- enable: false
- severity: log-err
- log_file:
- enable: false
- severity: log-err
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2
deleted file mode 100644
index d714efd2a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/04_logging_server_add.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-idempotent: false
-os10_logging:
- logging:
- - ip: 1.1.1.1
- state: present
- console:
- enable: True
- severity: log-err
- log_file:
- enable: True
- severity: log-err
- source_interface: "{{ os10_interface_1 }}"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2
deleted file mode 100644
index 992bafac6..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/05_logging_server_del.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-idempotent: false
-os10_logging:
- logging:
- - ip: 1.1.1.1
- state: absent
- console:
- enable: True
- severity: log-err
- log_file:
- enable: True
- severity: log-err
- source_interface: "{{ os10_interface_1 }}"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2
deleted file mode 100644
index d8d31880d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/templates/logging_basic/steps/teardown.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-os10_logging:
- logging:
- - ip: 1.1.1.1
- state: absent
- console:
- enable: True
- severity: log-notice
- log_file:
- enable: True
- severity: log-notice
- source_interface:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml
deleted file mode 100644
index 55afbe1dd..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_logging_role/vars/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-test_roles:
- - dellemc.os10.os10_logging
-
-idempotent_roles:
- - dellemc.os10.os10_logging
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2
deleted file mode 100644
index 35d8889a2..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/01_ntp_configure.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-os10_ntp:
- source: "{{ os10_interface_1 }}"
- master: 5
- authenticate: true
- authentication_key:
- - key_num: 123
- key_string_type: 0
- key_string: test
- state: present
- trusted_key:
- - key_num: 1323
- state: present
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: present
- intf:
- "{{ os10_interface_1 }}":
- disable: true
- broadcast: true
- vrf:
- name: red
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2
deleted file mode 100644
index f2b11184d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/02_ntp_update.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-os10_ntp:
- master: 4
- authenticate: false
- authentication_key:
- - key_num: 456
- key_string_type: 0
- key_string: ntptest
- state: present
- trusted_key:
- - key_num: 4626
- state: present
- server:
- - ip: 2.2.2.2
- key: 567
- prefer: false
- state: present
- intf:
- "{{ os10_interface_1 }}":
- disable: false
- broadcast: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2
deleted file mode 100644
index 56aff6af7..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/03_ntp_suboptions_absent.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-os10_ntp:
- authentication_key:
- - key_num: 456
- key_string_type: 0
- key_string: ntptest
- state: absent
- trusted_key:
- - key_num: 4626
- state: absent
- server:
- - ip: 2.2.2.2
- key: 567
- prefer: false
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2
deleted file mode 100644
index 61c784722..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/04_ntp_suboptions_stateless.j2
+++ /dev/null
@@ -1,11 +0,0 @@
-os10_ntp:
- authentication_key:
- - key_num: 456
- key_string_type: 0
- key_string: ntptest
- trusted_key:
- - key_num: 4626
- server:
- - ip: 2.2.2.2
- key: 567
- prefer: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2
deleted file mode 100644
index a23265215..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/05_ntp_empty_string.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_ntp:
- authenticate: ''
- authentication_key:
- - key_num: 456
- key_string_type: 0
- key_string: ntptest
- trusted_key:
- - key_num: 4626
- server:
- - ip: 2.2.2.2
- key: 567
- prefer: ''
- intf:
- "{{ os10_interface_1 }}":
- disable: ''
- broadcast: ''
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2
deleted file mode 100644
index 7ea26a0a0..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/setup.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-os10_vrf:
- vrfdetails:
- - vrf_name: red
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2
deleted file mode 100644
index a916200c3..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/templates/ntp_basic/steps/teardown.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-test_roles:
- - dellemc.os10.os10_ntp
- - dellemc.os10.os10_vrf
-os10_ntp:
- source:
- master:
- authenticate:
- authentication_key:
- - key_num: 123
- key_string_type: 0
- key_string: test
- state: absent
- trusted_key:
- - key_num: 1323
- state: absent
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: absent
- intf:
- "{{ os10_interface_1 }}":
- disable: false
- broadcast: false
- vrf:
- name: red
- state: absent
-
-os10_vrf:
- vrfdetails:
- - vrf_name: red
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml
deleted file mode 100644
index b42240ddd..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_ntp_role/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vrf
- - dellemc.os10.os10_ntp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2
deleted file mode 100644
index 4e9d912bf..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/01_prefix_list_config.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-idempotent: true
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2
deleted file mode 100644
index 3481d358e..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/02_prefix_list_update.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-idempotent: true
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: prefixlistdesc
- entries:
- - number: 18
- permit: false
- net_num: 20.0.0.0
- mask: 24
- condition_list:
- - condition: le
- prelen: 14
- - condition: ge
- prelen: 13
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2
deleted file mode 100644
index d14b8ddac..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/03_prefix_list_entry_absent.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-idempotent: false
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: absent
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2
deleted file mode 100644
index b349bd900..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/04_prefix_list_absent.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: absent
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2
deleted file mode 100644
index b349bd900..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/templates/prefix_list_basic/steps/teardown.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: absent
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml
deleted file mode 100644
index 706b6a875..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_prefix_list_role/vars/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-test_roles:
- - dellemc.os10.os10_prefix_list
-
-idempotent_roles:
- - dellemc.os10.os10_prefix_list
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2
deleted file mode 100644
index eec0f565a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/01_qos_config.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-idempotent: true
-os10_qos:
- policy_map:
- - name: testpolicy
- type: qos
- state: present
- class_map:
- - name: testclas
- type: qos
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2
deleted file mode 100644
index ba5936830..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/02_qos_update.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-idempotent: true
-os10_qos:
- policy_map:
- - name: testpolicy
- type:
- state: present
- class_map:
- - name: testclas
- type:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2
deleted file mode 100644
index 3310a13e8..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/03_qos_unconfig.j2
+++ /dev/null
@@ -1,10 +0,0 @@
-idempotent: false
-os10_qos:
- policy_map:
- - name: testpolicy
- type:
- state: absent
- class_map:
- - name: testclas
- type:
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2
deleted file mode 100644
index 2e7960c68..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/templates/qos_basic/steps/teardown.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-os10_qos:
- policy_map:
- - name: testpolicy
- type:
- state: absent
- class_map:
- - name: testclas
- type:
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml
deleted file mode 100644
index 9a1913c94..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_qos_role/vars/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-test_roles:
- - dellemc.os10.os10_qos
-
-idempotent_roles:
- - dellemc.os10.os10_qos
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2
deleted file mode 100644
index 1c0adec04..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/01_route_map_configure.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-idempotent: true
-os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: present
- community_list:
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: present
- extcommunity_list:
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: present
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- set:
- local_pref: 1200
- metric_type: type-1
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2
deleted file mode 100644
index fb464397f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/02_route_map_update.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-idempotent: true
-os10_route_map:
- as_path:
- - access_list: aa
- permit: false
- regex: abc
- state: present
- community_list:
- - type: standard
- name: qqq
- permit: false
- regex: internet
- state: present
- extcommunity_list:
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:35"
- state: present
- route_map:
- - name: test
- permit: false
- seq_num: 5
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- set:
- local_pref: 1500
- metric_type: type-1
- metric: "- 20"
- origin: egp
- weight: 60
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community: no-export
- comm_list:
- add: commstd
- delete: commex
- extcommunity: "25:37"
- extcomm_list:
- add: commstd
- delete: commex
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2
deleted file mode 100644
index a10c5d0ab..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/03_route_map_options_unconfig.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-idempotent: false
-os10_route_map:
- as_path:
- - access_list: aa
- permit:
- regex: www
- state: present
- community_list:
- - type: standard
- name: qqq
- permit:
- community: internet
- state: present
- extcommunity_list:
- - type: standard
- name: qqq
- permit:
- community: "rt 22:33"
- state: present
- route_map:
- - name: test
- permit:
- seq_num:
- continue:
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- set:
- local_pref:
- metric_type:
- metric:
- origin:
- weight:
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: present
- community:
- comm_list:
- add:
- delete:
- extcommunity:
- extcomm_list:
- add:
- delete:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2
deleted file mode 100644
index 181d8823f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/04_route_map_unconfig.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: absent
- community_list:
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: absent
- extcommunity_list:
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: absent
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- set:
- local_pref: 1200
- metric_type: type-1
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: absent
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2
deleted file mode 100644
index e7380b3dd..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/templates/route_map_basic/steps/teardown.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-os10_route_map:
- as_path:
- - access_list: aa
- permit: true
- regex: www
- state: absent
- community_list:
- - type: standard
- name: qqq
- permit: false
- community: internet
- state: absent
- extcommunity_list:
- - type: standard
- name: qqq
- permit: false
- community: "rt 22:33"
- state: absent
- route_map:
- - name: test
- permit: true
- seq_num: 1
- continue: 20
- match:
- - ip_type: ipv4
- access_group: testaccess
- prefix_list: testprefix
- set:
- local_pref: 1200
- metric_type: internal
- metric: + 30
- origin: igp
- weight: 50
- next_hop:
- - type: ip
- address: 10.1.1.1
- track_id: 3
- state: absent
- community: internet
- comm_list:
- add: qq
- delete: qqq
- extcommunity: "22:33"
- extcomm_list:
- add: aa
- delete: aa
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml
deleted file mode 100644
index 853134724..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_route_map_role/vars/main.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
-test_roles:
- - dellemc.os10.os10_route_map
-
-idempotent_roles:
- - dellemc.os10.os10_route_map
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2
deleted file mode 100644
index bbc976421..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/01_snmp_configure.j2
+++ /dev/null
@@ -1,135 +0,0 @@
-test_roles:
- - dellemc.os10.os10_snmp
-os10_snmp:
- snmp_source_interface: mgmt 1/1/1
- snmp_location: Chennai
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- - name: test
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- state: present
- snmp_traps:
- - name: all
- state: present
- snmp_engine_id: 123456789
- snmp_remote_engine_id:
- - host: 1.1.1.1
- engine_id: '0xab'
- - host: 1.1.1.1
- engine_id: '0xcd'
- udpport: 200
- - host: 2.1.1.1
- engine_id: '0xef'
- udpport: 200
- snmp_vrf: red
- snmp_group:
- - name: group_1
- version: 1
- write_view:
- name: view_2
- - name: group_2
- version: 2c
- state: present
- access_list:
- name: test_acl
- read_view:
- name: view_1
- write_view:
- name: view_2
- notify_view:
- name: view_3
- - name: group_3
- version: 3
- security_level: auth
- read_view:
- name: view_1
- write_view:
- name: view_2
- state: present
- - name: group_4
- version: 3
- security_level: priv
- notify_view:
- name: view_1
- state: present
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- state: present
- - ip: 2.1.1.1
- version: 1
- communitystring: c3
- trap_categories:
- dom: true
- entity: true
- snmp: true
- state: present
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: true
- lldp: true
- state: present
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: present
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: false
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: present
- - name: user_2
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: present
- - name: user_3
- group_name: group_1
- version: 2c
- state: present
- - name: user_4
- group_name: group_1
- version: 3
- state: present
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2
deleted file mode 100644
index 8c43046d2..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/02_snmp_update.j2
+++ /dev/null
@@ -1,144 +0,0 @@
-os10_snmp:
- snmp_source_interface: mgmt 1/1/1
- snmp_location: Chennai
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: absent
- - name: test
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- state: present
- snmp_traps:
- - name: all
- state: present
- snmp_engine_id: 123456789
- snmp_remote_engine_id:
- - host: 1.1.1.1
- engine_id: '0xab'
- - host: 1.1.1.1
- engine_id: '0xcd'
- udpport: 200
- - host: 2.1.1.1
- engine_id: '0xef'
- udpport: 200
- snmp_vrf: red
- snmp_group:
- - name: group_1
- version: 1
- access_list:
- name: test_acl
- read_view:
- name: view_1
- write_view:
- name: view_2
- - name: group_2
- version: 2c
- state: present
- access_list:
- name: test_acl
- read_view:
- name: view_1
- write_view:
- name: view_2
- notify_view:
- name: view_3
- state: absent
- - name: group_3
- version: 3
- security_level: auth
- read_view:
- name: view_1
- write_view:
- name: view_2
- state: present
- - name: group_4
- version: 3
- security_level: priv
- notify_view:
- name: view_1
- state: present
- read_view:
- name: view_1
- write_view:
- name: view_2
- state: present
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- state: present
- - ip: 2.1.1.1
- version: 1
- communitystring: c3
- trap_categories:
- dom: true
- entity: true
- snmp: true
- lldp: true
- state: present
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: false
- lldp: false
- state: present
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: present
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: false
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: present
- - name: user_2
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: present
- - name: user_3
- group_name: group_1
- version: 2c
- state: present
- - name: user_4
- group_name: group_1
- version: 3
- state: present
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2
deleted file mode 100644
index 4b4a86830..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/03_snmp_unconfig.j2
+++ /dev/null
@@ -1,147 +0,0 @@
-os10_snmp:
- snmp_source_interface: mgmt 1/1/1
- snmp_location: Chennai
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: absent
- - name: test
- access_mode: ro
- access_list:
- name: test_acl
- state: present
- state: absent
- snmp_traps:
- - name: all
- state: absent
- snmp_engine_id:
- snmp_remote_engine_id:
- - host: 1.1.1.1
- engine_id: '0xab'
- - host: 1.1.1.1
- engine_id: '0xcd'
- udpport: 200
- - host: 2.1.1.1
- engine_id: '0xef'
- udpport: 200
- state: absent
- snmp_vrf: red
- snmp_group:
- - name: group_1
- version: 1
- access_list:
- name: test_acl
- read_view:
- name: view_1
- write_view:
- name: view_2
- - name: group_2
- version: 2c
- state: present
- access_list:
- name: test_acl
- read_view:
- name: view_1
- write_view:
- name: view_2
- notify_view:
- name: view_3
- state: absent
- - name: group_3
- version: 3
- security_level: auth
- read_view:
- name: view_1
- write_view:
- name: view_2
- state: present
- - name: group_4
- version: 3
- security_level: priv
- notify_view:
- name: view_1
- state: absent
- read_view:
- name: view_1
- state: absent
- write_view:
- name: view_2
- state: absent
- state: absent
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: present
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- state: present
- - ip: 2.1.1.1
- version: 1
- communitystring: c3
- trap_categories:
- dom: true
- entity: true
- snmp: true
- lldp: true
- state: present
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: false
- lldp: false
- state: absent
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: absent
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: false
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: present
- - name: user_2
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: present
- - name: user_3
- group_name: group_1
- version: 2c
- state: absent
- - name: user_4
- group_name: group_1
- version: 3
- state: absent
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2
deleted file mode 100644
index 7ea26a0a0..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/setup.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-os10_vrf:
- vrfdetails:
- - vrf_name: red
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2
deleted file mode 100644
index 1a3ba013d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/templates/snmp_basic/steps/teardown.j2
+++ /dev/null
@@ -1,147 +0,0 @@
-test_roles:
- - dellemc.os10.os10_snmp
- - dellemc.os10.os10_vrf
-os10_vrf:
- vrfdetails:
- - vrf_name: "red"
- state: "absent"
-os10_snmp:
- snmp_source_interface:
- snmp_location:
- snmp_community:
- - name: public
- access_mode: ro
- access_list:
- name: test_acl
- state: absent
- - name: test
- access_mode: ro
- access_list:
- name: test_acl
- state: absent
- state: absent
- snmp_traps:
- - name: all
- state: absent
- snmp_engine_id:
- snmp_remote_engine_id:
- - host: 1.1.1.1
- engine_id: '0xab'
- state: absent
- - host: 1.1.1.1
- engine_id: '0xcd'
- udpport: 200
- state: absent
- - host: 2.1.1.1
- engine_id: '0xef'
- udpport: 200
- state: absent
- snmp_group:
- - name: group_1
- version: 1
- write_view:
- name: view_2
- state: absent
- - name: group_2
- version: 2c
- state: present
- access_list:
- name: test_acl
- read_view:
- name: view_1
- write_view:
- name: view_2
- notify_view:
- name: view_3
- state: absent
- - name: group_3
- version: 3
- security_level: auth
- read_view:
- name: view_1
- write_view:
- name: view_2
- state: present
- state: absent
- - name: group_4
- version: 3
- security_level: priv
- notify_view:
- name: view_1
- state: present
- state: absent
- snmp_host:
- - ip: 1.1.1.1
- communitystring: c1
- version: "2c"
- udpport: 4
- state: absent
- - ip: 2.2.2.2
- version: 1
- communitystring: c3
- state: absent
- - ip: 2.1.1.1
- version: 1
- communitystring: c3
- trap_categories:
- dom: true
- entity: true
- snmp: true
- state: absent
- - ip: 3.1.1.1
- version: 3
- security_level: priv
- security_name: test
- notification_type: informs
- udpport: 200
- trap_categories:
- dom: true
- entity: true
- envmon: true
- lldp: true
- state: absent
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: absent
- snmp_vrf:
- snmp_user:
- - name: user_1
- group_name: group_1
- version: 3
- authentication:
- localized: false
- algorithm: md5
- password: 9fc53d9d908118b2804fe80e3ba8763d
- encryption:
- algorithm: aes
- password: d0452401a8c3ce42804fe80e3ba8763d
- state: absent
- - name: user_2
- group_name: group_1
- version: 3
- authentication:
- localized: true
- algorithm: md5
- password: '0x9fc53d9d908118b2804fe80e3ba8763d'
- encryption:
- algorithm: aes
- password: '0xd0452401a8c3ce42804fe80e3ba8763d'
- state: absent
- - name: user_3
- group_name: group_1
- version: 2c
- state: absent
- - name: user_4
- group_name: group_1
- version: 3
- state: absent
- - name: user_5
- group_name: group_2
- version: 2c
- remote:
- ip: 1.1.1.1
- udpport: 200
- access_list: test_acl
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml
deleted file mode 100644
index 0e3995a5e..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_snmp_role/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vrf
- - dellemc.os10.os10_snmp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2
deleted file mode 100644
index 135499850..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/01_system_configure.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-os10_system:
- hostname: OS10
- hardware_forwarding: scaled-l3-hosts
- hash_algo:
- algo:
- - name: lag
- mode: crc
- state: present
- - name: ecmp
- mode: xor
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: source-ip
- state: present
- ipv6_selection:
- - field: source-ip
- state: present
- mac_selection:
- - field: source-mac
- state: present
- tcp_udp_selection:
- - field: l4-source-port
- state: present
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2
deleted file mode 100644
index 7263204bd..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/02_system_update.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-os10_system:
- hostname: OS10
- hardware_forwarding: scaled-l2
- hash_algo:
- algo:
- - name: ecmp
- mode: random
- state: present
- - name: lag
- mode: xor
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: destination-ip
- state: present
- ipv6_selection:
- - field: destination-ip
- state: present
- mac_selection:
- - field: destination-mac
- state: present
- tcp_udp_selection:
- - field: l4-destination-port
- state: present
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2
deleted file mode 100644
index 17c84d336..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/03_system_unconfig.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-os10_system:
- hostname: OS10
- hardware_forwarding:
- hash_algo:
- algo:
- - name: ecmp
- mode: random
- state: absent
- - name: lag
- mode: xor
- state: present
- load_balance:
- ingress_port:
- ip_selection:
- - field: destination-ip
- state: absent
- ipv6_selection:
- - field: destination-ip
- state: absent
- mac_selection:
- - field: destination-mac
- state: absent
- tcp_udp_selection:
- - field: l4-destination-port
- state: absent
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2
deleted file mode 100644
index 480ce1145..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/templates/system_basic/steps/teardown.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-os10_system:
- hostname:
- hardware_forwarding:
- hash_algo:
- algo:
- - name: ecmp
- mode: random
- state: absent
- - name: lag
- mode: xor
- state: absent
- load_balance:
- ingress_port:
- ip_selection:
- - field: destination-ip
- state: absent
- ipv6_selection:
- - field: destination-ip
- state: absent
- mac_selection:
- - field: destination-mac
- state: absent
- tcp_udp_selection:
- - field: l4-destination-port
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml
deleted file mode 100644
index d847941e5..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_system_role/vars/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-test_roles:
- - dellemc.os10.os10_system
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2
deleted file mode 100644
index fd7a91655..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/01_uplink_configure.j2
+++ /dev/null
@@ -1,32 +0,0 @@
-test_roles:
- - dellemc.os10.os10_uplink
-os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "present"
- - type: "downstream"
- intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}"
- state: "present"
- downstream:
- disable_links: all
- auto_recover: false
- defer_time: 50
- state: "present"
- - id: 2
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel2"
- state: "present"
- - type: "downstream"
- intf: "{{ os10_interface_3 }}"
- state: "present"
- downstream:
- disable_links: 10
- auto_recover: true
- defer_time: 50
- state: "present"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2
deleted file mode 100644
index d412e3e02..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/02_uplink_update.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: absent
- - type: "downstream"
- intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}"
- state: "present"
- downstream:
- disable_links: 10
- auto_recover: false
- defer_time: 50
- state: "present"
- - id: 2
- enable: false
- uplink_type:
- - type: "upstream"
- intf: "port-channel2"
- state: "present"
- - type: "downstream"
- intf: "{{ os10_interface_3 }}"
- state: absent
- downstream:
- disable_links: 15
- auto_recover: false
- defer_time: 40
- state: "present"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2
deleted file mode 100644
index b2d3da079..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/03_uplink_absent.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "absent"
- - type: "downstream"
- intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}"
- state: "absent"
- downstream:
- disable_links: all
- auto_recover: false
- defer_time: 50
- state: "absent"
- - id: 2
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel2"
- state: "absent"
- - type: "downstream"
- intf: "{{ os10_interface_3 }}"
- state: "absent"
- downstream:
- disable_links: 10
- auto_recover: true
- defer_time: 50
- state: "absent"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2
deleted file mode 100644
index 2f5796161..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/setup.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-os10_lag:
- Po 1:
- state: present
- Po 2:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2
deleted file mode 100644
index dcd90471f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/templates/uplink_basic/steps/teardown.j2
+++ /dev/null
@@ -1,38 +0,0 @@
-test_roles:
- - dellemc.os10.os10_uplink
- - dellemc.os10.os10_lag
-os10_lag:
- Po 1:
- state: absent
- Po 2:
- state: absent
-os10_uplink:
- uplink_state_group:
- - id: 1
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel1"
- state: "absent"
- - type: "downstream"
- intf: "ethernet{{ os10_interface_1.split()[1] }},{{ os10_interface_2.split()[1] }}"
- state: "absent"
- downstream:
- disable_links: all
- auto_recover: false
- defer_time: 50
- state: "absent"
- - id: 2
- enable: True
- uplink_type:
- - type: "upstream"
- intf: "port-channel2"
- state: "absent"
- - type: "downstream"
- intf: "{{ os10_interface_3 }}"
- state: "absent"
- downstream:
- disable_links: 10
- auto_recover: true
- defer_time: 50
- state: "absent"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml
deleted file mode 100644
index c3d3900d4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_uplink_role/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-test_roles:
- - dellemc.os10.os10_lag
- - dellemc.os10.os10_uplink
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml
deleted file mode 100644
index 4de62fb9f..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/tasks_old/main.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ test_case_to_run }}"
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: test_case_to_run
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2
deleted file mode 100644
index b53a5bb03..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/01_users_add.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-os10_users:
- - username: test
- password: a1a2a3a4!@#$
- role: sysadmin
- state: present
- - username: u1
- password: a1a2a3a4!@#$
- role: netadmin
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2
deleted file mode 100644
index abb30cfad..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/02_users_del.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-os10_users:
- - username: test
- password: a1a2a3a4!@#$
- role: sysadmin
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2
deleted file mode 100644
index f817f7b27..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/templates/users_basic/steps/teardown.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-os10_users:
- - username: test
- password: a1a2a3a4!@#$
- role: sysadmin
- state: absent
- - username: u1
- password: a1a2a3a4!@#$
- role: netadmin
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml
deleted file mode 100644
index 0a54dcad7..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_users_role/vars/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
-test_roles:
- - dellemc.os10.os10_users
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2
deleted file mode 100644
index 593fb33d4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/01_vlan_configure.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-idempotent: true
-os10_vlan:
- vlan 100:
- description: "Blue"
- tagged_members:
- - port: {{ os10_interface_1 }}
- state: present
- untagged_members:
- - port: {{ os10_interface_2 }}
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2
deleted file mode 100644
index a2fd82f4b..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/02_vlan_update.j2
+++ /dev/null
@@ -1,19 +0,0 @@
-idempotent: true
-os10_vlan:
- vlan 100:
- description: "Blue VLAN"
- tagged_members:
- - port: {{ os10_interface_1 }}
- state: present
- - port: {{ os10_interface_3 }}
- state: present
- untagged_members:
- - port: {{ os10_interface_2 }}
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2
deleted file mode 100644
index bbff49b19..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/03_vlan_member_port_range.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-idempotent: false
-os10_vlan:
- vlan 100:
- description: "Blue VLAN"
- tagged_members:
- - port: range ethernet {{ os10_interface_1.split()[1] }},{{ os10_interface_3.split()[1] }}
- state: present
- untagged_members:
- - port: range {{ os10_interface_2 }}
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: "present"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2
deleted file mode 100644
index 2cc502d2d..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/04_vlan_absent.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-idempotent: false
-os10_vlan:
- default_vlan_id: 1
- vlan 100:
- description:
- tagged_members:
- - port: {{ os10_interface_1 }}
- state: present
- - port: {{ os10_interface_3 }}
- state: absent
- untagged_members:
- - port: {{ os10_interface_2 }}
- state: present
- state: present
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: "present"
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2
deleted file mode 100644
index 81cff710c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/setup.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-os10_lag:
- Po 1:
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2
deleted file mode 100644
index e640d0c19..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/templates/vlan_basic/steps/teardown.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vlan
- - dellemc.os10.os10_lag
-os10_lag:
- Po 1:
- state: absent
-os10_vlan:
- default_vlan_id:
- vlan 100:
- description:
- tagged_members:
- - port: {{ os10_interface_1 }}
- state: absent
- - port: {{ os10_interface_3 }}
- state: absent
- untagged_members:
- - port: {{ os10_interface_2 }}
- state: absent
- state: absent
- vlan 888:
- description: "NSX_Cluster"
- untagged_members:
- - port: port-channel 1
- state: absent
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml
deleted file mode 100644
index 7acc287d9..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlan_role/vars/main.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-test_roles:
- - dellemc.os10.os10_lag
- - dellemc.os10.os10_vlan
-
-idempotent_roles:
- - dellemc.os10.os10_vlan
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml
deleted file mode 100644
index 967b3f143..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/defaults/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-testcase: "*"
-test_idempotency: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2
deleted file mode 100644
index 90ca59883..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/01_vlt_configure.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-idempotent: true
-test_roles:
- - dellemc.os10.os10_vlt
-os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- discovery_intf: {{ os10_interface_1.split()[1] }}
- discovery_intf_state: present
- peer_routing: True
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2
deleted file mode 100644
index e1406d215..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/02_vlt_update.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-idempotent: false
-os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- backup_destination_vrf:
- discovery_intf: {{ os10_interface_1.split()[1] }}
- discovery_intf_state: absent
- peer_routing: false
- vlt_mac: aa:aa:aa:aa:aa:ab
- vlt_peers:
- Po 12:
- peer_lag: 14
- state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2
deleted file mode 100644
index 1d667cca0..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/03_vlt_absent.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- discovery_intf: {{ os10_interface_1.split()[1] }}
- discovery_intf_state: present
- peer_routing: True
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2
deleted file mode 100644
index 17245d040..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/setup.j2
+++ /dev/null
@@ -1,7 +0,0 @@
-os10_lag:
- Po 12:
- state: present
-
-os10_interface:
- {{ os10_interface_1 }}:
- switchport: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2
deleted file mode 100644
index 4460af862..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/templates/vlt_basic/steps/teardown.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vlt
- - dellemc.os10.os10_lag
- - dellemc.os10.os10_interface
-os10_interface:
- {{ os10_interface_1 }}:
- portmode: access
- switchport: true
-os10_lag:
- Po 12:
- state: absent
-os10_vlt:
- domain: 1
- backup_destination: "192.168.211.175"
- destination_type: "ipv4"
- discovery_intf: {{ os10_interface_1.split()[1] }}
- discovery_intf_state: present
- peer_routing: True
- vlt_mac: aa:aa:aa:aa:aa:aa
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml
deleted file mode 100644
index e18e8364c..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vlt_role/vars/main.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-test_roles:
- - dellemc.os10.os10_interface
- - dellemc.os10.os10_lag
- - dellemc.os10.os10_vlt
-
-idempotent_roles:
- - dellemc.os10.os10_vlt
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2
deleted file mode 100644
index 0f95b5156..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/01_vrrp_configure.j2
+++ /dev/null
@@ -1,43 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vrrp
-os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- {{ os10_interface_1 }}:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- adv_interval_centisecs: 200
- state: present
- vlan100:
- vrrp_active_active_mode: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2
deleted file mode 100644
index b3fc61ce5..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/02_vrrp_update.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- {{ os10_interface_1 }}:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 200
- preempt: true
- track_interface:
- - resource_id: 3
- priority_cost: 30
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: absent
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 300
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: absent
- - ip: 4.1.1.3
- state: absent
- priority: 140
- preempt: true
- track_interface:
- - resource_id: 3
- priority_cost: 20
- state: present
- adv_interval_centisecs: 300
- state: present
- vlan100:
- vrrp_active_active_mode: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2
deleted file mode 100644
index f013293a7..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/03_vrrp_options_absent.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- {{ os10_interface_1 }}:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: absent
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: present
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: absent
- adv_interval_centisecs: 200
- state: present
- vlan100:
- vrrp_active_active_mode: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2
deleted file mode 100644
index 8e79b3198..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/04_vrrp_absent.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-os10_vrrp:
- vrrp:
- delay_reload: 2
- version: 3
- {{ os10_interface_1 }}:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: present
- - ip: 3001:4828:5808:ffa3::9
- state: present
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: absent
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: present
- - ip: 4.1.1.2
- state: present
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- adv_interval_centisecs: 200
- state: absent
- vlan100:
- vrrp_active_active_mode: true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2
deleted file mode 100644
index dd6b16b43..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/setup.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-os10_interface:
- {{ os10_interface_1 }}:
- switchport: false
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2
deleted file mode 100644
index 2d876f557..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/templates/vrrp_basic/steps/teardown.j2
+++ /dev/null
@@ -1,47 +0,0 @@
-test_roles:
- - dellemc.os10.os10_vrrp
- - dellemc.os10.os10_vlan
-os10_vlan:
- vlan 100:
- state: absent
-os10_vrrp:
- vrrp:
- delay_reload: 0
- version:
- {{ os10_interface_1 }}:
- vrrp_group:
- - group_id: 2
- type: ipv6
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: absent
- virtual_address:
- - ip: 2001:4828:5808:ffa3::9
- state: absent
- - ip: 3001:4828:5808:ffa3::9
- state: absent
- - ip: 4001:4828:5808:ffa3::9
- state: absent
- adv_interval_centisecs: 200
- state: absent
- - group_id: 4
- virtual_address:
- - ip: 4.1.1.1
- state: absent
- - ip: 4.1.1.2
- state: absent
- - ip: 4.1.1.3
- state: absent
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: absent
- adv_interval_centisecs: 200
- state: absent
- vlan100:
- vrrp_active_active_mode:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml
deleted file mode 100644
index 1eb402bf5..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_vrrp_role/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-test_roles:
- - dellemc.os10.os10_interface
- - dellemc.os10.os10_vrrp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2
deleted file mode 100644
index f1a85dbd2..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/01_xstp_rstp_configure.j2
+++ /dev/null
@@ -1,14 +0,0 @@
-test_roles:
- - dellemc.os10.os10_xstp
-os10_xstp:
- type: rstp
- enable: true
- path_cost: false
- mac_flush_timer: 5
- rstp:
- max_age: 7
- hello_time: 8
- forward_time: 9
- force_version: stp
- bridge_priority: 8192
- mac_flush_threshold: 6
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2
deleted file mode 100644
index f6c324bdf..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/02_xstp_rstp_defaults.j2
+++ /dev/null
@@ -1,12 +0,0 @@
-os10_xstp:
- type: rstp
- enable: true
- path_cost: false
- mac_flush_timer:
- rstp:
- max_age:
- hello_time:
- forward_time:
- force_version: stp
- bridge_priority:
- mac_flush_threshold:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2
deleted file mode 100644
index 07ed32db4..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/03_xstp_pvst_configure.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_xstp:
- type: rapid-pvst
- enable: true
- path_cost: true
- mac_flush_timer: 5
- pvst:
- vlan:
- - range_or_id: 10
- max_age: 10
- enable: true
- hello_time: 8
- forward_time: 8
- bridge_priority: 8192
- mac_flush_threshold: 6
- root: secondary
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2
deleted file mode 100644
index 82469210e..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/04_xstp_pvst_defaults.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-os10_xstp:
- type: rapid-pvst
- enable: true
- path_cost: false
- mac_flush_timer:
- pvst:
- vlan:
- - range_or_id: 10
- max_age:
- enable:
- hello_time:
- forward_time:
- bridge_priority:
- mac_flush_threshold:
- root:
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2
deleted file mode 100644
index ded2976b3..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/05_xstp_mstp_configure.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-os10_xstp:
- type: mst
- enable: true
- path_cost: true
- mac_flush_timer: 5
- mstp:
- max_age: 8
- max_hops: 7
- hello_time: 8
- forward_time: 8
- force_version: stp
- mstp_instances:
- - number_or_range: 1
- enable: true
- root: secondary
- mac_flush_threshold: 8
- bridge_priority: 8192
- mst_config:
- name: cfg1
- revision: 3
- cfg_list:
- - number: 1
- vlans: 4,5
- vlans_state: present
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2
deleted file mode 100644
index c39becdc2..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/06_xstp_mstp_defaults.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-os10_xstp:
- type: mst
- enable: true
- path_cost: false
- mac_flush_timer:
- mstp:
- max_age:
- max_hops:
- hello_time:
- forward_time:
- force_version:
- mstp_instances:
- - number_or_range: 1
- enable:
- root:
- mac_flush_threshold:
- bridge_priority:
- mst_config:
- name: cfg1
- revision:
- cfg_list:
- - number: 1
- vlans: 4,5
- vlans_state: absent
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2
deleted file mode 100644
index 2ec48bc22..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/07_xstp_interface_configure.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-os10_xstp:
- type: rstp
- enable: true
- path_cost: true
- mac_flush_timer:
- intf:
- {{ os10_interface_1 }}:
- edge_port: true
- bpdu_filter: true
- bpdu_guard: true
- guard: loop
- enable: true
- link_type: point-to-point
- msti:
- - instance_number: 1
- priority: 32
- cost: 1
- rstp:
- priority: 32
- cost: 7
- vlan:
- - range_or_id: 6
- priority: 16
- cost: 8
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2
deleted file mode 100644
index b28ea4510..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/08_xstp_interface_defaults.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-os10_xstp:
- type: rstp
- enable: true
- path_cost: true
- mac_flush_timer:
- intf:
- {{ os10_interface_1 }}:
- edge_port:
- bpdu_filter:
- bpdu_guard:
- guard:
- enable: false
- link_type:
- msti:
- - instance_number: 1
- priority:
- cost:
- rstp:
- priority:
- cost:
- vlan:
- - range_or_id: 6
- priority:
- cost:
-
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2
deleted file mode 100644
index e01f16e4b..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/setup.j2
+++ /dev/null
@@ -1,4 +0,0 @@
-os10_interface:
- {{ os10_interface_1 }}:
- switchport: true
- portmode: access
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2 b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2
deleted file mode 100644
index e1e5ffa81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/templates/xstp_basic/steps/teardown.j2
+++ /dev/null
@@ -1,60 +0,0 @@
-os10_xstp:
- type:
- enable: true
- path_cost: false
- mac_flush_timer:
- rstp:
- max_age:
- hello_time:
- forward_time:
- force_version:
- bridge_priority:
- mac_flush_threshold:
- pvst:
- vlan:
- - range_or_id: 10
- max_age:
- enable: true
- hello_time:
- forward_time:
- bridge_priority:
- mac_flush_threshold:
- root:
- mstp:
- max_age:
- max_hops:
- hello_time:
- forward_time:
- force_version:
- mstp_instances:
- - number_or_range: 1
- enable: true
- root:
- mac_flush_threshold:
- bridge_priority:
- mst_config:
- name:
- revision:
- cfg_list:
- - number: 1
- vlans: 4,5
- vlans_state: absent
- intf:
- {{ os10_interface_1 }}:
- edge_port:
- bpdu_filter:
- bpdu_guard:
- guard:
- enable: true
- link_type:
- msti:
- - instance_number: 1
- priority:
- cost:
- rstp:
- priority:
- cost:
- vlan:
- - range_or_id: 6
- priority:
- cost:
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml
deleted file mode 100644
index 001043cd5..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/os10_xstp_role/vars/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
-test_roles:
- - dellemc.os10.os10_interface
- - dellemc.os10.os10_xstp
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml
deleted file mode 100644
index 88be0f207..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/main.yaml
+++ /dev/null
@@ -1,3 +0,0 @@
----
-- { include: prepare_test_facts.yaml, tags: ['role']}
-- { include: tests.yaml, tags: ['role']}
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml
deleted file mode 100644
index a86b71f61..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/prepare_test_facts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- block:
- - name: Collect interface list
- import_role:
- name: os10_template
- tasks_from: show_ip_interface_brief.yaml
- register: result
- - name: Set interface list
- set_fact:
- os10_interface_1: "{{ ip_interface_facts[0].INTERFACE_NAME | lower }}"
- os10_interface_2: "{{ ip_interface_facts[1].INTERFACE_NAME | lower }}"
- os10_interface_3: "{{ ip_interface_facts[2].INTERFACE_NAME | lower }}"
-
- when: prepare_os10_role_tests_task | default(True) | bool
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml
deleted file mode 100644
index 395d2fe81..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/include_os10_role.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-- include_role:
- name: "{{ os10_role_name }}"
-
-- debug: msg="Role completed {{ os10_role_name }}"
- notify: "os10_role completed"
-
-- block:
- - name: "Testing idempotency for {{ os10_role_name }}"
- include_role:
- name: "{{ os10_role_name }}"
- - name: "idempotency result for {{ os10_role_name }}"
- fail:
- msg: "idempotency test failed for {{ os10_role_name }}"
- when: output.changed
-
- when: >
- ( test_idempotency is defined and test_idempotency and
- idempotent_roles is defined and os10_role_name in idempotent_roles and
- idempotent is defined and idempotent
- )
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml
deleted file mode 100644
index c84b1033a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_case.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
-- name: set test case
- set_fact:
- role_testcase: "{{ role_testcase_path | basename | splitext | first }}"
-
-- name: set test case output dir
- set_fact:
- testcase_outdir: "{{ output_dir }}/{{ role_name }}/{{ role_testcase_path }}"
-
-
-- name: Prepare testcase output dir
- file:
- path: "{{ testcase_outdir }}"
- state: directory
-
-- name: Source testcase variables
- include_vars: "{{ item }}"
- vars:
- params:
- files:
- - "{{ role_testcase }}.yaml"
- paths:
- - "vars"
- loop: "{{ query('first_found', params, errors='ignore') }}"
-
-- name: Include Testcase tasks
- include: "{{ role_testcase_path }}"
-
-- name: Identifying steps
- block:
- - name: Identifying steps
- find:
- paths: "{{ role_path }}/templates/{{ role_testcase }}/steps"
- patterns: "*.j2"
- register: step_files
- - set_fact: teststeps_files="{{ step_files.files | map(attribute='path') | map('basename') | list }}"
- - set_fact: teststeps="{{ teststeps_files | map('splitext') | map('first') | reject('equalto', 'setup') | reject('equalto', 'teardown') | list | sort }}"
- when: teststeps is not defined
-
-- name: Check if setup step exists
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/setup.j2"
- ignore_errors: true
- register: setup_template
-
-- name: Setup Testcase
- include: testcase/run_test_step.yaml role_teststep=setup idempotent=false
- when: setup_template.stat.exists == true
-
-- name: Run Test Steps
- block:
- - name: Run Test Steps
- include: testcase/run_test_step.yaml idempotent=false
- with_items: "{{ teststeps }}"
- loop_control:
- loop_var: role_teststep
- always:
- - name: Check teardown
- stat: path="{{ role_path }}/templates/{{ role_testcase }}/steps/teardown.j2"
- ignore_errors: true
- register: teardown_template
- - name: Run teardown
- include: testcase/run_test_step.yaml role_teststep=teardown idempotent=false
- when: teardown_template.stat.exists == true
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml
deleted file mode 100644
index 624325e54..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/testcase/run_test_step.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-- name: Running step {{ role_teststep }}
- debug: msg="{{ role_teststep }}"
-
-- name: Set step vars file
- set_fact:
- teststep_var_template: "{{ role_testcase }}/steps/{{ role_teststep }}.j2"
- teststep_vars_file: "{{ testcase_outdir }}/{{ role_teststep }}.yaml"
-
-- name: Preparing step variables
- template:
- src: "{{ teststep_var_template }}"
- dest: "{{ teststep_vars_file }}"
-
-- name: Load step variables
- include_vars:
- file: "{{ teststep_vars_file }}"
-
-- name: Including roles for the step
- include: testcase/include_os10_role.yaml os10_role_name="{{ step_role }}"
- # include_role:
- # name: "{{ step_role }}"
- with_items: "{{ test_roles }}"
- loop_control:
- loop_var: step_role
- when: test_roles is defined
diff --git a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml b/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml
deleted file mode 100644
index 572c2538a..000000000
--- a/ansible_collections/dellemc/os10/tests/integration/targets/role_test/tasks/tests.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
-- name: collect all test cases
- find:
- paths: "{{ role_path }}/tests"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: testcase/run_test_case.yaml
- with_items: "{{ test_items }}"
- loop_control:
- loop_var: role_testcase_path
diff --git a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt
deleted file mode 100644
index bfed9cf67..000000000
--- a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.10.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-plugins/action/os10.py action-plugin-docs
-plugins/action/textfsm_parser.py action-plugin-docs
-plugins/modules/bgp_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/mtu_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/vlt_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/wiring_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/show_system_network_summary.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_config.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_facts.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_command.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_config.py validate-modules:doc-default-does-not-match-spec
-plugins/modules/os10_facts.py validate-modules:doc-default-does-not-match-spec
-plugins/modules/os10_command.py validate-modules:doc-default-does-not-match-spec
-plugins/module_utils/network/base_network_show.py import-2.6
-plugins/modules/base_xml_to_dict.py import-2.6
-plugins/modules/bgp_validate.py import-2.6
-plugins/modules/mtu_validate.py import-2.6
-plugins/modules/show_system_network_summary.py import-2.6
-plugins/modules/vlt_validate.py import-2.6
-plugins/modules/wiring_validate.py import-2.6
diff --git a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt
deleted file mode 100644
index 16dc721fb..000000000
--- a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.11.txt
+++ /dev/null
@@ -1,13 +0,0 @@
-plugins/action/os10.py action-plugin-docs
-plugins/action/textfsm_parser.py action-plugin-docs
-plugins/modules/bgp_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/mtu_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/vlt_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/wiring_validate.py validate-modules:parameter-list-no-elements
-plugins/modules/show_system_network_summary.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_config.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_facts.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_command.py validate-modules:parameter-list-no-elements
-plugins/modules/os10_config.py validate-modules:doc-default-does-not-match-spec
-plugins/modules/os10_facts.py validate-modules:doc-default-does-not-match-spec
-plugins/modules/os10_command.py validate-modules:doc-default-does-not-match-spec
diff --git a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt
deleted file mode 100644
index ee4573d7b..000000000
--- a/ansible_collections/dellemc/os10/tests/sanity/ignore-2.9.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-plugins/action/os10.py action-plugin-docs
-plugins/action/textfsm_parser.py action-plugin-docs
-plugins/modules/show_system_network_summary.py validate-modules:missing-module-utils-import
diff --git a/ansible_collections/dellemc/os10/tests/sanity/requirements.txt b/ansible_collections/dellemc/os10/tests/sanity/requirements.txt
deleted file mode 100644
index 3e3a96692..000000000
--- a/ansible_collections/dellemc/os10/tests/sanity/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-packaging # needed for update-bundled and changelog
-sphinx ; python_version >= '3.5' # docs build requires python 3+
-sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+
-straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg
deleted file mode 100644
index 83e3e8911..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_config.cfg
+++ /dev/null
@@ -1,13 +0,0 @@
-!
-hostname router
-!
-interface ethernet1/1/2
- ip address 1.2.3.4/24
- description test string
-!
-interface ethernet1/1/3
- ip address 6.7.8.9/24
- description test string
- shutdown
-!
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg
deleted file mode 100644
index 7303a0c47..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/os10_config_src.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-!
-hostname foo
-!
-interface ethernet1/1/2
- no ip address
-!
-interface ethernet1/1/3
- ip address 6.7.8.9/24
- description test string
- shutdown
-!
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml
deleted file mode 100644
index bd13f0e65..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_interface__display-xml
+++ /dev/null
@@ -1,19467 +0,0 @@
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/1</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/1</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/1</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305068</if-index>
- <phys-address>14:18:77:09:ae:01</phys-address>
- <speed>40000000000</speed>
- <statistics>
- <in-octets>884475</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>5429</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>6212880</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>88684</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>94113</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>7097355</ether-octets>
- <ether-pkts>94113</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>88684</ether-tx-no-errors>
- <ether-rx-no-errors>5429</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>4</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>5425</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>83258</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>5426</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>5429</in-pkts>
- <out-pkts>88684</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:01</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>16272700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/1</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/2</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/2</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/2</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305094</if-index>
- <phys-address>14:18:77:09:ae:05</phys-address>
- <speed>40000000000</speed>
- <statistics>
- <in-octets>6220575</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>88787</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>892090</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>5523</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>94310</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>7112665</ether-octets>
- <ether-pkts>94310</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>5523</ether-tx-no-errors>
- <ether-rx-no-errors>88787</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>83350</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>5437</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>77</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>6</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>5440</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>88787</in-pkts>
- <out-pkts>5523</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:05</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>16306900</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- <link-local-addr>fe80::1618:77ff:fe09:ae05/64</link-local-addr>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/2</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/3</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/3</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/3</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305120</if-index>
- <phys-address>14:18:77:09:ae:09</phys-address>
- <speed>40000000000</speed>
- <statistics>
- <in-octets>6396220</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>91295</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>911207</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>5593</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>96888</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>7307427</ether-octets>
- <ether-pkts>96888</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>5593</ether-tx-no-errors>
- <ether-rx-no-errors>91295</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>85705</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>5590</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>4</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>5589</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>91295</in-pkts>
- <out-pkts>5593</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:09</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>16764600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/3</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/4</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/4</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/4</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305146</if-index>
- <phys-address>14:18:77:09:ae:0d</phys-address>
- <speed>40000000000</speed>
- <statistics>
- <in-octets>919800</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>5693</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>6410845</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>91497</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>97190</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>7330645</ether-octets>
- <ether-pkts>97190</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>91497</ether-tx-no-errors>
- <ether-rx-no-errors>5693</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>77</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>6</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>5610</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>85890</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>5607</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>5693</in-pkts>
- <out-pkts>91497</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:0d</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>16818100</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/4</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/5</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/5</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/5</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305172</if-index>
- <phys-address>14:18:77:09:ae:11</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:11</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>16913000</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/5</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/6</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/6</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/6</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305198</if-index>
- <phys-address>14:18:77:09:ae:15</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:15</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>16950900</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/6</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/7</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/7</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/7</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305224</if-index>
- <phys-address>14:18:77:09:ae:19</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:19</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>16995200</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/7</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/8</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/8</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/8</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305250</if-index>
- <phys-address>14:18:77:09:ae:1d</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:1d</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17021600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/8</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/9</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/9</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/9</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305276</if-index>
- <phys-address>14:18:77:09:ae:21</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:21</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17027900</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/9</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/10</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/10</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/10</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305302</if-index>
- <phys-address>14:18:77:09:ae:25</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:25</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17039500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/10</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/11</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/11</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/11</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305328</if-index>
- <phys-address>14:18:77:09:ae:29</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:29</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17048300</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/11</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/13</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/13</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <vrrp-ipv4>
- <vrrp-instance>
- <vrid>4</vrid>
- <log-state-change>false</log-state-change>
- <preempt>
- <enable>false</enable>
- <hold-time>0</hold-time>
- </preempt>
- <priority>120</priority>
- <advertise-interval>
- <advertise-interval-sec>1</advertise-interval-sec>
- <advertise-interval-centi-sec>200</advertise-interval-centi-sec>
- </advertise-interval>
- <virtual-ip-address>4.1.1.1</virtual-ip-address>
- <virtual-ip-address>4.1.1.2</virtual-ip-address>
- <accept-mode>false</accept-mode>
- <auth-type>no-authentication</auth-type>
- <protocol>ip</protocol>
- <track-list>
- <track-id>3</track-id>
- <priority-decrement>25</priority-decrement>
- </track-list>
- </vrrp-instance>
- </vrrp-ipv4>
- <vrrp-ipv6>
- <vrrp-instance>
- <vrid>2</vrid>
- <log-state-change>false</log-state-change>
- <preempt>
- <enable>false</enable>
- <hold-time>0</hold-time>
- </preempt>
- <priority>120</priority>
- <advertise-interval>
- <advertise-interval-sec>1</advertise-interval-sec>
- <advertise-interval-centi-sec>200</advertise-interval-centi-sec>
- </advertise-interval>
- <virtual-ip-address>fe80::10</virtual-ip-address>
- <virtual-ip-address>3001:4828:5808:ffa3::9</virtual-ip-address>
- <accept-mode>false</accept-mode>
- <auth-type>no-authentication</auth-type>
- <protocol>ip</protocol>
- <track-list>
- <track-id>3</track-id>
- <priority-decrement>25</priority-decrement>
- </track-list>
- </vrrp-instance>
- </vrrp-ipv6>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/13</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305380</if-index>
- <phys-address>14:18:77:09:ae:31</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:31</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17074100</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/13</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/16</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/16</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/16</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305458</if-index>
- <phys-address>14:18:77:09:ae:34</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:34</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17087400</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/16</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/17</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/17</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/17</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305484</if-index>
- <phys-address>14:18:77:09:ae:35</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:35</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17090400</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/17</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/18</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/18</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/18</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305510</if-index>
- <phys-address>14:18:77:09:ae:39</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:39</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17116100</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/18</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/19</name>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/19</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2DISABLED</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/19</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305536</if-index>
- <phys-address>14:18:77:09:ae:3d</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:3d</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17128600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/19</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/20</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/20</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/20</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305562</if-index>
- <phys-address>14:18:77:09:ae:41</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:41</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17135100</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/20</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/21</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/21</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/21</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305588</if-index>
- <phys-address>14:18:77:09:ae:45</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:45</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17145300</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/21</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/23</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/23</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/23</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305640</if-index>
- <phys-address>14:18:77:09:ae:4d</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:4d</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17164100</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/23</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/24</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/24</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/24</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305666</if-index>
- <phys-address>14:18:77:09:ae:51</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:51</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17164900</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/24</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/25</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/25</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/25</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305692</if-index>
- <phys-address>14:18:77:09:ae:55</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:55</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17424700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/25</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/26</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/26</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/26</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305718</if-index>
- <phys-address>14:18:77:09:ae:59</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:59</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17431600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/26</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/27</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/27</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/27</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305744</if-index>
- <phys-address>14:18:77:09:ae:5d</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:5d</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17470200</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/27</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/28</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/28</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/28</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305770</if-index>
- <phys-address>14:18:77:09:ae:61</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:61</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17477600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/28</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/30</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/30</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/30</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305822</if-index>
- <phys-address>14:18:77:09:ae:66</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:66</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17491400</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/30</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/31</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/31</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/31</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305848</if-index>
- <phys-address>14:18:77:09:ae:67</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:67</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17493000</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/31</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>ethernet1/1/32</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>ethernet1/1/32</name>
- <type>ianaift:ethernetCsmacd</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <lldp-med-cfg>
- <med-enable>true</med-enable>
- <notif-enable>false</notif-enable>
- </lldp-med-cfg>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/32</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305874</if-index>
- <phys-address>14:18:77:09:ae:68</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- <dcbx-stats>
- <ets-conf-tx-tlv-count>0</ets-conf-tx-tlv-count>
- <ets-conf-rx-tlv-count>0</ets-conf-rx-tlv-count>
- <ets-conf-rx-tlv-errors>0</ets-conf-rx-tlv-errors>
- <ets-reco-tx-tlv-count>0</ets-reco-tx-tlv-count>
- <ets-reco-rx-tlv-count>0</ets-reco-rx-tlv-count>
- <ets-reco-rx-tlv-errors>0</ets-reco-rx-tlv-errors>
- <pfc-tx-tlv-count>0</pfc-tx-tlv-count>
- <pfc-rx-tlv-count>0</pfc-rx-tlv-count>
- <pfc-rx-tlv-errors>0</pfc-rx-tlv-errors>
- <app-prio-tx-tlv-count>0</app-prio-tx-tlv-count>
- <app-prio-rx-tlv-count>0</app-prio-rx-tlv-count>
- <app-prio-rx-tlv-errors>0</app-prio-rx-tlv-errors>
- <dcbx-frames-tx-total>0</dcbx-frames-tx-total>
- <dcbx-frames-rx-total>0</dcbx-frames-rx-total>
- <dcbx-frames-error-total>0</dcbx-frames-error-total>
- <dcbx-frames-unrecognized>0</dcbx-frames-unrecognized>
- </dcbx-stats>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:68</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17498900</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <qos-if-params>
- <flow-control-rx>false</flow-control-rx>
- <flow-control-tx>false</flow-control-tx>
- <pfc-mode>false</pfc-mode>
- <ets-mode>false</ets-mode>
- <unknown-unicast-rate-pps>0</unknown-unicast-rate-pps>
- <weight-info>
- <queue-id>0</queue-id>
- <weight>1</weight>
- </weight-info>
- <weight-info>
- <queue-id>1</queue-id>
- <weight>2</weight>
- </weight-info>
- <weight-info>
- <queue-id>2</queue-id>
- <weight>3</weight>
- </weight-info>
- <weight-info>
- <queue-id>3</queue-id>
- <weight>4</weight>
- </weight-info>
- <weight-info>
- <queue-id>4</queue-id>
- <weight>5</weight>
- </weight-info>
- <weight-info>
- <queue-id>5</queue-id>
- <weight>10</weight>
- </weight-info>
- <weight-info>
- <queue-id>6</queue-id>
- <weight>25</weight>
- </weight-info>
- <weight-info>
- <queue-id>7</queue-id>
- <weight>50</weight>
- </weight-info>
- </qos-if-params>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>ethernet1/1/32</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr/>
- <interfaces>
- <interface>
- <name>mgmt1/1/1</name>
- <type>base-if:management</type>
- <enabled>true</enabled>
- <mtu>1500</mtu>
- <mac-learn>HW</mac-learn>
- <duplex>auto</duplex>
- <speed>AUTO</speed>
- <keepalive>true</keepalive>
- <ipv4>
- <address>
- <primary-addr>10.16.148.144/16</primary-addr>
- </address>
- </ipv4>
- <ipv6>
- <autoconfig>true</autoconfig>
- <intf-v6-enabled>true</intf-v6-enabled>
- </ipv6>
- <lldp>
- <tx-enable>true</tx-enable>
- <rx-enable>true</rx-enable>
- </lldp>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>mgmt1/1/1</name>
- <type>base-if:management</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>35454736</if-index>
- <phys-address>00:a0:c9:00:00:00</phys-address>
- <speed>1000000000</speed>
- <statistics>
- <in-octets>74473686</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>66</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>17129927</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>889900</in-pkts>
- <out-pkts>52776</out-pkts>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>10MBPS</supported-speed>
- <supported-speed>100MBPS</supported-speed>
- <supported-speed>1GIGE</supported-speed>
- <supported-autoneg>NOT_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <bind-ni-name>default</bind-ni-name>
- <current-phys-address>00:a0:c9:00:00:00</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17509300</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <ipv4-info>
- <assignment-mode>manual-cfg</assignment-mode>
- <addr>10.16.148.144/16</addr>
- </ipv4-info>
- <ipv6>
- <enable-status>true</enable-status>
- <link-local-addr>fe80::2a0:c9ff:fe00:0/64</link-local-addr>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>mgmt1/1/1</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr/>
- <interfaces>
- <interface>
- <name>vlan1</name>
- <type>ianaift:l2vlan</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <vlan-type>DATA</vlan-type>
- <untagged-ports>ethernet1/1/1</untagged-ports>
- <untagged-ports>ethernet1/1/3</untagged-ports>
- <untagged-ports>ethernet1/1/4</untagged-ports>
- <untagged-ports>ethernet1/1/5</untagged-ports>
- <untagged-ports>ethernet1/1/6</untagged-ports>
- <untagged-ports>ethernet1/1/7</untagged-ports>
- <untagged-ports>ethernet1/1/9</untagged-ports>
- <untagged-ports>ethernet1/1/10</untagged-ports>
- <untagged-ports>ethernet1/1/11</untagged-ports>
- <untagged-ports>ethernet1/1/20</untagged-ports>
- <untagged-ports>ethernet1/1/21</untagged-ports>
- <untagged-ports>ethernet1/1/22</untagged-ports>
- <untagged-ports>ethernet1/1/23</untagged-ports>
- <untagged-ports>ethernet1/1/24</untagged-ports>
- <untagged-ports>ethernet1/1/25</untagged-ports>
- <untagged-ports>ethernet1/1/26</untagged-ports>
- <untagged-ports>ethernet1/1/27</untagged-ports>
- <untagged-ports>ethernet1/1/28</untagged-ports>
- <untagged-ports>ethernet1/1/29</untagged-ports>
- <untagged-ports>ethernet1/1/30</untagged-ports>
- <untagged-ports>ethernet1/1/31</untagged-ports>
- <untagged-ports>ethernet1/1/32</untagged-ports>
- <untagged-ports>port-channel12</untagged-ports>
- <vlt-control>false</vlt-control>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>vlan1</name>
- <type>ianaift:l2vlan</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>69208865</if-index>
- <phys-address>14:18:77:09:af:01</phys-address>
- <speed>10000000000</speed>
- <statistics>
- <in-octets>8587506</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>9015054</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-in-vlan-discards>0</if-in-vlan-discards>
- <if-out-qlen>0</if-out-qlen>
- <in-pkts>107065</in-pkts>
- <out-pkts>107059</out-pkts>
- </statistics>
- <current-phys-address>14:18:77:09:af:01</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>17517200</last-counter-clear>
- <mtu>1532</mtu>
- <vlan-type>DATA</vlan-type>
- <untagged-ports>ethernet1/1/1</untagged-ports>
- <untagged-ports>ethernet1/1/3</untagged-ports>
- <untagged-ports>ethernet1/1/4</untagged-ports>
- <untagged-ports>ethernet1/1/5</untagged-ports>
- <untagged-ports>ethernet1/1/6</untagged-ports>
- <untagged-ports>ethernet1/1/7</untagged-ports>
- <untagged-ports>ethernet1/1/9</untagged-ports>
- <untagged-ports>ethernet1/1/10</untagged-ports>
- <untagged-ports>ethernet1/1/11</untagged-ports>
- <untagged-ports>ethernet1/1/20</untagged-ports>
- <untagged-ports>ethernet1/1/21</untagged-ports>
- <untagged-ports>ethernet1/1/22</untagged-ports>
- <untagged-ports>ethernet1/1/23</untagged-ports>
- <untagged-ports>ethernet1/1/24</untagged-ports>
- <untagged-ports>ethernet1/1/25</untagged-ports>
- <untagged-ports>ethernet1/1/26</untagged-ports>
- <untagged-ports>ethernet1/1/27</untagged-ports>
- <untagged-ports>ethernet1/1/28</untagged-ports>
- <untagged-ports>ethernet1/1/29</untagged-ports>
- <untagged-ports>ethernet1/1/30</untagged-ports>
- <untagged-ports>ethernet1/1/31</untagged-ports>
- <untagged-ports>ethernet1/1/32</untagged-ports>
- <untagged-ports>port-channel12</untagged-ports>
- <vlt-control>false</vlt-control>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- <link-local-addr>fe80::1618:77ff:fe09:af01/64</link-local-addr>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>vlan1</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr/>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>vlan4094</name>
- <type>ianaift:l2vlan</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>69212958</if-index>
- <phys-address>14:18:77:09:af:01</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-in-vlan-discards>0</if-in-vlan-discards>
- <if-out-qlen>0</if-out-qlen>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- </statistics>
- <current-phys-address>14:18:77:09:af:01</current-phys-address>
- <last-change-time>3992</last-change-time>
- <last-counter-clear>17135400</last-counter-clear>
- <mtu>1532</mtu>
- <vlan-type>INTERNAL</vlan-type>
- <vlt-control>true</vlt-control>
- <ipv4-info/>
- <ipv6>
- <enable-status>true</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>vlan4094</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr>
- <interface-in-running>
- <name>port-channel12</name>
- <untagged-vlan>vlan1</untagged-vlan>
- </interface-in-running>
- </cms-interface-backptr>
- <interfaces>
- <interface>
- <name>port-channel12</name>
- <type>ianaift:ieee8023adLag</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- <mode>MODE_L2</mode>
- <load-interval>299</load-interval>
- <min-links>1</min-links>
- <lag-mode>STATIC</lag-mode>
- <vlti-lag>false</vlti-lag>
- <vlt>
- <vlt-id>13</vlt-id>
- </vlt>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>port-channel12</name>
- <type>ianaift:ieee8023adLag</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>85886092</if-index>
- <phys-address>14:18:77:09:ae:8d</phys-address>
- <speed>0</speed>
- <statistics>
- <in-octets>0</in-octets>
- <in-unicast-pkts>0</in-unicast-pkts>
- <in-broadcast-pkts>0</in-broadcast-pkts>
- <in-multicast-pkts>0</in-multicast-pkts>
- <in-discards>0</in-discards>
- <in-errors>0</in-errors>
- <in-unknown-protos>0</in-unknown-protos>
- <out-octets>0</out-octets>
- <out-unicast-pkts>0</out-unicast-pkts>
- <out-broadcast-pkts>0</out-broadcast-pkts>
- <out-multicast-pkts>0</out-multicast-pkts>
- <out-discards>0</out-discards>
- <out-errors>0</out-errors>
- <if-out-qlen>0</if-out-qlen>
- <ether-drop-events>0</ether-drop-events>
- <ether-multicast-pkts>0</ether-multicast-pkts>
- <ether-broadcast-pkts>0</ether-broadcast-pkts>
- <ether-undersize-pkts>0</ether-undersize-pkts>
- <ether-fragments>0</ether-fragments>
- <ether-oversize-pkts>0</ether-oversize-pkts>
- <ether-rx-oversize-pkts>0</ether-rx-oversize-pkts>
- <ether-tx-oversize-pkts>0</ether-tx-oversize-pkts>
- <ether-jabbers>0</ether-jabbers>
- <ether-octets>0</ether-octets>
- <ether-pkts>0</ether-pkts>
- <ether-collisions>0</ether-collisions>
- <ether-crc-align-errors>0</ether-crc-align-errors>
- <ether-tx-no-errors>0</ether-tx-no-errors>
- <ether-rx-no-errors>0</ether-rx-no-errors>
- <green-discard-dropped-packets>0</green-discard-dropped-packets>
- <green-discard-dropped-bytes>0</green-discard-dropped-bytes>
- <yellow-discard-dropped-packets>0</yellow-discard-dropped-packets>
- <yellow-discard-dropped-bytes>0</yellow-discard-dropped-bytes>
- <red-discard-dropped-packets>0</red-discard-dropped-packets>
- <red-discard-dropped-bytes>0</red-discard-dropped-bytes>
- <discard-dropped-packets>0</discard-dropped-packets>
- <discard-dropped-bytes>0</discard-dropped-bytes>
- <ether-in-pkts-64-octets>0</ether-in-pkts-64-octets>
- <ether-in-pkts-65-to-127-octets>0</ether-in-pkts-65-to-127-octets>
- <ether-in-pkts-128-to-255-octets>0</ether-in-pkts-128-to-255-octets>
- <ether-in-pkts-256-to-511-octets>0</ether-in-pkts-256-to-511-octets>
- <ether-in-pkts-512-to-1023-octets>0</ether-in-pkts-512-to-1023-octets>
- <ether-in-pkts-1024-to-1518-octets>0</ether-in-pkts-1024-to-1518-octets>
- <ether-in-pkts-1519-to-2047-octets>0</ether-in-pkts-1519-to-2047-octets>
- <ether-in-pkts-2048-to-4095-octets>0</ether-in-pkts-2048-to-4095-octets>
- <ether-in-pkts-4096-to-9216-octets>0</ether-in-pkts-4096-to-9216-octets>
- <ether-in-pkts-9217-to-16383-octets>0</ether-in-pkts-9217-to-16383-octets>
- <ether-out-pkts-64-octets>0</ether-out-pkts-64-octets>
- <ether-out-pkts-65-to-127-octets>0</ether-out-pkts-65-to-127-octets>
- <ether-out-pkts-128-to-255-octets>0</ether-out-pkts-128-to-255-octets>
- <ether-out-pkts-256-to-511-octets>0</ether-out-pkts-256-to-511-octets>
- <ether-out-pkts-512-to-1023-octets>0</ether-out-pkts-512-to-1023-octets>
- <ether-out-pkts-1024-to-1518-octets>0</ether-out-pkts-1024-to-1518-octets>
- <ether-out-pkts-1519-to-2047-octets>0</ether-out-pkts-1519-to-2047-octets>
- <ether-out-pkts-2048-to-4095-octets>0</ether-out-pkts-2048-to-4095-octets>
- <ether-out-pkts-4096-to-9216-octets>0</ether-out-pkts-4096-to-9216-octets>
- <ether-out-pkts-9217-to-16383-octets>0</ether-out-pkts-9217-to-16383-octets>
- <pause-rx-pkts>0</pause-rx-pkts>
- <pause-tx-pkts>0</pause-tx-pkts>
- <in-pkts>0</in-pkts>
- <out-pkts>0</out-pkts>
- <utilization-percentage>0</utilization-percentage>
- <in-bit-rate>0</in-bit-rate>
- <in-pkt-rate>0</in-pkt-rate>
- <in-line-rate>0</in-line-rate>
- <out-bit-rate>0</out-bit-rate>
- <out-pkt-rate>0</out-pkt-rate>
- <out-line-rate>0</out-line-rate>
- <link-bundle-monitoring-alarm>false</link-bundle-monitoring-alarm>
- <lacp-statistics>
- <lacpdus-rx>0</lacpdus-rx>
- <lacpdus-tx>0</lacpdus-tx>
- <unknown-rx>0</unknown-rx>
- <illegal-rx>0</illegal-rx>
- <marker-pdus-tx>0</marker-pdus-tx>
- <marker-response-pdus-tx>0</marker-response-pdus-tx>
- <marker-pdus-rx>0</marker-pdus-rx>
- <marker-response-pdus-rx>0</marker-response-pdus-rx>
- </lacp-statistics>
- </statistics>
- <auto-negotiation>false</auto-negotiation>
- <supported-autoneg>NOT_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <current-phys-address>14:18:77:09:ae:8d</current-phys-address>
- <last-change-time>3750</last-change-time>
- <last-counter-clear>17160800</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <min-links>1</min-links>
- <lag-mode>STATIC</lag-mode>
- <num-ports-up>0</num-ports-up>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>port-channel12</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <cms-interface-backptr/>
- <interfaces>
- <interface>
- <name>null0</name>
- <type>base-if:null</type>
- <enabled>true</enabled>
- <mtu>1532</mtu>
- <mac-learn>HW</mac-learn>
- </interface>
- </interfaces>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data>
- <interface>
- <name>null0</name>
- <type>base-if:null</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>119690512</if-index>
- <last-change-time>39</last-change-time>
- <last-counter-clear>17549600</last-counter-clear>
- <ipv4-info/>
- <ipv6>
- <enable-status>false</enable-status>
- </ipv6>
- <fc-intf-params>
- <port-type>Disabled</port-type>
- <pwwn>00:00:00:00:00:00:00:00</pwwn>
- <fc-id>00:00:00</fc-id>
- <bb-credit>0</bb-credit>
- </fc-intf-params>
- </interface>
- </data>
- <last-keys>
- <name>null0</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-<?xml version="1.0"?>
-<rpc-reply>
- <data>
- <ip-config>
- <ipv4>
- <arp>
- <timeout>60</timeout>
- </arp>
- </ipv4>
- </ip-config>
- <ports>
- <ports-state>
- <port>
- <name>phy-eth1/1/1</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/2</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/3</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>131137546</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/4</name>
- <base-ID-fields>
- <media-laser-wavelength>38</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-category>QSFP-PLUS</media-category>
- <media-type>AR_QSFP_40GBASE_CR4_1M</media-type>
- <present>true</present>
- </port>
- <port>
- <name>phy-eth1/1/5</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/6</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/7</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/8</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/9</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/10</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/11</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/12</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/13</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/14</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/15</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/16</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/17</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/18</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/19</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/20</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/21</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/22</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/23</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/24</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/25</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/26</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3758089944</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/27</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3840747224</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/28</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/29</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/30</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3857532632</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/31</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3865925336</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- <port>
- <name>phy-eth1/1/32</name>
- <base-ID-fields>
- <media-laser-wavelength>0</media-laser-wavelength>
- </base-ID-fields>
- <channel>
- <sub-port>3849139928</sub-port>
- <rx-power>0.0</rx-power>
- </channel>
- <media-type>AR_POPTICS_NOTPRESENT</media-type>
- <present>false</present>
- </port>
- </ports-state>
- </ports>
- <system-state>
- <system-status>
- <uptime>162762</uptime>
- </system-status>
- </system-state>
- </data>
- <bulk>
- <data/>
- </bulk>
-</rpc-reply>
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml
deleted file mode 100644
index de3ad4ed7..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_lldp_neighbors__display-xml
+++ /dev/null
@@ -1,855 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<rpc-reply>
- <bulk>
- <data>
- <interface>
- <name>ethernet1/1/1</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305068</if-index>
- <phys-address>14:18:77:09:ae:01</phys-address>
- <speed>40000000000</speed>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:01</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>17570800</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info>
- <info>
- <rem-lldp-time-mark>330300</rem-lldp-time-mark>
- <rem-lldp-index>4</rem-lldp-index>
- <dest-mac-addr-index>1</dest-mac-addr-index>
- <rem-if-index>17305068</rem-if-index>
- <rem-local-port-num>260</rem-local-port-num>
- <rem-lldp-chassis-id>FBh3Ca4A
-</rem-lldp-chassis-id>
- <rem-lldp-port-id>ZXRoZXJuZXQxLzEvMw==
-</rem-lldp-port-id>
- <rem-lldp-chassis-id-subtype>mac-address</rem-lldp-chassis-id-subtype>
- <rem-lldp-port-subtype>interface-alias</rem-lldp-port-subtype>
- <rem-ttl>120</rem-ttl>
- <rem-last-update-time>172395</rem-last-update-time>
- <rem-info-valid-time>107</rem-info-valid-time>
- <rem-system-desc>OS10</rem-system-desc>
- <rem-port-desc>ethernet1/1/3</rem-port-desc>
- <rem-system-name>os10</rem-system-name>
- <rem-port-vlan-id>1</rem-port-vlan-id>
- <rem-max-frame-size>1532</rem-max-frame-size>
- <rem-agg-link-status>false</rem-agg-link-status>
- <rem-sys-cap-supported>router bridge repeater</rem-sys-cap-supported>
- <rem-sys-cap-enabled>router bridge repeater</rem-sys-cap-enabled>
- <rem-remote-changes>true</rem-remote-changes>
- <rem-too-many-neighbors>false</rem-too-many-neighbors>
- <rem-dot3-auto-neg-supported>true</rem-dot3-auto-neg-supported>
- <rem-dot3-auto-neg-enabled>true</rem-dot3-auto-neg-enabled>
- <rem-dot3-auto-neg-adv-cap>b-1000base-t</rem-dot3-auto-neg-adv-cap>
- </info>
- </lldp-rem-neighbor-info>
- </interface>
- <interface>
- <name>ethernet1/1/2</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305094</if-index>
- <phys-address>14:18:77:09:ae:05</phys-address>
- <speed>40000000000</speed>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:05</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info>
- <info>
- <rem-lldp-time-mark>330300</rem-lldp-time-mark>
- <rem-lldp-index>5</rem-lldp-index>
- <dest-mac-addr-index>1</dest-mac-addr-index>
- <rem-if-index>17305094</rem-if-index>
- <rem-local-port-num>264</rem-local-port-num>
- <rem-lldp-chassis-id>FBh3Ca4A
-</rem-lldp-chassis-id>
- <rem-lldp-port-id>ZXRoZXJuZXQxLzEvNA==
-</rem-lldp-port-id>
- <rem-lldp-chassis-id-subtype>mac-address</rem-lldp-chassis-id-subtype>
- <rem-lldp-port-subtype>interface-alias</rem-lldp-port-subtype>
- <rem-ttl>120</rem-ttl>
- <rem-last-update-time>172395</rem-last-update-time>
- <rem-info-valid-time>107</rem-info-valid-time>
- <rem-system-desc>OS10</rem-system-desc>
- <rem-port-desc>ethernet1/1/4</rem-port-desc>
- <rem-system-name>os10</rem-system-name>
- <rem-port-vlan-id>1</rem-port-vlan-id>
- <rem-max-frame-size>1532</rem-max-frame-size>
- <rem-agg-link-status>false</rem-agg-link-status>
- <rem-sys-cap-supported>router bridge repeater</rem-sys-cap-supported>
- <rem-sys-cap-enabled>router bridge repeater</rem-sys-cap-enabled>
- <rem-remote-changes>true</rem-remote-changes>
- <rem-too-many-neighbors>false</rem-too-many-neighbors>
- <rem-dot3-auto-neg-supported>true</rem-dot3-auto-neg-supported>
- <rem-dot3-auto-neg-enabled>true</rem-dot3-auto-neg-enabled>
- <rem-dot3-auto-neg-adv-cap>b-1000base-t</rem-dot3-auto-neg-adv-cap>
- </info>
- </lldp-rem-neighbor-info>
- </interface>
- <interface>
- <name>ethernet1/1/3</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305120</if-index>
- <phys-address>14:18:77:09:ae:09</phys-address>
- <speed>40000000000</speed>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:09</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info>
- <info>
- <rem-lldp-time-mark>330300</rem-lldp-time-mark>
- <rem-lldp-index>2</rem-lldp-index>
- <dest-mac-addr-index>1</dest-mac-addr-index>
- <rem-if-index>17305120</rem-if-index>
- <rem-local-port-num>268</rem-local-port-num>
- <rem-lldp-chassis-id>FBh3Ca4A
-</rem-lldp-chassis-id>
- <rem-lldp-port-id>ZXRoZXJuZXQxLzEvMQ==
-</rem-lldp-port-id>
- <rem-lldp-chassis-id-subtype>mac-address</rem-lldp-chassis-id-subtype>
- <rem-lldp-port-subtype>interface-alias</rem-lldp-port-subtype>
- <rem-ttl>120</rem-ttl>
- <rem-last-update-time>172395</rem-last-update-time>
- <rem-info-valid-time>107</rem-info-valid-time>
- <rem-system-desc>OS10</rem-system-desc>
- <rem-port-desc>ethernet1/1/1</rem-port-desc>
- <rem-system-name>os10</rem-system-name>
- <rem-port-vlan-id>1</rem-port-vlan-id>
- <rem-max-frame-size>1532</rem-max-frame-size>
- <rem-agg-link-status>false</rem-agg-link-status>
- <rem-sys-cap-supported>router bridge repeater</rem-sys-cap-supported>
- <rem-sys-cap-enabled>router bridge repeater</rem-sys-cap-enabled>
- <rem-remote-changes>true</rem-remote-changes>
- <rem-too-many-neighbors>false</rem-too-many-neighbors>
- <rem-dot3-auto-neg-supported>true</rem-dot3-auto-neg-supported>
- <rem-dot3-auto-neg-enabled>true</rem-dot3-auto-neg-enabled>
- <rem-dot3-auto-neg-adv-cap>b-1000base-t</rem-dot3-auto-neg-adv-cap>
- </info>
- </lldp-rem-neighbor-info>
- </interface>
- <interface>
- <name>ethernet1/1/4</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>17305146</if-index>
- <phys-address>14:18:77:09:ae:0d</phys-address>
- <speed>40000000000</speed>
- <auto-negotiation>true</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:0d</current-phys-address>
- <last-change-time>44</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info>
- <info>
- <rem-lldp-time-mark>330300</rem-lldp-time-mark>
- <rem-lldp-index>3</rem-lldp-index>
- <dest-mac-addr-index>1</dest-mac-addr-index>
- <rem-if-index>17305146</rem-if-index>
- <rem-local-port-num>272</rem-local-port-num>
- <rem-lldp-chassis-id>FBh3Ca4A
-</rem-lldp-chassis-id>
- <rem-lldp-port-id>ZXRoZXJuZXQxLzEvMg==
-</rem-lldp-port-id>
- <rem-lldp-chassis-id-subtype>mac-address</rem-lldp-chassis-id-subtype>
- <rem-lldp-port-subtype>interface-alias</rem-lldp-port-subtype>
- <rem-ttl>120</rem-ttl>
- <rem-last-update-time>172395</rem-last-update-time>
- <rem-info-valid-time>107</rem-info-valid-time>
- <rem-system-desc>OS10</rem-system-desc>
- <rem-port-desc>ethernet1/1/2</rem-port-desc>
- <rem-system-name>os10</rem-system-name>
- <rem-port-vlan-id>1</rem-port-vlan-id>
- <rem-max-frame-size>1532</rem-max-frame-size>
- <rem-agg-link-status>false</rem-agg-link-status>
- <rem-sys-cap-supported>router bridge repeater</rem-sys-cap-supported>
- <rem-sys-cap-enabled>router bridge repeater</rem-sys-cap-enabled>
- <rem-remote-changes>true</rem-remote-changes>
- <rem-too-many-neighbors>false</rem-too-many-neighbors>
- <rem-dot3-auto-neg-supported>true</rem-dot3-auto-neg-supported>
- <rem-dot3-auto-neg-enabled>true</rem-dot3-auto-neg-enabled>
- <rem-dot3-auto-neg-adv-cap>b-1000base-t</rem-dot3-auto-neg-adv-cap>
- </info>
- </lldp-rem-neighbor-info>
- </interface>
- <interface>
- <name>ethernet1/1/5</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305172</if-index>
- <phys-address>14:18:77:09:ae:11</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:11</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/6</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305198</if-index>
- <phys-address>14:18:77:09:ae:15</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:15</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/7</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305224</if-index>
- <phys-address>14:18:77:09:ae:19</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:19</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/8</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305250</if-index>
- <phys-address>14:18:77:09:ae:1d</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:1d</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/9</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305276</if-index>
- <phys-address>14:18:77:09:ae:21</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:21</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/10</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305302</if-index>
- <phys-address>14:18:77:09:ae:25</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:25</current-phys-address>
- <last-change-time>36</last-change-time>
- <last-counter-clear>17570700</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/11</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305328</if-index>
- <phys-address>14:18:77:09:ae:29</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:29</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/12</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305354</if-index>
- <phys-address>14:18:77:09:ae:2d</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:2d</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/13</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305380</if-index>
- <phys-address>14:18:77:09:ae:31</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:31</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/14</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305406</if-index>
- <phys-address>14:18:77:09:ae:32</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:32</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/15</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305432</if-index>
- <phys-address>14:18:77:09:ae:33</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:33</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/16</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305458</if-index>
- <phys-address>14:18:77:09:ae:34</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:34</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/17</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305484</if-index>
- <phys-address>14:18:77:09:ae:35</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:35</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/18</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305510</if-index>
- <phys-address>14:18:77:09:ae:39</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:39</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/19</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305536</if-index>
- <phys-address>14:18:77:09:ae:3d</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:3d</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/20</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305562</if-index>
- <phys-address>14:18:77:09:ae:41</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:41</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/21</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305588</if-index>
- <phys-address>14:18:77:09:ae:45</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:45</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/22</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305614</if-index>
- <phys-address>14:18:77:09:ae:49</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:49</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570600</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/23</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305640</if-index>
- <phys-address>14:18:77:09:ae:4d</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:4d</current-phys-address>
- <last-change-time>37</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/24</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305666</if-index>
- <phys-address>14:18:77:09:ae:51</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:51</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/25</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305692</if-index>
- <phys-address>14:18:77:09:ae:55</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:55</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/26</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305718</if-index>
- <phys-address>14:18:77:09:ae:59</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:59</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/27</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305744</if-index>
- <phys-address>14:18:77:09:ae:5d</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:5d</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/28</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305770</if-index>
- <phys-address>14:18:77:09:ae:61</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:61</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/29</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305796</if-index>
- <phys-address>14:18:77:09:ae:65</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:65</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/30</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305822</if-index>
- <phys-address>14:18:77:09:ae:66</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:66</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/31</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305848</if-index>
- <phys-address>14:18:77:09:ae:67</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:67</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>ethernet1/1/32</name>
- <type>ianaift:ethernetCsmacd</type>
- <admin-status>up</admin-status>
- <oper-status>down</oper-status>
- <if-index>17305874</if-index>
- <phys-address>14:18:77:09:ae:68</phys-address>
- <speed>0</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>40GIGE</supported-speed>
- <supported-autoneg>BOTH_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <npu-speed>0MBPS</npu-speed>
- <current-phys-address>14:18:77:09:ae:68</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570500</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <eee-state>n/a</eee-state>
- <lldp-rem-neighbor-info/>
- </interface>
- <interface>
- <name>mgmt1/1/1</name>
- <type>base-if:management</type>
- <admin-status>up</admin-status>
- <oper-status>up</oper-status>
- <if-index>35454736</if-index>
- <phys-address>00:a0:c9:00:00:00</phys-address>
- <speed>1000000000</speed>
- <auto-negotiation>false</auto-negotiation>
- <supported-speed>10MBPS</supported-speed>
- <supported-speed>100MBPS</supported-speed>
- <supported-speed>1GIGE</supported-speed>
- <supported-autoneg>NOT_SUPPORTED</supported-autoneg>
- <fec>not-supported</fec>
- <bind-ni-name>default</bind-ni-name>
- <current-phys-address>00:a0:c9:00:00:00</current-phys-address>
- <last-change-time>38</last-change-time>
- <last-counter-clear>17570400</last-counter-clear>
- <load-interval>30</load-interval>
- <mtu>1532</mtu>
- <lldp-rem-neighbor-info>
- <info>
- <rem-lldp-time-mark>3100</rem-lldp-time-mark>
- <rem-lldp-index>1</rem-lldp-index>
- <dest-mac-addr-index>1</dest-mac-addr-index>
- <rem-if-index>35454736</rem-if-index>
- <rem-local-port-num>4</rem-local-port-num>
- <rem-lldp-chassis-id>kLEc9C9t
-</rem-lldp-chassis-id>
- <rem-lldp-port-id>VGVuR2lnYWJpdEV0aGVybmV0IDAvMA==
-</rem-lldp-port-id>
- <rem-lldp-chassis-id-subtype>mac-address</rem-lldp-chassis-id-subtype>
- <rem-lldp-port-subtype>interface-name</rem-lldp-port-subtype>
- <rem-ttl>20</rem-ttl>
- <rem-last-update-time>175667</rem-last-update-time>
- <rem-info-valid-time>15</rem-info-valid-time>
- <rem-system-desc>Dell Real Time Operating System Software. Dell Operating System Version: 2.0. Dell Application Software Version: 9.11(2.0) Copyright (c) 1999-2017Dell Inc. All Rights Reserved.Build Time: Tue Apr 25 21:22:59 2017</rem-system-desc>
- <rem-port-desc>TenGigabitEthernet 0/0</rem-port-desc>
- <rem-system-name>swlab1-maa-tor-A2</rem-system-name>
- <rem-port-vlan-id>148</rem-port-vlan-id>
- <rem-max-frame-size>0</rem-max-frame-size>
- <rem-agg-link-status>false</rem-agg-link-status>
- <rem-sys-cap-supported>router bridge repeater</rem-sys-cap-supported>
- <rem-sys-cap-enabled>router bridge repeater</rem-sys-cap-enabled>
- <rem-remote-changes>false</rem-remote-changes>
- <rem-too-many-neighbors>false</rem-too-many-neighbors>
- <rem-dot3-auto-neg-supported>false</rem-dot3-auto-neg-supported>
- <rem-dot3-auto-neg-enabled>false</rem-dot3-auto-neg-enabled>
- <rem-dot3-auto-neg-adv-cap/>
- </info>
- </lldp-rem-neighbor-info>
- </interface>
- </data>
- <last-keys>
- <name>mgmt1/1/1</name>
- </last-keys>
- </bulk>
-</rpc-reply>
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_ b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_
deleted file mode 100644
index 78903b697..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_processes_node-id_1__grep_Mem_colon_
+++ /dev/null
@@ -1 +0,0 @@
-KiB Mem: 8127144 total, 2297272 used, 5829872 free, 137360 buffers
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config
deleted file mode 100644
index ff7ff2794..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_running-config
+++ /dev/null
@@ -1,252 +0,0 @@
-! Version 10.4.0E(R1)
-! Last configuration change at Jan 11 12:26:08 2018
-!
-snmp-server contact http://www.dell.com/support
-snmp-server host 192.0.2.1 traps version 1 c4 udp-port 5
-snmp-server host 192.0.2.1 traps version 2c c1 udp-port 4
-snmp-server host 192.0.2.2 traps version 1 c3 udp-port 162
-ip community-list expanded commex deny aaa
-ip community-list standard commstd deny internet
-ip community-list standard commstd permit no-advertise
-ip as-path access-list accesslist deny abc
-ip as-path access-list accesslist deny www
-ip extcommunity-list expanded extcommex deny aaa
-ip extcommunity-list standard extcommstd deny rt 22:33
-ip extcommunity-list standard extcommstd permit soo 22:33
-hostname os10
-interface breakout 1/1/1 map 40g-1x
-interface breakout 1/1/2 map 40g-1x
-interface breakout 1/1/3 map 40g-1x
-interface breakout 1/1/4 map 40g-1x
-interface breakout 1/1/5 map 40g-1x
-interface breakout 1/1/6 map 40g-1x
-interface breakout 1/1/7 map 40g-1x
-interface breakout 1/1/8 map 40g-1x
-interface breakout 1/1/9 map 40g-1x
-interface breakout 1/1/10 map 40g-1x
-interface breakout 1/1/11 map 40g-1x
-interface breakout 1/1/12 map 40g-1x
-interface breakout 1/1/13 map 40g-1x
-interface breakout 1/1/14 map 40g-1x
-interface breakout 1/1/15 map 40g-1x
-interface breakout 1/1/16 map 40g-1x
-interface breakout 1/1/17 map 40g-1x
-interface breakout 1/1/18 map 40g-1x
-interface breakout 1/1/19 map 40g-1x
-interface breakout 1/1/20 map 40g-1x
-interface breakout 1/1/21 map 40g-1x
-interface breakout 1/1/22 map 40g-1x
-interface breakout 1/1/23 map 40g-1x
-interface breakout 1/1/24 map 40g-1x
-interface breakout 1/1/25 map 40g-1x
-interface breakout 1/1/26 map 40g-1x
-interface breakout 1/1/27 map 40g-1x
-interface breakout 1/1/28 map 40g-1x
-interface breakout 1/1/29 map 40g-1x
-interface breakout 1/1/30 map 40g-1x
-interface breakout 1/1/31 map 40g-1x
-interface breakout 1/1/32 map 40g-1x
-username admin password $6$q9QBeYjZ$jfxzVqGhkxX3smxJSH9DDz7/3OJc6m5wjF8nnLD7/VKx8SloIhp4NoGZs0I/UNwh8WVuxwfd9q4pWIgNs5BKH. role sysadmin
-aaa authentication local
-iscsi target port 860
-iscsi target port 3260
-hash-algorithm ecmp xor
-logging console disable
-vrrp delay reload 5
-vrrp version 3
-spanning-tree mode rstp
-!
-interface vlan1
- no shutdown
-!
-interface vlan100
- no shutdown
-!
-interface port-channel12
- no shutdown
- switchport access vlan 1
- vlt-port-channel 13
-!
-interface ethernet1/1/1
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/2
- no shutdown
- no switchport
-!
-interface ethernet1/1/3
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/4
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/5
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/6
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/7
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/8
- no shutdown
- no switchport
-!
-interface ethernet1/1/9
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/10
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/11
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/12
- no shutdown
- no switchport
-!
-interface ethernet1/1/13
- no shutdown
- no switchport
- !
- vrrp-group 4
- priority 120
- track 3 priority-cost 25
- virtual-address 4.1.1.1
- virtual-address 4.1.1.2
- advertise-interval centisecs 200
- no preempt
- !
- vrrp-ipv6-group 2
- priority 120
- track 3 priority-cost 25
- virtual-address 3001:4828:5808:ffa3::9
- virtual-address fe80::10
- advertise-interval centisecs 200
- no preempt
-!
-interface ethernet1/1/14
- no shutdown
- no switchport
-!
-interface ethernet1/1/15
- no shutdown
- no switchport
-!
-interface ethernet1/1/16
- no shutdown
- no switchport
-!
-interface ethernet1/1/17
- no shutdown
- no switchport
-!
-interface ethernet1/1/18
- no shutdown
- no switchport
-!
-interface ethernet1/1/19
- no shutdown
- no switchport
-!
-interface ethernet1/1/20
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/21
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/22
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/23
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/24
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/25
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/26
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/27
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/28
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/29
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/30
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/31
- no shutdown
- switchport access vlan 1
-!
-interface ethernet1/1/32
- no shutdown
- switchport access vlan 1
-!
-interface mgmt1/1/1
- no shutdown
- ip address 10.16.148.144/16
- ipv6 address autoconfig
-!
-route-map test permit 1
- match ip address prefix-list testprefix
- match ip address testaccess
- continue 20
- set comm-list commstd delete
- set comm-list commex add
- set community internet
- set extcommunity rt 22:33
- set extcomm-list extcommstd delete
- set extcomm-list extcommex add
- set ip next-hop 10.1.1.1 track-id 3
- set local-preference 1200
- set metric + 30
- set metric-type internal
- set origin igp
- set weight 50
-!
-route-map test deny 10
- match ip address prefix-list testprefix
- match ip address testaccess
- set ip next-hop 10.1.1.1 track-id 3
-!
-support-assist
-!
-policy-map type application policy-iscsi
-!
-class-map type application class-iscsi
-!
-class-map type qos class-trust
-!
-vlt-domain 1
- discovery-interface ethernet1/1/12
- vlt-mac aa:aa:aa:aa:aa:aa
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml
deleted file mode 100644
index bb496cc93..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_system__display-xml
+++ /dev/null
@@ -1,194 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<rpc-reply>
- <data>
- <system>
- <node>
- <node-id>1</node-id>
- <node-mac>14:18:77:09:ae:00</node-mac>
- <number-of-mac-addresses>384</number-of-mac-addresses>
- <mfg-info>
- <vendor-name>DELL</vendor-name>
- <product-name>S6010-ON</product-name>
- <hw-version>X01</hw-version>
- <platform-name>x86_64-dell_s6010_c2538-r0</platform-name>
- <ppid>0088</ppid>
- <service-tag/>
- <service-code/>
- </mfg-info>
- <unit>
- <unit-id>1</unit-id>
- <current-unit-model>S6010</current-unit-model>
- <provisioned-unit-model>S6010</provisioned-unit-model>
- <unit-state>up</unit-state>
- <software-version>10.4.0E(R1)</software-version>
- <port-info>32x40GbE</port-info>
- <mfg-info>
- <vendor-name>DELL</vendor-name>
- <product-name>S6010-ON</product-name>
- <hw-version>X01</hw-version>
- <platform-name>x86_64-dell_s6010_c2538-r0</platform-name>
- <ppid>0088</ppid>
- <part-number>083R0P</part-number>
- <service-tag/>
- <service-code/>
- </mfg-info>
- <down-reason>user-triggered</down-reason>
- <descr>S6010-ON 32x40GbE QSFP+ Interface Module</descr>
- <beacon_led_state>false</beacon_led_state>
- <system-identifier-id>1</system-identifier-id>
- <firmware>
- <firmware-name>BIOS</firmware-name>
- <firmware-version>3.26.0.1</firmware-version>
- </firmware>
- <firmware>
- <firmware-name>System CPLD</firmware-name>
- <firmware-version>10</firmware-version>
- </firmware>
- <firmware>
- <firmware-name>Master CPLD</firmware-name>
- <firmware-version>9</firmware-version>
- </firmware>
- <firmware>
- <firmware-name>Slave CPLD</firmware-name>
- <firmware-version>4</firmware-version>
- </firmware>
- </unit>
- <power-supply>
- <psu-id>1</psu-id>
- <status>fail</status>
- <mfg-info/>
- </power-supply>
- <power-supply>
- <psu-id>2</psu-id>
- <status>up</status>
- <power-type>UNKNOWN</power-type>
- <fan-info>
- <fan-id>1</fan-id>
- <fan-status>fail</fan-status>
- <fan-speed-rpm>1920</fan-speed-rpm>
- </fan-info>
- <air-flow>NORMAL</air-flow>
- <mfg-info>
- <vendor-name/>
- <product-name/>
- <hw-version/>
- <platform-name/>
- <ppid/>
- <part-number/>
- <service-tag/>
- <service-code/>
- </mfg-info>
- </power-supply>
- <fan-tray>
- <fan-tray-id>1</fan-tray-id>
- <status>up</status>
- <fan-info>
- <fan-id>1</fan-id>
- <fan-status>up</fan-status>
- <fan-speed-rpm>22090</fan-speed-rpm>
- </fan-info>
- <air-flow>NORMAL</air-flow>
- <mfg-info>
- <vendor-name/>
- <product-name/>
- <hw-version>F01</hw-version>
- <platform-name/>
- <ppid>CN123456FAN100589021</ppid>
- <part-number>P1FAN1</part-number>
- <service-tag/>
- <service-code/>
- </mfg-info>
- </fan-tray>
- <fan-tray>
- <fan-tray-id>2</fan-tray-id>
- <status>up</status>
- <fan-info>
- <fan-id>1</fan-id>
- <fan-status>up</fan-status>
- <fan-speed-rpm>22215</fan-speed-rpm>
- </fan-info>
- <air-flow>NORMAL</air-flow>
- <mfg-info>
- <vendor-name/>
- <product-name/>
- <hw-version>F02</hw-version>
- <platform-name/>
- <ppid>CN123456FAN200589031</ppid>
- <part-number>P2FAN2</part-number>
- <service-tag/>
- <service-code/>
- </mfg-info>
- </fan-tray>
- <fan-tray>
- <fan-tray-id>3</fan-tray-id>
- <status>up</status>
- <fan-info>
- <fan-id>1</fan-id>
- <fan-status>up</fan-status>
- <fan-speed-rpm>22215</fan-speed-rpm>
- </fan-info>
- <air-flow>NORMAL</air-flow>
- <mfg-info>
- <vendor-name/>
- <product-name/>
- <hw-version>F03</hw-version>
- <platform-name/>
- <ppid>CN123456FAN300589041</ppid>
- <part-number>P3FAN3</part-number>
- <service-tag/>
- <service-code/>
- </mfg-info>
- </fan-tray>
- <fan-tray>
- <fan-tray-id>4</fan-tray-id>
- <status>up</status>
- <fan-info>
- <fan-id>1</fan-id>
- <fan-status>up</fan-status>
- <fan-speed-rpm>22215</fan-speed-rpm>
- </fan-info>
- <air-flow>NORMAL</air-flow>
- <mfg-info>
- <vendor-name/>
- <product-name/>
- <hw-version>F04</hw-version>
- <platform-name/>
- <ppid>CN123456FAN400589051</ppid>
- <part-number>P4FAN4</part-number>
- <service-tag/>
- <service-code/>
- </mfg-info>
- </fan-tray>
- <fan-tray>
- <fan-tray-id>5</fan-tray-id>
- <status>up</status>
- <fan-info>
- <fan-id>1</fan-id>
- <fan-status>up</fan-status>
- <fan-speed-rpm>21724</fan-speed-rpm>
- </fan-info>
- <air-flow>NORMAL</air-flow>
- <mfg-info>
- <vendor-name/>
- <product-name/>
- <hw-version>F05</hw-version>
- <platform-name/>
- <ppid>CN123456FAN500589061</ppid>
- <part-number>P5FAN5</part-number>
- <service-tag/>
- <service-code/>
- </mfg-info>
- </fan-tray>
- </node>
- </system>
- <system-state>
- <system-status>
- <hostname>os10</hostname>
- <uptime>161826</uptime>
- <date-time>2018-01-12T13:42:36.20+00:00</date-time>
- <boot-datetime>2018-01-10T16:45:30+00:00</boot-datetime>
- </system-status>
- </system-state>
- </data>
-</rpc-reply>
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version
deleted file mode 100644
index b9aa8feaa..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version
+++ /dev/null
@@ -1,9 +0,0 @@
-Dell EMC Networking OS10 Enterprise
-Copyright (c) 1999-2017 by Dell Inc. All Rights Reserved.
-OS Version: 10.4.0E(R1)
-Build Version: 10.4.0E(R1.56)
-Build Time: 2017-12-19T22:11:00-0800
-System Type: S6000-VM
-Architecture: x86_64
-Up Time: 6 days 00:33:35
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml
deleted file mode 100644
index 50f84ab67..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/fixtures/show_version__display-xml
+++ /dev/null
@@ -1,27 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<rpc-reply>
- <data>
- <system-state>
- <system-status>
- <hostname>os10</hostname>
- <uptime>162698</uptime>
- <date-time>2018-01-12T13:57:08.58+00:00</date-time>
- <boot-datetime>2018-01-10T16:45:30+00:00</boot-datetime>
- </system-status>
- </system-state>
- <system-sw-state>
- <sw-version>
- <sw-version>10.4.0E(R1)</sw-version>
- <sw-name>Enterprise</sw-name>
- <sw-name-long>Dell EMC Networking OS10 Enterprise</sw-name-long>
- <sw-platform>S6010-ON</sw-platform>
- <sw-platform-long>Dell EMC OS10 Enterprise Edition Blueprint 1.0.0</sw-platform-long>
- <cpu-arch>x86_64</cpu-arch>
- <sw-build-date>2017-12-14T23:39:27-0800</sw-build-date>
- <sw-build-version>10.4.0E(R1.55)</sw-build-version>
- <copyright>Copyright (c) 1999-2017 by Dell Inc. All Rights Reserved.</copyright>
- </sw-version>
- </system-sw-state>
- </data>
-</rpc-reply>
-
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py
deleted file mode 100644
index 8f990d739..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/os10_module.py
+++ /dev/null
@@ -1,90 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import json
-
-from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
-
-
-fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
-fixture_data = {}
-
-
-def load_fixture(name):
- path = os.path.join(fixture_path, name)
-
- if path in fixture_data:
- return fixture_data[path]
-
- with open(path) as f:
- data = f.read()
-
- try:
- data = json.loads(data)
- except Exception:
- pass
-
- fixture_data[path] = data
- return data
-
-
-class TestDellos10Module(ModuleTestCase):
-
- def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
-
- self.load_fixtures(commands)
-
- if failed:
- result = self.failed()
- self.assertTrue(result['failed'], result)
- else:
- result = self.changed(changed)
- self.assertEqual(result['changed'], changed, result)
-
- if commands is not None:
- if sort:
- self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
- else:
- self.assertEqual(commands, result['updates'], result['updates'])
-
- return result
-
- def failed(self):
- with self.assertRaises(AnsibleFailJson) as exc:
- self.module.main()
-
- result = exc.exception.args[0]
- self.assertTrue(result['failed'], result)
- return result
-
- def changed(self, changed=False):
- with self.assertRaises(AnsibleExitJson) as exc:
- self.module.main()
-
- result = exc.exception.args[0]
- self.assertEqual(result['changed'], changed, result)
- return result
-
- def load_fixtures(self, commands=None):
- pass
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py
deleted file mode 100644
index 77f6f1a74..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_command.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from units.compat.mock import patch
-from ansible_collections.dellemc.os10.plugins.modules import os10_command
-from units.modules.utils import set_module_args
-from .os10_module import TestDellos10Module, load_fixture
-
-
-class TestDellos10CommandModule(TestDellos10Module):
-
- module = os10_command
-
- def setUp(self):
- super(TestDellos10CommandModule, self).setUp()
-
- self.mock_run_commands = patch('ansible.modules.network.os10.os10_command.run_commands')
- self.run_commands = self.mock_run_commands.start()
-
- def tearDown(self):
- super(TestDellos10CommandModule, self).tearDown()
- self.mock_run_commands.stop()
-
- def load_fixtures(self, commands=None):
-
- def load_from_file(*args, **kwargs):
- module, commands = args
- output = list()
-
- for item in commands:
- try:
- obj = json.loads(item['command'])
- command = obj['command']
- except ValueError:
- command = item['command']
- filename = str(command).replace(' ', '_')
- output.append(load_fixture(filename))
- return output
-
- self.run_commands.side_effect = load_from_file
-
- def test_os10_command_simple(self):
- set_module_args(dict(commands=['show version']))
- result = self.execute_module()
- self.assertEqual(len(result['stdout']), 1)
- self.assertTrue(result['stdout'][0].startswith('Dell EMC Networking'))
-
- def test_os10_command_multiple(self):
- set_module_args(dict(commands=['show version', 'show version']))
- result = self.execute_module()
- self.assertEqual(len(result['stdout']), 2)
- self.assertTrue(result['stdout'][0].startswith('Dell EMC Networking'))
-
- def test_os10_command_wait_for(self):
- wait_for = 'result[0] contains "Dell EMC"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for))
- self.execute_module()
-
- def test_os10_command_wait_for_fails(self):
- wait_for = 'result[0] contains "test string"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for))
- self.execute_module(failed=True)
- self.assertEqual(self.run_commands.call_count, 10)
-
- def test_os10_command_retries(self):
- wait_for = 'result[0] contains "test string"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
- self.execute_module(failed=True)
- self.assertEqual(self.run_commands.call_count, 2)
-
- def test_os10_command_match_any(self):
- wait_for = ['result[0] contains "Dell EMC"',
- 'result[0] contains "test string"']
- set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
- self.execute_module()
-
- def test_os10_command_match_all(self):
- wait_for = ['result[0] contains "Dell EMC"',
- 'result[0] contains "OS10 Enterprise"']
- set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
- self.execute_module()
-
- def test_os10_command_match_all_failure(self):
- wait_for = ['result[0] contains "Dell EMC"',
- 'result[0] contains "test string"']
- commands = ['show version', 'show version']
- set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
- self.execute_module(failed=True)
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py
deleted file mode 100644
index e38d124a7..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_config.py
+++ /dev/null
@@ -1,150 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from units.compat.mock import patch
-from ansible_collections.dellemc.os10.plugins.modules import os10_config
-from units.modules.utils import set_module_args
-from .os10_module import TestDellos10Module, load_fixture
-
-
-class TestDellos10ConfigModule(TestDellos10Module):
-
- module = os10_config
-
- def setUp(self):
- super(TestDellos10ConfigModule, self).setUp()
-
- self.mock_get_config = patch('ansible.modules.network.os10.os10_config.get_config')
- self.get_config = self.mock_get_config.start()
-
- self.mock_load_config = patch('ansible.modules.network.os10.os10_config.load_config')
- self.load_config = self.mock_load_config.start()
-
- self.mock_run_commands = patch('ansible.modules.network.os10.os10_config.run_commands')
- self.run_commands = self.mock_run_commands.start()
-
- def tearDown(self):
- super(TestDellos10ConfigModule, self).tearDown()
- self.mock_get_config.stop()
- self.mock_load_config.stop()
- self.mock_run_commands.stop()
-
- def load_fixtures(self, commands=None):
- config_file = 'os10_config_config.cfg'
- self.get_config.return_value = load_fixture(config_file)
- self.load_config.return_value = None
-
- def test_os10_config_unchanged(self):
- src = load_fixture('os10_config_config.cfg')
- set_module_args(dict(src=src))
- self.execute_module()
-
- def test_os10_config_src(self):
- src = load_fixture('os10_config_src.cfg')
- set_module_args(dict(src=src))
- commands = ['hostname foo', 'interface ethernet1/1/2',
- 'no ip address']
- self.execute_module(changed=True, commands=commands)
-
- def test_os10_config_backup(self):
- set_module_args(dict(backup=True))
- result = self.execute_module()
- self.assertIn('__backup__', result)
-
- def test_os10_config_save(self):
- set_module_args(dict(save=True))
- self.execute_module(changed=True)
- self.assertEqual(self.run_commands.call_count, 1)
- self.assertEqual(self.get_config.call_count, 0)
- self.assertEqual(self.load_config.call_count, 0)
- args = self.run_commands.call_args[0][1]
- self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0])
-# self.assertIn('copy running-config startup-config\r', args)
-
- def test_os10_config_lines_wo_parents(self):
- set_module_args(dict(lines=['hostname foo']))
- commands = ['hostname foo']
- self.execute_module(changed=True, commands=commands)
-
- def test_os10_config_lines_w_parents(self):
- set_module_args(dict(lines=['shutdown'], parents=['interface ethernet1/1/2']))
- commands = ['interface ethernet1/1/2', 'shutdown']
- self.execute_module(changed=True, commands=commands)
-
- def test_os10_config_before(self):
- set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar']))
- commands = ['snmp-server contact bar', 'hostname foo']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os10_config_after(self):
- set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar']))
- commands = ['hostname foo', 'snmp-server contact bar']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os10_config_before_after_no_change(self):
- set_module_args(dict(lines=['hostname router'],
- before=['snmp-server contact bar'],
- after=['snmp-server location chennai']))
- self.execute_module()
-
- def test_os10_config_config(self):
- config = 'hostname localhost'
- set_module_args(dict(lines=['hostname router'], config=config))
- commands = ['hostname router']
- self.execute_module(changed=True, commands=commands)
-
- def test_os10_config_replace_block(self):
- lines = ['description test string', 'test string']
- parents = ['interface ethernet1/1/2']
- set_module_args(dict(lines=lines, replace='block', parents=parents))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands)
-
- def test_os10_config_match_none(self):
- lines = ['hostname router']
- set_module_args(dict(lines=lines, match='none'))
- self.execute_module(changed=True, commands=lines)
-
- def test_os10_config_match_none(self):
- lines = ['ip address 1.2.3.4/24', 'description test string']
- parents = ['interface ethernet1/1/2']
- set_module_args(dict(lines=lines, parents=parents, match='none'))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os10_config_match_strict(self):
- lines = ['ip address 1.2.3.4/24', 'description test string',
- 'shutdown']
- parents = ['interface ethernet1/1/2']
- set_module_args(dict(lines=lines, parents=parents, match='strict'))
- commands = parents + ['shutdown']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os10_config_match_exact(self):
- lines = ['ip address 1.2.3.4/24', 'description test string',
- 'shutdown']
- parents = ['interface ethernet1/1/2']
- set_module_args(dict(lines=lines, parents=parents, match='exact'))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands, sort=False)
diff --git a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py b/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py
deleted file mode 100644
index 7f03eab80..000000000
--- a/ansible_collections/dellemc/os10/tests/unit/modules/network/os10/test_os10_facts.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from units.compat.mock import patch
-from units.modules.utils import set_module_args
-from .os10_module import TestDellos10Module, load_fixture
-from ansible_collections.dellemc.os10.plugins.modules import os10_facts
-
-
-class TestDellos10Facts(TestDellos10Module):
-
- module = os10_facts
-
- def setUp(self):
- super(TestDellos10Facts, self).setUp()
-
- self.mock_run_command = patch(
- 'ansible.modules.network.os10.os10_facts.run_commands')
- self.run_command = self.mock_run_command.start()
-
- def tearDown(self):
- super(TestDellos10Facts, self).tearDown()
-
- self.mock_run_command.stop()
-
- def load_fixtures(self, commands=None):
-
- def load_from_file(*args, **kwargs):
- module, commands = args
- output = list()
-
- for item in commands:
- try:
- obj = json.loads(item)
- command = obj['command']
- except ValueError:
- command = item
- if '|' in command:
- command = str(command).replace('|', '')
- filename = str(command).replace(' ', '_')
- filename = filename.replace('/', '7')
- filename = filename.replace(':', '_colon_')
- output.append(load_fixture(filename))
- return output
-
- self.run_command.side_effect = load_from_file
-
- def test_os10_facts_gather_subset_default(self):
- set_module_args(dict())
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('os10', ansible_facts['ansible_net_hostname'])
- self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys())
- self.assertEquals(7936, ansible_facts['ansible_net_memtotal_mb'])
- self.assertEquals(5693, ansible_facts['ansible_net_memfree_mb'])
-
- def test_os10_facts_gather_subset_config(self):
- set_module_args({'gather_subset': 'config'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('os10', ansible_facts['ansible_net_hostname'])
- self.assertIn('ansible_net_config', ansible_facts)
-
- def test_os10_facts_gather_subset_hardware(self):
- set_module_args({'gather_subset': 'hardware'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('x86_64', ansible_facts['ansible_net_cpu_arch'])
- self.assertEquals(7936, ansible_facts['ansible_net_memtotal_mb'])
- self.assertEquals(5693, ansible_facts['ansible_net_memfree_mb'])
-
- def test_os10_facts_gather_subset_interfaces(self):
- set_module_args({'gather_subset': 'interfaces'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('ethernet1/1/8', ansible_facts['ansible_net_interfaces'].keys())
- self.assertEquals(sorted(['mgmt1/1/1', 'ethernet1/1/4', 'ethernet1/1/2', 'ethernet1/1/3', 'ethernet1/1/1']),
- sorted(list(ansible_facts['ansible_net_neighbors'].keys())))
- self.assertIn('ansible_net_interfaces', ansible_facts)
diff --git a/ansible_collections/dellemc/os6/.ansible-lint b/ansible_collections/dellemc/os6/.ansible-lint
deleted file mode 100644
index d8c4900d7..000000000
--- a/ansible_collections/dellemc/os6/.ansible-lint
+++ /dev/null
@@ -1,2 +0,0 @@
-skip_list:
- - '208'
diff --git a/ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml
deleted file mode 100644
index e5692b0a4..000000000
--- a/ansible_collections/dellemc/os6/.github/workflows/ansible-test.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-name: CI
-on:
-- pull_request
-
-jobs:
- sanity:
- name: Sanity (${{ matrix.ansible }})
- strategy:
- matrix:
- ansible:
- - stable-2.10
- - devel
- runs-on: ubuntu-latest
- steps:
-
- - name: Check out code
- uses: actions/checkout@v1
- with:
- path: ansible_collections/dellemc/os6
-
- - name: Set up Python 3.6
- uses: actions/setup-python@v1
- with:
- python-version: 3.6
-
- - name: Install ansible-base (${{ matrix.ansible }})
- run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
-
- - name: Install ansible_collections.ansible.netcommon
- run: ansible-galaxy collection install ansible.netcommon -p ../../
-
- - name: Run sanity tests
- run: ansible-test sanity --docker -v --color --python 3.6
diff --git a/ansible_collections/dellemc/os6/.gitignore b/ansible_collections/dellemc/os6/.gitignore
deleted file mode 100644
index c6fc14ad0..000000000
--- a/ansible_collections/dellemc/os6/.gitignore
+++ /dev/null
@@ -1,387 +0,0 @@
-
-# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-
-### dotenv ###
-.env
-
-### Emacs ###
-# -*- mode: gitignore; -*-
-*~
-\#*\#
-/.emacs.desktop
-/.emacs.desktop.lock
-*.elc
-auto-save-list
-tramp
-.\#*
-
-# Org-mode
-.org-id-locations
-*_archive
-
-# flymake-mode
-*_flymake.*
-
-# eshell files
-/eshell/history
-/eshell/lastdir
-
-# elpa packages
-/elpa/
-
-# reftex files
-*.rel
-
-# AUCTeX auto folder
-/auto/
-
-# cask packages
-.cask/
-dist/
-
-# Flycheck
-flycheck_*.el
-
-# server auth directory
-/server/
-
-# projectiles files
-.projectile
-
-# directory configuration
-.dir-locals.el
-
-# network security
-/network-security.data
-
-
-### Git ###
-# Created by git for backups. To disable backups in Git:
-# $ git config --global mergetool.keepBackup false
-*.orig
-
-# Created by git when using merge tools for conflicts
-*.BACKUP.*
-*.BASE.*
-*.LOCAL.*
-*.REMOTE.*
-*_BACKUP_*.txt
-*_BASE_*.txt
-*_LOCAL_*.txt
-*_REMOTE_*.txt
-
-#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!#
-
-### Linux ###
-
-# temporary files which can be created if a process still has a handle open of a deleted file
-.fuse_hidden*
-
-# KDE directory preferences
-.directory
-
-# Linux trash folder which might appear on any partition or disk
-.Trash-*
-
-# .nfs files are created when an open file is removed but is still being accessed
-.nfs*
-
-### PyCharm+all ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### PyCharm+all Patch ###
-# Ignores the whole .idea folder and all .iml files
-# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
-
-.idea/
-
-# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
-
-*.iml
-modules.xml
-.idea/misc.xml
-*.ipr
-
-# Sonarlint plugin
-.idea/sonarlint
-
-### pydev ###
-.pydevproject
-
-### Python ###
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-.pytest_cache/
-
-# Translations
-*.mo
-*.pot
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# pyenv
-.python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# Mr Developer
-.mr.developer.cfg
-.project
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-### Vim ###
-# Swap
-[._]*.s[a-v][a-z]
-[._]*.sw[a-p]
-[._]s[a-rt-v][a-z]
-[._]ss[a-gi-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-Sessionx.vim
-
-# Temporary
-.netrwhist
-# Auto-generated tag files
-tags
-# Persistent undo
-[._]*.un~
-
-### WebStorm ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-
-# Generated files
-
-# Sensitive or high-churn files
-
-# Gradle
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-
-# Mongo Explorer plugin
-
-# File-based project format
-
-# IntelliJ
-
-# mpeltonen/sbt-idea plugin
-
-# JIRA plugin
-
-# Cursive Clojure plugin
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-
-# Editor-based Rest Client
-
-# Android studio 3.1+ serialized cache file
-
-### WebStorm Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator/
-
-### Windows ###
-# Windows thumbnail cache files
-Thumbs.db
-Thumbs.db:encryptable
-ehthumbs.db
-ehthumbs_vista.db
-
-# Dump file
-*.stackdump
-
-# Folder config file
-[Dd]esktop.ini
-
-# Recycle Bin used on file shares
-$RECYCLE.BIN/
-
-# Windows Installer files
-*.cab
-*.msi
-*.msix
-*.msm
-*.msp
-
-# Windows shortcuts
-*.lnk
-
-# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
diff --git a/ansible_collections/dellemc/os6/COPYING b/ansible_collections/dellemc/os6/COPYING
deleted file mode 100644
index 10926e87f..000000000
--- a/ansible_collections/dellemc/os6/COPYING
+++ /dev/null
@@ -1,675 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (C) <year> <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
-
diff --git a/ansible_collections/dellemc/os6/FILES.json b/ansible_collections/dellemc/os6/FILES.json
deleted file mode 100644
index cf23654de..000000000
--- a/ansible_collections/dellemc/os6/FILES.json
+++ /dev/null
@@ -1,2987 +0,0 @@
-{
- "files": [
- {
- "format": 1,
- "ftype": "dir",
- "chksum_sha256": null,
- "name": ".",
- "chksum_type": null
- },
- {
- "ftype": "file",
- "chksum_sha256": "0c29a1ae51505d7a5d1e7f80c5abac708f68c44c5bd96fc94f0afff2408daeca",
- "name": ".ansible-lint",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/sanity",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c8c055821f8876eba6a702956071e467c89976dbf574600421b0cde8491d9744",
- "name": "tests/sanity/ignore-2.9.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c8a4ac4bfdef88e75d6e748e35a42fb4915947dfa2b7dd788626fd829600e014",
- "name": "tests/sanity/requirements.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2049a8032bd8451483531ee5f5376c9ab37a3f4f3f4194b8e04df6643cb933b1",
- "name": "tests/sanity/ignore-2.10.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6cac17a5998aa8480af2ea636ab0534293389d9e8303941d33bb591009d4f2a7",
- "name": "tests/sanity/ignore-2.11.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_facts",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_facts/os6_facts",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_facts/os6_facts/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_facts/os6_facts/tests/cli",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8317cea049586b1ac611ed0414663e3a0e6a07804f4a056ec463f3dfd8cf433d",
- "name": "tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_facts/os6_facts/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "name": "tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_facts/os6_facts/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1",
- "name": "tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "81ae4136ca3d879f645bc323268dd5af5a89467b0d776010965374f56ef07eb0",
- "name": "tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_command",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_command/os6_command",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_command/os6_command/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_command/os6_command/tests/cli",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "824ab4e366ae3b1f839019886fae66bd8be5852ec91ecc40490437562df4aa70",
- "name": "tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d3c99fb4690aa3f479f810768bcb8a147b00ec579f8581fdfde66fedc3a00e4c",
- "name": "tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ceca17eebf5d04dcc3ec39adf08a8291d71b30e17a65b16f02c1a278b165c254",
- "name": "tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "dd6945de4ad85b2fa1373aa9c167423b41ba6ab8cd7cd766b41dea238f5518cb",
- "name": "tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "aed021038fc74a5d58e1935744f8eec94725f56464f7a70aa52f43d17ed6019a",
- "name": "tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_command/os6_command/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "name": "tests/integration/targets/os6_command/os6_command/defaults/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_command/os6_command/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1",
- "name": "tests/integration/targets/os6_command/os6_command/tasks/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0c3d448b4f0a8de268734dd08d79db1ab073d9402de62d777d2d9f79340c05db",
- "name": "tests/integration/targets/os6_command/os6_command/tasks/cli.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_config",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_config/os6_config",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_config/os6_config/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a8acbe90eb42e7161e40e560bb8ae6ef38b59992505d87390d0871fa6e8f557c",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e6eb3df6c455d89e9e143cb424b15ccfbdf7beef726fb5ccee09f1485b146601",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a5af25d5e357ad36a366a00a494d6b45b6a6a484f0d278013c0b8923d2d83c58",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b602bf2052373f5e7e9df68adabec84d52c060842d65295907c687ca278b55a8",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1fe97966921c1e53b7ec280a1d7d7d232d0393b8a37cc089bb5c52cfd1cab71c",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c79daefb569c4128eb236e734e97de568e030ee98ed1f45d0cdd0d62570c1131",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "24225e8b46caaa034d2b40eb50591c6022ad3fd825467a1dcef84ad14eae3777",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8f3367a21641a0f445cff8434becc77cbc852670b29576ecde56371fc574ff5a",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6f509679767b1c8467bcbc72f419ac24fad67d697181f4d7c1c1515373df5ad1",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2f0876fb112582e491b8c771901f0c4abd15c2481ee1e19aca53509596032335",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "73fb7bbde923be1d01019de5a546137572ddbdd36acc4301dd4452d1faa77171",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f75521089a6b55df79214fc31e7b4b676999d191ebabb210503256901ddfe73a",
- "name": "tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_config/os6_config/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "name": "tests/integration/targets/os6_config/os6_config/defaults/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os6_config/os6_config/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1",
- "name": "tests/integration/targets/os6_config/os6_config/tasks/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0c3d448b4f0a8de268734dd08d79db1ab073d9402de62d777d2d9f79340c05db",
- "name": "tests/integration/targets/os6_config/os6_config/tasks/cli.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600",
- "name": "tests/.gitignore",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules/network",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules/network/os6",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "140157fdc99fb9e439a4d3be0aad737aaceafcb6da8799c90243d548315158b8",
- "name": "tests/unit/modules/network/os6/test_os6_command.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "tests/unit/modules/network/os6/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6c2284a2c56cb6fa05ecb3e9fb49d957309c0bd2119c6ae351d9c71eb0a3527d",
- "name": "tests/unit/modules/network/os6/test_os6_facts.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8c42ab3958dfa26f512a20fcce57a261601797f75f8563df7ba8acb030e1af1c",
- "name": "tests/unit/modules/network/os6/os6_module.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules/network/os6/fixtures",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9b4d1295a7098ece89dcfbdd241327022d0994c1ab53adbda8a71d43ce80c719",
- "name": "tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "09a6e09bcde3874bbd0755a1a2842b6356c8b9a7f1c053c1fedcdd3423bcb4e1",
- "name": "tests/unit/modules/network/os6/fixtures/os6_config_src.cfg",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "dbead93951c6ae31171cbe355dd89d88a862a0e27ba9911facc961056ddaf2d6",
- "name": "tests/unit/modules/network/os6/fixtures/show_interfaces",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c4c6643ce44ee771014c871eccf320921efe5e6d9bd2d03a0940739102228f78",
- "name": "tests/unit/modules/network/os6/fixtures/show_memory_cpu",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ece82c342beda30022b119fd410c364edeb54d55147d9f0c2a6b1afbe88f88cf",
- "name": "tests/unit/modules/network/os6/fixtures/show_interfaces_status",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "dd90ed90e19d6d7f48492f6de1f0cea6f5775c079abf4da219aa851cebcb0cfa",
- "name": "tests/unit/modules/network/os6/fixtures/show_version",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "175c44bf6d745d134eaec45f1422e983b498ad61785ab963d7722b1cfa94285b",
- "name": "tests/unit/modules/network/os6/fixtures/show_ip_int",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6893a3c5f2ef207b48eb4374df58e66c8cc390da3413a7168eb3d0207dc4cad4",
- "name": "tests/unit/modules/network/os6/fixtures/os6_config_config.cfg",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "db4102606a12c3c3c3b72d603a7289caff2c8072210368cf5f6bba0d49f3e12f",
- "name": "tests/unit/modules/network/os6/fixtures/show_running-config",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "414b84267a6c20d6b8d7f67eb9fac07c357320d899f26dd0a784eb4580d0ddae",
- "name": "tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ac27540a277fa3b1094c0d0caf9581955b411f1d09f17774607e245a58f498d3",
- "name": "tests/unit/modules/network/os6/fixtures/show_lldp",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "924e55fa0c98396d7fc365ddbd75df2037f73bbb21729433408ed61bda71ce5c",
- "name": "tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "952a955b0a6cb013c968737f63ecda29abf0449f34f3c39393b0b242781a8c2f",
- "name": "tests/unit/modules/network/os6/test_os6_config.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6ba2d44d2272d8dd4c24b362a95ed270bee2105f7ade0150045f183270d1fc7c",
- "name": "meta/runtime.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": ".github",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": ".github/workflows",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d2b86353e3881830dab4a73db505985b635a647a3288e1987d9069e3905ae867",
- "name": ".github/workflows/ansible-test.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/module_utils",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/module_utils/network",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9b4ad754c7fd54be06cef308e2a6296194c708248905fc3958e316ecb6f9fdff",
- "name": "plugins/module_utils/network/os6.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/module_utils/network/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/action",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "417ab2baa65d27eefc01cd6c130f2930f09dadfc140eeceeddca3cedb7bb11b1",
- "name": "plugins/action/os6.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/action/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/terminal",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8ac39b1c679da46a21dc582100e3cb12fb56b6fe599464e615a08840c0b48684",
- "name": "plugins/terminal/os6.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/terminal/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/cliconf",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4e9d0c393cbd23b0b305b85cbf2b15b73cd996d4f8ab75e0f6a175ec6f400ab1",
- "name": "plugins/cliconf/os6.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/cliconf/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/doc_fragments",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "051fd978ce9149eed8d1f82210faa9d09cbbaadd7440c76e01f7a2a8bfcb47bb",
- "name": "plugins/doc_fragments/os6.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/doc_fragments/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/modules",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/modules/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "83a17696f13b1a1b4a8f4c6c64d97a6febfaed6a8a2aa8480a248ee606c7e7b2",
- "name": "plugins/modules/os6_command.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "455b54c0a3026a62aa614672d1d9b1cfc46f3bb4e55d8afe0f5210791aa8b36a",
- "name": "plugins/modules/os6_facts.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f21de03ce85e6feb7a2057173cd9ed98babd669a6c383232fdd33952e5edae20",
- "name": "plugins/modules/os6_config.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227",
- "name": "COPYING",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720",
- "name": ".gitignore",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "changelogs",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2b6f825d35f7a2b89f5ff73b498eeccb10165f423b5d4babbb7c1f5aaabfbb49",
- "name": "changelogs/config.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "358846a679247201321be96384821e4d2f6ecf4d6f2f8bf093efc97c21dd399e",
- "name": "changelogs/changelog.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1b8442430a6cb14a263f40104bc78d65df935398f7175b91f5fee40fead79cf8",
- "name": "changelogs/CHANGELOG.rst",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "docs",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6a6c2890becf81f87d57ed5da9c8c3bc6872c8a7eeef4e24fcb9bd7fba1c0471",
- "name": "docs/os6_bgp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5a6ebdb3535baa818835da111f9c89c9eadfc0ffac539f42ad3f7b536209d490",
- "name": "docs/os6_qos.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "944f16070757f6e903f717dfdfdfb6256faafb1c49e9ca88c6c4565b119eae87",
- "name": "docs/os6_snmp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "05ef9ab09f593d61256adc8a96fd2d4e3f8a2146f593969e7563907d09eb20b4",
- "name": "docs/os6_xstp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ac44a6208cf2c79612d543f45b0537752487b31272c26821f9633546f2c1b6c6",
- "name": "docs/roles.rst",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9a95d328cf1bf27671bba9df1534e171a3284f2ee2423b4cccd0f7b769bb4dd8",
- "name": "docs/os6_interface.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ee8ceb504133f6d404a928073786b95f733a86e697aae7a0bf367ece75c3fe71",
- "name": "docs/os6_acl.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6186b5006e3bb89a11e8c7a4fc7fbb69d0f559db5b8e875497e8400dd28ae023",
- "name": "docs/os6_lldp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2bd34914da5ee317c0dce534b3eb907c33cefb1d2408e79a530d3155f42e673e",
- "name": "docs/os6_vlan.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "15551d1995afdb953a0e5bd532b8ff219708236ab473b9a837ac5cdc41fc6ed5",
- "name": "docs/os6_ntp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f067df9f87921cd7ba6f9bf4437fb2919111a44e619f476cdc3df3e2f3b79f8d",
- "name": "docs/os6_lag.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "82184a0e54c3f7d7cc10fade2f2ec306db907284622ba149cd989155a9488a19",
- "name": "docs/os6_vrrp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "263c5fc6dd50f3d00e66aa38a06710c3e754a8248d70e4cda0b1971d3be69fcd",
- "name": "docs/os6_logging.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "31f3bfd5a7c28cdf3f614929a7592e6794d1aeb1ad44ad0dbf063a30219c4aad",
- "name": "docs/os6_users.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b3b0a11e433186c5452577abcb41cf9dc18d36633bc3c826196f2c768255ccfd",
- "name": "docs/os6_system.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7cb9a2fa4b0d1b5393ac7e745fa81668f1977a6a6e9e9ca7a5970562c46829d2",
- "name": "docs/os6_aaa.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2f14c0c53085dd3bd638b743c0633f3e033864ee15199b83478050bda953fa8b",
- "name": "README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks/ibgp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks/ibgp/group_vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a4dcd6b5ab317541bc24f9ff0125cf67f9d421a6bc5a6af41105b206d5313a79",
- "name": "playbooks/ibgp/group_vars/all",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9d3f7db46fb66d404ef752cf999295fcf7b0d22e2cd0f5dc953958c1ecf3e9d9",
- "name": "playbooks/ibgp/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d1819a593a0da5bfb520f9cc0016d6f9a772d91b9f5fd2067f7dc9956d7dce17",
- "name": "playbooks/ibgp/os6switch.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8e4ffdb53fb7979e97567fece37df85405c12e11da9af3b03d9a91bf3c91e62a",
- "name": "playbooks/ibgp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks/ibgp/host_vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "809b97cd311b1815f41d16edfad0ce38b3f6e71238f139a8ac349a265d8f3b54",
- "name": "playbooks/ibgp/host_vars/switch1.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a9f9a7e49808177dc5887e34711c3123581c7007fa3f582242f2cdedc5dad682",
- "name": "playbooks/ibgp/host_vars/switch2.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "803c5e860e5d8a7eb300be934ce8cbd68e3e869329f9392df91b4f71d31d8a35",
- "name": "roles/os6_xstp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_xstp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_xstp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6ce58745ddc603524750b1e333257ba3ec441f83f19afd26fdf2e7f0add9dfb9",
- "name": "roles/os6_xstp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "372224afb6413913603cb411f026c3b45d8d6a968e66a215324074f022f5850b",
- "name": "roles/os6_xstp/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "546039128bbdb58e82bd748f879928d0454aa88e72bcea47c4a151fdf23286e9",
- "name": "roles/os6_xstp/templates/os6_xstp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e104eb31847e55c38c3bb29947a64ccc654403d8b4a32c65f9251fd851791e31",
- "name": "roles/os6_xstp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0031377870ad92e182c393f50f6246dc3f531e28f97652a1d047bbe9c1a3cd80",
- "name": "roles/os6_xstp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "be4219c0315e68d60560105f4f311d2d38ffe4abc7a0243392aa626e3954450c",
- "name": "roles/os6_xstp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1346c427518f75ebeaa5e971bfdb1e91f50a8a8a40d02c805385b6d9784cac01",
- "name": "roles/os6_xstp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_xstp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1790db61b96fb9f8c3e5fc7463c6ddfb2f267b8ca8dceaa0e2677f0764dd7020",
- "name": "roles/os6_xstp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a8842aa89c1070a6a4046429bdf96d68fa8fcc7b5f3ce5cc8fe1e3aed69ebd1e",
- "name": "roles/os6_users/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_users/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_users/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3826d6ef51782ac0fa23bd7e294af5cadc7621c70c9da81f14e7d020f5f67eb2",
- "name": "roles/os6_users/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8152c65af2b224d448cd01f4bb71ed055f4cb68d290c8add5ff848ca018b87c1",
- "name": "roles/os6_users/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6475570465d1c051d3d2e8726367a81ef84eabea5858fa5e126f74da11f87cd2",
- "name": "roles/os6_users/templates/os6_users.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a573eb01484b13cead3c181c38a1de473ab9010dbb70e54186e812b4bf6a6d21",
- "name": "roles/os6_users/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ee1e43cce86751cb7faca77c654c0c6a13755d27a8c659a740e831566f65fd28",
- "name": "roles/os6_users/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "40bfec1ec536a53cc3eb0a85eef64b8db0a414aa86d12818e3807f8f288e9026",
- "name": "roles/os6_users/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "08c816f5389fe337cb524d11a7cc0f39d73fabb43bb228122c4fddec77175f6e",
- "name": "roles/os6_users/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_users/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "90d00857c5b7af44e5d17fcf65caf4fb1c75a39fa3298b775063f9d30780276a",
- "name": "roles/os6_users/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7e63a91921eee58c938b7f543019705fa270472fe85ad8b9d87dc5a86d2046ff",
- "name": "roles/os6_system/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_system/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_system/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "821cf9c0f6e08b6ec3b90ac5fcbf62dfc71780a83d0f8b71af536e28230793e8",
- "name": "roles/os6_system/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9113d5ead08435c38a153c1aaa0613a5fdd0d27f734e1a45ae5e7282c96423ce",
- "name": "roles/os6_system/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "780b3fa89934626febc37bd06b94f95a994b792cef2e3aa2891d486c155be436",
- "name": "roles/os6_system/templates/os6_system.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "adac9abe9d800edad25875485db442e6b99a3f5ea46105df5085246ac71dbf72",
- "name": "roles/os6_system/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0d244afe331461734522bb38ca0e2e1b59aaa06ea43489b235ff8d9cb79d1364",
- "name": "roles/os6_system/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "aca109e63d63e9833221cfabcc46b3d8cf28cad129c9a4878f0139e484193535",
- "name": "roles/os6_system/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "43f1e0c3a3418f33b7cb2cbf8c500b56ac3b7520c144367698c008e367cb57ae",
- "name": "roles/os6_system/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_system/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "04a65ba21c0d5c1a224b3c844b06dd07c1fe70fedb3f963b087e578791f72285",
- "name": "roles/os6_system/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d9521a1781f5f231c12566af9f6b17175e595b469596a038a7289e92e15d5e76",
- "name": "roles/os6_logging/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_logging/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_logging/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a8ea92c60561762438c369c8332255d03cabcd3ca5002bde7ddb0addfa586992",
- "name": "roles/os6_logging/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "be69ac8448f04d06a0c54dde157d1c4c76f1f641a49ad588c64102bdaf3e67c3",
- "name": "roles/os6_logging/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "96c8f0e9b76943c27a02befaa17b3c0ecc878924f8f84572f2482296f4f2de7e",
- "name": "roles/os6_logging/templates/os6_logging.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1ca704c754fdd9ac5ef40e6f26bef56aedd9ffb4a43179efcd7d9865a9ad31e1",
- "name": "roles/os6_logging/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "aba3fe03dfddec68b995081167c8fd268206386b6f5815ec11bec8d54e5a4080",
- "name": "roles/os6_logging/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "034634e1325cc86a086438b12880fb70c2b4ca5730547f5edfa812964f06165f",
- "name": "roles/os6_logging/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "94d552e53d5213567f56cfead012518b68162b5fceb70e657731bf6ebd477c70",
- "name": "roles/os6_logging/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_logging/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c6cf973492367a4b3255157408bb5d0b53283a7db01c80c548f1e6b6cdc1a9d7",
- "name": "roles/os6_logging/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "82ec73f254aead6543bc058687405e7097c20aec5cbe19433eef54ce1e259ba7",
- "name": "roles/os6_snmp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_snmp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_snmp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0e7c27518f72dfbcfc15802f8a26d8381a8dae5ef38dab5f54ba5fac7ca083b1",
- "name": "roles/os6_snmp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "19e27ac25bfeadd3a26601f61993620e28a890fbe2ef811304e5e252e7be0516",
- "name": "roles/os6_snmp/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "763907e3f886c1c57f69f7e427147838e111c81229624ff3e0e7c52f13d3a423",
- "name": "roles/os6_snmp/templates/os6_snmp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6d2b78261f5fff90d93b85804400100f843e1fa11d14db07e4c7386fc5d74260",
- "name": "roles/os6_snmp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5a075e4051133ed3cb59a4146b14b7dd67265060e1fd724d17c890c3b0f5b57d",
- "name": "roles/os6_snmp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6d83dce278e238db0f9e04d56b60857cf7e49b72e6d6c71bdea61fc446516030",
- "name": "roles/os6_snmp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2201e5200b746937a66a21fcc93ac8cd8b15c0c916364e5c4c7a71035788d527",
- "name": "roles/os6_snmp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_snmp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "06e8ffcf45a3945bb9e712e76913f95e5020231a31afec6724e8ae1cdce2aaad",
- "name": "roles/os6_snmp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "da5e40cdaa1b7a69c4054b8d942793f93aa785c82c9da1ce415424ddecbc919d",
- "name": "roles/os6_lldp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_lldp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_lldp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d2693a92653a238f48e00a37047a95386d1b02f3115095fae8b8fe606473e769",
- "name": "roles/os6_lldp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0dbfa24c6644b5a06a598dd51521c4524b22d5aa20215ccf9a3a84aaafcc7061",
- "name": "roles/os6_lldp/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9d372a3a9d65866666088e09d2dc20e397fe3dc4fbcbd3ea436f9d86f3a4b5fc",
- "name": "roles/os6_lldp/templates/os6_lldp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "319fa0a08bdccbe50ffd946b199791cea0681413777814a2b360545220d2e711",
- "name": "roles/os6_lldp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0921c0bbcff88263cd4a1297bad9b1592c827b79f309be56f75a823d22f9192c",
- "name": "roles/os6_lldp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2e11e57e4b40bfb99c0ad7c38168b843fec9cf864953ff04e2bf78c73176e748",
- "name": "roles/os6_lldp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d41badac74d65ae5b358513b95a0a5056967df584346aec0509b30cea1e7e746",
- "name": "roles/os6_lldp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lldp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "61888d51ad1c72e82c4b02bb4be5eb5e254d8f853659f8537642b60d7f25b7a7",
- "name": "roles/os6_lldp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "74d6275a2afaf7fd1f23f91fc4f9f49a03d40d3626b9db180806c2cecf340c52",
- "name": "roles/os6_vrrp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_vrrp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_vrrp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "da3347db91a9316e25d2b47998a9124d89c8bb1739c70e9b97d594e9cffda9bb",
- "name": "roles/os6_vrrp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1176136a75c61f0d9267805b41a1274332057eca97c97cb234890cbb277be02e",
- "name": "roles/os6_vrrp/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8e91701f7b51dcad75504f0172806ef6f65dafb6cb246e033dd8ee8a56f5fe6f",
- "name": "roles/os6_vrrp/templates/os6_vrrp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4d75afdb62b5f59f0ebdd88038a48c2fb1c2cc5ecb0547184186a4f031f06872",
- "name": "roles/os6_vrrp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3b60c1578bd9a8dc29ef85e5c873b59edeaf4da6bd6720cfaa707ec4d83a1f2a",
- "name": "roles/os6_vrrp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c290c23bb9b8c2cfae65e0b00d01817cab1c3b9f4e7e94f7ca45f3d592f33d1b",
- "name": "roles/os6_vrrp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "71de967c3d4a1b11dc36e95c6258fdb8b0aef6222c759fa4957bfc8eda3c314b",
- "name": "roles/os6_vrrp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vrrp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "052c782bbb023db26ccd40d93254b5b2af7bf6b124426e25d594239f415140f2",
- "name": "roles/os6_vrrp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c2732fdca439d06e069471d14f8a87a37faee828b19e613e0312dd25363127b4",
- "name": "roles/os6_vlan/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_vlan/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_vlan/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "96e149fd5329afa78c33edc35ab347914ee95afa22e0bf7c62fa31aeef464d56",
- "name": "roles/os6_vlan/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6c06f4e556311c5a9dc74f2e1914164d6333c59fc620b989c205418c8d88f33b",
- "name": "roles/os6_vlan/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6645c1ecbc6555841a261e14863b1d87f9631ade8a29fb56e866192d70cc0759",
- "name": "roles/os6_vlan/templates/os6_vlan.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "67289fe1f09bc1ed85278eb5dc511097363d1fc753be3578f902de6e126b55f7",
- "name": "roles/os6_vlan/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f1aebe1ba927d520109c7d3bfc483574abc369988f2524663ddc5aaba4c66d2f",
- "name": "roles/os6_vlan/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "db20aa1cfff396930e3511a34e11d703f7e212959e802b63fa3f42afdf089d3f",
- "name": "roles/os6_vlan/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "dca829e2d9e9fffca5a0b7d43e196606e9a160f6af26336973757a50d93d3d21",
- "name": "roles/os6_vlan/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_vlan/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3b961c92256ddc4c88064b72cf947b4e1e866fda7c1be5e849695a62c2baaa16",
- "name": "roles/os6_vlan/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "22810b5b1ad7e24085bbf11547eb842ce297dc0bfb2a49d2c047551a85d2c560",
- "name": "roles/os6_lag/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_lag/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_lag/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c8c1a8a56bd960a41e25288853ebbbc8e17b67316fabac82e28c1a1bc7cc7c5c",
- "name": "roles/os6_lag/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cabc7457274088b5ac6661e4790aff7e913a535c67c0ce79c9610050d317b4d1",
- "name": "roles/os6_lag/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e80b023e177c635bcf7cc9fb240ce6e3b93d38a146b1d5f7d65039d991d4e2ae",
- "name": "roles/os6_lag/templates/os6_lag.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6a09aefebf59af427b44818dd420e1017135d3172c6dec56cfd49f975d397b97",
- "name": "roles/os6_lag/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ba602fd62628ba43f77bdddc168ba905a58c9e6a8913f92a4939df005f3004d9",
- "name": "roles/os6_lag/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9294b173cd96c01140bba5121848bb293e1fb3e4764fed799699c15d49d7a537",
- "name": "roles/os6_lag/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6bbde0ade400e3853d1cb029590a055517431c66d43dcb535728f21047e9d1fa",
- "name": "roles/os6_lag/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_lag/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4afba3d2195b36af0f0cd3c324aed8e8f570b2244a0afc002322d79d05f266c3",
- "name": "roles/os6_lag/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "63134ab7c27b4cc5a64dd03bdc10211031e811667ae27906a90e745019f3f129",
- "name": "roles/os6_aaa/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_aaa/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_aaa/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cda0f026e47460f4983b4d10902206939991281f25fe1bedcc6333b91b4a7ce7",
- "name": "roles/os6_aaa/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8508ca8af9718ed4ba1d3a97ae63d42fd521a36a6a383aac5a21ea5de40cf70f",
- "name": "roles/os6_aaa/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "781463a6d4ea96eb12fff81ecfbc8c6be9621650cd6aa750619a1d47cb654094",
- "name": "roles/os6_aaa/templates/os6_aaa.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "809402b4bdb9cb3112d2285b6b1e9bfab2980e37f6472674c743db148f1c2800",
- "name": "roles/os6_aaa/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "af0d0c9fed4994986f076864f1f93dcb041ac3c79ad5d783aec69b3a7f584c42",
- "name": "roles/os6_aaa/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f0a6662d0137775c9eb175370cded387cf84225265add1ac11afacd60a25a67b",
- "name": "roles/os6_aaa/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "765a854c27d68838a34cb96a1b59269a0d2397e50a0986b81764ff28e3e93c06",
- "name": "roles/os6_aaa/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_aaa/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "39b3fb9d024aa2518897c83e7f63e455fe55e6d39ca07cb6f4347ebf01000972",
- "name": "roles/os6_aaa/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "93b3ecffbb3be43fa0c310ebddc61eb0b9ba75e23268463e3a6446ee202b5704",
- "name": "roles/os6_qos/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_qos/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbcaa96e775c2a68ff2cabdc67e4baeeae90964e2692159ce4472cf34b589df5",
- "name": "roles/os6_qos/tests/test.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "605557eaef5f8dfc2365202894cd9b0591b77a3b77ddc5ee1e7e56884c3241a4",
- "name": "roles/os6_qos/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
- "name": "roles/os6_qos/tests/inventory",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ca90cefe5596a51c88fd5f8a777a4f1dc9d27ba7128853f176ebf17e995d98ff",
- "name": "roles/os6_qos/templates/os6_qos.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "07585253cf6cb7bd222744d6ddfffe8078c0aa669e257c573df25b778f5d6c9f",
- "name": "roles/os6_qos/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bfb1de30e19eaaa33dfafac9b3cb45767ac089dc8511c1016b8d7d23c23ece25",
- "name": "roles/os6_qos/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4364ba19d60713e73830321f1d32a1cf2b5bf6e45af37951a0ee796e48aabb5c",
- "name": "roles/os6_qos/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5220e3c142d2f58f1c7d5525ac6f71c59226e82bd11b34155a5c943f41371849",
- "name": "roles/os6_qos/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_qos/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f2423f8dd4614917fced06db2c05495911c9f206ecd16d99325ba2d1c5f63a7c",
- "name": "roles/os6_qos/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2404b4a0ddae981fcccab3064ad96bc733e9b3698e885fc1cb26a3c10c4d6bdb",
- "name": "roles/os6_acl/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_acl/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_acl/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "92f7529157aba5313231d21c978e0058045c1ba805fa74aa667777b7801676ef",
- "name": "roles/os6_acl/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3f2697959dbccac3047eb04365c52dabb8bab254b671be0d370fa5fd6c5cac79",
- "name": "roles/os6_acl/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a25418525c08a11dd6bca320e044c2a9429af4bc5ecc7e3628bb96205022a937",
- "name": "roles/os6_acl/templates/os6_acl.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e499c9510aaa522477920a93e1d590047ceabeca8aef307b98e5a69ae4f92317",
- "name": "roles/os6_acl/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9cae65a8516d0400f6091fe2380a9a257876a08f85a39fa614dbe760a9c58fe9",
- "name": "roles/os6_acl/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9f00b8413b2410763c2cca2a948e286e3261d156361aa7c913fba3ce3da9def3",
- "name": "roles/os6_acl/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "abe3440632da83602afd2f067d74f6c78f0d8ceda20d7ec3a7a4a0a6efe80f0d",
- "name": "roles/os6_acl/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_acl/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4f178f2310a8c9d40983ede3ff18f38f8cc9cf29f7de9f42fb550ef909d8f22c",
- "name": "roles/os6_acl/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "23f4af4ab2764fab3a88e6d7390876d3804392d6a96323a4932af3cacb6acda5",
- "name": "roles/os6_bgp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_bgp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "11b4ce2f58f779973bac45a5ff5b0a86272f4e1d29cea5c8cd95d2cf194285e4",
- "name": "roles/os6_bgp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "37371f94256c61221fa5f52d2bdc9a7681d1950a81f94d890b9762722326eab2",
- "name": "roles/os6_bgp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "38cd90a98839739453a2b86bfd28367b513a5ef69fcb6a7b5190fcc2e8250e1f",
- "name": "roles/os6_bgp/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "978118306940cb92dd374cdf91197785baae190598e0e404ed484de7a2bea385",
- "name": "roles/os6_bgp/templates/os6_bgp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d310c5523a47ce3dc47ed3684ddbc0dfc27469942f8c6f4c20fe90fcdcb85610",
- "name": "roles/os6_bgp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3920731d533222b279bc7fde4b8947d7e47a4e6e834ed98d52a2b8149007779f",
- "name": "roles/os6_bgp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "09f866d5a0529aaaaba4bf0236e377b388369495fbd61fffee71f961adf6b0ff",
- "name": "roles/os6_bgp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9522bb923f414ccabbaf73341a226c4bc2a161b1570950bbc531a9437a277bb9",
- "name": "roles/os6_bgp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_bgp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7c97aa0e67a4888c3d31c094f0cee0a07d44c310f36e4ca2c0992f5c5838e249",
- "name": "roles/os6_bgp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "91c3c194de2106a16ee51da04a2cd57d95821298c23022aa4e856a2199763a1c",
- "name": "roles/os6_ntp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_ntp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "510cf11fd5cd5f601c8da4171355892d32289ec90c1ba3d4febad6dbe746f84f",
- "name": "roles/os6_ntp/tests/test.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c67f34a06350c4fe36d01a03d4c6c4132733715819fa568bbf7c47bab9bea937",
- "name": "roles/os6_ntp/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e02233819b1a09844410549191813f7cc7ba360f21298578f4ba1727a27d87fc",
- "name": "roles/os6_ntp/tests/inventory",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a1cc9d2eb86ba5b0d74edc8dbd4a29a1caba4b550597c77f4f87c59e71289edd",
- "name": "roles/os6_ntp/templates/os6_ntp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4e1453927e4f3f0da3d28299e7999c97d621f110acb5c17b964175173c5441b9",
- "name": "roles/os6_ntp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bbebd2d2fed07efe0555c8eb380aa715fd52089a694673bc6340d20346febbc5",
- "name": "roles/os6_ntp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b1d6519f386c6c11226abd13273303945beb6d1de8fd08ae5a1bea02084258d9",
- "name": "roles/os6_ntp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1cfaf6cb22e9e6f3b898f7ebcf805c79996b41555e4e247420c32989b2bec638",
- "name": "roles/os6_ntp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_ntp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5461ad3fd435dd6f5778ecb2b66e5c1a03e874bf17b20f0a1cdbf7f2b2ef88d7",
- "name": "roles/os6_ntp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "06dada9df09601459174f4beef9acab7363b43e75af2c7c6232cc622c8c7c6f6",
- "name": "roles/os6_interface/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os6_interface/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbb3fe682723f4ac4d306230cd68e39d75611294d3ed60848cd2b4777c201c2",
- "name": "roles/os6_interface/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8e0874badbc6d9b601696717109a02439c461a1b55fa8044f8af47beafdfda00",
- "name": "roles/os6_interface/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c0a8610a990c2048db010b007e7490257215a70722d9ffdaec80cb97c3b2d7b8",
- "name": "roles/os6_interface/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b41045b5a1642a1b9fdac94a89edf172d72828b6f32489ae0ef179d61d7d47c6",
- "name": "roles/os6_interface/templates/os6_interface.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d10b2645e5b9caf6f2eebdf1c2079bf9ab5031c6c78e5315769f39388261de98",
- "name": "roles/os6_interface/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b45d0f61c394fa77e7f1641b2fc686037d847658ab8a93cd232314ff76215c81",
- "name": "roles/os6_interface/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0671759da708abf9810f9f7e6aa1afc00f530f7e6596e422c848e4d15502c9d8",
- "name": "roles/os6_interface/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "63fd57470a2eefef819d902e96d71e49395a3d2a69e7d104ba10c42ccb475d21",
- "name": "roles/os6_interface/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os6_interface/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "654dfb8baff5953f105aabe76d62750ec3e75597cdfedd1adae1fa466e58e8f7",
- "name": "roles/os6_interface/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- }
- ],
- "format": 1
-} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/LICENSE b/ansible_collections/dellemc/os6/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/MANIFEST.json b/ansible_collections/dellemc/os6/MANIFEST.json
deleted file mode 100644
index c56cf40e2..000000000
--- a/ansible_collections/dellemc/os6/MANIFEST.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
- "collection_info": {
- "description": "Ansible Network Collection for Dell EMC OS6",
- "repository": "https://github.com/ansible-collections/dellemc.os6",
- "tags": [
- "dell",
- "dellemc",
- "os6",
- "emc",
- "networking"
- ],
- "dependencies": {
- "ansible.netcommon": ">=1.0.0"
- },
- "authors": [
- "Komal Patil <Komal_uttamrao_Patil@dell.com>",
- "Senthil Ganesan Ganesan <Senthil_Kumar_Ganesa@Dell.com>"
- ],
- "issues": "https://github.com/ansible-collections/dellemc.os6/issues",
- "name": "os6",
- "license": [],
- "documentation": "https://github.com/ansible-collections/dellemc.os6/tree/master/docs",
- "namespace": "dellemc",
- "version": "1.0.7",
- "readme": "README.md",
- "license_file": "LICENSE",
- "homepage": "https://github.com/ansible-collections/dellemc.os6"
- },
- "file_manifest_file": {
- "format": 1,
- "ftype": "file",
- "chksum_sha256": "ccb112e42f4caf7be04b0a0ac31e199bc114f01cc74cae9fc02aa5844b3ecd3e",
- "name": "FILES.json",
- "chksum_type": "sha256"
- },
- "format": 1
-} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/README.md b/ansible_collections/dellemc/os6/README.md
deleted file mode 100644
index eaa413cff..000000000
--- a/ansible_collections/dellemc/os6/README.md
+++ /dev/null
@@ -1,98 +0,0 @@
-# Ansible Network Collection for Dell EMC OS6
-
-## Collection contents
-
-This collection includes the Ansible modules, plugins, and roles needed to provision and manage Dell EMC PowerSwitch platforms running Dell EMC OS6. Sample playbooks and documentation are also included to show how the collection can be used.
-
-### Collection core modules
-
-- **os6_command.py** — Run commands on devices running OS6
-
-- **os6_config.py** — Manage configuration on devices running OS6
-
-- **os6_facts.py** — Collect facts from devices running OS6
-
-### Collection roles
-
-These roles facilitate provisioning and administration of devices running Dell EMC OS6. There are over 15 roles available that provide comprehensive coverage of most OS6 resources, including os6_interface, os6_aaa, os6_bgp, and os6_xstp. The documentation for each role is at [OS6 roles](https://github.com/ansible-collections/dellemc.os6/blob/master/docs/roles.rst).
-
-### Sample use case playbooks
-
-This collection includes the following sample playbook that illustrates an end-to-end use case:
-
- - [iBGP](https://github.com/ansible-collections/dellemc.os6/blob/master/playbooks/ibgp/README.md) — Example playbook to configure iBGP between two routers
-
-## Installation
-
-Use this command to install the latest version of the OS6 collection from Ansible Galaxy:
-
-```
-ansible-galaxy collection install dellemc.os6
-
-```
-To install a specific version, a version range identifier must be specified. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0:
-
-```
-ansible-galaxy collection install 'dellemc.os6:>=1.0.0,<2.0.0'
-
-```
-
-## Version compatibility
-
-* Ansible version 2.10 or higher
-* Python 2.7 or higher and Python 3.5 or higher
-
-> **NOTE**: For Ansible versions lower than 2.10, use the legacy [dellos6 modules](https://ansible-dellos-docs.readthedocs.io/en/latest/modules.html#os6-modules) and [dellos roles](https://ansible-dellos-docs.readthedocs.io/en/latest/roles.html).
-
-## Sample playbook
-
-**playbook.yaml**
-
-```
-- hosts: os6_switches
- connection: network_cli
- collections:
- - dellemc.os6
- roles:
- - os6_vlan
-
-```
-
-**host_vars/os6_sw1.yaml**
-
-```
-hostname: os6_sw1
-# Parameters for connection type network_cli
-ansible_ssh_user: xxxx
-ansible_ssh_pass: xxxx
-ansible_become: yes
-ansible_become_method: enable
-ansible_network_os: dellemc.os6.os6
-
-# Create vlan100 and delete vlan888
-os6_vlan:
- vlan 100:
- name: "Blue"
- state: present
- vlan 888:
- state: absent
-
-
-```
-
-**inventory.yaml**
-
-```
-[os6_sw1]
-os6_sw1 ansible_host=100.94.51.40
-
-[os6_sw2]
-os6_sw2 ansible_host=100.94.52.38
-
-[os6_switches:children]
-os6_sw1
-os6_sw2
-
-```
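-
-With these files in place, the sample playbook can be run against the inventory (a minimal invocation; adjust file names and paths to match your own layout):
-
-```
-ansible-playbook -i inventory.yaml playbook.yaml
-
-```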
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst
deleted file mode 100644
index 93b6adcb5..000000000
--- a/ansible_collections/dellemc/os6/changelogs/CHANGELOG.rst
+++ /dev/null
@@ -1,98 +0,0 @@
-======================================================================
-Ansible Network Collection for Dell EMC OS6 Release Notes
-======================================================================
-
-.. contents:: Topics
-
-v1.0.7
-======
-
-Release Summary
----------------
-
-- Fixed sanity error found during the sanity test of automation hub upload
-- os6 interface role readme updated
-
-v1.0.6
-======
-
-Bugfixes
----------------
-
-- module utils fix for exit handling in multilevel parent commands
-- config module fix to handle multiline banner
-- terminal plugin fix to handle error reported by management access lists
-
-v1.0.5
-======
-
-Bugfixes
----------------
-
-- config module fix to handle issues faced while parsing running config and fixing idempotency issue for banner config
-- command module change to keep similar changes across all dell networking OSs
-- terminal plugin fix to send "terminal length 0" command
-
-v1.0.4
-======
-
-Bugfixes
----------------
-
-- Fix issue in using list of strings for `commands` argument for `os6_command` module
-- Fix issue in using "os6_facts" module for non-legacy n-series platofrms
-
-v1.0.3
-======
-
-Release Summary
----------------
-
-Added bug fixes for bugs found during System Test.
-
-v1.0.2
-======
-
-Release Summary
----------------
-
-Added changelogs.
-
-v1.0.1
-======
-
-Release Summary
----------------
-
-Updated documentation review comments.
-
-v1.0.0
-======
-
-New Modules
------------
-
-- os6_command - Run commands on devices running Dell EMC os6.
-- os6_config - Manage configuration on devices running os6.
-- os6_facts - Collect facts from devices running os6.
-
-New Roles
----------
-
-- os6_aaa - Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server.
-- os6_acl - Facilitates the configuration of Access Control lists.
-- os6_bgp - Facilitates the configuration of border gateway protocol (BGP) attributes.
-- os6_interface - Facilitates the configuration of interface attributes.
-- os6_lag - Facilitates the configuration of link aggregation group (LAG) attributes.
-- os6_lldp - Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level.
-- os6_logging - Facilitates the configuration of global logging attributes and logging servers.
-- os6_ntp - Facilitates the configuration of network time protocol (NTP) attributes.
-- os6_qos - Facilitates the configuration of quality of service attributes including policy-map and class-map.
-- os6_snmp - Facilitates the configuration of global SNMP attributes.
-- os6_system - Facilitates the configuration of hostname and hashing algorithm.
-- os6_users - Facilitates the configuration of global system user attributes.
-- os6_vlan - Facilitates the configuration of virtual LAN (VLAN) attributes.
-- os6_vrrp - Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes.
-- os6_xstp - Facilitates the configuration of xSTP attributes.
-
-\(c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
diff --git a/ansible_collections/dellemc/os6/changelogs/changelog.yaml b/ansible_collections/dellemc/os6/changelogs/changelog.yaml
deleted file mode 100644
index 8b9f34564..000000000
--- a/ansible_collections/dellemc/os6/changelogs/changelog.yaml
+++ /dev/null
@@ -1,112 +0,0 @@
-ancestor: null
-releases:
- 1.0.0:
- modules:
- - description: Run commands on devices running Dell EMC os6.
- name: os6_command
- namespace: ''
- - description: Manage configuration on devices running os6.
- name: os6_config
- namespace: ''
- - description: Collect facts from devices running os6.
- name: os6_facts
- namespace: ''
- roles:
- - description: Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server.
- name: os6_aaa
- namespace: ''
- - description: Facilitates the configuration of Access Control lists.
- name: os6_acl
- namespace: ''
- - description: Facilitates the configuration of border gateway protocol (BGP) attributes.
- name: os6_bgp
- namespace: ''
- - description: Facilitates the configuration of interface attributes.
- name: os6_interface
- namespace: ''
- - description: Facilitates the configuration of link aggregation group (LAG) attributes.
- name: os6_lag
- namespace: ''
- - description: Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level.
- name: os6_lldp
- namespace: ''
- - description: Facilitates the configuration of global logging attributes and logging servers.
- name: os6_logging
- namespace: ''
- - description: Facilitates the configuration of network time protocol (NTP) attributes.
- name: os6_ntp
- namespace: ''
- - description: Facilitates the configuration of quality of service attributes including policy-map and class-map.
- name: os6_qos
- namespace: ''
- - description: Facilitates the configuration of global SNMP attributes.
- name: os6_snmp
- namespace: ''
- - description: Facilitates the configuration of hostname and hashing algorithm.
- name: os6_system
- namespace: ''
- - description: Facilitates the configuration of global system user attributes.
- name: os6_users
- namespace: ''
- - description: Facilitates the configuration of virtual LAN (VLAN) attributes.
- name: os6_vlan
- namespace: ''
- - description: Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes.
- name: os6_vrrp
- namespace: ''
- - description: Facilitates the configuration of xSTP attributes.
- name: os6_xstp
- namespace: ''
- release_date: '2020-07-31'
- 1.0.1:
- changes:
- release_summary: Updated documentation review comments
- fragments:
- - 1.0.1.yaml
- release_date: '2020-08-04'
- 1.0.2:
- changes:
- release_summary: Added changelogs.
- fragments:
- - 1.0.2.yaml
- release_date: '2020-08-18'
- 1.0.3:
- changes:
- release_summary: Added bug fixes for bugs found during System Test.
- fragments:
- - 1.0.3.yaml
- release_date: '2020-10-09'
- 1.0.4:
- changes:
- bugfixes:
- - Fix issue in using list of strings for `commands` argument for `os6_command` module
- - Fix issue in using "os6_facts" module for non-legacy n-series platofrms
- fragments:
- - 1.0.4.yaml
- release_date: '2020-11-17'
- 1.0.5:
- changes:
- bugfixes:
- - config module fix to handle issues faced while parsing running config and fixing idempotency issue for banner config
- - command module change to keep similar changes across all dell networking OSs
- - terminal plugin fix to send "terminal length 0" command
- fragments:
- - 1.0.5.yaml
- release_date: '2020-12-09'
- 1.0.6:
- changes:
- bugfixes:
- - module utils fix for exit handling in multilevel parent commands
- - config module fix to handle multiline banner
- - terminal plugin fix to handle error reported by management access lists
- fragments:
- - 1.0.6.yaml
- release_date: '2020-12-18'
- 1.0.7:
- changes:
- release_summary:
-      - Fixed sanity error found during the sanity test of automation hub upload
- - os6 interface role readme updated for proper syntax of ip address and mask
- fragments:
- - 1.0.7.yaml
- release_date: '2021-02-15' \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/changelogs/config.yaml b/ansible_collections/dellemc/os6/changelogs/config.yaml
deleted file mode 100644
index f1a020eba..000000000
--- a/ansible_collections/dellemc/os6/changelogs/config.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-changelog_filename_template: CHANGELOG.rst
-changelog_filename_version_depth: 0
-changes_file: changelog.yaml
-changes_format: combined
-keep_fragments: false
-mention_ancestor: true
-new_plugins_after_name: removed_features
-notesdir: fragments
-prelude_section_name: release_summary
-prelude_section_title: Release Summary
-flatmap: true
-sections:
-- - major_changes
- - Major Changes
-- - minor_changes
- - Minor Changes
-- - breaking_changes
- - Breaking Changes / Porting Guide
-- - deprecated_features
- - Deprecated Features
-- - removed_features
- - Removed Features (previously deprecated)
-- - security_fixes
- - Security Fixes
-- - bugfixes
- - Bugfixes
-- - known_issues
- - Known Issues
-title: Ansible Network Collection for Dell OS6
-trivial_section_name: trivial
diff --git a/ansible_collections/dellemc/os6/docs/os6_aaa.md b/ansible_collections/dellemc/os6/docs/os6_aaa.md
deleted file mode 100644
index b3d5783ab..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_aaa.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_aaa/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_acl.md b/ansible_collections/dellemc/os6/docs/os6_acl.md
deleted file mode 100644
index 6224f56a1..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_acl.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_acl/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_bgp.md b/ansible_collections/dellemc/os6/docs/os6_bgp.md
deleted file mode 100644
index 376f0e03d..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_bgp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_bgp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_interface.md b/ansible_collections/dellemc/os6/docs/os6_interface.md
deleted file mode 100644
index 6b8009608..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_interface.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_interface/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_lag.md b/ansible_collections/dellemc/os6/docs/os6_lag.md
deleted file mode 100644
index 623771fa8..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_lag.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_lag/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_lldp.md b/ansible_collections/dellemc/os6/docs/os6_lldp.md
deleted file mode 100644
index 3f367237a..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_lldp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_lldp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_logging.md b/ansible_collections/dellemc/os6/docs/os6_logging.md
deleted file mode 100644
index f9888545b..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_logging.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_logging/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_ntp.md b/ansible_collections/dellemc/os6/docs/os6_ntp.md
deleted file mode 100644
index dee2f2b2c..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_ntp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_ntp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_qos.md b/ansible_collections/dellemc/os6/docs/os6_qos.md
deleted file mode 100644
index d7dc1fb2e..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_qos.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_qos/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_snmp.md b/ansible_collections/dellemc/os6/docs/os6_snmp.md
deleted file mode 100644
index dd6f97f3e..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_snmp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_snmp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_system.md b/ansible_collections/dellemc/os6/docs/os6_system.md
deleted file mode 100644
index 64a2c5ec9..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_system.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_system/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_users.md b/ansible_collections/dellemc/os6/docs/os6_users.md
deleted file mode 100644
index 2b05877b0..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_users.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_users/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_vlan.md b/ansible_collections/dellemc/os6/docs/os6_vlan.md
deleted file mode 100644
index c28686f8a..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_vlan.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_vlan/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_vrrp.md b/ansible_collections/dellemc/os6/docs/os6_vrrp.md
deleted file mode 100644
index 95a1dc26b..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_vrrp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_vrrp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/os6_xstp.md b/ansible_collections/dellemc/os6/docs/os6_xstp.md
deleted file mode 100644
index 9f0ff54fb..000000000
--- a/ansible_collections/dellemc/os6/docs/os6_xstp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os6_xstp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/docs/roles.rst b/ansible_collections/dellemc/os6/docs/roles.rst
deleted file mode 100644
index 2aab9b845..000000000
--- a/ansible_collections/dellemc/os6/docs/roles.rst
+++ /dev/null
@@ -1,94 +0,0 @@
-##############################################################
-Ansible Network Collection Roles for Dell EMC OS6
-##############################################################
-
-The roles facilitate provisioning of Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-AAA role
-********
-
-The `os6_aaa <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_aaa/README.md>`_ role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of TACACS and RADIUS servers, and AAA.
-
-
-ACL role
-********
-
-The `os6_acl <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_acl/README.md>`_ role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to line terminals.
-
-
-BGP role
-********
-
-The `os6_bgp <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_bgp/README.md>`_ role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path.
-
-
-Interface role
-**************
-
-The `os6_interface <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_interface/README.md>`_ role facilitates the configuration of interface attributes. It supports the configuration of administrative state, description, MTU, IP address, IP helper, and port mode.
-
-
-LAG role
-********
-
-The `os6_lag <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_lag/README.md>`_ role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of type (static/dynamic), hash scheme, and minimum required link.
-
-
-LLDP role
-*********
-
-The `os6_lldp <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_lldp/README.md>`_ role facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level. This role supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at global and interface levels.
-
-
-Logging role
-************
-
-The `os6_logging <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_logging/README.md>`_ role facilitates the configuration of global logging attributes, and supports the configuration of logging servers.
-
-
-NTP role
-********
-
-The `os6_ntp <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_ntp/README.md>`_ role facilitates the configuration of network time protocol (NTP) attributes.
-
-
-QoS role
-********
-
-The `os6_qos <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_qos/README.md>`_ role facilitates the configuration of quality of service (QoS) attributes including policy-map and class-map.
-
-
-SNMP role
-*********
-
-The `os6_snmp <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_snmp/README.md>`_ role facilitates the configuration of global simple network management protocol (SNMP) attributes. It supports the configuration of SNMP server attributes like users, group, community, location, and traps.
-
-
-System role
-***********
-
-The `os6_system <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_system/README.md>`_ role facilitates the configuration of global system attributes. This role specifically enables configuration of hostname and enable password for OS6.
-
-
-Users role
-**********
-
-The `os6_users <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_users/README.md>`_ role facilitates the configuration of global system user attributes. This role supports the configuration of CLI users.
-
-
-VLAN role
-*********
-
-The `os6_vlan <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_vlan/README.md>`_ role facilitates configuring virtual LAN (VLAN) attributes. This role supports the creation and deletion of a VLAN and its member ports.
-
-
-VRRP role
-*********
-
-The `os6_vrrp <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_vrrp/README.md>`_ role facilitates configuration of virtual router redundancy protocol (VRRP) attributes. This role supports the creation of VRRP groups for interfaces, and setting the VRRP group attributes.
-
-
-xSTP role
-*********
-
-The `os6_xstp <https://github.com/ansible-collections/dellemc.os6/blob/master/roles/os6_xstp/README.md>`_ role facilitates the configuration of extended spanning-tree protocol (xSTP) attributes. This role supports multiple versions of spanning-tree protocol (STP), rapid spanning-tree (RSTP) protocol, multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). This role supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LAN (VLAN) to instances. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/meta/runtime.yml b/ansible_collections/dellemc/os6/meta/runtime.yml
deleted file mode 100644
index f31652cca..000000000
--- a/ansible_collections/dellemc/os6/meta/runtime.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-plugin_routing:
- action:
- os6_config:
- redirect: dellemc.os6.os6
- os6_command:
- redirect: dellemc.os6.os6
- os6_facts:
- redirect: dellemc.os6.os6
diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/README.md b/ansible_collections/dellemc/os6/playbooks/ibgp/README.md
deleted file mode 100644
index ab3e4e404..000000000
--- a/ansible_collections/dellemc/os6/playbooks/ibgp/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Provision OS6 Switch Stack using the Ansible Network Collection for Dell EMC OS6
-
-This example describes how to use Ansible to configure Dell EMC PowerSwitch platforms running Dell EMC OS6. The sample topology contains two OS6 switches connected to each other. This example configures iBGP between the two routers using the same AS.
-
-## Create a simple Ansible playbook
-
-**1**. Create an inventory file called `inventory.yaml`, then specify the IP addresses of the devices in the inventory.
-
-**2**. Create a group variable file called `group_vars/all`, then define credentials common to all hosts.
-
-**3**. Create a host variable file called `host_vars/switch1.yaml`, then define credentials and the hostname for switch1.
-
-**4**. Create a host variable file called `host_vars/switch2.yaml`, then define credentials and the hostname for switch2.
-
-**5**. Create a playbook called `os6switch.yaml`.
-
-**6**. Run the playbook.
-
- ansible-playbook -i inventory.yaml os6switch.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all b/ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all
deleted file mode 100644
index a24c1a2e8..000000000
--- a/ansible_collections/dellemc/os6/playbooks/ibgp/group_vars/all
+++ /dev/null
@@ -1,4 +0,0 @@
-ansible_ssh_user: xxxx
-ansible_ssh_pass: xxxx
-ansible_network_os: dellemc.os6.os6
-build_dir: ../tmp/tmp_os6
diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml
deleted file mode 100644
index 8e3a1b365..000000000
--- a/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch1.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-hostname: switch1
-ansible_ssh_user: xxxx
-ansible_ssh_pass: xxxx
-ansible_become: yes
-ansible_become_method: enable
-ansible_network_os: dellemc.os6.os6
-switch1_hostname: "switch1"
-
-os6_system:
- hostname: "{{ switch1_hostname }}"
- #enable_password: xxxx
- mtu: 2000
-
-os6_vlan:
- vlan 20:
- default_vlan: False
- name: "os6vlan"
- untagged_members:
- - port: Te7/0/1
- state: present
- state: present
-
-os6_interface:
- Te7/0/1:
- desc: "bgp"
- admin: up
- portmode: access
-
- vlan 20:
- ip_type_dynamic: False
- ip_and_mask: 20.20.20.3 255.255.255.0
-
-os6_bgp:
- asn: 4545
- router_id: 20.20.20.3
- maxpath_ibgp: 3
- ipv4_network:
- - address: 20.20.20.3 255.255.255.255
- state: present
- neighbor:
- - type: ipv4
- ip: 20.20.20.2
- remote_asn: 4545
- timer: 5 10
- default_originate: True
- state: present
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml
deleted file mode 100644
index 0416fab8f..000000000
--- a/ansible_collections/dellemc/os6/playbooks/ibgp/host_vars/switch2.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
-hostname: switch2
-ansible_ssh_user: xxxx
-ansible_ssh_pass: xxxx
-ansible_become: yes
-ansible_become_method: enable
-ansible_network_os: dellemc.os6.os6
-switch2_hostname: "switch2"
-
-os6_system:
- hostname: "{{ switch2_hostname }}"
- #enable_password: xxxx
- mtu: 2000
-
-os6_vlan:
- vlan 20:
- default_vlan: False
- name: "os6vlan"
- untagged_members:
- - port: Te1/0/48
- state: present
- state: present
-
-os6_interface:
- Te1/0/48:
- desc: "bgp"
- admin: up
- portmode: access
-
- vlan 20:
- ip_type_dynamic: False
- ip_and_mask: 20.20.20.2 255.255.255.0
-
-os6_bgp:
- asn: 4545
- router_id: 20.20.20.2
- maxpath_ibgp: 3
- ipv4_network:
- - address: 20.20.20.2 255.255.255.255
- state: present
- neighbor:
- - type: ipv4
- ip: 20.20.20.3
- remote_asn: 4545
- timer: 5 10
- default_originate: True
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml
deleted file mode 100644
index 1cda8da38..000000000
--- a/ansible_collections/dellemc/os6/playbooks/ibgp/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40
-switch2 ansible_host=100.94.52.38
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml b/ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml
deleted file mode 100644
index 59eb0dc80..000000000
--- a/ansible_collections/dellemc/os6/playbooks/ibgp/os6switch.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- hosts: os6switches
- gather_facts: no
- become: yes
- become_method: enable
- connection: network_cli
- collections:
- - dellemc.os6
- roles:
- - os6_system
- - os6_vlan
- - os6_interface
- - os6_bgp
diff --git a/ansible_collections/dellemc/os6/plugins/action/os6.py b/ansible_collections/dellemc/os6/plugins/action/os6.py
deleted file mode 100644
index a7f16df37..000000000
--- a/ansible_collections/dellemc/os6/plugins/action/os6.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import sys
-import copy
-
-from ansible import constants as C
-from ansible.module_utils._text import to_text
-from ansible.module_utils.connection import Connection
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_provider_spec
-from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
-from ansible.utils.display import Display
-
-display = Display()
-
-
-class ActionModule(ActionNetworkModule):
-
- def run(self, tmp=None, task_vars=None):
- del tmp # tmp no longer has any effect
-
- module_name = self._task.action.split('.')[-1]
- self._config_module = True if module_name == 'os6_config' else False
- socket_path = None
- persistent_connection = self._play_context.connection.split('.')[-1]
-
- if persistent_connection == 'network_cli':
- provider = self._task.args.get('provider', {})
- if any(provider.values()):
- display.warning('provider is unnecessary when using network_cli and will be ignored')
- del self._task.args['provider']
- elif self._play_context.connection == 'local':
- provider = load_provider(os6_provider_spec, self._task.args)
- pc = copy.deepcopy(self._play_context)
- pc.connection = 'network_cli'
- pc.network_os = 'dellemc.os6.os6'
- pc.remote_addr = provider['host'] or self._play_context.remote_addr
- pc.port = int(provider['port'] or self._play_context.port or 22)
- pc.remote_user = provider['username'] or self._play_context.connection_user
- pc.password = provider['password'] or self._play_context.password
- pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
- command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
- pc.become = provider['authorize'] or False
- if pc.become:
- pc.become_method = 'enable'
- pc.become_pass = provider['auth_pass']
-
- display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
- connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
- connection.set_options(direct={'persistent_command_timeout': command_timeout})
-
- socket_path = connection.run()
- display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
- if not socket_path:
- return {'failed': True,
- 'msg': 'unable to open shell. Please see: ' +
- 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
-
- task_vars['ansible_socket'] = socket_path
-
- # make sure we are in the right cli context which should be
-        # enable mode and not config mode
- if socket_path is None:
- socket_path = self._connection.socket_path
-
- conn = Connection(socket_path)
- out = conn.get_prompt()
- while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
- display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
- conn.send_command('exit')
- out = conn.get_prompt()
-
- result = super(ActionModule, self).run(task_vars=task_vars)
- return result
diff --git a/ansible_collections/dellemc/os6/plugins/cliconf/os6.py b/ansible_collections/dellemc/os6/plugins/cliconf/os6.py
deleted file mode 100644
index 9c9290da7..000000000
--- a/ansible_collections/dellemc/os6/plugins/cliconf/os6.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-cliconf: os6
-short_description: Use os6 cliconf to run commands on Dell EMC OS6 platforms
-description:
-  - This os6 plugin provides low-level abstraction APIs for
-    sending and receiving CLI commands from Dell EMC OS6 network devices.
-"""
-
-import re
-import json
-
-from itertools import chain
-
-from ansible.module_utils._text import to_bytes, to_text
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
-from ansible.plugins.cliconf import CliconfBase, enable_mode
-
-
-class Cliconf(CliconfBase):
-
- def get_device_info(self):
- device_info = {}
-
- device_info['network_os'] = 'dellemc.os6.os6'
- reply = self.get('show version')
- data = to_text(reply, errors='surrogate_or_strict').strip()
-
- match = re.search(r'Software Version (\S+)', data)
- if match:
- device_info['network_os_version'] = match.group(1)
-
- match = re.search(r'System Type (\S+)', data, re.M)
- if match:
- device_info['network_os_model'] = match.group(1)
-
- reply = self.get('show running-config | grep hostname')
- data = to_text(reply, errors='surrogate_or_strict').strip()
- match = re.search(r'^hostname (.+)', data, re.M)
- if match:
- device_info['network_os_hostname'] = match.group(1)
-
- return device_info
-
- @enable_mode
- def get_config(self, source='running', format='text', flags=None):
- if source not in ('running', 'startup'):
- return self.invalid_params("fetching configuration from %s is not supported" % source)
-        if source == 'running':
-            cmd = 'show running-config'
-        else:
-            cmd = 'show startup-config'
- return self.send_command(cmd)
-
- @enable_mode
- def edit_config(self, command):
- for cmd in chain(['configure terminal'], to_list(command), ['end']):
- self.send_command(cmd)
-
- def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
- return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
-
- def get_capabilities(self):
- result = super(Cliconf, self).get_capabilities()
- return json.dumps(result)
diff --git a/ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py b/ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py
deleted file mode 100644
index 32daca592..000000000
--- a/ansible_collections/dellemc/os6/plugins/doc_fragments/os6.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Peter Sprygada <psprygada@ansible.com>
-# Copyright: (c) 2020, Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = r'''
-options:
- provider:
- description:
- - A dict object containing connection details.
- type: dict
- suboptions:
- host:
- description:
- - Specifies the DNS host name or address for connecting to the remote
- device over the specified transport. The value of host is used as
- the destination address for the transport.
- type: str
- port:
- description:
- - Specifies the port to use when building the connection to the remote
- device.
- type: int
- username:
- description:
- - User to authenticate the SSH session to the remote device. If the
- value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_USERNAME) will be used instead.
- type: str
- password:
- description:
- - Password to authenticate the SSH session to the remote device. If the
- value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_PASSWORD) will be used instead.
- type: str
- ssh_keyfile:
- description:
- - Path to an ssh key used to authenticate the SSH session to the remote
- device. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
- type: path
- timeout:
- description:
- - Specifies idle timeout (in seconds) for the connection. Useful if the
- console freezes before continuing. For example when saving
- configurations.
- type: int
- authorize:
- description:
- - Instructs the module to enter privileged mode on the remote device before
- sending any commands. If not specified, the device will attempt to execute
- all commands in non-privileged mode. If the value is not specified in the
- task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be
- used instead.
- type: bool
- auth_pass:
- description:
- - Specifies the password to use if required to enter privileged mode on the
- remote device. If I(authorize) is false, then this argument does nothing.
- If the value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_AUTH_PASS) will be used instead.
- type: str
-notes:
- - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking).
-'''
diff --git a/ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py b/ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py
deleted file mode 100644
index aeea2cfc0..000000000
--- a/ansible_collections/dellemc/os6/plugins/module_utils/network/os6.py
+++ /dev/null
@@ -1,278 +0,0 @@
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# (c) 2020 Red Hat, Inc
-#
-# Copyright (c) 2020 Dell Inc.
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-import re
-import json
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine, ignore_line
-from ansible.module_utils._text import to_bytes
-from ansible.module_utils.connection import Connection, ConnectionError
-
-_DEVICE_CONFIGS = {}
-
-WARNING_PROMPTS_RE = [
- r"[\r\n]?\[confirm yes/no\]:\s?$",
- r"[\r\n]?\[y/n\]:\s?$",
- r"[\r\n]?\[yes/no\]:\s?$"
-]
-
-os6_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
- 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
- 'timeout': dict(type='int'),
-}
-os6_argument_spec = {
- 'provider': dict(type='dict', options=os6_provider_spec),
-}
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_connection(module):
- if hasattr(module, "_os6_connection"):
- return module._os6_connection
-
- capabilities = get_capabilities(module)
- network_api = capabilities.get("network_api")
- if network_api in ["cliconf"]:
- module._os6_connection = Connection(module._socket_path)
- else:
- module.fail_json(msg="Invalid connection type %s" % network_api)
-
- return module._os6_connection
-
-
-def get_capabilities(module):
- if hasattr(module, "_os6_capabilities"):
- return module._os6_capabilities
- try:
- capabilities = Connection(module._socket_path).get_capabilities()
- except ConnectionError as exc:
- module.fail_json(msg=to_text(exc, errors="surrogate_then_replace"))
- module._os6_capabilities = json.loads(capabilities)
- return module._os6_capabilities
-
-
-def get_config(module, flags=None):
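-    # results are cached per command in the module-level _DEVICE_CONFIGS dict,
-    # so the running config is only fetched once per module run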
- flags = [] if flags is None else flags
-
- cmd = 'show running-config'
-    cmd += ' ' + ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- rc, out, err = exec_command(module, cmd)
- if rc != 0:
- module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
- cfg = to_text(out, errors='surrogate_or_strict').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict(),
- 'sendonly': dict(),
- 'newline': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- commands = to_commands(module, to_list(commands))
- for cmd in commands:
- cmd = module.jsonify(cmd)
- rc, out, err = exec_command(module, cmd)
- if check_rc and rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
- responses.append(to_text(out, errors='surrogate_or_strict'))
- return responses
-
-
-def load_config(module, commands):
- rc, out, err = exec_command(module, 'configure terminal')
- if rc != 0:
- module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
-
- for command in to_list(commands):
- if command == 'end':
- continue
- rc, out, err = exec_command(module, command)
- if rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
- exec_command(module, 'end')
-
-
-def get_sublevel_config(running_config, module):
- contents = list()
- current_config_contents = list()
- sublevel_config = NetworkConfig(indent=0)
- obj = running_config.get_object(module.params['parents'])
- if obj:
- contents = obj._children
- for c in contents:
- if isinstance(c, ConfigLine):
- current_config_contents.append(c.raw)
- sublevel_config.add(current_config_contents, module.params['parents'])
- return sublevel_config
-
-
-def os6_parse(lines, indent=None, comment_tokens=None):
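-    # commands that open a sub-level (block) context in the OS6 running-config;
-    # lines that follow them are treated as children until a matching 'exit'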
- sublevel_cmds = [
- re.compile(r'^vlan\s[\d,-]+.*$'),
- re.compile(r'^stack.*$'),
- re.compile(r'^interface.*$'),
- re.compile(r'datacenter-bridging.*$'),
- re.compile(r'line (console|telnet|ssh).*$'),
-        re.compile(r'ip ssh (?!server).*$'),
- re.compile(r'ip dhcp pool.*$'),
- re.compile(r'ip vrf (?!forwarding).*$'),
- re.compile(r'(ip|mac|management|arp) access-list.*$'),
- re.compile(r'ipv6 (dhcp pool|router).*$'),
- re.compile(r'mail-server.*$'),
- re.compile(r'vpc domain.*$'),
- re.compile(r'router\s.*$'),
- re.compile(r'route-map.*$'),
- re.compile(r'policy-map.*$'),
- re.compile(r'class-map match-all.*$'),
- re.compile(r'captive-portal.*$'),
- re.compile(r'admin-profile.*$'),
- re.compile(r'link-dependency group.*$'),
- re.compile(r'openflow.*$'),
- re.compile(r'support-assist.*$'),
- re.compile(r'template.*$'),
- re.compile(r'address-family.*$'),
- re.compile(r'spanning-tree mst configuration.*$'),
- re.compile(r'logging (?!.*(cli-command|buffered|console|email|facility|file|monitor|protocol|snmp|source-interface|traps|web-session)).*$'),
- re.compile(r'radius server (?!.*(attribute|dead-criteria|deadtime|timeout|key|load-balance|retransmit|source-interface|source-ip|vsa)).*$'),
- re.compile(r'(tacacs-server) host.*$')]
-
- childline = re.compile(r'^exit\s*$')
- config = list()
- parent = list()
- children = []
- parent_match = False
- for line in str(lines).split('\n'):
- line = str(line).strip()
- text = str(re.sub(r'([{};])', '', line)).strip()
- cfg = ConfigLine(text)
- cfg.raw = line
- if not text or ignore_line(text, comment_tokens):
- parent = list()
- children = []
- continue
-
- parent_match = False
- # handle sublevel parent
- for pr in sublevel_cmds:
- if pr.match(line):
- if len(parent) != 0:
- cfg._parents.extend(parent)
- parent.append(cfg)
- config.append(cfg)
- if children:
- children.insert(len(parent) - 1, [])
- children[len(parent) - 2].append(cfg)
- if not children and len(parent) > 1:
- configlist = [cfg]
- children.append(configlist)
- children.insert(len(parent) - 1, [])
- parent_match = True
- continue
- # handle exit
- if childline.match(line):
- if children:
- parent[len(children) - 1]._children.extend(children[len(children) - 1])
- if len(children) > 1:
- parent[len(children) - 2]._children.extend(parent[len(children) - 1]._children)
- cfg._parents.extend(parent)
- children.pop()
- parent.pop()
- if not children:
- children = list()
- if parent:
- cfg._parents.extend(parent)
- parent = list()
- config.append(cfg)
- # handle sublevel children
- elif parent_match is False and len(parent) > 0:
- if not children:
- cfglist = [cfg]
- children.append(cfglist)
- else:
- children[len(parent) - 1].append(cfg)
- cfg._parents.extend(parent)
- config.append(cfg)
- # handle global commands
- elif not parent:
- config.append(cfg)
- return config
-
-
-class NetworkConfig(NetworkConfig):
-
- def load(self, contents):
- self._items = os6_parse(contents, self._indent)
-
- def _diff_line(self, other, path=None):
- diff = list()
- for item in self.items:
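-            # keep 'exit' markers only when the block they close already contributes lines to the diff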
- if str(item) == "exit":
- for diff_item in diff:
- if diff_item._parents:
- if item._parents == diff_item._parents:
- diff.append(item)
- break
- elif [e for e in item._parents if e == diff_item]:
- diff.append(item)
- break
- elif item not in other:
- diff.append(item)
- return diff
diff --git a/ansible_collections/dellemc/os6/plugins/modules/os6_command.py b/ansible_collections/dellemc/os6/plugins/modules/os6_command.py
deleted file mode 100644
index 99df00144..000000000
--- a/ansible_collections/dellemc/os6/plugins/modules/os6_command.py
+++ /dev/null
@@ -1,225 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Peter Sprygada <psprygada@ansible.com>
-# Copyright: (c) 2020, Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = """
----
-module: os6_command
-author: "Abirami N (@abirami-n)"
-short_description: Run commands on devices running Dell EMC OS6
-description:
-  - Sends arbitrary commands to an OS6 device and returns the results
- read from the device. This module includes an
- argument that will cause the module to wait for a specific condition
- before returning or timing out if the condition is not met.
- - This module does not support running commands in configuration mode.
-    Please use M(dellemc.os6.os6_config) to configure OS6 devices.
-extends_documentation_fragment: dellemc.os6.os6
-options:
- commands:
- description:
- - List of commands to send to the remote os6 device over the
- configured provider. The resulting output from the command
- is returned. If the I(wait_for) argument is provided, the
-        module does not return until the condition is satisfied or
- the number of retries has expired.
- type: list
- required: true
- wait_for:
- description:
- - List of conditions to evaluate against the output of the
- command. The task will wait for each condition to be true
- before moving forward. If the conditional is not true
- within the configured number of I(retries), the task fails.
- See examples.
- type: list
- elements: str
- match:
- description:
- - The I(match) argument is used in conjunction with the
- I(wait_for) argument to specify the match policy. Valid
- values are C(all) or C(any). If the value is set to C(all)
- then all conditionals in the wait_for must be satisfied. If
- the value is set to C(any) then only one of the values must be
- satisfied.
- type: str
- default: all
- choices: [ all, any ]
- retries:
- description:
- - Specifies the number of retries a command should be tried
- before it is considered failed. The command is run on the
- target device every retry and evaluated against the
- I(wait_for) conditions.
- type: int
- default: 10
- interval:
- description:
- - Configures the interval in seconds to wait between retries
- of the command. If the command does not pass the specified
- conditions, the interval indicates how long to wait before
- trying the command again.
- type: int
- default: 1
-"""
-
-EXAMPLES = """
-tasks:
- - name: run show version on remote devices
- os6_command:
- commands: show version
- - name: run show version and check to see if output contains Dell
- os6_command:
- commands: show version
- wait_for: result[0] contains Dell
- - name: run multiple commands on remote nodes
- os6_command:
- commands:
- - show version
- - show interfaces
- - name: run multiple commands and evaluate the output
- os6_command:
- commands:
- - show version
- - show interfaces
- wait_for:
- - result[0] contains Dell
- - result[1] contains Access
-"""
-
-RETURN = """
-stdout:
- description: The set of responses from the commands
- returned: always apart from low level errors (such as action plugin)
- type: list
- sample: ['...', '...']
-stdout_lines:
- description: The value of stdout split into a list
- returned: always apart from low level errors (such as action plugin)
- type: list
- sample: [['...', '...'], ['...'], ['...']]
-failed_conditions:
- description: The list of conditionals that have failed
- returned: failed
- type: list
- sample: ['...', '...']
-warnings:
- description: The list of warnings (if any) generated by module based on arguments
- returned: always
- type: list
- sample: ['...', '...']
-"""
-
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import run_commands
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_argument_spec, check_args
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
-from ansible.module_utils.six import string_types
-
-
-def to_lines(stdout):
- for item in stdout:
- if isinstance(item, string_types):
- item = str(item).split('\n')
- yield item
-
-
-def parse_commands(module, warnings):
- command = ComplexList(dict(
- command=dict(key=True),
- prompt=dict(),
- answer=dict()
- ), module)
- commands = command(module.params['commands'])
- for index, item in enumerate(commands):
- if module.check_mode and not item['command'].startswith('show'):
- warnings.append(
- 'only show commands are supported when using check mode, not '
- 'executing `%s`' % item['command']
- )
- elif item['command'].startswith('conf'):
- module.fail_json(
- msg='os6_command does not support running config mode '
- 'commands. Please use os6_config instead'
- )
- return commands
-
-
-def main():
- """main entry point for module execution
- """
- argument_spec = dict(
- # { command: <str>, prompt: <str>, response: <str> }
- commands=dict(type='list', required=True),
-
- wait_for=dict(type='list', elements='str'),
- match=dict(default='all', choices=['all', 'any']),
-
- retries=dict(default=10, type='int'),
- interval=dict(default=1, type='int')
- )
-
- argument_spec.update(os6_argument_spec)
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- result = {'changed': False}
-
- warnings = list()
- check_args(module, warnings)
- commands = parse_commands(module, warnings)
- result['warnings'] = warnings
-
- wait_for = module.params['wait_for'] or list()
- conditionals = [Conditional(c) for c in wait_for]
-
- retries = module.params['retries']
- interval = module.params['interval']
- match = module.params['match']
-
- while retries > 0:
- responses = run_commands(module, commands)
-
- for item in list(conditionals):
- if item(responses):
- if match == 'any':
- conditionals = list()
- break
- conditionals.remove(item)
-
- if not conditionals:
- break
-
- time.sleep(interval)
- retries -= 1
-
- if conditionals:
- failed_conditions = [item.raw for item in conditionals]
- msg = 'One or more conditional statements have not been satisfied'
- module.fail_json(msg=msg, failed_conditions=failed_conditions)
-
- result.update({
- 'changed': False,
- 'stdout': responses,
- 'stdout_lines': list(to_lines(responses))
- })
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
diff --git a/ansible_collections/dellemc/os6/plugins/modules/os6_config.py b/ansible_collections/dellemc/os6/plugins/modules/os6_config.py
deleted file mode 100644
index b4321e9fd..000000000
--- a/ansible_collections/dellemc/os6/plugins/modules/os6_config.py
+++ /dev/null
@@ -1,410 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# Copyright (c) 2020 Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = """
----
-module: os6_config
-author: "Abirami N (@abirami-n)"
-short_description: Manage Dell EMC OS6 configuration sections
-description:
- - OS6 configurations use a simple block indent file syntax
- for segmenting configuration into sections. This module provides
- an implementation for working with OS6 configuration sections in
- a deterministic way.
-extends_documentation_fragment: dellemc.os6.os6
-options:
- lines:
- description:
- - The ordered set of commands that should be configured in the
- section. The commands must be the exact same commands as found
- in the device running-config. Be sure to note the configuration
- command syntax as some commands are automatically modified by the
- device config parser. This argument is mutually exclusive with I(src).
- type: list
- aliases: ['commands']
- parents:
- description:
- - The ordered set of parents that uniquely identify the section or hierarchy
- the commands should be checked against. If the parents argument
- is omitted, the commands are checked against the set of top
- level or global commands.
- type: list
- src:
- description:
- - Specifies the source path to the file that contains the configuration
- or configuration template to load. The path to the source file can
- either be the full path on the Ansible control host or a relative
- path from the playbook or role root directory. This argument is
- mutually exclusive with I(lines).
- type: path
- before:
- description:
- - The ordered set of commands to push on to the command stack if
- a change needs to be made. This allows the playbook designer
- the opportunity to perform configuration commands prior to pushing
- any changes without affecting how the set of commands are matched
- against the system.
- type: list
- after:
- description:
- - The ordered set of commands to append to the end of the command
- stack if a change needs to be made. Just like with I(before) this
- allows the playbook designer to append a set of commands to be
- executed after the command set.
- type: list
- match:
- description:
- - Instructs the module on the way to perform the matching of
- the set of commands against the current device config. If
- match is set to I(line), commands are matched line by line. If
- match is set to I(strict), command lines are matched with respect
- to position. If match is set to I(exact), command lines
- must be an equal match. Finally, if match is set to I(none), the
- module will not attempt to compare the source configuration with
- the running configuration on the remote device.
- type: str
- default: line
- choices: ['line', 'strict', 'exact', 'none']
- replace:
- description:
- - Instructs the module on the way to perform the configuration
- on the device. If the replace argument is set to I(line) then
- the modified lines are pushed to the device in configuration
- mode. If the replace argument is set to I(block) then the entire
- command block is pushed to the device in configuration mode if any
- line is not correct.
- type: str
- default: line
- choices: ['line', 'block']
- update:
- description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When you set this argument to
- I(merge), the configuration changes merge with the current
- device running configuration. When you set this argument to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
- type: str
- default: merge
- choices: ['merge', 'check']
- save:
- description:
-      - The C(save) argument instructs the module to save the
-        running-config to the startup-config at the conclusion of the
-        module run. If check mode is specified, this argument is ignored.
- type: bool
- default: 'no'
- config:
- description:
- - The module, by default, will connect to the remote device and
- retrieve the current running-config to use as a base for comparing
- against the contents of source. There are times when it is not
- desirable to have the task get the current running-config for
- every task in a playbook. The I(config) argument allows the
- implementer to pass in the configuration to use as the base
- config for comparison.
- type: str
- backup:
- description:
- - This argument will cause the module to create a full backup of
- the current C(running-config) from the remote device before any
- changes are made. If the C(backup_options) value is not given,
- the backup file is written to the C(backup) folder in the playbook
- root directory. If the directory does not exist, it is created.
- type: bool
- default: 'no'
- backup_options:
- description:
-      - This is a dict object containing configurable options related to the backup file path.
-        The value of this option is read only when C(backup) is set to I(yes); if C(backup) is set
-        to I(no), this option is silently ignored.
- suboptions:
- filename:
- description:
-          - The filename to be used to store the backup configuration. If the filename
-            is not given, it is generated from the hostname and the current date and time,
-            in the format <hostname>_config.<current-date>@<current-time>
- type: str
- dir_path:
- description:
-          - This option provides the path, ending with a directory name, in which the backup
-            configuration file will be stored. If the directory does not exist, it is created
-            first, and the filename is either the value of C(filename) or the default filename
-            described in the C(filename) option. If no path value is given, a I(backup)
-            directory is created in the current working directory and the backup configuration
-            is copied to C(filename) within that I(backup) directory.
- type: path
- type: dict
-"""
-
-EXAMPLES = """
-- os6_config:
- lines: ['hostname {{ inventory_hostname }}']
-- os6_config:
- lines:
- - 10 permit ip 1.1.1.1 any log
- - 20 permit ip 2.2.2.2 any log
- - 30 permit ip 3.3.3.3 any log
- - 40 permit ip 4.4.4.4 any log
- - 50 permit ip 5.5.5.5 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- match: exact
-- os6_config:
- lines:
- - 10 permit ip 1.1.1.1 any log
- - 20 permit ip 2.2.2.2 any log
- - 30 permit ip 3.3.3.3 any log
- - 40 permit ip 4.4.4.4 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- replace: block
-- os6_config:
- lines: ['hostname {{ inventory_hostname }}']
- backup: yes
- backup_options:
- filename: backup.cfg
- dir_path: /home/user
-"""
-
-RETURN = """
-updates:
- description: The set of commands that will be pushed to the remote device.
- returned: always
- type: list
- sample: ['interface Te1/0/1', 'no shutdown', 'exit']
-commands:
- description: The set of commands that will be pushed to the remote device
- returned: always
- type: list
- sample: ['interface Te1/0/1', 'no shutdown', 'exit']
-saved:
- description: Returns whether the configuration is saved to the startup
- configuration or not.
- returned: When not check_mode.
- type: bool
- sample: True
-backup_path:
- description: The full path to the backup file
- returned: when backup is yes
- type: str
- sample: /playbooks/ansible/backup/os6_config.2017-07-16@22:28:34
-"""
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import get_config, get_sublevel_config, NetworkConfig
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_argument_spec, check_args
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import load_config, run_commands
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import WARNING_PROMPTS_RE
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import dumps
-import re
-from ansible.module_utils.six import iteritems
-from ansible.module_utils.connection import exec_command
-from ansible.module_utils._text import to_bytes
-
-
-def get_candidate(module):
- candidate = NetworkConfig(indent=0)
- banners = {}
- if module.params['src']:
- src, banners = extract_banners(module.params['src'])
- candidate.load(src)
- elif module.params['lines']:
- parents = module.params['parents'] or list()
- commands = module.params['lines'][0]
- if (isinstance(commands, dict)) and (isinstance(commands['command'], list)):
- candidate.add(commands['command'], parents=parents)
- elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)):
- candidate.add([commands['command']], parents=parents)
- else:
- lines, banners = extract_banners(module.params['lines'])
- candidate.add(lines, parents=parents)
- return candidate, banners
-
-
-def extract_banners(config):
- flag = False
- if isinstance(config, list):
- str1 = "\n"
- config = str1.join(config)
- flag = True
- banners = {}
- banner_cmds = re.findall(r'^banner (\w+)', config, re.M)
- for cmd in banner_cmds:
- regex = r'banner %s \"(.+?)\".*' % cmd
- match = re.search(regex, config, re.S)
- if match:
- key = 'banner %s' % cmd
- banners[key] = match.group(1).strip()
-
- for cmd in banner_cmds:
- regex = r'banner %s \"(.+?)\".*' % cmd
- match = re.search(regex, config, re.S)
- if match:
- config = config.replace(str(match.group(1)), '')
- config = re.sub(r'banner \w+ \"\"', '', config)
- if flag:
- config = config.split("\n")
- return (config, banners)
-
-
-def diff_banners(want, have):
- candidate = {}
- for key, value in iteritems(want):
- if value != have.get(key):
- candidate[key] = value
- return candidate
-
-
-def get_running_config(module):
- contents = module.params['config']
- if not contents:
- contents = get_config(module)
- contents, banners = extract_banners(contents)
- return contents, banners
-
-
-def load_banners(module, banners):
- result_banners = []
- exec_command(module, 'configure terminal')
- for each in banners:
- delimiter = '"'
- cmdline = ""
- for key, value in each.items():
- cmdline = key + " " + delimiter + value + delimiter
- for cmd in cmdline.split("\n"):
- rc, out, err = exec_command(module, module.jsonify({'command': cmd, 'sendonly': True}))
- result_banners.append(cmdline)
- exec_command(module, 'end')
- return result_banners
-
-
-def main():
- backup_spec = dict(
- filename=dict(),
- dir_path=dict(type='path')
- )
- argument_spec = dict(
- lines=dict(aliases=['commands'], type='list'),
- parents=dict(type='list'),
-
- src=dict(type='path'),
-
- before=dict(type='list'),
- after=dict(type='list'),
-
- match=dict(default='line',
- choices=['line', 'strict', 'exact', 'none']),
- replace=dict(default='line', choices=['line', 'block']),
-
- update=dict(choices=['merge', 'check'], default='merge'),
- save=dict(type='bool', default=False),
- config=dict(),
- backup=dict(type='bool', default=False),
- backup_options=dict(type='dict', options=backup_spec)
- )
-
- argument_spec.update(os6_argument_spec)
- mutually_exclusive = [('lines', 'src'),
- ('parents', 'src')]
-
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True)
-
- parents = module.params['parents'] or list()
-
- match = module.params['match']
- replace = module.params['replace']
-
- warnings = list()
- check_args(module, warnings)
- result = dict(changed=False, saved=False, warnings=warnings)
-
- candidate, want_banners = get_candidate(module)
- if module.params['backup']:
- if not module.check_mode:
- result['__backup__'] = get_config(module)
-
- commands = list()
- if any((module.params['lines'], module.params['src'])):
- if match != 'none':
- config, have_banners = get_running_config(module)
- config = NetworkConfig(contents=config, indent=0)
- if parents:
- config = get_sublevel_config(config, module)
- configobjs = candidate.difference(config, match=match, replace=replace)
- else:
- configobjs = candidate.items
- have_banners = {}
- diffbanners = diff_banners(want_banners, have_banners)
- banners = list()
- if diffbanners:
- banners.append(diffbanners)
- if configobjs or banners:
- commands = dumps(configobjs, 'commands')
- if ((isinstance(module.params['lines'], list)) and
- (isinstance(module.params['lines'][0], dict)) and
- set(['prompt', 'answer']).issubset(module.params['lines'][0])):
- cmd = {'command': commands,
- 'prompt': module.params['lines'][0]['prompt'],
- 'answer': module.params['lines'][0]['answer']}
- commands = [module.jsonify(cmd)]
- else:
- if commands:
- commands = commands.split('\n')
-
- if module.params['before']:
- commands[:0], before_banners = extract_banners(module.params['before'])
- if before_banners:
- banners.insert(0, before_banners)
-
- if module.params['after']:
- commands_after, after_banners = extract_banners(module.params['after'])
- commands.extend(commands_after)
- if after_banners:
- banners.insert(len(banners), after_banners)
-
-        result_banners = []
-        if not module.check_mode and module.params['update'] == 'merge':
-            if commands:
-                load_config(module, commands)
-            if banners:
-                result_banners = load_banners(module, banners)
-
- result['changed'] = True
- result['commands'] = commands
- result['updates'] = commands if commands else []
- result['banners'] = result_banners
- if result['banners']:
- result['updates'].extend(result_banners)
-
- if module.params['save']:
- result['changed'] = True
- if not module.check_mode:
- cmd = {'command': 'copy running-config startup-config',
- 'prompt': r'\(y/n\)\s?$', 'answer': 'y'}
- run_commands(module, [cmd])
- result['saved'] = True
- else:
-            module.warn('Skipping command `copy running-config startup-config` '
-                        'due to check_mode. Configuration not copied to '
- 'non-volatile storage')
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
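When update is set to "check", the branch above still computes the candidate/running-config diff and reports it in commands and updates, but skips load_config, so nothing is pushed to the device. A minimal sketch of that usage, assuming the removed dellemc.os6 collection is still installed (the task names and example line are illustrative only):

    - name: Preview the configuration diff without applying it
      dellemc.os6.os6_config:
        lines:
          - snmp-server community public
        update: check
      register: preview

    - name: Show what would have been pushed
      debug:
        var: preview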
diff --git a/ansible_collections/dellemc/os6/plugins/modules/os6_facts.py b/ansible_collections/dellemc/os6/plugins/modules/os6_facts.py
deleted file mode 100644
index ce439ad40..000000000
--- a/ansible_collections/dellemc/os6/plugins/modules/os6_facts.py
+++ /dev/null
@@ -1,478 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# Copyright (c) 2020 Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: os6_facts
-author: "Abirami N (@abirami-n)"
-short_description: Collect facts from devices running Dell EMC OS6
-description:
- - Collects a base set of device facts from a remote device that
- is running OS6. This module prepends all of the
- base network fact keys with C(ansible_net_<fact>). The facts
- module will always collect a base set of facts from the device
- and can enable or disable collection of additional facts.
-extends_documentation_fragment: dellemc.os6.os6
-options:
- gather_subset:
- description:
- - When supplied, this argument will restrict the facts collected
- to a given subset. Possible values for this argument include
- all, hardware, config, and interfaces. Can specify a list of
- values to include a larger subset. Values can also be used
-        with an initial C(!) to specify that a specific subset should
- not be collected.
- type: list
- default: [ '!config' ]
-"""
-
-EXAMPLES = """
-# Collect all facts from the device
-- os6_facts:
- gather_subset: all
-# Collect only the config and default facts
-- os6_facts:
- gather_subset:
- - config
-# Do not collect hardware facts
-- os6_facts:
- gather_subset:
- - "!interfaces"
-"""
-
-RETURN = """
-ansible_net_gather_subset:
- description: The list of fact subsets collected from the device.
- returned: always.
- type: list
-# default
-ansible_net_model:
- description: The model name returned from the device.
- returned: always.
- type: str
-ansible_net_serialnum:
- description: The serial number of the remote device.
- returned: always.
- type: str
-ansible_net_version:
- description: The operating system version running on the remote device.
- returned: always.
- type: str
-ansible_net_hostname:
- description: The configured hostname of the device.
- returned: always.
- type: str
-ansible_net_image:
- description: The image file that the device is running.
- returned: always
- type: str
-# hardware
-ansible_net_memfree_mb:
- description: The available free memory on the remote device in MB.
- returned: When hardware is configured.
- type: int
-ansible_net_memtotal_mb:
- description: The total memory on the remote device in MB.
- returned: When hardware is configured.
- type: int
-# config
-ansible_net_config:
- description: The current active config from the device.
- returned: When config is configured.
- type: str
-# interfaces
-ansible_net_interfaces:
- description: A hash of all interfaces running on the system.
- returned: When interfaces is configured.
- type: dict
-ansible_net_neighbors:
- description: The list of LLDP neighbors from the remote device.
- returned: When interfaces is configured.
- type: dict
-"""
-import re
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import run_commands
-from ansible_collections.dellemc.os6.plugins.module_utils.network.os6 import os6_argument_spec, check_args
-from ansible.module_utils.six import iteritems
-
-
-class FactsBase(object):
-
- COMMANDS = list()
-
- def __init__(self, module):
- self.module = module
- self.facts = dict()
- self.responses = None
-
- def populate(self):
- self.responses = run_commands(self.module, self.COMMANDS, check_rc=False)
-
- def run(self, cmd):
- return run_commands(self.module, cmd, check_rc=False)
-
-
-class Default(FactsBase):
-
- COMMANDS = [
- 'show version',
- 'show running-config | include hostname'
- ]
-
- def populate(self):
- super(Default, self).populate()
- data = self.responses[0]
- self.facts['version'] = self.parse_version(data)
- self.facts['serialnum'] = self.parse_serialnum(data)
- self.facts['model'] = self.parse_model(data)
- self.facts['image'] = self.parse_image(data)
- hdata = self.responses[1]
- self.facts['hostname'] = self.parse_hostname(hdata)
-
- def parse_version(self, data):
- facts = dict()
- match = re.search(r'HW Version(.+)\s(\d+)', data)
- temp, temp_next = data.split('---- ----------- ----------- -------------- --------------')
- for en in temp_next.splitlines():
- if en == '':
- continue
-            match_image = re.search(r'^(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', en)
-            if "Version" not in facts:
-                facts["Version"] = list()
- fact = dict()
- fact['HW Version'] = match.group(2)
- fact['SW Version'] = match_image.group(4)
- facts["Version"].append(fact)
- return facts
-
- def parse_hostname(self, data):
- match = re.search(r'\S+\s(\S+)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_model(self, data):
- match = re.search(r'System Model ID(.+)\s([-A-Z0-9]*)\n', data, re.M)
- if match:
- return match.group(2)
-
- def parse_image(self, data):
- match = re.search(r'Image File(.+)\s([A-Z0-9a-z_.]*)\n', data)
- if match:
- return match.group(2)
-
- def parse_serialnum(self, data):
- match = re.search(r'Serial Number(.+)\s([A-Z0-9]*)\n', data)
- if match:
- return match.group(2)
-
-
-class Hardware(FactsBase):
-
- COMMANDS = [
- 'show memory cpu'
- ]
-
- def populate(self):
- super(Hardware, self).populate()
- data = self.responses[0]
- match = re.findall(r'\s(\d+)\s', data)
- if match:
- self.facts['memtotal_mb'] = int(match[0]) // 1024
- self.facts['memfree_mb'] = int(match[1]) // 1024
-
-
-class Config(FactsBase):
-
- COMMANDS = ['show running-config']
-
- def populate(self):
- super(Config, self).populate()
- self.facts['config'] = self.responses[0]
-
-
-class Interfaces(FactsBase):
- COMMANDS = [
- 'show interfaces',
- 'show interfaces status',
- 'show interfaces transceiver properties',
- 'show ip int',
- 'show lldp',
- 'show lldp remote-device all',
- 'show version'
- ]
-
- def populate(self):
- vlan_info = dict()
- super(Interfaces, self).populate()
- data = self.responses[0]
- interfaces = self.parse_interfaces(data)
- desc = self.responses[1]
- properties = self.responses[2]
- vlan = self.responses[3]
- version_info = self.responses[6]
- vlan_info = self.parse_vlan(vlan, version_info)
- self.facts['interfaces'] = self.populate_interfaces(interfaces, desc, properties)
- self.facts['interfaces'].update(vlan_info)
- if 'LLDP is not enabled' not in self.responses[4]:
- neighbors = self.responses[5]
- self.facts['neighbors'] = self.parse_neighbors(neighbors)
-
- def parse_vlan(self, vlan, version_info):
- facts = dict()
- if "N11" in version_info:
- match = re.search(r'IP Address(.+)\s([0-9.]*)\n', vlan)
- mask = re.search(r'Subnet Mask(.+)\s([0-9.]*)\n', vlan)
- vlan_id_match = re.search(r'Management VLAN ID(.+)\s(\d+)', vlan)
- vlan_id = "Vl" + vlan_id_match.group(2)
- if vlan_id not in facts:
- facts[vlan_id] = list()
- fact = dict()
- fact['address'] = match.group(2)
- fact['masklen'] = mask.group(2)
- facts[vlan_id].append(fact)
- else:
- vlan_info, vlan_info_next = vlan.split('---------- ----- --------------- --------------- -------')
- for en in vlan_info_next.splitlines():
- if en == '':
- continue
- match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en)
- intf = match.group(1)
- if intf not in facts:
- facts[intf] = list()
- fact = dict()
- matc = re.search(r'^([\w+\s\d]*)\s+(\S+)\s+(\S+)', en)
- fact['address'] = matc.group(2)
- fact['masklen'] = matc.group(3)
- facts[intf].append(fact)
- return facts
-
- def populate_interfaces(self, interfaces, desc, properties):
- facts = dict()
- for key, value in interfaces.items():
- intf = dict()
- intf['description'] = self.parse_description(key, desc)
- intf['macaddress'] = self.parse_macaddress(value)
- intf['mtu'] = self.parse_mtu(value)
- intf['bandwidth'] = self.parse_bandwidth(value)
- intf['mediatype'] = self.parse_mediatype(key, properties)
- intf['duplex'] = self.parse_duplex(value)
- intf['lineprotocol'] = self.parse_lineprotocol(value)
- intf['operstatus'] = self.parse_operstatus(value)
- intf['type'] = self.parse_type(key, properties)
- facts[key] = intf
- return facts
-
- def parse_neighbors(self, neighbors):
- facts = dict()
- neighbor, neighbor_next = neighbors.split('--------- ------- ------------------- ----------------- -----------------')
- for en in neighbor_next.splitlines():
- if en == '':
- continue
- intf = self.parse_lldp_intf(en.split()[0])
- if intf not in facts:
- facts[intf] = list()
- fact = dict()
- if len(en.split()) > 2:
- fact['port'] = self.parse_lldp_port(en.split()[3])
- if (len(en.split()) > 4):
- fact['host'] = self.parse_lldp_host(en.split()[4])
- else:
- fact['host'] = "Null"
- facts[intf].append(fact)
- return facts
-
- def parse_interfaces(self, data):
- parsed = dict()
- for line in data.split('\n'):
- if len(line) == 0:
- continue
- match = re.match(r'Interface Name(.+)\s([A-Za-z0-9/]*)', line, re.IGNORECASE)
- if match:
- key = match.group(2)
- parsed[key] = line
- else:
- parsed[key] += '\n%s' % line
- return parsed
-
- def parse_description(self, key, desc):
- desc_val, desc_info = "", ""
- desc = re.split(r'[-+\s](?:-+\s)[-+\s].*', desc)
- for desc_val in desc:
- if desc_val:
- for en in desc_val.splitlines():
- if key in en:
- match = re.search(r'^(\S+)\s+(\S+)', en)
- if match.group(2) in ['Full', 'N/A']:
- return "Null"
- else:
- return match.group(2)
-
- def parse_macaddress(self, data):
- match = re.search(r'Burned In MAC Address(.+)\s([A-Z0-9.]*)\n', data)
- if match:
- return match.group(2)
-
- def parse_mtu(self, data):
- match = re.search(r'MTU Size(.+)\s(\d+)\n', data)
- if match:
- return int(match.group(2))
-
- def parse_bandwidth(self, data):
- match = re.search(r'Port Speed\s*[:\s\.]+\s(\d+)\n', data)
- if match:
- return int(match.group(1))
-
- def parse_duplex(self, data):
- match = re.search(r'Port Mode\s([A-Za-z]*)(.+)\s([A-Za-z/]*)\n', data)
- if match:
- return match.group(3)
-
- def parse_mediatype(self, key, properties):
- mediatype, mediatype_next = properties.split('--------- ------- --------------------- --------------------- --------------')
- flag = 1
- for en in mediatype_next.splitlines():
- if key in en:
- flag = 0
- match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en)
- if match:
- strval = match.group(3)
- return strval
- if flag == 1:
- return "null"
-
- def parse_type(self, key, properties):
- type_val, type_val_next = properties.split('--------- ------- --------------------- --------------------- --------------')
- flag = 1
- for en in type_val_next.splitlines():
- if key in en:
- flag = 0
- match = re.search(r'^(\S+)\s+(\S+)\s+(\S+)', en)
- if match:
- strval = match.group(2)
- return strval
- if flag == 1:
- return "null"
-
- def parse_lineprotocol(self, data):
- data = data.splitlines()
- for d in data:
- match = re.search(r'^Link Status\s*[:\s\.]+\s(\S+)', d)
- if match:
- return match.group(1)
-
- def parse_operstatus(self, data):
- data = data.splitlines()
- for d in data:
- match = re.search(r'^Link Status\s*[:\s\.]+\s(\S+)', d)
- if match:
- return match.group(1)
-
- def parse_lldp_intf(self, data):
- match = re.search(r'^([A-Za-z0-9/]*)', data)
- if match:
- return match.group(1)
-
- def parse_lldp_host(self, data):
- match = re.search(r'^([A-Za-z0-9-]*)', data)
- if match:
- return match.group(1)
-
- def parse_lldp_port(self, data):
- match = re.search(r'^([A-Za-z0-9/]*)', data)
- if match:
- return match.group(1)
-
-
-FACT_SUBSETS = dict(
- default=Default,
- hardware=Hardware,
- interfaces=Interfaces,
- config=Config,
-)
-
-VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
-
-
-def main():
- """main entry point for module execution
- """
- argument_spec = dict(
- gather_subset=dict(default=['!config'], type='list')
- )
-
- argument_spec.update(os6_argument_spec)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- gather_subset = module.params['gather_subset']
-
- runable_subsets = set()
- exclude_subsets = set()
-
- for subset in gather_subset:
- if subset == 'all':
- runable_subsets.update(VALID_SUBSETS)
- continue
-
- if subset.startswith('!'):
- subset = subset[1:]
- if subset == 'all':
- exclude_subsets.update(VALID_SUBSETS)
- continue
- exclude = True
- else:
- exclude = False
-
- if subset not in VALID_SUBSETS:
- module.fail_json(msg='Bad subset')
-
- if exclude:
- exclude_subsets.add(subset)
- else:
- runable_subsets.add(subset)
-
- if not runable_subsets:
- runable_subsets.update(VALID_SUBSETS)
-
- runable_subsets.difference_update(exclude_subsets)
- runable_subsets.add('default')
-
- facts = dict()
- facts['gather_subset'] = list(runable_subsets)
-
- instances = list()
- for key in runable_subsets:
- instances.append(FACT_SUBSETS[key](module))
-
- for inst in instances:
- inst.populate()
- facts.update(inst.facts)
-
- ansible_facts = dict()
- for key, value in iteritems(facts):
- key = 'ansible_net_%s' % key
- ansible_facts[key] = value
-
- warnings = list()
- check_args(module, warnings)
-
- module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
-
-
-if __name__ == '__main__':
- main()
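Each selected subset class above stores its results in facts, and main() republishes every key as an ansible_net_* fact. A short sketch of collecting and consuming those facts, assuming the removed dellemc.os6 collection is still installed (the subset choice and debug message are illustrative):

    - name: Gather default and interface facts
      dellemc.os6.os6_facts:
        gather_subset:
          - interfaces

    - name: Report the discovered model and hostname
      debug:
        msg: "{{ ansible_net_model }} ({{ ansible_net_hostname }})"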
diff --git a/ansible_collections/dellemc/os6/plugins/terminal/os6.py b/ansible_collections/dellemc/os6/plugins/terminal/os6.py
deleted file mode 100644
index b11995524..000000000
--- a/ansible_collections/dellemc/os6/plugins/terminal/os6.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import re
-import json
-
-from ansible.module_utils._text import to_text, to_bytes
-from ansible.plugins.terminal import TerminalBase
-from ansible.errors import AnsibleConnectionFailure
-
-
-class TerminalModule(TerminalBase):
-
- terminal_stdout_re = [
- re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
- re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
- ]
-
- terminal_stderr_re = [
- re.compile(br"% ?Bad secret"),
- re.compile(br"(\bInterface is part of a port-channel\b)"),
- re.compile(br"(\bThe maximum number of users have already been created\b)|(\bUse '-' for range\b)"),
- re.compile(br"(?:incomplete|ambiguous) command", re.I),
- re.compile(br"connection timed out", re.I),
- re.compile(br"\bParameter length should be exactly 32 characters\b"),
- re.compile(br"'[^']' +returned error code: ?\d+"),
- re.compile(br"Invalid|invalid.*$", re.I),
- re.compile(br"((\bout of range\b)|(\bnot found\b)|(\bCould not\b)|(\bUnable to\b)|(\bCannot\b)|(\bError\b)).*", re.I),
- re.compile(br"((\balready exists\b)|(\bnot exist\b)|(\bnot active\b)|(\bFailed\b)|(\bIncorrect\b)|(\bnot enabled\b)|(\bDeactivate\b)).*", re.I),
-
- ]
-
- terminal_initial_prompt = br"\(y/n\)"
-
- terminal_initial_answer = b"y"
-
- terminal_inital_prompt_newline = False
-
- def on_open_shell(self):
- try:
- if self._get_prompt().endswith(b'#'):
- self._exec_cli_command(b'terminal length 0')
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to set terminal parameters')
-
- def on_become(self, passwd=None):
- if self._get_prompt().endswith(b'#'):
- return
-
- cmd = {u'command': u'enable'}
- if passwd:
- cmd[u'prompt'] = to_text(r"[\r\n]?password:$", errors='surrogate_or_strict')
- cmd[u'answer'] = passwd
- try:
- self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
- # in os6 the terminal settings are accepted after the privilege mode
- try:
- self._exec_cli_command(b'terminal length 0')
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to set terminal parameters')
-
- def on_unbecome(self):
- prompt = self._get_prompt()
- if prompt is None:
- # if prompt is None most likely the terminal is hung up at a prompt
- return
-
- if prompt.strip().endswith(b')#'):
- self._exec_cli_command(b'end')
- self._exec_cli_command(b'disable')
-
- elif prompt.endswith(b'#'):
- self._exec_cli_command(b'disable')
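The on_become handler above issues the enable command, answers an optional password prompt, and then sets "terminal length 0", so privileged mode is reached through the standard network_cli become mechanism. A hedged sketch of the matching inventory or group_vars settings (the vault variable name is illustrative):

    ansible_connection: ansible.netcommon.network_cli
    ansible_network_os: dellemc.os6.os6
    ansible_become: yes
    ansible_become_method: enable
    ansible_become_password: "{{ vault_os6_enable_password }}"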
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE b/ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/README.md b/ansible_collections/dellemc/os6/roles/os6_aaa/README.md
deleted file mode 100644
index 1e142f375..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/README.md
+++ /dev/null
@@ -1,210 +0,0 @@
-AAA role
-=======
-
-This role facilitates the configuration of authentication, authorization, and accounting (AAA), and supports the configuration of RADIUS and TACACS servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The AAA role requires an SSH connection for connectivity to Dell EMC OS6. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as the value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the sketch after this list)
-- Variables and values are case-sensitive
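-
-For example, a minimal sketch of these negation conventions (the host address and key values are illustrative): setting ``state: absent`` on an entry removes that entry, while leaving a value empty removes the corresponding configuration.
-
-    os6_aaa:
-      radius_server:
-        key:                   # empty value removes the RADIUS server key
-        host:
-          - ip: 10.0.0.1       # illustrative address
-            state: absent      # removes this RADIUS server host entry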
-
-**os6_aaa keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os6 |
-| ``radius_server.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os6 |
-| ``radius_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *radius_server.key* is 7 or 0 | os6 |
-| ``radius_server.retransmit`` | integer | Configures the number of retransmissions; leave the field blank to remove the retransmission configuration for RADIUS server authentication | os6 |
-| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions; timeout must be an integer between 1 and 30; leave the field blank to remove the timeout configuration for RADIUS server authentication | os6 |
-| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os6 |
-| ``host.ip`` | string | Configures the RADIUS server host address | os6 |
-| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os6 |
-| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 |
-| ``host.retransmit`` | integer | Configures the number of retransmissions | os6 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 |
-| ``host.timeout`` | integer | Configures timeout for retransmissions | os6 |
-| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os6 |
-| ``radius_server.acct`` | dictionary | Configures the RADIUS server acct (see ``acct.*``) | os6 |
-| ``acct.ip`` | string | Configures the RADIUS server acct address | os6 |
-| ``acct.key`` | string (required); 0,7,LINE | Configures the authentication key | os6 |
-| ``acct.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 |
-| ``acct.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 |
-| ``acct.state`` | string: present,absent | Removes the RADIUS server acct if set to absent | os6 |
-| ``radius_server.auth`` | dictionary | Configures the RADIUS server auth (see ``auth.*``) | os6 |
-| ``auth.ip`` | string | Configures the RADIUS server host address | os6 |
-| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os6 |
-| ``auth.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 |
-| ``auth.name`` | string (required) | Configures the auth name of the RADIUS servers | os6 |
-| ``auth.usage`` | string (required) | Configures the usage type of the RADIUS servers | os6 |
-| ``auth.priority`` | integer | Configures the priority of the RADIUS server | os6 |
-| ``auth.retransmit`` | integer | Configures the number of retransmissions | os6 |
-| ``auth.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 |
-| ``auth.timeout`` | integer | Configures timeout for retransmissions | os6 |
-| ``auth.deadtime`` | integer | Configures the deadtime for the RADIUS server | os6 |
-| ``auth.attribute`` | dictionary | Configures attributes for the RADIUS auth server (see ``attribute.*``) | os6 |
-| ``attribute.id`` | integer | Configures the RADIUS server attribute ID (see ``attribute.*``) | os6 |
-| ``attribute.type`` | integer | Configures the RADIUS server attribute type based on ID | os6 |
-| ``attribute.state`` | string: present,absent | Removes the RADIUS server attribute if set to absent | os6 |
-| ``auth.state`` | string: present,absent | Removes the RADIUS server auth if set to absent | os6 |
-| ``radius_server.attribute`` | dictionary | Configures the RADIUS server attribute (see ``attribute.*``) | os6 |
-| ``attribute.id`` | integer | Configures the RADIUS server attribute ID (see ``attribute.*``) | os6 |
-| ``attribute.type`` | integer | Configures the RADIUS server attribute type based on ID | os6 |
-| ``attribute.state`` | string: present,absent | Removes the RADIUS server attribute if set to absent | os6 |
-| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``)| os6 |
-| ``tacacs_server.key`` | string (required): 0,7,LINE | Configures the authentication key for TACACS server | os6 |
-| ``tacacs_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *tacacs_server.key* is 7 or 0 | os6 |
-| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os6 |
-| ``host.ip`` | string | Configures the TACACS server host address | os6 |
-| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key | os6 |
-| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os6 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os6 |
-| ``host.timeout`` | integer | Configures the timeout for retransmissions | os6 |
-| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os6 |
-| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os6 |
-| ``aaa_accounting.dot1x`` | string: none,start-stop,stop-only,wait-start | Configures accounting for dot1x events | os6 |
-| ``aaa_authorization`` | dictionary | Configures authorization parameters (see ``aaa_authorization.*``) | os6 |
-| ``aaa_authorization.exec`` | list | Configures authorization for EXEC (shell) commands (see ``exec.*``) | os6 |
-| ``exec.authorization_list_name`` | string | Configures named authorization list for EXEC commands | os6 |
-| ``exec.authorization_method`` | string: none | Configures no authorization of EXEC commands | os6 |
-| ``exec.use_data`` | string: local,tacacs, radius | Configures data used for authorization | os6 |
-| ``exec.state`` | string: present,absent | Removes the named authorization list for the EXEC commands if set to absent | os6 |
-| ``aaa_authorization.network`` | string: none,radius,ias | Configures authorization for network events | os6 |
-| ``aaa_authentication.auth_list`` | list | Configures named authentication list for hosts (see ``host.*``) | os6 |
-| ``auth_list.name`` | string | Configures named authentication list | os6 |
-| ``auth_list.login_or_enable`` | string: enable,login | Configures authentication list for login or enable | os6 |
-| ``auth_list.server`` | string: radius,tacacs | Configures AAA to use this list of all server hosts | os6 |
-| ``auth_list.use_password`` | string: line,local,enable,none | Configures password to use for authentication | os6 |
-| ``auth_list.state`` | string: present,absent | Removes the named authentication list if set to absent | os6 |
-| ``aaa_authentication.dot1x`` | string: none,radius,ias | Configures authentication for dot1x events | os6 |
-| ``aaa_server`` | dictionary | Configures the AAA server (see ``aaa_server.*``) | os6 |
-| ``radius`` | dictionary | Configures the RADIUS server (see ``radius.*``) | os6 |
-| ``dynamic_author`` | dictionary | Configures dynamic authorization for the RADIUS server (see ``dynamic_author.*``) | os6 |
-| ``dynamic_author.auth_type`` | string | Configures the authentication type for the RADIUS server | os6 |
-| ``dynamic_author.client`` | list | Configures the client for the RADIUS server | os6 |
-| ``client.ip`` | string | Configures the client IP for the RADIUS server | os6 |
-| ``client.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os6 |
-| ``client.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *radius_server.key* is 7 or 0 | os6 |
-| ``client.state`` | string: present,absent | Removes the client if set to absent | os6 |
-| ``dynamic_author.state`` | string: present,absent | Removes the dynamic authorization configuration if set to absent | os6 |
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. |
-| ``ansible_network_os`` | yes | os6, null\* | This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_aaa* role to configure AAA for RADIUS and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the variable generates the role configuration commands as a .part file in the *build_dir* path; by default, it is set to false. The example writes a simple playbook that only references the *os6_aaa* role.
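-
-For example, build-file generation could be enabled by adding the following to the *host_vars* entry shown below (a minimal sketch; the build directory path follows the sample below):
-
-    os6_cfg_generate: true
-    build_dir: ../temp/temp_os6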
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_aaa:
- radius_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fb
- retransmit: 5
- timeout: 25
- host:
- - ip: 10.0.0.1
- key: 0
- key_string: aaa
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- tacacs_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa
- host:
- - ip: 10.0.0.50
- key: 0
- key_string: aaa
- auth_port: 3
- timeout: 2
- state: present
- aaa_accounting:
- dot1x: none
- aaa_authorization:
- exec:
- - authorization_list_name: aaa
- authorization_method: none
- use_data: local
- state: present
- network: radius
- aaa_authentication:
- auth_list:
- - name: default
- login_or_enable: login
- server: radius
- use_password: local
- state: present
- - name: console
- server: tacacs
- login_or_enable: login
- use_password: local
- state: present
- aaa_server:
- radius:
- dynamic_author:
- auth_type:
- client:
- - ip: 10.0.0.1
- key: 0
- key_string: aaskjsksdkjsdda
- state: present
- - ip: 10.0.0.2
- key:
- key_string: aaskjsksdkjsdda
- state: present
- state: present
-
-
-
-**Simple playbook to setup system — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_aaa
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml
deleted file mode 100644
index 40a48c74d..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# defaults file for dellemc.os6.os6_aaa
-attribute_type:
- mandatory: mandatory
- on_for_login_auth: on-for-login-auth
- include_in_access_req: include-in-access-req
- mac: "mac format"
- mac_ietf: "mac format ietf"
- mac_ietf_lower_case: "mac format ietf lower-case"
- mac_ietf_upper_case: "mac format ietf upper-case"
- mac_legacy: "mac format legacy"
- mac_legacy_lower_case: "mac format legacy lower-case"
- mac_legacy_upper_case: "mac format legacy upper-case"
- mac_unformatted: "mac format unformatted"
- mac_unformatted_lower_case: "mac format unformatted lower-case"
- mac_unformatted_upper_case: "mac format unformatted upper-case" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml
deleted file mode 100644
index a0318e7fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_aaa
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml
deleted file mode 100644
index 5d089cb21..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os6_aaa role facilitates the configuration of Authentication, Authorization, and Accounting (AAA) attributes
- in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml
deleted file mode 100644
index 36a416e34..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating AAA configuration for os6"
- template:
- src: os6_aaa.j2
- dest: "{{ build_dir }}/aaa6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning AAA configuration for os6"
- dellemc.os6.os6_config:
- src: os6_aaa.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2 b/ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2
deleted file mode 100644
index 541884566..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/templates/os6_aaa.j2
+++ /dev/null
@@ -1,437 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-
-Purpose:
-Configure AAA commands for os6 Devices
-
-os6_aaa:
- tacacs_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa
- timeout: 10
- host:
- - ip: 10.0.0.50
- key: 0
- key_string: aaa
- port: 3
- timeout: 2
- state: present
- radius_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fb
- retransmit: 5
- timeout: 10
- deadtime: 2000
- host:
- - ip: 10.0.0.1
- key: 0
- key_string: aaa
- name: radius
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- acct:
- - ip: 10.0.0.1
- key: 0
- key_string: aasdvsdvfssfsfa
- name: radius
- auth-port: 2
- state: present
- auth:
- - ip: 10.0.0.2
- key: 0
- key_string: asdnksfnfnksnddjknsdn
- name: radius
- usage: all
- priority: 2
- timeout: 2
- retransmit: 5
- auth_port: 3
- deadtime: 6
- attribute:
- - id: 6
- type: {{attribute_type.mandatory}}
- state: present
- - id: 31
- type: {{attribute_type.mac_ietf_lower_case}}
- state: present
- state: present
- attribute:
- - id: 6
- type: {{attribute_type.mandatory}}
- state: present
- - id: 31
- type: {{attribute_type.mac_ietf_lower_case}}
- state: present
-
-
- aaa_authentication:
- auth_list:
- - name: default
- login_or_enable: login
- server: tacacs
- use_password: local
- state: present
- - name: console
- server: radius
- login_or_enable: login
- use_password: local
- dot1x: none
- aaa_authorization:
- exec:
- - authorization_list_name: aaa
- authorization_method: none
- use_data: local
- state: present
- network: radius
- aaa_accounting:
- dot1x: none
- aaa_server:
- radius:
- dynamic_author:
- auth_type: all
- client:
- - ip: 10.0.0.1
- key: 0
- key_string: aaa
- state: present
- state: present
-
-##################################################}
-{% if os6_aaa is defined and os6_aaa %}
-{% for key in os6_aaa.keys() %}
- {% set aaa_vars = os6_aaa[key] %}
- {% if key == "tacacs_server" %}
- {% set server = "tacacs-server" %}
- {% endif %}
- {% if key == "radius_server" %}
- {% set server = "radius server" %}
- {% endif %}
- {% if server is defined and server %}
- {% if aaa_vars %}
- {% set item = aaa_vars %}
- {% if item.timeout is defined %}
- {% if item.timeout %}
-{{ server }} timeout {{ item.timeout }}
- {% else %}
-no {{ server }} timeout
- {% endif %}
- {% endif %}
- {% if item.retransmit is defined and server == "radius server" %}
- {% if item.retransmit %}
-{{ server }} retransmit {{ item.retransmit }}
- {% else %}
-no {{ server }} retransmit
- {% endif %}
- {% endif %}
- {% if item.deadtime is defined and server == "radius server" %}
- {% if item.deadtime %}
-{{ server }} deadtime {{ item.deadtime }}
- {% else %}
-no {{ server }} deadtime
- {% endif %}
- {% endif %}
- {% if item.key is defined %}
- {% if item.key == 0 or item.key == 7 %}
- {% if item.key_string is defined and item.key_string%}
-{{ server }} key {{ item.key }} {{ item.key_string }}
- {% endif %}
- {% elif item.key %}
-{{ server }} key {{ item.key }}
- {% else %}
-no {{ server }} key
- {% endif %}
- {% endif %}
- {% if item.host is defined and item.host %}
- {% for hostlist in item.host %}
- {% if hostlist.ip is defined and hostlist.ip %}
- {% if hostlist.state is defined and hostlist.state == "absent" %}
- {% if server == "tacacs-server" %}
-no {{ server }} host {{ hostlist.ip }}
- {% else %}
-no {{ server }} {{ hostlist.ip }}
- {% endif %}
- {% else %}
- {% if server == "tacacs-server" %}
-{{ server }} host {{ hostlist.ip }}
- {% if (hostlist.key is defined) %}
- {% if hostlist.key == 0 or hostlist.key == 7 %}
- {% if hostlist.key_string is defined and hostlist.key_string %}
-key {{ hostlist.key }} {{ hostlist.key_string }}
- {% endif %}
- {% elif hostlist.key %}
-key {{ hostlist.key }}
- {% else %}
-no key
- {% endif %}
- {% endif %}
- {% if (hostlist.timeout is defined and hostlist.timeout) %}
-timeout {{ hostlist.timeout }}
- {% endif %}
- {% if (hostlist.auth_port is defined and hostlist.auth_port) %}
-port {{ hostlist.auth_port }}
- {% endif %}
-exit
- {% elif server == "radius server" %}
-{{ server }} {{ hostlist.ip }}
- {% if (hostlist.key is defined) %}
- {% if hostlist.key == 0 or hostlist.key == 7 %}
- {% if hostlist.key_string is defined and hostlist.key_string %}
-key {{ hostlist.key }} {{ hostlist.key_string }}
- {% endif %}
- {% elif hostlist.key %}
-key {{ hostlist.key }}
- {% else %}
-no key
- {% endif %}
- {% endif %}
- {% if (hostlist.name is defined) %}
- {% if (hostlist.name) %}
-name "{{ hostlist.name }}"
- {% else %}
-no name
- {% endif %}
- {% endif %}
- {% if (hostlist.timeout is defined and hostlist.timeout) %}
-timeout {{ hostlist.timeout }}
- {% endif %}
- {% if (hostlist.auth_port is defined and hostlist.auth_port) %}
-auth-port {{ hostlist.auth_port }}
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if item.acct is defined and item.acct and server == "radius server" %}
- {% for acctlist in item.acct %}
- {% if acctlist.ip is defined and acctlist.ip %}
- {% if acctlist.state is defined and acctlist.state == "absent" %}
-no {{ server }} acct {{ acctlist.ip }}
- {% else %}
-{{ server }} acct {{ acctlist.ip }}
- {% if (acctlist.key is defined) %}
- {% if acctlist.key == 0 or acctlist.key == 7 %}
- {% if acctlist.key_string is defined and acctlist.key_string %}
-key {{ acctlist.key }} {{ acctlist.key_string }}
- {% endif %}
- {% elif acctlist.key %}
-key {{ acctlist.key }}
- {% else %}
-no key
- {% endif %}
- {% endif %}
- {% if (acctlist.name is defined) %}
- {% if (acctlist.name) %}
-name "{{ acctlist.name }}"
- {% else %}
-no name
- {% endif %}
- {% endif %}
- {% if (acctlist.auth_port is defined and acctlist.auth_port) %}
-acct-port {{ acctlist.auth_port }}
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if item.auth is defined and item.auth and server == "radius server" %}
- {% for authlist in item.auth %}
- {% if authlist.ip is defined and authlist.ip %}
- {% if authlist.state is defined and authlist.state == "absent" %}
-no {{ server }} auth {{ authlist.ip }}
- {% else %}
-{{ server }} auth {{ authlist.ip }}
- {% if (authlist.key is defined) %}
- {% if authlist.key == 0 or authlist.key == 7 %}
- {% if authlist.key_string is defined and authlist.key_string %}
-key {{ authlist.key }} {{ authlist.key_string }}
- {% endif %}
- {% elif authlist.key %}
-key {{ authlist.key }}
- {% else %}
-no key
- {% endif %}
- {% endif %}
- {% if (authlist.name is defined) %}
- {% if (authlist.name) %}
-name "{{ authlist.name }}"
- {% else %}
-no name
- {% endif %}
- {% endif %}
- {% if (authlist.auth_port is defined and authlist.auth_port) %}
-auth-port {{ authlist.auth_port }}
- {% endif %}
- {% if (authlist.priority is defined and authlist.priority) %}
-priority {{ authlist.priority }}
- {% endif %}
- {% if (authlist.timeout is defined and authlist.timeout) %}
-timeout {{ authlist.timeout }}
- {% endif %}
- {% if (authlist.retransmit is defined and authlist.retransmit) %}
-retransmit {{ authlist.retransmit }}
- {% endif %}
- {% if (authlist.deadtime is defined and authlist.deadtime) %}
-deadtime {{ authlist.deadtime }}
- {% endif %}
- {% if (authlist.usage is defined and authlist.usage) %}
-usage {{ authlist.usage }}
- {% endif %}
- {% if authlist.attribute is defined and authlist.attribute and server == "radius server" %}
- {% for attributelist in authlist.attribute %}
- {% if attributelist.id is defined and attributelist.id and attributelist.type is defined %}
- {% if attributelist.state is defined and attributelist.state == "absent" %}
-no attribute {{ attributelist.id }} {{ attributelist.type }}
- {% else %}
- {% if attributelist.type %}
-attribute {{ attributelist.id }} {{ attributelist.type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if item.attribute is defined and item.attribute and server == "radius server" %}
- {% for attributelist in item.attribute %}
- {% if attributelist.id is defined and attributelist.id and attributelist.type is defined %}
- {% if attributelist.state is defined and attributelist.state == "absent" %}
-no {{ server }} attribute {{ attributelist.id }} {{ attributelist.type }}
- {% else %}
- {% if attributelist.type %}
-{{ server }} attribute {{ attributelist.id }} {{ attributelist.type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
- {% if os6_aaa.aaa_authentication is defined and os6_aaa.aaa_authentication %}
- {% if os6_aaa.aaa_authentication.auth_list is defined and os6_aaa.aaa_authentication.auth_list %}
- {% for auth_list in os6_aaa.aaa_authentication.auth_list %}
- {% if auth_list.login_or_enable is defined and auth_list.login_or_enable %}
- {% if auth_list.name is defined and auth_list.name %}
- {% if auth_list.state is defined and auth_list.state == "absent" %}
-no aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }}
- {% else %}
- {% if auth_list.server is defined and auth_list.server %}
- {% if auth_list.use_password is defined and auth_list.use_password %}
-aaa authentication {{ auth_list.login_or_enable }} "{{ auth_list.name }}" {{ auth_list.server }} {{ auth_list.use_password }}
- {% else %}
-aaa authentication {{ auth_list.login_or_enable }} "{{ auth_list.name }}" {{ auth_list.server }}
- {% endif %}
- {% else %}
- {% if auth_list.use_password is defined and auth_list.use_password %}
-aaa authentication {{ auth_list.login_or_enable }} "{{ auth_list.name }}" {{ auth_list.use_password }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os6_aaa.aaa_authentication.dot1x is defined %}
- {% set aaa_authentication = os6_aaa.aaa_authentication %}
- {% if aaa_authentication.dot1x == "none" or aaa_authentication.dot1x == "radius" or aaa_authentication.dot1x == "ias" %}
-aaa authentication dot1x default {{ aaa_authentication.dot1x }}
- {% else %}
-no aaa authentication dot1x default
- {% endif %}
- {% endif %}
- {% endif %}
- {% if os6_aaa.aaa_authorization is defined and os6_aaa.aaa_authorization %}
- {% set aaa_authorization = os6_aaa.aaa_authorization %}
- {% if aaa_authorization.exec is defined and aaa_authorization.exec %}
- {% for command in aaa_authorization.exec %}
- {% if command.authorization_list_name is defined and command.authorization_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa authorization exec {{ command.authorization_list_name }}
- {% else %}
- {% if command.use_data is defined and command.use_data %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization exec "{{ command.authorization_list_name }}" {{ command.use_data }} {{ command.authorization_method }}
- {% else %}
-aaa authorization exec "{{ command.authorization_list_name }}" {{ command.use_data }}
- {% endif %}
- {% else %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization exec "{{ command.authorization_list_name }}" {{ command.authorization_method }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os6_aaa.aaa_authorization.network is defined %}
- {% set aaa_authorization = os6_aaa.aaa_authorization %}
- {% if aaa_authorization.network %}
-aaa authorization network default {{ aaa_authorization.network }}
- {% else %}
-no aaa authorization network default radius
- {% endif %}
- {% endif %}
- {% endif %}
- {% if os6_aaa.aaa_accounting is defined and os6_aaa.aaa_accounting %}
- {% set aaa_accounting = os6_aaa.aaa_accounting %}
- {% if aaa_accounting.dot1x is defined %}
- {% if aaa_accounting.dot1x == "none" %}
-aaa accounting dot1x default none
- {% elif aaa_accounting.dot1x %}
-aaa accounting dot1x default {{ aaa_accounting.dot1x }} radius
- {% else %}
-no aaa accounting dot1x default
- {% endif %}
- {% endif %}
- {% endif %}
- {% if os6_aaa.aaa_server is defined and os6_aaa.aaa_server %}
- {% set aaa_server = os6_aaa.aaa_server %}
- {% if aaa_server.radius is defined and aaa_server.radius %}
- {% if aaa_server.radius.dynamic_author is defined and aaa_server.radius.dynamic_author %}
- {% set dynamic_author = aaa_server.radius.dynamic_author %}
- {% if dynamic_author.state is defined %}
- {% if dynamic_author.state == "absent" %}
-no aaa server radius dynamic-author
- {% else %}
-aaa server radius dynamic-author
- {% if dynamic_author.client is defined and dynamic_author.client %}
- {% for client in dynamic_author.client %}
- {% if ((client.state is defined and client.state) and (client.ip is defined and client.ip)) %}
- {% if client.state == "absent" %}
-no client {{ client.ip }}
- {% else %}
- {% if client.key is defined and (client.key == 0 or client.key == 7) %}
- {% if client.key_string is defined and client.key_string %}
-client {{ client.ip }} server-key {{ client.key }} {{ client.key_string }}
- {% endif %}
- {% elif client.key_string is defined and client.key_string %}
-client {{ client.ip }} server-key {{ client.key_string }}
- {% else %}
-client {{ client.ip }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if dynamic_author.auth_type is defined %}
- {% if dynamic_author.auth_type %}
-auth-type {{ dynamic_author.auth_type }}
- {% else %}
-no auth-type
- {% endif %}
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml
deleted file mode 100644
index 9fceb3895..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/main.os6.yaml
+++ /dev/null
@@ -1,111 +0,0 @@
----
-# vars file for dellemc.os6.os6_aaa,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_aaa:
- tacacs_server:
- key: 0
- key_string: aacdsvdfsvfsfvfsv
- host:
- - ip: 30.0.0.10
- key:
- key_string: ahvksjvskjvbkjsd
- auth_port: 6
- timeout: 6
- state: absent
- radius_server:
- key: 0
- key_string: ahvksjvskjvb
- retransmit: 4
- timeout: 5
- host:
- - ip: 40.0.0.10
- key: 0
- key_string: ahvksjvskjvbkjsd
- retransmit: 4
- auth_port: 6
- timeout: 6
- state: absent
- acct:
- - ip: 10.0.0.1
- key: 0
- key_string: asvkbjfssvfsf
- auth_port: 2
- state: present
- auth:
- - ip: 10.0.0.2
- key: 0
- key_string: asdnksfnfnksnddjknsdn
- name: radius
- usage: all
- priority: 2
- timeout: 2
- retransmit: 5
- auth_port: 3
- deadtime: 6
- attribute:
- - id: 6
- type: "{{attribute_type.mandatory}}"
- state: present
- - id: 31
- type: "{{attribute_type.mac_ietf_lower_case}}"
- state: present
- - id: 31
- type: "{{attribute_type.mac}}"
- state: absent
- state: present
- attribute:
- - id: 6
- type: "{{attribute_type.mandatory}}"
- state: present
- - id: 31
- type: "{{attribute_type.mac_ietf_lower_case}}"
- state: present
-
-
- aaa_authentication:
- auth_list:
- - name: default
- login_or_enable: login
- server: tacacs
- use_password: local
- state: absent
- - name: console
- server: radius
- login_or_enable: login
- use_password: local
- state: absent
- - name: tacp
- server: tacacs
- login_or_enable: enable
- use_password: enable
- state: absent
- dot1x: none
- aaa_authorization:
- exec:
- - authorization_list_name: aaa
- authorization_method: none
- use_data: local
- state: absent
- network: radius
- aaa_accounting:
- dot1x: none
- aaa_server:
- radius:
- dynamic_author:
- auth_type:
- client:
- - ip: 10.0.0.1
- key: 0
- key_string: aaskjsksdkjsdda
- state: present
- - ip: 10.0.0.2
- key:
- key_string: aaskjsksdkjsdda
- state: present
- - ip: 10.0.0.3
- key:
- key_string:
- state: present
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml
deleted file mode 100644
index e0cf41fc2..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_aaa
diff --git a/ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml
deleted file mode 100644
index dedb2f7d6..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_aaa/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_aaa \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/LICENSE b/ansible_collections/dellemc/os6/roles/os6_acl/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/README.md b/ansible_collections/dellemc/os6/roles/os6_acl/README.md
deleted file mode 100644
index f8d97abed..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/README.md
+++ /dev/null
@@ -1,118 +0,0 @@
-ACL role
-========
-
-This role facilitates the configuration of access-control lists (ACLs). It supports the configuration of IPv4, IPv6, and MAC ACLs, and the assignment of ACLs to interfaces as ingress or egress filters. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The ACL role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable (a short deletion snippet follows the key table below)
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os6_acl keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os6 |
-| ``name`` | string (required) | Configures the name of the access-control list | os6 |
-| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os6 |
-| ``remark.description`` | string | Configures the remark description | os6 |
-| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os6 |
-| ``entries`` | list | Configures ACL rules (see ``entries.*``) | os6 |
-| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os6 |
-| ``entries.seq_number`` | integer (required) | Specifies the sequence number of the ACL rule | os6 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os6 |
-| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os6 |
-| ``entries.match_condition`` | string (required): any/\<srcip>/\<dstip>/\<srcmask>/\<dstmask> | Specifies the match criteria in string format | os6 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os6 |
-| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os6 |
-| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os6 |
-| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os6 |
-| ``stage_ingress.seq_number`` | integer | Configures the sequence number (greater than 0) to rank precedence for this interface and direction | os6 |
-| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os6 |
-| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os6 |
-| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os6 |
-| ``stage_egress.seq_number`` | integer | Configures the sequence number (greater than 0) to rank precedence for this interface and direction | os6 |
-| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
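-
-As a hedged illustration (not part of the original key table), the ``state`` key can be used to remove a previously configured ACL; this sketch assumes an existing IPv4 ACL named *ssh-only*:
-
-    os6_acl:
-      - type: ipv4
-        name: ssh-only
-        state: absent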
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_acl* role to configure an IPv4 ACL and to apply it to interfaces as ingress and egress filters. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default it is set to false. The example below includes a simple playbook that only references the *os6_acl* role, and a hedged host_vars sketch for enabling configuration generation follows the run command.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
- os6_acl:
- - type: ipv4
- name: ssh-only
- remark:
- - description: "ipv4remark"
- state: present
- entries:
- - number: 4
- seq_number: 1000
- permit: true
- protocol: tcp
- match_condition: any any
- state: present
- stage_ingress:
- - name: vlan 30
- state: present
- seq_number: 50
- stage_egress:
- - name: vlan 40
- state: present
- seq_number: 40
- state: present
-
-**Simple playbook to setup system - switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_acl
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
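-**Configuration generation host_vars sketch (illustrative)**
-
-The following is a hedged sketch rather than part of the original role documentation; it assumes the same *switch1* entry and the *os6_acl* variable shown above, and adds only the variables needed to write the generated ACL commands to a .part file under *build_dir*:
-
-    hostname: switch1
-    ansible_network_os: dellemc.os6.os6
-    os6_cfg_generate: true
-    build_dir: ../temp/temp_os6
-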
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml
deleted file mode 100644
index 92931d8bf..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_acl \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml
deleted file mode 100644
index eeab7f6fc..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_acl \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml
deleted file mode 100644
index c7abf91b4..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_acl role facilitates the configuration of access control list (ACL) attributes in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml
deleted file mode 100644
index 6ead1fa96..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating ACL configuration for os6"
- template:
- src: os6_acl.j2
- dest: "{{ build_dir }}/acl6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning ACL configuration for os6"
- dellemc.os6.os6_config:
- src: os6_acl.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2 b/ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2
deleted file mode 100644
index 3d47c2eb7..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/templates/os6_acl.j2
+++ /dev/null
@@ -1,202 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure ACL commands for os6 devices
-
-os6_acl:
- - name: macl-implicit
- type: mac
- remark:
- - description: 1
- number: 3
- state: present
- entries:
- - number: 4
- seq_number: 1000
- permit: false
- protocol:
- match_condition: any 0000.1F3D.084B 0000.0000.0000
- state: present
- - number: 5
- seq_number: 1001
- permit: true
- protocol:
- match_condition: any any 0x0806
- state: present
- - number: 6
- seq_number: 2002
- permit: deny
- protocol:
- match_condition: any any
- state:
- stage_ingress:
- - name: vlan 30
- state: present
- seq_number: 40
- - name: vlan 50
- state: present
- seq_number: 50
- stage_egress:
- - name:
- state:
- seq_number:
- state: present
-#####################################}
-{% if os6_acl is defined and os6_acl %}
-{% set acl_dict = {} %}
- {% for val in os6_acl %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.type is defined and val.type == "ipv4" %}
-no ip access-list {{ val.name }}
- {% elif val.type is defined and val.type == "ipv6" %}
-no ipv6 access-list {{ val.name }}
- {% elif val.type is defined and val.type == "mac" %}
-no mac access-list extended {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.type is defined and (val.type == "ipv4" or val.type == "ipv6" or val.type == "mac") %}
- {% if val.type == "mac" %}
-{{ val.type }} access-list extended {{ val.name }}
- {% elif val.type == "ipv4" %}
-ip access-list {{ val.name }}
- {% else %}
-{{ val.type }} access-list {{ val.name }}
- {% endif %}
- {% if val.remark is defined and val.remark %}
- {% for remark in val.remark %}
- {% if remark.description is defined and remark.description %}
- {% if remark.state is defined and remark.state == "absent" %}
-no remark {{ remark.description }}
- {% else %}
-remark {{ remark.description }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if val.entries is defined and val.entries %}
- {% for rule in val.entries %}
- {% if rule.seq_number is defined and rule.seq_number %}
- {% if rule.state is defined and rule.state == "absent" %}
-no {{ rule.seq_number }}
- {% else %}
-{% set seq_num = rule.seq_number %}
- {% if rule.permit is defined %}
- {% if rule.permit %}
- {% set is_permit = "permit" %}
- {% else %}
- {% set is_permit = "deny" %}
- {% endif %}
- {% endif %}
- {% if rule.protocol is defined and rule.protocol %}
- {% set protocol = rule.protocol %}
- {% else %}
- {% set protocol = "" %}
- {% endif %}
- {% if rule.protocol is defined and rule.protocol and rule.match_condition is defined and rule.match_condition %}
-{{ seq_num }} {{ is_permit }} {{ protocol }} {{ rule.match_condition }}
- {% elif rule.protocol is defined and rule.protocol %}
-{{ seq_num }} {{ is_permit }} {{ protocol }}
- {% elif rule.match_condition is defined and rule.match_condition %}
-{{ seq_num }} {{ is_permit }} {{ rule.match_condition }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-exit
- {% if val.stage_ingress is defined and val.stage_ingress %}
- {% for intf in val.stage_ingress %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-{% set key = intf.name %}
-{% set key_val_list = acl_dict.setdefault(key, []) %}
- {% if val.type is defined and val.type == "mac" %}
-{% set value = ("no mac access-group " + val.name + " " + "in") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% elif val.type is defined and val.type == "ipv4" %}
-{% set value = ("no ip access-group " + val.name + " " + "in") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf.name is defined and intf.name %}
-{% set key = intf.name %}
-{% set key_val_list = acl_dict.setdefault(key, []) %}
- {% if val.type is defined and val.type == "mac" %}
- {% if intf.seq_number is defined and intf.seq_number %}
-{% set value1 = intf.seq_number|string %}
-{% set value = ("mac access-group " + val.name + " " + "in " + value1) %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% else %}
-{% set value = ("mac access-group " + val.name + " " + "in ") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% endif %}
- {% elif val.type is defined and val.type == "ipv4" %}
- {% if intf.seq_number is defined and intf.seq_number %}
-{% set value1 = intf.seq_number|string %}
-{% set value = ("ip access-group " + val.name + " " + "in " + value1) %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% else %}
-{% set value = ("ip access-group " + val.name + " " + "in ") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if val.stage_egress is defined and val.stage_egress %}
- {% for intf in val.stage_egress %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-{% set key = intf.name %}
-{% set key_val_list = acl_dict.setdefault(key, []) %}
- {% if val.type is defined and val.type == "mac" %}
-{% set value = ("no mac access-group " + val.name + " " + "out") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% elif val.type is defined and val.type == "ipv4" %}
-{% set value = ("no ip access-group " + val.name + " " + "out") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf.name is defined and intf.name %}
-{% set key = intf.name %}
-{% set key_val_list = acl_dict.setdefault(key, []) %}
- {% if val.type is defined and val.type == "mac" %}
- {% if intf.seq_number is defined and intf.seq_number %}
-{% set value1 = intf.seq_number|string %}
-{% set value = ("mac access-group " + val.name + " " + "out " + value1) %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% else %}
-{% set value = ("mac access-group " + val.name + " " + "out ") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% endif %}
- {% elif val.type is defined and val.type == "ipv4" %}
- {% if intf.seq_number is defined and intf.seq_number %}
-{% set value1 = intf.seq_number|string %}
-{% set value = ("ip access-group " + val.name + " " + "out " + value1) %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% else %}
-{% set value = ("ip access-group " + val.name + " " + "out ") %}
-{% set acl_val = acl_dict[key].append(value) %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% for intf_name, acl_list in acl_dict.items() %}
-interface {{ intf_name }}
- {% for acl in acl_list %}
-{{ acl }}
- {% endfor %}
-exit
- {% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml
deleted file mode 100644
index 43c3f17ce..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/tests/main.os6.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-# vars file for dellemc.os6.os6_acl,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-
-os6_acl:
- - name: macl-implicit
- type: mac
- remark:
- - description: 1
- state: present
- entries:
- - number: 4
- seq_number: 1000
- permit: false
- protocol:
- match_condition: any 0000.1F3D.084B 0000.0000.0000
- state: present
- - number: 5
- seq_number: 1001
- permit: true
- protocol:
- match_condition: any any 0x0806
- state: present
- - number: 6
- seq_number: 2002
- permit: deny
- protocol:
- match_condition: any any
- state:
- stage_ingress:
- - name: vlan 30
- state: present
- seq_number: 40
- - name: vlan 50
- state: present
- seq_number: 50
- stage_egress:
- - name: vlan 40
- state: present
- seq_number:
- state: present
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml
deleted file mode 100644
index f8ccf3be5..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_acl
diff --git a/ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml
deleted file mode 100644
index f294863ad..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_acl/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_acl
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/README.md b/ansible_collections/dellemc/os6/roles/os6_bgp/README.md
deleted file mode 100644
index 8b7b6f6ca..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/README.md
+++ /dev/null
@@ -1,153 +0,0 @@
-BGP role
-========
-
-This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The BGP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If variable `os6_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable (see the removal example after the *os6_bgp keys* table below)
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-> **NOTE**: IP routing needs to be enabled on the switch prior to configuring BGP via the *os6_bgp* role.
-
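-For example, a preliminary task along the following lines can enable routing before the role is applied. This is only a sketch: it assumes the `dellemc.os6.os6_config` module's *lines* option and that `ip routing` is the required global command on your platform.
-
-    - name: Enable IP routing before applying os6_bgp
-      dellemc.os6.os6_config:
-        lines:
-          - ip routing
-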
-**os6_bgp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os6 |
-| ``router_id`` | string | Configures the IP address of the local BGP router instance | os6 |
-| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os6 |
-| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os6 |
-| ``ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os6 |
-| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os6 |
-| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os6 |
-| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os6 |
-| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os6 |
-| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os6 |
-| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os6 |
-| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os6 |
-| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os6 |
-| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os6 |
-| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os6 |
-| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os6 |
-| ``neighbor.timer`` | string | Configures neighbor timers (<int> <int>); for example, 5 10, where 5 is the keepalive interval and 10 is the hold time; leave the field blank to remove the timer configuration | os6 |
-| ``neighbor.default_originate`` | boolean: true, false\* | Configures default originate routes to the BGP neighbor; leave the field blank to remove the default originate routes | os6 |
-| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os6 |
-| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os6 |
-| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os6 |
-| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os6 |
-| ``neighbor.src_loopback_state`` | string: absent,present\* | Deletes the source for routing packets if set to absent | os6 |
-| ``neighbor.ebgp_multihop`` | integer | Configures the maximum hop count allowed for eBGP neighbors that are not directly connected (default 255); leave the field blank to remove the maximum hop count value | os6 |
-| ``neighbor.subnet`` | string (required) | Configures the passive BGP neighbor to this subnet | os6 |
-| ``neighbor.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4 BGP neighbor if set to absent | os6 |
-| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os6 |
-| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os6 |
-| ``redistribute.route_type`` | string (required): static,connected | Configures the name of the routing protocol to redistribute | os6 |
-| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os6 |
-| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os6 |
-| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
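-For example, a minimal *os6_bgp* variable block such as the following (values are illustrative) removes a previously configured IPv4 network and an IPv4 neighbor by setting the corresponding *state* keys to absent:
-
-    os6_bgp:
-      asn: 11
-      ipv4_network:
-        - address: 102.1.1.0 255.255.255.255
-          state: absent
-      neighbor:
-        - ip: 192.168.10.2
-          type: ipv4
-          remote_asn: 12
-          state: absent
-      state: present
-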
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. |
-| ``ansible_network_os`` | yes | os6, null\* | This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_bgp* role to configure the BGP network and neighbors. It creates a *hosts* file with the switch details and a *host_vars* file with the connection variables and the corresponding role variables.
-
-When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os6_bgp* role.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_bgp:
- asn: 11
- router_id: 192.168.3.100
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- ipv4_network:
- - address: 102.1.1.0 255.255.255.255
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - ip: 192.168.10.2
- type: ipv4
- remote_asn: 12
- timer: 5 10
- default_originate: False
- peergroup: per
- admin: up
- state: present
- - ip: 2001:4898:5808:ffa2::1
- type: ipv6
- remote_asn: 14
- peergroup: per
- state: present
- - name: peer1
- type: peergroup
- remote_asn: 14
- ebgp_multihop: 4
- subnet: 10.128.5.192/27
- state: present
- - ip: 172.20.12.1
- type: ipv4
- remote_asn: 64640
- timer: 3 9
- redistribute:
- - route_type: static
- address_type: ipv4
- state: present
- - route_type: connected
- address_type: ipv6
- state: present
- state: present
-
-**Simple playbook to configure BGP — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_bgp
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml
deleted file mode 100644
index 58e963bb7..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml
deleted file mode 100644
index e43b3fd03..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml
deleted file mode 100644
index eb2d26ee4..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_bgp role facilitates the configuration of BGP attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml
deleted file mode 100644
index acc2257fc..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating BGP configuration for os6"
- template:
- src: os6_bgp.j2
- dest: "{{ build_dir }}/bgp6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning BGP configuration for os6"
- dellemc.os6.os6_config:
- src: os6_bgp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2 b/ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2
deleted file mode 100644
index 4dd101977..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/templates/os6_bgp.j2
+++ /dev/null
@@ -1,255 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-
-Purpose:
-Configure BGP commands for os6 Devices
-
-os6_bgp:
- asn: 11
- router_id: 1.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- ipv4_network:
- - address: 101.1.2.0 255.255.255.0
- state: present
- ipv6_network:
- - address: 2001:4898:5808:ffa0::/126
- state: present
- neighbor:
- - type: ipv4
- ip: 10.10.234.16
- remote_asn: 64818
- timer: 2 5
- default_originate: True
- peergroup: MUX_HNV_ACCESS
- admin: up
- state: present
- - type: ipv6
- ip: 2001:4898:5808:ffa2::1
- timer: 2 4
- default_originate: True
- remote_asn: 64818
- peergroup: MUX_HNV_ACCESS
- state: present
- - type: peergroup
- name: MUX_HNV_ACCESS
- ebgp_multihop: 4
- subnet: 10.128.5.192/27
- remote_asn: 64918
- state: present
- redistribute:
- - route_type: connected
- address_type: ipv6
- state: present
- state: present
-
-################################}
-{% if os6_bgp is defined and os6_bgp %}
-{% set bgp_vars = os6_bgp %}
-{% if bgp_vars.asn is defined and bgp_vars.asn %}
- {% if bgp_vars.state is defined and bgp_vars.state=="absent" %}
-no router bgp {{ bgp_vars.asn }}
- {% else %}
-{# Add Feature to the switch #}
-router bgp {{ bgp_vars.asn }}
- {% if bgp_vars.router_id is defined %}
- {% if bgp_vars.router_id %}
-bgp router-id {{ bgp_vars.router_id }}
- {% else %}
-no bgp router-id
- {% endif %}
- {% endif %}
- {% if bgp_vars.maxpath_ebgp is defined %}
- {% if bgp_vars.maxpath_ebgp %}
-maximum-paths {{ bgp_vars.maxpath_ebgp }}
- {% else %}
-no maximum-paths
- {% endif %}
- {% endif %}
- {% if bgp_vars.maxpath_ibgp is defined %}
- {% if bgp_vars.maxpath_ibgp %}
-maximum-paths ibgp {{ bgp_vars.maxpath_ibgp }}
- {% else %}
-no maximum-paths ibgp
- {% endif %}
- {% endif %}
- {% if bgp_vars.ipv4_network is defined and bgp_vars.ipv4_network %}
- {% for net in bgp_vars.ipv4_network %}
- {% if net.address is defined and net.address %}
- {% set ip_and_mask= net.address.split(" ") %}
- {% if net.state is defined and net.state=="absent" %}
-{# remove BGP network announcement #}
-no network {{ ip_and_mask[0] }} mask {{ ip_and_mask[1] }}
-{# Add BGP network announcement #}
- {% else %}
-network {{ ip_and_mask[0] }} mask {{ ip_and_mask[1] }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if bgp_vars.neighbor is defined and bgp_vars.neighbor %}
- {% for neighbor in bgp_vars.neighbor %}
- {% if neighbor.type is defined %}
- {% if neighbor.type == "ipv4" or neighbor.type =="ipv6" %}
- {% if neighbor.ip is defined and neighbor.ip %}
- {% set tag_or_ip = neighbor.ip %}
- {% if neighbor.remote_asn is defined and neighbor.remote_asn %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
-no neighbor {{ tag_or_ip }} remote-as
- {% if neighbor.peergroup is defined and neighbor.peergroup %}
- {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %}
-no neighbor {{ tag_or_ip }} inherit peer {{ neighbor.peergroup }}
- {% endif %}
- {% endif %}
- {% else %}
-neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }}
- {% if neighbor.timer is defined %}
- {% if neighbor.timer %}
-neighbor {{ tag_or_ip }} timers {{ neighbor.timer }}
- {% else %}
-no neighbor {{ tag_or_ip }} timers
- {% endif %}
- {% endif %}
- {% if neighbor.default_originate is defined %}
- {% if neighbor.default_originate %}
-neighbor {{ tag_or_ip }} default-originate
- {% else %}
-no neighbor {{ tag_or_ip }} default-originate
- {% endif %}
- {% endif %}
- {% if neighbor.ebgp_multihop is defined %}
- {% if neighbor.ebgp_multihop %}
-neighbor {{ tag_or_ip }} ebgp-multihop {{ neighbor.ebgp_multihop }}
- {% else %}
-no neighbor {{ tag_or_ip }} ebgp-multihop
- {% endif %}
- {% endif %}
- {% if neighbor.src_loopback is defined and neighbor.src_loopback|int(-1) != -1 %}
- {% if neighbor.src_loopback_state is defined and neighbor.src_loopback_state == "absent" %}
-no neighbor {{ tag_or_ip }} update-source
- {% else %}
-neighbor {{ tag_or_ip }} update-source Loopback {{ neighbor.src_loopback }}
- {% endif %}
- {% endif %}
- {% if neighbor.peergroup is defined and neighbor.peergroup %}
- {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %}
-no neighbor {{ tag_or_ip }} inherit peer {{ neighbor.peergroup }}
- {% else %}
-neighbor {{ tag_or_ip }} inherit peer {{ neighbor.peergroup }}
- {% endif %}
- {% endif %}
- {% if neighbor.admin is defined %}
- {% if neighbor.admin == "up" %}
-no neighbor {{ tag_or_ip }} shutdown
- {% elif neighbor.admin =="down" %}
-neighbor {{ tag_or_ip }} shutdown
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif neighbor.type == "peergroup" %}
- {% if neighbor.name is defined and neighbor.name %}
- {% set tag_or_ip = neighbor.name %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
-no template peer {{ tag_or_ip }}
- {% else %}
- {% if neighbor.subnet is defined and neighbor.subnet %}
- {% if neighbor.subnet_state is defined and neighbor.subnet_state == "absent" %}
-no bgp listen range {{ neighbor.subnet }}
- {% else %}
-bgp listen range {{ neighbor.subnet }} inherit peer {{ tag_or_ip }}
- {% endif %}
- {% endif %}
-template peer {{ tag_or_ip }}
- {% if neighbor.remote_asn is defined and neighbor.remote_asn %}
- {% if neighbor.remote_asn_state is defined and neighbor.remote_asn_state == "absent" %}
-no remote-as {{ neighbor.remote_asn }}
- {% else %}
-remote-as {{ neighbor.remote_asn }}
- {% endif %}
- {% endif %}
- {% if neighbor.timer is defined %}
- {% if neighbor.timer %}
-timers {{ neighbor.timer }}
- {% else %}
-no timers
- {% endif %}
- {% endif %}
- {% if neighbor.ebgp_multihop is defined %}
- {% if neighbor.ebgp_multihop %}
-ebgp-multihop {{ neighbor.ebgp_multihop }}
- {% else %}
-no ebgp-multihop
- {% endif %}
- {% endif %}
- {% if neighbor.src_loopback is defined and neighbor.src_loopback|int(-1) != -1 %}
- {% if neighbor.src_loopback_state is defined and neighbor.src_loopback_state == "absent" %}
-no update-source
- {% else %}
-update-source Lo{{ neighbor.src_loopback }}
- {% endif %}
- {% endif %}
- {% if neighbor.admin is defined %}
- {% if neighbor.admin == "up" %}
-no shutdown
- {% elif neighbor.admin =="down" %}
-shutdown
- {% endif %}
- {% endif %}
- {% if neighbor.default_originate is defined %}
-address-family ipv4
- {% if neighbor.default_originate %}
-default-originate
- {% else %}
-no default-originate
- {% endif %}
-exit
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if bgp_vars.ipv6_network is defined and bgp_vars.ipv6_network %}
- {% for net in bgp_vars.ipv6_network %}
- {% if net.address is defined and net.address %}
-address-family ipv6
- {% if net.state is defined and net.state=="absent" %}
-{# remove BGP network announcement #}
-no network {{ net.address }}
-{# Add BGP network announcement #}
- {% else %}
-network {{ net.address }}
- {% endif %}
-exit
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if bgp_vars.redistribute is defined and bgp_vars.redistribute %}
- {% for routes in bgp_vars.redistribute %}
- {% if routes.route_type is defined and routes.route_type %}
- {% if routes.address_type is defined and routes.address_type=="ipv6" %}
-address-family ipv6
- {% if routes.state is defined and routes.state =="absent" %}
-no redistribute {{ routes.route_type }}
- {% else %}
-redistribute {{ routes.route_type }}
- {% endif %}
-exit
- {% elif routes.address_type is defined and routes.address_type=="ipv4" %}
- {% if routes.state is defined and routes.state =="absent" %}
-no redistribute {{ routes.route_type }}
- {% else %}
-redistribute {{ routes.route_type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-exit
-{% endif %}
-{% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml
deleted file mode 100644
index 388cb4900..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/inventory.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml
deleted file mode 100644
index c7625c60b..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/main.os6.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-# vars file for dellemc.os6.os6_bgp,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
- os6_bgp:
- asn: 11
- router_id: 1.1.1.1
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- ipv4_network:
- - address: 101.1.2.0 255.255.255.0
- state: present
- ipv6_network:
- - address: 2001:4898:5808:ffa0::/126
- state: present
- neighbor:
- - type: ipv4
- ip: 10.10.234.16
- remote_asn: 64818
- timer: 2 5
- default_originate: True
- peergroup: MUX_HNV_ACCESS
- admin: up
- src_loopback: 2
- src_loopback_state: present
- state: present
- - type: ipv6
- ip: 2001:4898:5808:ffa2::1
- timer: 2 4
- default_originate: True
- ebgp_multihop: 3
- remote_asn: 64818
- peergroup: MUX_HNV_ACCESS
- state: present
- - type: peergroup
- name: MUX_HNV_ACCESS
- timer: 2 4
- ebgp_multihop: 4
- remote_asn: 64918
- subnet: 10.128.5.192/27
- state: present
- redistribute:
- - route_type: connected
- address_type: ipv6
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml
deleted file mode 100644
index b92fb6cac..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_bgp
diff --git a/ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml
deleted file mode 100644
index 22d0d344a..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_bgp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/LICENSE b/ansible_collections/dellemc/os6/roles/os6_interface/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/README.md b/ansible_collections/dellemc/os6/roles/os6_interface/README.md
deleted file mode 100644
index 2c6b359fa..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/README.md
+++ /dev/null
@@ -1,110 +0,0 @@
-Interface role
-==============
-
-This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra, and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The interface role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os6_interface` (dictionary) holds a dictionary with the interface name; interface name can correspond to any of the valid OS interfaces with the unique interface identifier name
-- For physical interfaces, the interface name must be in *<interfacename> <tuple>* format; for logical interfaces, the interface must be in *<logical_interfacename> <id>* format; physical interface name can be *Te1/0/1* for os6 devices
-- For interface ranges, the interface name must be in *range <interface_type> <node/slot/port[:subport]-node/slot/port[:subport]>* format
-- Logical interface names can be *vlan 1* or *port-channel 1*
-- Variables and values are case-sensitive
-
-> **NOTE**: Only define supported variables for the interface type. For example, do not define the *switchport* variable for a logical interface, and do not define an IP address for physical interfaces in OS6 devices.
-
-**os6_interface name keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``desc`` | string | Configures a single line interface description | os6 |
-| ``portmode`` | string | Configures port-mode according to the device type | os6 (access and trunk) |
-| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os6 |
-| ``suppress_ra`` | string: present,absent | Configures IPv6 router advertisements if set to present | os6 |
-| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os6 |
-| ``ip_and_mask`` | string | Configures the specified IP address to the interface VLAN on os6 devices (192.168.11.1 255.255.255.0 format) | os6 |
-| ``ipv6_and_mask`` | string | Configures the specified IPv6 address to the interface VLAN on os6 devices (2001:4898:5808:ffa2::1/126 format) | os6 |
-| ``ipv6_reachabletime`` | integer | Configures the reachability time for IPv6 neighbor discovery (0 to 3600000); leave the field blank to remove the reachability time | os6 |
-| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os6 |
-| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os6 |
-| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os6 |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. |
-| ``ansible_network_os`` | yes | os6, null\* | This value is used to load the correct terminal and cliconf plugins to communicate with the remote device. |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_interface* role to set up description, MTU, admin status, portmode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os6_interface* role.
-
-**Sample hosts file**
-
-    switch1 ansible_host=<ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: "switch1"
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_interface:
- Te1/0/8:
- desc: "Connected to Spine1"
- portmode: trunk
- admin: up
- vlan 100:
- admin: down
- ip_and_mask: 3.3.3.3 255.255.255.0
- ipv6_and_mask: 2002:4898:5408:faaf::1/64
- suppress_ra: present
- ip_helper:
- - ip: 10.0.0.36
- state: absent
- ipv6_reachabletime: 600000
- vlan 20:
- suppress_ra: absent
- admin: up
-
-**Simple playbook to setup system — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_interface
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml
deleted file mode 100644
index 076dd792f..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml
deleted file mode 100644
index a46800e06..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_interface
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml
deleted file mode 100644
index ed39e191c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_interface role facilitates the configuration of interface attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml
deleted file mode 100644
index 198d86007..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating interface configuration for os6"
- template:
- src: os6_interface.j2
- dest: "{{ build_dir }}/intf6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning interface configuration for os6"
- dellemc.os6.os6_config:
- src: os6_interface.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2 b/ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2
deleted file mode 100644
index 72e72eaad..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/templates/os6_interface.j2
+++ /dev/null
@@ -1,94 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-
-Purpose:
-Configure interface commands for os6 Devices.
-
-os6_interface:
- Te1/0/1:
- desc: "connected to spine1"
- portmode: trunk
- admin: up
- vlan 100:
- ip_type_dynamic: False
- ip_and_mask: 3.3.3.3 255.255.255.0
- suppress_ra: present
- vlan 101:
- ipv6_and_mask: 2001:db8:3c4d:15::/64
- ipv6_reachabletime: 6000
- ip_helper:
- - ip: 10.0.0.33
- state: present
- vlan 102:
- ip_type_dynamic: True
-
-################################}
-{% if os6_interface is defined and os6_interface %}
-{% for key in os6_interface.keys() %}
-interface {{ key }}
-{% set intf_vars = os6_interface[key] %}
-{% if intf_vars.desc is defined %}
- {% if intf_vars.desc %}
-description "{{ intf_vars.desc }}"
- {% else %}
-no description
- {% endif %}
-{% endif %}
-{% if intf_vars.portmode is defined %}
- {% if intf_vars.portmode %}
-switchport mode {{ intf_vars.portmode }}
- {% else %}
-no switchport mode
- {% endif %}
-{% endif %}
-{% if intf_vars.admin is defined %}
- {% if intf_vars.admin == "up"%}
-no shutdown
- {% elif intf_vars.admin == "down" %}
-shutdown
- {% endif %}
-{% endif %}
-{% if intf_vars.ip_type_dynamic is defined and intf_vars.ip_type_dynamic %}
-ip address dhcp
-{% elif intf_vars.ip_and_mask is defined %}
- {% if intf_vars.ip_and_mask %}
-ip address {{ intf_vars.ip_and_mask }}
- {% else %}
-no ip address
- {% endif %}
-{% endif %}
-{% if intf_vars.suppress_ra is defined %}
- {% if intf_vars.suppress_ra == "present" %}
-ipv6 nd suppress-ra
- {% else %}
-no ipv6 nd suppress-ra
- {% endif %}
-{% endif %}
-{% if intf_vars.ipv6_and_mask is defined %}
- {% if intf_vars.ipv6_and_mask %}
-ipv6 address {{ intf_vars.ipv6_and_mask }}
- {% else %}
-no ipv6 address
- {% endif %}
-{% endif %}
-{% if intf_vars.ipv6_reachabletime is defined %}
- {% if intf_vars.ipv6_reachabletime %}
-ipv6 nd reachable-time {{ intf_vars.ipv6_reachabletime }}
- {% else %}
-no ipv6 nd reachable-time
- {% endif %}
-{% endif %}
-{% if intf_vars.ip_helper is defined and intf_vars.ip_helper %}
- {% for helper in intf_vars.ip_helper %}
- {% if helper.ip is defined and helper.ip %}
- {% if helper.state is defined and helper.state=="absent" %}
-no ip helper-address {{ helper.ip }}
- {% else %}
-ip helper-address {{ helper.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-exit
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml
deleted file mode 100644
index 5513a7ebc..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/tests/main.os6.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
-# vars file for dellemc.os6.os6_interface
-# Sample variables for OS6 device
-
-os6_interface:
- Te1/0/2:
- desc: "Connected to Spine1"
- portmode: trunk
- admin: up
- Te1/0/1:
- desc: "Connected to Access"
- portmode: access
- admin: up
- vlan 100:
- ip_type_dynamic: True
- suppress_ra: present
- ip_and_mask: 3.3.3.3 255.255.255.0
- ip_helper:
- - ip: 10.0.0.36
- state: present
- ipv6_reachabletime: 600000
- vlan 101:
- ipv6_and_mask: 2001:db8:3c4d:15::/64
- suppress_ra: absent
- vlan 102:
- ip_type_dynamic: True
- suppress_ra:
- vlan 105:
- ip_and_mask: 1.1.1.1 255.255.255.0 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml
deleted file mode 100644
index ffd90b31b..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml
deleted file mode 100644
index ab9d1f82e..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_interface/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/LICENSE b/ansible_collections/dellemc/os6/roles/os6_lag/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/README.md b/ansible_collections/dellemc/os6/roles/os6_lag/README.md
deleted file mode 100644
index ff82984dd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-LAG role
-========
-
-This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports configuring an interface as a static or dynamic LAG, the hash scheme on os6 devices, and the minimum number of required links. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The LAG role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- The role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- The *os6_lag* object drives the tasks in this role
-- `os6_lag` (dictionary) contains the hostname (dictionary)
-- The hostname is the value of the *hostname* variable that corresponds to the name of the OS device
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os6_lag` (dictionary) holds a dictionary with the port-channel ID key in `Po <ID>` format (1 to 128 for os6)
-- Variables and values are case-sensitive
-
-**port-channel ID keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os6 |
-| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 8); leave the field blank to remove the minimum-links setting (see the sketch after this table) | os6 |
-| ``hash`` | integer | Configures the hash value for OS6 devices (1 to 7); leave the field blank to remove the hash value | os6 |
-| ``channel_members`` | list | Specifies the list of port members to be associated to the port channel (see ``channel_members.*``) | os6 |
-| ``channel_members.port`` | string | Specifies valid OS6 interface names to be configured as port channel members | os6 |
-| ``channel_members.state`` | string: absent,present | Deletes the port member association if set to absent | os6 |
-| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port channel ID if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
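-For example, a minimal sketch of clearing a previously configured hash and minimum-links value by leaving the fields blank (the port-channel ID used here is only illustrative):
-
-    os6_lag:
-      Po 10:
-        hash:
-        min_links:
-        state: present
-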
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used. |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable. |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device. |
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_lag* role to set up the port-channel ID and description, and to configure the hash algorithm and minimum links for the LAG. Channel members can be configured for the port-channel in either static or dynamic mode. You can also delete the LAG using the port-channel ID, or delete the members associated with it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example also writes a simple playbook that only references the *os6_lag* role.
-
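-To also generate the commands offline as a .part file, enable the generation flag in *host_vars*; a minimal sketch (the `build_dir` value matches the sample below):
-
-    os6_cfg_generate: true
-    build_dir: ../temp/temp_os6
-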
-**Sample hosts file**
-
-    switch1 ansible_host=<ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_lag:
- Po 127:
- type: static
- hash: 7
- min_links: 3
- channel_members:
- - port: Fo4/0/1
- state: present
- - port: Fo4/0/1
- state: present
- state: present
-
-**Simple playbook to setup system — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_lag
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml
deleted file mode 100644
index e9a1b31fe..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_lag \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml
deleted file mode 100644
index 859d5c2bd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_lag
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml
deleted file mode 100644
index 897a47aee..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_lag role facilitates the configuration of LAG attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml
deleted file mode 100644
index 832d5493f..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating LAG configuration for os6"
- template:
- src: os6_lag.j2
- dest: "{{ build_dir }}/lag6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning LAG configuration for os6"
- dellemc.os6.os6_config:
- src: os6_lag.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2 b/ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2
deleted file mode 100644
index 39b0a53bd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/templates/os6_lag.j2
+++ /dev/null
@@ -1,78 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-
-Purpose:
-Configure LAG commands for os6 Devices.
-
-os6_lag:
- Po 1:
- type: static
- min_links: 2
- hash: 7
- channel_members:
- - port: Te1/0/2
- state: present
- - port: Te1/0/1
- state: absent
- state: present
-
-################################}
-{% if os6_lag is defined and os6_lag %}
-{% for key in os6_lag.keys() %}
-{% set channel_id = key.split(" ") %}
-{% set lag_vars = os6_lag[key] %}
- {% if lag_vars.state is defined and lag_vars.state=="absent" %}
-interface port-channel {{ channel_id[1] }}
-no shutdown
-no description
-no hashing-mode
-exit
- {% else %}
-interface port-channel {{ channel_id[1] }}
- {% if lag_vars.hash is defined %}
- {% if lag_vars.hash %}
-hashing-mode {{ lag_vars.hash }}
- {% else %}
-no hashing-mode
- {% endif %}
- {% endif %}
- {% if lag_vars.min_links is defined %}
- {% if lag_vars.min_links %}
-port-channel min-links {{ lag_vars.min_links }}
- {% else %}
-no port-channel min-links
- {% endif %}
- {% endif %}
-exit
- {% if lag_vars.channel_members is defined %}
- {% for ports in lag_vars.channel_members %}
- {% if lag_vars.type is defined and lag_vars.type == "static" %}
- {% if ports.port is defined and ports.port %}
- {% if ports.state is defined and ports.state=="absent" %}
-interface {{ ports.port }}
-no channel-group
-exit
- {% else %}
-interface {{ ports.port }}
-channel-group {{ channel_id[1] }} mode on
-exit
- {% endif %}
- {% endif %}
- {% elif lag_vars.type is defined and lag_vars.type == "dynamic" %}
- {% if ports.port is defined and ports.port %}
- {% if ports.state is defined and ports.state=="absent" %}
-interface {{ ports.port }}
-no channel-group
-exit
- {% else %}
-interface {{ ports.port }}
-channel-group {{ channel_id[1] }} mode active
-exit
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
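-{################################
-
-Illustration only (this comment emits nothing): with the sample values shown in
-the header comment above, the rendered configuration would be roughly:
-
-interface port-channel 1
-hashing-mode 7
-port-channel min-links 2
-exit
-interface Te1/0/2
-channel-group 1 mode on
-exit
-interface Te1/0/1
-no channel-group
-exit
-
-################################}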
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml
deleted file mode 100644
index 77728edb5..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/tests/main.os6.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# vars file for dellemc.os6.os6_lag
-# Sample variables for OS6 device
-
-os6_lag:
- Po 128:
- type: static
- hash: 7
- min_links: 3
- channel_members:
- - port: Te1/0/1
- state: absent
- - port: Te1/0/2
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml
deleted file mode 100644
index 44ee544ef..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_lag
diff --git a/ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml
deleted file mode 100644
index 9cf929174..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lag/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_lag \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/README.md b/ansible_collections/dellemc/os6/roles/os6_lldp/README.md
deleted file mode 100644
index d29653b29..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/README.md
+++ /dev/null
@@ -1,114 +0,0 @@
-LLDP role
-=========
-
-This role facilitates the configuration of Link Layer Discovery Protocol (LLDP) attributes at the global and interface levels. It supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at the global and interface levels. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The LLDP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- If the `os6_cfg_generate` variable is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the sketch after this list)
-- Variables and values are case-sensitive
-
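-A minimal sketch of the empty-value convention (illustrative only): leaving `timers.interval` blank, for example, causes the role's template to emit roughly `no lldp timers interval` instead of a set command.
-
-    os6_lldp:
-      timers:
-        interval:
-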
-**os6_lldp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``timers`` | dictionary | Configures the LLDP global timer value | os6 |
-| ``timers.interval`` | integer | Configures the interval in seconds to transmit local LLDP data (5 to 32768); leave the field blank to remove the interval | os6 |
-| ``timers.hold`` | integer | Configures the interval multiplier to set local LLDP data TTL (2 to 10); leave the field blank to remove the interval multiplier | os6 |
-| ``timers.reinit`` | integer | Configures the reinit value (1 to 10); leave the field blank to remove the reinit value | os6 |
-| ``notification_interval`` | integer | Configures the minimum interval to send remote data change notifications (5 to 3600); leave the field blank to remove the minimum interval | os6 |
-| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os6 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement (see ``med.*``) | os6 |
-| ``med.global_med`` | boolean | Configures global MED TLVs advertisement | os6 |
-| ``med.fast_start_repeat_count`` | integer | Configures MED fast start repeat count value (1 to 10); leave the field blank to remove the value | os6 |
-| ``med.config_notification`` | boolean | Configures all ports to send the topology change notification | os6 |
-| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os6 |
-| ``local_interface.<interface name>`` | dictionary | Configures LLDP at the interface level (see ``<interface name>.*``) | os6 |
-| ``<interface name>.mode`` | dictionary: rx,tx | Configures LLDP mode configuration at the interface level | os6 |
-| ``<interface name>.mode.tx`` | boolean | Enables/disables LLDP transmit capability at interface level | os6 |
-| ``<interface name>.mode.rx`` | boolean | Enables/disables LLDP receive capability at interface level | os6 |
-| ``<interface name>.notification`` | boolean | Enables/disables LLDP remote data change notifications at interface level | os6 |
-| ``<interface name>.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os6 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med.*``) | os6 |
-| ``med.enable`` | boolean | Enables interface level MED capabilities | os6 |
-| ``med.config_notification`` | boolean | Configures sending the topology change notification |os6 |
-
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_lldp* role to configure the LLDP protocol. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When the `os6_cfg_generate` variable is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, this variable is set to false. The example also includes a simple playbook that references only the *os6_lldp* role.
-
-**Sample hosts file**
-
- switch1 ansible_host=<ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/os6
- os6_lldp:
- timers:
- reinit: 2
- interval: 5
- hold: 5
- notification_interval: 5
- advertise:
- med:
- global_med: true
- fast_start_repeat_count: 4
- config_notification: true
- local_interface:
- Gi1/0/1:
- mode:
- tx: true
- rx: false
- notification: true
- advertise:
- med:
- config_notification: true
- enable: true
-
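-**Approximate generated configuration**
-
-For the sample variables above, the role's template would render roughly the following OS6 commands (illustrative only; ordering may vary):
-
-    lldp timers interval 5 hold 5 reinit 2
-    lldp notification-interval 5
-    lldp med all
-    lldp med faststartrepeatcount 4
-    lldp med confignotification all
-    interface Gi1/0/1
-    lldp transmit
-    no lldp receive
-    lldp notification
-    lldp med
-    lldp med confignotification
-    exit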
-
-**Simple playbook to set up the system - switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_lldp
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml
deleted file mode 100644
index b2f3b089e..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_lldp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml
deleted file mode 100644
index d1beaa3dd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_lldp
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml
deleted file mode 100644
index 044e7b462..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os6_lldp role facilitates the configuration of Link Layer Discovery Protocol (LLDP) attributes in devices
- running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml
deleted file mode 100644
index c84ca513d..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating LLDP configuration for os6"
- template:
- src: os6_lldp.j2
- dest: "{{ build_dir }}/lldp6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning LLDP configuration for os6"
- dellemc.os6.os6_config:
- src: os6_lldp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2 b/ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2
deleted file mode 100644
index e8d2e94cf..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/templates/os6_lldp.j2
+++ /dev/null
@@ -1,159 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{###################################################
-Purpose:
-Configure LLDP commands for os6 Devices.
-
-os6_lldp:
- timers:
- reinit: 2
- interval: 5
- hold: 5
- notification_interval: 5
- advertise:
- med:
- global_med: true
- fast_start_repeat_count: 4
- config_notification: true
- local_interface:
- Gi1/0/1:
- mode:
- tx: true
- rx: false
- notification: true
- advertise:
- med:
- config_notification: true
- enable: true
-
-
-{###############################################################################################}
-{% if os6_lldp is defined and os6_lldp %}
-{% for key,value in os6_lldp.items() %}
- {% if key == "timers" %}
- {% if value %}
- {% set item = os6_lldp.timers %}
- {% if item.reinit is defined and item.reinit and item.interval is defined and item.interval and item.hold is defined and item.hold %}
-lldp timers interval {{ item.interval }} hold {{ item.hold }} reinit {{ item.reinit }}
- {% elif item.reinit is defined and item.reinit and item.interval is defined and item.interval %}
-lldp timers interval {{ item.interval }} reinit {{ item.reinit }}
- {% elif item.reinit is defined and item.reinit and item.hold is defined and item.hold %}
-lldp timers hold {{ item.hold }} reinit {{ item.reinit }}
- {% elif item.interval is defined and item.interval and item.hold is defined and item.hold %}
-lldp timers interval {{ item.interval }} hold {{ item.hold }}
- {% else %}
- {% if item.reinit is defined %}
- {% if item.reinit %}
-lldp timers reinit {{ item.reinit }}
- {% else %}
-no lldp timers reinit {{ item.reinit }}
- {% endif %}
- {% endif %}
- {% if item.interval is defined %}
- {% if item.interval %}
-lldp timers interval {{ item.interval }}
- {% else %}
-no lldp timers interval {{ item.interval }}
- {% endif %}
- {% endif %}
- {% if item.hold is defined %}
- {% if item.hold %}
-lldp timers hold {{ item.hold }}
- {% else %}
-no lldp timers hold {{ item.hold }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif key == "notification_interval" %}
- {% if value %}
-lldp notification-interval {{ value }}
- {% else %}
-no lldp notification-interval
- {% endif %}
- {% elif key == "advertise" %}
- {% if value %}
- {% for ke,valu in value.items() %}
- {% if ke == "med" %}
- {% if valu %}
- {% for med,val in valu.items() %}
- {% if med == "fast_start_repeat_count" %}
- {% if val %}
-lldp med faststartrepeatcount {{ val }}
- {% else %}
-no lldp med faststartrepeatcount
- {% endif %}
- {% elif med == "config_notification" %}
- {% if val %}
-lldp med confignotification all
- {% else %}
-no lldp med confignotification all
- {% endif %}
- {% elif med == "global_med" %}
- {% if val %}
-lldp med all
- {% else %}
-no lldp med all
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
-{% if os6_lldp is defined and os6_lldp %}
-{% for key in os6_lldp.keys() %}
-{% set lldp_vars = os6_lldp[key] %}
-{% if key == "local_interface" %}
- {% for intf in lldp_vars.keys() %}
- {% set intf_vars = lldp_vars[intf] %}
-interface {{ intf }}
- {% if intf_vars.mode is defined and intf_vars.mode %}
- {% set intf_vars_mode = intf_vars.mode %}
- {% if intf_vars_mode.tx is defined %}
- {% if intf_vars_mode.tx %}
-lldp transmit
- {% else %}
-no lldp transmit
- {% endif %}
- {% endif %}
- {% if intf_vars_mode.rx is defined %}
- {% if intf_vars_mode.rx %}
-lldp receive
- {% else %}
-no lldp receive
- {% endif %}
- {% endif %}
- {% endif %}
- {% if intf_vars.notification is defined %}
- {% if intf_vars.notification %}
-lldp notification
- {% else %}
-no lldp notification
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise is defined and intf_vars.advertise %}
- {% if intf_vars.advertise.med is defined and intf_vars.advertise.med %}
- {% if intf_vars.advertise.med.enable is defined %}
- {% if intf_vars.advertise.med.enable %}
-lldp med
- {% else %}
-no lldp med
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise.med.config_notification is defined %}
- {% if intf_vars.advertise.med.config_notification %}
-lldp med confignotification
- {% else %}
-no lldp med confignotification
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-exit
- {% endfor %}
-{% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml
deleted file mode 100644
index 4d630fe46..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/main.os6.yaml
+++ /dev/null
@@ -1,26 +0,0 @@
----
-# vars file for dellemc.os6.os6_lldp,
-# below gives a sample configuration
- # Sample variables for OS6 device
-
-os6_lldp:
- timers:
- reinit: 2
- interval: 5
- hold: 5
- notification_interval: 5
- advertise:
- med:
- global_med: true
- fast_start_repeat_count: 4
- config_notification: true
- local_interface:
- Gi1/0/1:
- mode:
- tx: true
- rx: false
- notification: true
- advertise:
- med:
- config_notification: true
- enable: true \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml
deleted file mode 100644
index b443e0469..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_lldp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml
deleted file mode 100644
index d602eda7c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_lldp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_lldp
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/LICENSE b/ansible_collections/dellemc/os6/roles/os6_logging/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/README.md b/ansible_collections/dellemc/os6/roles/os6_logging/README.md
deleted file mode 100644
index 037d37819..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-Logging role
-============
-
-This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The Logging role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If the `os6_cfg_generate` variable is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os6_logging keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``logging`` | list | Configures the logging server (see ``logging.*``) | os6 |
-| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os6 |
-| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os6 |
-| ``source_interface`` | string | Configures the source interface for logging; it accepts only a loopback interface, VLAN ID, out-of-band interface, or tunnel ID; leave the field blank to remove the source interface | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
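-
-As a quick illustration of the negation behavior described above, a hypothetical variable block (addresses are placeholders) that removes a logging server and clears the source interface might look like this:
-
-    os6_logging:
-      logging:
-        - ip: 1.1.1.1
-          state: absent
-      source_interface: ""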
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false.
-
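-One way to enable this generation is to set the variable in *host_vars* (or *group_vars*); for example, using the same *build_dir* as the sample below:
-
-    os6_cfg_generate: true
-    build_dir: ../temp/temp_os6
-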
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_logging:
- logging:
- - ip : 1.1.1.1
- state: present
- - ip: 2.2.2.2
- state: present
- - ip: 3.3.3.3
- state: present
- source_interface: "vlan 10"
-
-**Simple playbook to setup logging — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_logging
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml
deleted file mode 100644
index 14475f6d2..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_logging
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml
deleted file mode 100644
index f88d8db55..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_logging
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml
deleted file mode 100644
index 518c92a39..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_logging role facilitates the configuration of logging attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
-    - os6
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml
deleted file mode 100644
index eb47e41e2..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating logging configuration for os6"
- template:
- src: os6_logging.j2
- dest: "{{ build_dir }}/logging6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning logging configuration for os6"
- dellemc.os6.os6_config:
- src: os6_logging.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2 b/ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2
deleted file mode 100644
index a1a30f252..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/templates/os6_logging.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure logging commands for os6 Devices
-
-os6_logging:
- logging:
- - ip: 1.1.1.1
- state: absent
- source_interface: "vlan 30"
-
-#####################################}
-{% if os6_logging is defined and os6_logging %}
-
-{% for key,value in os6_logging.items() %}
- {% if key == "logging" %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.state is defined and item.state == "absent" %}
-no logging {{ item.ip }}
- {% else %}
-logging {{ item.ip }}
-exit
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key == "source_interface" %}
- {% if value %}
-logging source-interface {{ value }}
- {% else %}
-no logging source-interface
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml
deleted file mode 100644
index 550ed665c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/tests/main.os6.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# vars file for dellemc.os6.os6_logging,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_logging:
- logging:
- - ip: 1.1.1.1
- state: present
-  source_interface: "vlan 30"
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml
deleted file mode 100644
index 59ec49c19..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_logging
diff --git a/ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml
deleted file mode 100644
index 329db6aff..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_logging/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_logging
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/README.md b/ansible_collections/dellemc/os6/roles/os6_ntp/README.md
deleted file mode 100644
index 6fbdec55c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-NTP role
-========
-
-This role facilitates the configuration of Network Time Protocol (NTP) attributes, and it specifically enables configuration of the NTP server. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The NTP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If the `os6_cfg_generate` variable is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os6_ntp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``server`` | list | Configures the NTP server (see ``server.*``) | os6 |
-| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os6 |
-| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
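-
-For example, a hypothetical variable block (addresses are placeholders) that adds one NTP server and removes another might look like this:
-
-    os6_ntp:
-      server:
-        - ip: 1.1.1.1
-          state: present
-        - ip: 2.2.2.2
-          state: absent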
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_ntp* role to configure the NTP server. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When the `os6_cfg_generate` variable is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os6_ntp* role.
-By including the role, you automatically get access to all of the tasks to configure NTP attributes.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
-    hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_ntp:
- server:
- - ip: 2.2.2.2
- state: absent
-
-**Simple playbook to setup NTP — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_ntp
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml
deleted file mode 100644
index 5cc2de16e..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_ntp
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml
deleted file mode 100644
index 8a8a2a419..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_ntp
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml
deleted file mode 100644
index a6ba48d3a..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_ntp role facilitates the configuration of NTP attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml
deleted file mode 100644
index 3ba297043..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating NTP configuration for os6"
- template:
- src: os6_ntp.j2
- dest: "{{ build_dir }}/ntp6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False'))| bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning NTP configuration for os6"
- dellemc.os6.os6_config:
- src: os6_ntp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2 b/ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2
deleted file mode 100644
index 94e4561f4..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/templates/os6_ntp.j2
+++ /dev/null
@@ -1,27 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure NTP commands for os6 devices
-
-os6_ntp:
- server:
- - ip: 2.2.2.2
- state: absent
-
-#####################################}
-{% if os6_ntp is defined and os6_ntp %}
- {% for key,value in os6_ntp.items() %}
- {% if key == "server" and value %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.state is defined and item.state == "absent" %}
-no sntp server {{ item.ip }}
- {% else %}
-sntp server {{ item.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml
deleted file mode 100644
index 924755044..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/main.os6.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# vars file for dellemc.os6.os6_ntp,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_ntp:
- server:
- - ip: 2.2.2.2
- state: present
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml
deleted file mode 100644
index d24e3b53f..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os6.os6_ntp
diff --git a/ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml
deleted file mode 100644
index 4ec591c95..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_ntp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_ntp
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/LICENSE b/ansible_collections/dellemc/os6/roles/os6_qos/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/README.md b/ansible_collections/dellemc/os6/roles/os6_qos/README.md
deleted file mode 100644
index 31be2719c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/README.md
+++ /dev/null
@@ -1,102 +0,0 @@
-QoS role
-========
-
-This role facilitates the configuration of quality of service (QoS) attributes like policy-map and class-map. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The QoS role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os6_qos keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``policy_map`` | list | Configures the policy-map (see ``policy_map.*``) | os6 |
-| ``policy_map.name`` | string (required) | Configures the policy-map name | os6 |
-| ``policy_map.type`` | string: in, out in os6 | Configures the policy-map type | os6 |
-| ``policy_map.class_instances`` | list | Specifies the class instances for the policy | os6 |
-| ``class_instances.name`` | string | Specifies name of class instance | os6 |
-| ``class_instances.policy`` | list | Specifies list of associated policies for the class | os6 |
-| ``policy_map.state`` | string: present\*,absent | Deletes the policy-map if set to absent | os6 |
-| ``class_map`` | list | Configures the class-map (see ``class_map.*``) | os6 |
-| ``class_map.name`` | string (required) | Configures the class-map name | os6 |
-| ``class_map.type`` | string: match-all, match-any in os6 | Configures the class-map type | os6 |
-| ``class_map.match_condition`` | list | Specifies the type of match-conditions required for the class | os6 |
-| ``class_map.state`` | string: present\*,absent | Deletes the class-map if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_qos* role to configure the policy-map and class-map. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path; by default, the variable is set to false. The example writes a simple playbook that only references the *os6_qos* role. By including the role, you automatically get access to all of the tasks to configure QoS features.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_qos:
- policy_map:
- - name: testpolicy
- type: qos
- class_instances:
- - name: video
- policy:
- - assign-queue 1
- state: present
- class_map:
- - name: testclass
- type: application
- match_condition:
- - ip dscp 26
- state: present
-
-**Simple playbook to set up QoS — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_qos
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml
deleted file mode 100644
index 1b87a8f3a..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/defaults/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# defaults file for dellemc.os6.os6_qos
-match_type:
- match_all: match-all
- match_any: match-any
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml
deleted file mode 100644
index 1998b3a75..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_qos
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml
deleted file mode 100644
index 47fff33e5..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_qos role facilitates the configuration of qos attributes in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml
deleted file mode 100644
index 1c7d62821..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Provisioning Qos configuration for os6"
- dellemc.os6.os6_config:
- src: os6_qos.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
-
- - name: "Generating Qos configuration for os6"
- template:
- src: os6_qos.j2
- dest: "{{ build_dir }}/qos6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2 b/ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2
deleted file mode 100644
index abb9ec7b4..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/templates/os6_qos.j2
+++ /dev/null
@@ -1,97 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#####################################################
-
-Purpose:
-Configure qos commands for os6 Devices.
-
-os6_qos:
- class_map:
- - name: CONTROL
- type: match-all
- match_condition:
- - ip dscp 40
- state: present
- - name: testclass
- type: match-all
- match_condition:
- - vlan 4
- state: present
- - name: test
- type: match-any
- match_condition:
- - vlan 5
- state: present
- policy_map:
- - name: testpolicy
- type: in
- class_instances:
- - name: testclass
- policy:
- - assign-queue 4
- - mirror Po1
- state: present
- - name: test
- policy:
- - assign-queue 4
- state: present
- state: present
-
-#####################################################}
-{% if os6_qos is defined and os6_qos %}
-{% for key in os6_qos.keys() %}
- {% if key =="class_map" %}
- {% for vars in os6_qos[key] %}
- {% if vars.name is defined and vars.name %}
- {% if vars.state is defined and vars.state == "absent" %}
-no class-map {{ vars.name }}
- {% else %}
- {% if vars.type is defined and vars.type %}
-class-map {{ vars.type }} {{ vars.name }}
- {% else %}
-class-map {{ vars.name }}
- {% endif %}
- {% if vars.match_condition is defined and vars.match_condition %}
- {% for match in vars.match_condition %}
-match {{ match }}
- {% endfor %}
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endfor %}
- {% elif key =="policy_map" %}
- {% for vars in os6_qos[key] %}
- {% if vars.name is defined and vars.name %}
- {% if vars.state is defined and vars.state == "absent" %}
-no policy-map {{ vars.name }}
- {% else %}
- {% if vars.type is defined and vars.type %}
-policy-map {{ vars.name }} {{ vars.type }}
- {% else %}
-policy-map {{ vars.name }}
- {% endif %}
- {% if vars.class_instances is defined and vars.class_instances %}
- {% for instance in vars.class_instances %}
- {% if instance.name is defined and instance.name %}
- {% if instance.state is defined and instance.state == "absent"%}
-no class {{ instance.name }}
- {% else %}
-class {{ instance.name }}
- {% endif %}
- {% if instance.policy is defined and instance.policy %}
- {% for policy in instance.policy %}
-{{ policy }}
- {% endfor %}
- {% endif %}
-exit
- {% endif %}
- {% endfor %}
- {% endif %}
-exit
- {% endif %}
- {% endif %}
- {% endfor %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
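
For reference, rendering os6_qos.j2 with the sample variables from its header comment would produce roughly the following os6 CLI (class-maps first, then the policy-map, assuming the keys render in the order listed). This is a hand-traced sketch of the expected template output, not configuration captured from an os6 device.

    class-map match-all CONTROL
    match ip dscp 40
    exit
    class-map match-all testclass
    match vlan 4
    exit
    class-map match-any test
    match vlan 5
    exit
    policy-map testpolicy in
    class testclass
    assign-queue 4
    mirror Po1
    exit
    class test
    assign-queue 4
    exit
    exit
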
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml
deleted file mode 100644
index 7c8d983ad..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/tests/main.os6.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-# Sample variables for OS6 device
-
-os6_qos:
- class_map:
- - name: CONTROL
- type: match-all
- match_condition:
- - ip dscp 40
- state: present
- - name: VIDEO
- type: match-all
- match_condition:
- - ip dscp 34
- state: present
- - name: VOICE-TRAFFIC
- type: match-all
- match_condition:
- - ip dscp ef
- state: present
- - name: DATA-TRAFFIC
- type: match-any
- match_condition:
- - vlan 100
- - protocol tcp
- - cos 5
- - ip tos 11 11
- state: present
- policy_map:
- - name: Ingress_QoS
- type: in
- class_instances:
- - name: VIDEO
- policy:
- - assign-queue 1
- - name: CONTROL
- policy:
- - assign-queue 2
- - name: VOICE-TRAFFIC
- policy:
- - assign-queue 3
- state: present
- - name: Egress_QoS
- type: out
- class_instances:
- - name: VIDEO
- policy:
- - assign-queue 5
- - name: CONTROL
- policy:
- - assign-queue 5
- - name: VOICE-TRAFFIC
- policy:
- - assign-queue 5
- state: present
- - name: QoS_In
- type: in
- class_instances:
- - name: DATA-TRAFFIC
- policy:
- - assign-queue 1
- - redirect Gi1/0/3
- - police-simple 512000 64 conform-action set-cos-transmit 5 violate-action drop
- state: present
- - name: QoS_Out
- type: out
- class_instances:
- - name: DATA-TRAFFIC
- policy:
- - assign-queue 6
- - redirect Gi1/0/3
- - police-simple 512000 64 conform-action set-cos-transmit 5 violate-action drop
- state: present
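
For reference, the DATA-TRAFFIC class-map and the QoS_In policy-map from the sample variables above would render through os6_qos.j2 to roughly the following os6 CLI. This is a hand-traced sketch of the expected template output, not configuration captured from an os6 device.

    class-map match-any DATA-TRAFFIC
    match vlan 100
    match protocol tcp
    match cos 5
    match ip tos 11 11
    exit
    policy-map QoS_In in
    class DATA-TRAFFIC
    assign-queue 1
    redirect Gi1/0/3
    police-simple 512000 64 conform-action set-cos-transmit 5 violate-action drop
    exit
    exit
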
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml b/ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml
deleted file mode 100644
index 69562c924..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/tests/test.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os6.os6_qos
diff --git a/ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml
deleted file mode 100644
index 5048d9b49..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_qos/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_qos
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/README.md b/ansible_collections/dellemc/os6/roles/os6_snmp/README.md
deleted file mode 100644
index 22da4f71d..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/README.md
+++ /dev/null
@@ -1,108 +0,0 @@
-SNMP role
-=========
-
-This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes, including the contact, location, community strings, trap hosts, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The SNMP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the example after this list)
-- Variables and values are case-sensitive
-
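-For example, setting an empty value for `snmp_contact` removes any configured contact. A minimal sketch, assuming the rendering behavior of the *os6_snmp.j2* template bundled with this role, which emits `no snmp-server contact` when the value is empty:
-
-    os6_snmp:
-      snmp_contact:
-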
-**os6_snmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``snmp_contact`` | string | Configures SNMP contact information; leave the field blank to remove the contact information | os6 |
-| ``snmp_location`` | string | Configures SNMP location information; leave the field blank to remove the location | os6 |
-| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os6 |
-| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os6 |
-| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os6 |
-| ``snmp_community.state`` | string: absent,present\* | Deletes the SNMP community information if set to absent | os6 |
-| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os6 |
-| ``snmp_host.ip`` | string | Configures the IP address of the SNMP trap host | os6 |
-| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host | os6 |
-| ``snmp_host.udpport`` | string | Configures the UDP port number of the SNMP trap host (0 to 65535) | os6 |
-| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os6 |
-| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os6 |
-| ``snmp_traps.name`` | string | Specifies the name of the SNMP trap to enable | os6 |
-| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, this variable is set to false. The example then writes a simple playbook that only references the *os6_snmp* role. By including the role, you automatically get access to all of the tasks that configure SNMP features.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_snmp:
- snmp_contact: test
- snmp_location: Santa Clara
- snmp_community:
- - name: public
- access_mode: ro
- state: present
- - name: private
- access_mode: rw
- state: present
- snmp_host:
- - ip: 10.0.0.1
- communitystring: public
- udpport: 1
- state: absent
- snmp_traps:
- - name: config
- state: present
-
-**Simple playbook to setup snmp — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_snmp
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
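-With the sample *host_vars/switch1* values above, the role renders configuration commands along these lines (a hedged illustration based on the *os6_snmp.j2* template bundled with this role; the ordering follows the template's sorted iteration over the `os6_snmp` keys):
-
-    snmp-server community "public" ro
-    snmp-server community "private" rw
-    snmp-server contact "test"
-    no snmp-server host 10.0.0.1 traps
-    snmp-server location "Santa Clara"
-    snmp-server enable traps config
-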
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml
deleted file mode 100644
index 994291fa1..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_snmp
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml
deleted file mode 100644
index 9dbd61736..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_snmp
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml
deleted file mode 100644
index 6c26f3d30..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_snmp role facilitates the configuration of snmp attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml
deleted file mode 100644
index 051ba0340..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating SNMP configuration for os6"
- template:
- src: os6_snmp.j2
- dest: "{{ build_dir }}/snmp6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning SNMP configuration for os6"
- dellemc.os6.os6_config:
- src: os6_snmp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2 b/ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2
deleted file mode 100644
index bf13fc37a..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/templates/os6_snmp.j2
+++ /dev/null
@@ -1,94 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure snmp commands for os6 Devices
-
-os6_snmp:
- snmp_contact: test
- snmp_location: Santa Clara
- snmp_community:
- - name: public
- access_mode: ro
- state: absent
- - name: private
- access_mode: rw
- state: absent
- snmp_traps:
- - name: all
- state: present
- snmp_host:
- - ip: 4.4.4.4
- communitystring: public
- udpport: 1
- state: absent
-
-#####################################}
-{% if os6_snmp is defined and os6_snmp %}
-
-{% for key,value in os6_snmp|dictsort %}
- {% if key=="snmp_contact" %}
- {% if value %}
-snmp-server contact "{{ value }}"
- {% else %}
-no snmp-server contact
- {% endif %}
- {% elif key == "snmp_location" %}
- {% if value %}
-snmp-server location "{{ value }}"
- {% else %}
-no snmp-server location
- {% endif %}
- {% elif key == "snmp_community" %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server community {{ item.name }}
- {% else %}
- {% if item.access_mode is defined and item.access_mode %}
-snmp-server community "{{ item.name }}" {{ item.access_mode }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% elif key == "snmp_host" and value %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server host {{ item.ip }} traps
- {% else %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-snmp-server host {{ item.ip }} "{{ item.communitystring }}" udp-port {{ item.udpport }}
- {% else %}
-snmp-server host {{ item.ip }} "{{ item.communitystring }}"
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor%}
- {% elif key == "snmp_traps" %}
- {% if value %}
- {% for val in value %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
-no snmp-server enable traps {{ val.name }}
- {% else %}
- {% if val.name == "all" %}
- {% set trap_list = ['bgp state-changes limited','dvmrp','captive-portal','pim','captive-portal client-auth-failure','captive-portal client-connect','captive-portal client-db-full','captive-portal client-disconnect'] %}
- {% for name in trap_list %}
-snmp-server enable traps {{ name }}
- {% endfor %}
- {% else %}
-snmp-server enable traps {{ val.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml
deleted file mode 100644
index 59f8399db..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/main.os6.yaml
+++ /dev/null
@@ -1,23 +0,0 @@
----
-# vars file for dellemc.os6.os6_snmp
-# Below is a sample configuration for an OS6 device
-
-os6_snmp:
- snmp_contact: test
- snmp_location: Santa Clara
- snmp_community:
- - name: public
- access_mode: ro
- state: absent
- - name: private
- access_mode: rw
- state: absent
- snmp_traps:
- - name: all
- state: present
- snmp_host:
- - ip: 4.4.4.4
- communitystring: public
- udpport: 1
- state: absent \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml
deleted file mode 100644
index 22e0b3d7d..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_snmp
diff --git a/ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml
deleted file mode 100644
index 361ecad5f..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_snmp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_snmp
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/LICENSE b/ansible_collections/dellemc/os6/roles/os6_system/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/README.md b/ansible_collections/dellemc/os6/roles/os6_system/README.md
deleted file mode 100644
index 2e70b1924..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/README.md
+++ /dev/null
@@ -1,83 +0,0 @@
-System role
-===========
-
-This role facilitates the configuration of global system attributes. It specifically enables configuration of the hostname, enable password, and maximum transmission unit (MTU) for OS6. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The System role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- The role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os6_system keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``hostname`` | string | Configures a hostname to the device (no negate command) | os6 |
-| ``enable_password`` | string | Configures the enable password; leave the field blank to remove the enable password from the system (see the snippet after this table) | os6 |
-| ``mtu`` | integer | Configures the maximum transmission unit (MTU) for all interfaces; leave the field blank to remove the MTU configuration from the system | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
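-
-As a minimal sketch (not part of the original role documentation), leaving a key blank removes the corresponding configuration; for example, the following `os6_system` block keeps the hostname but removes the enable password and the MTU setting:
-
-    os6_system:
-      hostname: switch1
-      enable_password:
-      mtu: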
-
-Connection variables
-********************
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself; a *group_vars* sketch is shown after the following table.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
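-
-As a minimal sketch (not part of the original document), the same connection variables could instead be placed in a *group_vars* file for the `os6switches` group used in the role's test inventory; the placeholder values mirror the sample *host_vars* below:
-
-    # group_vars/os6switches.yaml
-    ansible_ssh_user: xxxxx
-    ansible_ssh_pass: xxxxx
-    ansible_become: yes
-    ansible_become_method: enable
-    ansible_become_pass: xxxxx
-    ansible_network_os: dellemc.os6.os6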
-
-Example playbook
-----------------
-
-This example uses the *os6_system* role to set the hostname, enable password, and MTU. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false; an example of enabling it at run time is shown under **Run** below. This example writes a simple playbook that only references the *os6_system* role. By including the role, you automatically get access to all of the tasks to configure system features.
-
-**Sample hosts file**
-
-    switch1 ansible_host=<ip_address>
-
-**Sample host_vars/switch1**
-
-    hostname: switch1
-    ansible_become: yes
-    ansible_become_method: enable
-    ansible_become_pass: xxxxx
-    ansible_ssh_user: xxxxx
-    ansible_ssh_pass: xxxxx
-    ansible_network_os: dellemc.os6.os6
-    build_dir: ../temp/temp_os6
-
-    os6_system:
-      hostname: host1
-      enable_password: dell
-      mtu: 2000
-
-
-**Simple playbook to setup system — switch1.yaml**
-
-    - hosts: switch1
-      roles:
-        - dellemc.os6.os6_system
-
-**Run**
-
-    ansible-playbook -i hosts switch1.yaml
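-
-One possible variant (not shown in the original example): `os6_cfg_generate` can be enabled for a single run through an extra variable; assuming *build_dir* is defined as in the sample *host_vars* above, this writes the generated commands to a .part file in addition to provisioning the device:
-
-    ansible-playbook -i hosts switch1.yaml -e "os6_cfg_generate=true"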
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml
deleted file mode 100644
index c720d5cc5..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml
deleted file mode 100644
index a042201bf..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml
deleted file mode 100644
index ca8e0bcfd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_system role facilitates the configuration of system attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml
deleted file mode 100644
index c10a7390e..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating system configuration for os6"
- template:
- src: os6_system.j2
- dest: "{{ build_dir }}/system6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning system configuration for os6"
- dellemc.os6.os6_config:
- src: os6_system.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2 b/ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2
deleted file mode 100644
index b6ca686d7..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/templates/os6_system.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure system commands for os6 devices
-
-os6_system:
- hostname: os6
- enable_password: force10
- mtu: 2000
-
-#####################################}
-{% if os6_system is defined and os6_system %}
-
-{% if os6_system.hostname is defined and os6_system.hostname %}
-hostname "{{ os6_system.hostname }}"
-{% endif %}
-{% for key,value in os6_system.items() %}
-
- {% if key == "enable_password" %}
- {% if value %}
-enable password {{ value }}
- {% else %}
-no enable password
- {% endif %}
- {% elif key == "mtu" %}
- {% if value %}
-system jumbo mtu {{ value }}
- {% else %}
-no system jumbo mtu
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml
deleted file mode 100644
index 0665ae6b6..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/tests/main.os6.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# vars file for dellemc.os6.os6_system,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_system:
- hostname: os6
- enable_password: force10
- mtu: 2000
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml
deleted file mode 100644
index 4a7a41de9..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml
deleted file mode 100644
index 773a89500..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_system/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_system,
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/LICENSE b/ansible_collections/dellemc/os6/roles/os6_users/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/README.md b/ansible_collections/dellemc/os6/roles/os6_users/README.md
deleted file mode 100644
index 2146cff8c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/README.md
+++ /dev/null
@@ -1,93 +0,0 @@
-Users role
-==========
-
-This role facilitates the configuration of global system user attributes. It supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The Users role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os6_users list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``username`` | string (required) | Configures the username, which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os6 |
-| ``password`` | string | Configures the password set for the username; the password must be at least eight characters long | os6 |
-| ``privilege`` | int | Configures the privilege level for the user; either 0, 1, or 15; if this key is omitted, the default privilege is 1 | os6 |
-| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_users* role to configure user attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file. Write a simple playbook that only references the *os6_users* role; by including the role, you automatically get access to all of the tasks that configure user features.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_users:
- - username: u1
- privilege: 0
- state: absent
- - username: u1
- password: dell@force10
- privilege: 1
- state: present
- - username: u2
- password: test1234567
- privilege: 3
- state: present
-
-**Simple playbook to setup users — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_users
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml
deleted file mode 100644
index ab2367bec..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_users
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml
deleted file mode 100644
index c1d47b7cb..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_users
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml
deleted file mode 100644
index cde049b5a..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_users role facilitates the configuration of user attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml
deleted file mode 100644
index f94f356fc..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating users configuration for os6"
- template:
- src: os6_users.j2
- dest: "{{ build_dir }}/users6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning users configuration for os6"
- dellemc.os6.os6_config:
- src: os6_users.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2 b/ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2
deleted file mode 100644
index 52ff6880c..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/templates/os6_users.j2
+++ /dev/null
@@ -1,37 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure user commands for OS6 devices
-
-os6_users:
- - username: test
- password: test
- privilege: 0
- state: absent
-
-#####################################}
-{% if os6_users is defined and os6_users %}
-
-{% for item in os6_users %}
- {% if item.username is defined %}
- {% if item.state is defined and item.state == "absent" %}
-no username {{ item.username }}
- {% else %}
- {% if item.password is defined and item.password %}
- {% if item.privilege is defined and item.privilege %}
-username {{ item.username }} password {{ item.password }} privilege {{ item.privilege }}
- {% else %}
-username {{ item.username }} password {{ item.password }}
- {% endif %}
- {% elif not item.password %}
- {% if item.privilege is defined and item.privilege %}
-username {{ item.username }} nopassword privilege {{ item.privilege }}
- {% else %}
-username {{ item.username }} nopassword
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %}
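
For reference, the template above renders one CLI command per `os6_users` entry: `no username <name>` when state is absent, `username <name> password <pwd> [privilege <n>]` when a password is supplied, and `username <name> nopassword [privilege <n>]` otherwise. As an illustrative sketch (the usernames and values below are placeholders, not taken from the collection), variables such as:

    os6_users:
      - username: u1
        privilege: 0
        state: absent
      - username: u2
        password: test1234567
        privilege: 1
        state: present

would render commands along the lines of:

    no username u1
    username u2 password test1234567 privilege 1
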
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml
deleted file mode 100644
index abf697567..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/tests/main.os6.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# vars file for dellemc.os6.os6_users,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_users:
- - username: test
- password: test
- privilege: 0
- state: absent \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml
deleted file mode 100644
index b07c1dd8f..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_users
diff --git a/ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml
deleted file mode 100644
index e9c84b963..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_users/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_users
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE b/ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/README.md b/ansible_collections/dellemc/os6/roles/os6_vlan/README.md
deleted file mode 100644
index 5ef09a9f5..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/README.md
+++ /dev/null
@@ -1,104 +0,0 @@
-VLAN role
-=========
-
-This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The VLAN role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration
-- `os6_vlan` (dictionary) holds the VLAN ID keys and the default-vlan key
-- Each VLAN ID key should be in the format "vlan <ID>", where <ID> is 1 to 4094
-- Variables and values are case-sensitive
-
-
-**VLAN ID keys**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``tagged_members_append`` | boolean: true,false | Appends the tagged VLAN members to the existing list on the interfaces | os6 |
-| ``tagged_members_state`` | string: absent,present | Removes all tagged members if set to absent | os6 |
-| ``vlan <id>`` | string | Specifies the VLAN to be configured (see ``vlan <id>.*``) | os6 |
-| ``vlan <id>.name`` | string | Configures the name of the VLAN; leave the field blank to remove the user-defined name and assign the default name | os6 |
-| ``vlan <id>.tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os6 |
-| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os6 |
-| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os6 |
-| ``vlan <id>.untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os6 |
-| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os6 |
-| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os6 |
-| ``vlan <id>.state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os6 |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-## Example playbook
-
-This example uses the *os6_vlan* role to set up the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN by its ID or delete the members associated with it. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os6_vlan* role.
-
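-For example, one way to enable the flag at run time is to pass it as an extra variable to the *switch1.yaml* playbook shown below (a typical invocation; adjust the inventory and playbook names to your setup):
-
-    ansible-playbook -i hosts switch1.yaml -e "os6_cfg_generate=true"
-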
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
- os6_vlan:
- tagged_members_append: False
- tagged_members_state: present
- vlan 100:
- name: "Mgmt Network"
- tagged_members:
- - port: Te1/0/30
- state: absent
- untagged_members:
- - port: Fo1/0/14
- state: present
- state: present
-
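-As a rough illustration, the sample variables above would be expected to render OS6 CLI along these lines when the role runs (actual output can differ by device and software release):
-
-    vlan 100
-    name "Mgmt Network"
-    exit
-    interface Fo1/0/14
-    switchport access vlan 100
-    exit
-    interface Te1/0/30
-    switchport trunk allowed vlan remove 100
-    exit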
-
-**Simple playbook to set up system — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_vlan
-
-**Run**
-
- ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml
deleted file mode 100644
index 623b20769..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_vlan \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml
deleted file mode 100644
index e3d581be9..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_vlan
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml
deleted file mode 100644
index 0022966d9..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_vlan role facilitates the configuration of VLAN attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml
deleted file mode 100644
index 44d0b537f..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating VLAN configuration for os6"
- template:
- src: os6_vlan.j2
- dest: "{{ build_dir }}/vlan6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning VLAN configuration for os6"
- dellemc.os6.os6_config:
- src: os6_vlan.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2 b/ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2
deleted file mode 100644
index b0cbe6ff1..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/templates/os6_vlan.j2
+++ /dev/null
@@ -1,135 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#########################################
-
-Purpose:
-Configure VLAN Interface commands for os6 Devices
-
-os6_vlan:
- tagged_members_append: False
- tagged_members_state: present
- vlan 2:
- name: "os6vlan"
- tagged_members:
- - port: Gi1/0/1
- state: present
- - port: Gi1/0/2
- state: absent
- untagged_members:
- - port: Gi1/0/3
- state: present
- - port: Gi1/0/4
- state: absent
- state: present
-
-#########################################}
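-{# First pass: create or delete each VLAN, set or clear its name, and handle untagged (access) members #}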
-{% if os6_vlan is defined and os6_vlan -%}
-{%- for key in os6_vlan.keys() -%}
-{% if 'vlan' in key %}
-{%- set vlan_id = key.split(" ") -%}
-{%- set vlan_vars = os6_vlan[key] -%}
- {% if vlan_vars.state is defined and vlan_vars.state=="absent" -%}
-no vlan {{ vlan_id[1] }}
- {%- else -%}
-vlan {{ vlan_id[1] }}
- {% if vlan_vars.name is defined -%}
- {% if vlan_vars.name -%}
-name "{{ vlan_vars.name }}"
- {% else -%}
-no name
- {% endif %}
- {% endif %}
-exit
- {% if vlan_vars.untagged_members is defined -%}
- {%- for ports in vlan_vars.untagged_members -%}
- {% if ports.port is defined and ports.port -%}
- {% if ports.state is defined and ports.state == "absent" -%}
-interface {{ ports.port }}
-no switchport access vlan
- {% else -%}
-interface {{ ports.port }}
-switchport access vlan {{ vlan_id[1] }}
- {% endif -%}
- {% endif -%}
-exit
- {% endfor -%}
- {% endif -%}
- {% endif -%}
-{% endif -%}
-{% endfor -%}
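-{# Second pass: build a per-interface map of tagged VLAN IDs to add ('present') or remove ('absent') #}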
-{%- set cmd_dict = {} -%}
-{%- for key in os6_vlan.keys() -%}
-{% if 'vlan' in key %}
-{%- set vlan_id = key.split(" ") -%}
-{%- set vlan_vars = os6_vlan[key] -%}
-{%- set tagged_vlans = [] -%}
-{%- set tagged_members_present = [] -%}
-{%- set tagged_members_absent= [] -%}
-{% if vlan_vars.tagged_members is defined and vlan_vars.tagged_members -%}
- {%- for ports in vlan_vars.tagged_members -%}
- {% if ports.port is defined and ports.port -%}
- {%- set port = ports.port -%}
- {% if ports.state is defined and ports.state == 'absent' -%}
- {% if port in cmd_dict and 'absent' in cmd_dict[port] -%}
- {%- set tmp_vlan_list=cmd_dict[port]['absent'] -%}
- {%- set x=tmp_vlan_list.extend([vlan_id[1]]) -%}
- {%- set x=cmd_dict[port].update({'absent': tmp_vlan_list}) -%}
- {%- elif port in cmd_dict and 'absent' not in cmd_dict[port] -%}
- {%- set x=cmd_dict[port].update({'absent': [vlan_id[1]]}) -%}
- {%- else -%}
- {%- set x=cmd_dict.update({port: {'absent': [vlan_id[1]]}}) -%}
- {% endif -%}
- {%- else -%}
- {% if port in cmd_dict and 'present' in cmd_dict[port] -%}
- {%- set tmp_vlan_list=cmd_dict[port]['present'] -%}
- {%- set x=tmp_vlan_list.extend([vlan_id[1]]) -%}
- {%- set x=cmd_dict[port].update({'present': tmp_vlan_list}) -%}
- {%- elif port in cmd_dict and 'present' not in cmd_dict[port] -%}
- {%- set x=cmd_dict[port].update({'present': [vlan_id[1]]}) -%}
- {%- else -%}
- {%- set x=cmd_dict.update({port: {'present': [vlan_id[1]]}}) -%}
- {% endif -%}
- {% endif -%}
- {% endif -%}
- {% endfor -%}
-{% endif -%}
-{% endif -%}
- {% endfor -%}
-{%- for cmd in cmd_dict -%}
-interface {{cmd}}
-{% if 'tagged_members_state' in os6_vlan and os6_vlan['tagged_members_state']=='absent' %}
-no switchport trunk allowed vlan
-{% else %}
-{% for cmd_item in cmd_dict[cmd] %}
-{% if 'present' == cmd_item -%}
-{% set sort_list = cmd_dict[cmd]['present']| sort %}
-{% elif 'absent' in cmd_item -%}
-{% set sort_list = cmd_dict[cmd]['absent']| sort %}
-{% endif %}
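-{# Collapse the sorted VLAN ID list into contiguous ranges (for example 2,3,4,7 becomes 2-4,7) #}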
-{% set range_list = [] %}
-{% set temp = {'temp': []} %}
-{% for i in range(sort_list|length) %}
-{% set x=temp['temp'].extend([sort_list[i]]) %}
-{% if (i != sort_list|length -1 and sort_list[i+1]|int - sort_list[i]|int > 1) or (i == sort_list|length -1) %}
-{% if temp['temp']|first != temp['temp']|last %}
-{% set x=range_list.extend([temp['temp']|first|string+'-'+temp['temp']|last|string]) %}
-{% set x=temp.update({'temp': []}) %}
-{% else %}
-{% set x=range_list.extend([temp['temp']|last|string]) %}
-{% set x=temp.update({'temp': []}) %}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% if 'present' == cmd_item -%}
-{% if 'tagged_members_append' in os6_vlan and os6_vlan['tagged_members_append'] %}
-switchport trunk allowed vlan add {{ range_list| join(',') }}
-{% else %}
-switchport trunk allowed vlan {{ range_list| join(',') }}
-{% endif -%}
-{% elif 'absent' == cmd_item -%}
-switchport trunk allowed vlan remove {{ range_list| join(',') }}
-{% endif -%}
-{% endfor -%}
-exit
-{% endif -%}
-{% endfor -%}
-{% endif -%}
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml
deleted file mode 100644
index 5b0f68cfb..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/main.os6.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# vars file for dellemc.os6.os6_vlan,
-# below gives an example configuration
-# Sample variables for OS6 device
-
-os6_vlan:
-  tagged_members_append: False
-  tagged_members_state: present
-  vlan 100:
- name: "Blue Network"
- tagged_members:
- - port: Te1/0/1
- state: present
- - port: Te1/0/2
- state: present
- untagged_members:
- - port: Te1/0/3
- state: present
- - port: Te1/0/4
- state: present
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml
deleted file mode 100644
index 03697d94d..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_vlan
diff --git a/ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml
deleted file mode 100644
index acd743a7e..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vlan/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_vlan
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/README.md b/ansible_collections/dellemc/os6/roles/os6_vrrp/README.md
deleted file mode 100644
index 9fd9f8614..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/README.md
+++ /dev/null
@@ -1,92 +0,0 @@
-VRRP role
-=========
-
-This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for OS6.
-
-The VRRP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- If `os6_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os6_vrrp` (dictionary) holds a dictionary with the interface name key
-- Interface name can correspond to any of the valid os6 interfaces with a unique interface identifier name
-- Physical interface names must be in *<interfacename> <tuple>* format (for example *Fo1/0/1*)
-- Variables and values are case-sensitive
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrrp_group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os6 |
-| ``description`` | string | Configures a single line description for the VRRP group | os6 |
-| ``virtual_address`` | string | Configures a virtual address to the VRRP group (A.B.C.D format) | os6 |
-| ``enable`` | boolean: true,false | Enables/disables the VRRP group at the interface | os6 |
-| ``preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os6 |
-| ``priority`` | integer | Configures priority for the VRRP group (1 to 255; default 100); leave the field blank to remove the priority | os6 |
-| ``state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent; the VRRP group must be disabled before it can be deleted from the interface | os6 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC Networking roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no, this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_vrrp* role to configure VRRP commands at the interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with corresponding Dell EMC OS6 name.
-
-When `os6_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that only references the *os6_vrrp* role.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/os6
- os6_vrrp:
- vlan 4:
- - vrrp_group_id: 4
- state: present
- description: "Interface-vrrp4"
- virtual_address: 10.2.0.1
- enable: true
- priority: 120
- preempt: false
-
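-As a rough illustration, the sample variables above would be expected to render OS6 CLI along these lines when the role runs (actual output can differ by device and software release):
-
-    interface vlan 4
-    vrrp 4
-    vrrp 4 ip 10.2.0.1
-    vrrp 4 description Interface-vrrp4
-    no vrrp 4 preempt
-    vrrp 4 mode
-    vrrp 4 priority 120
-    exit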
-
-**Simple playbook to set up system — switch1.yaml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_vrrp
-
-**Run**
-
-    ansible-playbook -i hosts switch1.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml
deleted file mode 100644
index ab5dc0abd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_vrrp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml
deleted file mode 100644
index 2e0b83364..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_vrrp
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml
deleted file mode 100644
index 8b1bc5a11..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os6_vrrp role facilitates the configuration of Virtual Router Redundancy Protocol (VRRP) attributes in
- devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - dellemc
- - emc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml
deleted file mode 100644
index 72a07a4d9..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os6
- - name: "Generating VRRP configuration for os6"
- template:
- src: os6_vrrp.j2
- dest: "{{ build_dir }}/vrrp6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False'))| bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning VRRP configuration for os6"
- dellemc.os6.os6_config:
- src: os6_vrrp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2 b/ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2
deleted file mode 100644
index 03cb3b801..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/templates/os6_vrrp.j2
+++ /dev/null
@@ -1,72 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#########################################
-Purpose:
-Configure VRRP Interface on OS6 Devices
-Variable file example:
----
-# VLAN Interface
-os6_vrrp:
- vlan 4:
- - vrrp_group_id: 4
- state: present
- description: "Interface-vrrp4"
- virtual_address: 10.2.0.1
- enable: true
- priority: 120
- preempt: false
-#######################################
-#}
-{% if os6_vrrp is defined and os6_vrrp %}
-{% for key in os6_vrrp.keys() %}
- {% set vrrp_vars = os6_vrrp[key] %}
- {% if vrrp_vars %}
-interface {{ key }}
- {% for group in vrrp_vars %}
- {% if group.vrrp_group_id is defined and group.vrrp_group_id %}
- {% if group.state is defined and group.state == "absent" %}
- {% if group.enable is defined and not group.enable %}
-no vrrp {{ group.vrrp_group_id }} mode
- {% endif %}
-no vrrp {{ group.vrrp_group_id }}
- {% else %}
-vrrp {{ group.vrrp_group_id }}
- {% if group.virtual_address is defined %}
- {% if group.virtual_address %}
-vrrp {{ group.vrrp_group_id }} ip {{ group.virtual_address }}
- {% endif %}
- {% endif %}
- {% if group.description is defined %}
- {% if group.description %}
-vrrp {{ group.vrrp_group_id }} description {{ group.description }}
- {% else %}
-no vrrp {{ group.vrrp_group_id }} description
- {% endif %}
- {% endif %}
- {% if group.preempt is defined %}
- {% if group.preempt %}
-vrrp {{ group.vrrp_group_id }} preempt
- {% else %}
-no vrrp {{ group.vrrp_group_id }} preempt
- {% endif %}
- {% endif %}
- {% if group.enable is defined %}
- {% if group.enable %}
-vrrp {{ group.vrrp_group_id }} mode
- {% else %}
-no vrrp {{ group.vrrp_group_id }} mode
- {% endif %}
- {% endif %}
- {% if group.priority is defined %}
- {% if group.priority %}
-vrrp {{ group.vrrp_group_id }} priority {{ group.priority }}
- {% else %}
-no vrrp {{ group.vrrp_group_id }} priority
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-exit
- {% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml
deleted file mode 100644
index b20d33767..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/main.os6.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# vars file for dellemc.os6.os6_vrrp,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_vrrp:
- vlan 4:
- - vrrp_group_id: 4
- state: present
- description: "Interface-vrrp4"
- virtual_address: 10.2.0.1
- enable: true
- priority: 120
- preempt: false \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml
deleted file mode 100644
index 660d49b39..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_vrrp
diff --git a/ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml
deleted file mode 100644
index 9eb17b5d7..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_vrrp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_vrrp
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE b/ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/README.md b/ansible_collections/dellemc/os6/roles/os6_xstp/README.md
deleted file mode 100644
index 38adc0f36..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/README.md
+++ /dev/null
@@ -1,117 +0,0 @@
-# xSTP role
-
-This role facilitates the configuration of xSTP attributes. It supports multiple spanning-tree protocol variants: spanning-tree protocol (STP), rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LANs (VLANs) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS6.
-
-The xSTP role requires an SSH connection for connectivity to a Dell EMC OS6 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os6.os6` as a value
-- `os6_xstp` (dictionary) contains the hostname (dictionary)
-- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable; an illustrative snippet appears after the sample *vars/main.yml* below
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**hostname keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|----------------------|
-| ``type`` | string (required) | Configures the spanning-tree mode; supported values vary by OS device and include RSTP, rapid-PVST, and MST | os6 |
-| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in the type variable | os6 |
-| ``stp`` | dictionary | Configures simple spanning-tree protocol (see ``stp.* keys``) | os6 |
-| ``stp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os6 |
-| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os6 |
-| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os6 |
-| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os6 |
-| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os6 |
-| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os6 |
-| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os6 |
-| ``vlan.state`` | string: absent, present\* | Deletes the configured PVST VLAN with ID if set to absent | os6 |
-| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os6 |
-| ``mstp.mstp_instances`` | list | Configures a MSTP instance (see ``mstp_instances.*``) | os6 |
-| ``mstp_instances.number`` | integer | Configures the multiple spanning-tree instance number | os6 |
-| ``mstp_instances.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to the instance number | os6 |
-| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os6 |
-| ``mstp_instances.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os6 |
-| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os6 |
-| ``intf <interface name>``| dictionary | Configures the interface name (see ``intf.<interface name>.*``) | os6 |
-| ``intf.<interface name>.edge_port`` | boolean: true,false | Enables port fast at the interface level if set to true | os6 |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os6, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os6_xstp* role to configure different variants of spanning-tree. Based on the type of STP and the defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path.
-The example then writes a simple playbook that only references the *os6_xstp* role. By including the role, you automatically get access to all of the tasks to configure xSTP.
-
-**Sample hosts file**
-
- switch1 ansible_host= <ip_address>
-
-**Sample host_vars/switch1**
-
- hostname: switch1
- ansible_become: yes
- ansible_become_method: enable
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os6.os6
- build_dir: ../temp/temp_os6
-
-
-**Sample vars/main.yml**
-
- os6_xstp:
- type: stp
- enable: true
- stp:
- bridge_priority: 4096
- pvst:
- vlan:
- - range_or_id: 10
- bridge_priority: 4096
- state: present
- mstp:
- mstp_instances:
- - number: 1
- vlans: 10,12
- bridge_priority: 4096
- vlans_state: present
- intf:
- Fo4/0/1:
- edge_port: true
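-
-**Sample vars for negating configuration (illustrative)**
-
-As noted under Role variables, a state variable set to absent (or an empty value) negates the corresponding configuration. The snippet below is a sketch of that usage against the template logic shown later in this diff; the specific VLAN and instance values are assumptions:
-
-    os6_xstp:
-      type: stp
-      enable: true
-      pvst:
-        vlan:
-          - range_or_id: 10
-            bridge_priority:
-            state: absent
-      mstp:
-        mstp_instances:
-          - number: 1
-            vlans: 10,12
-            vlans_state: absent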
-
-**Simple playbook to setup system — switch1.yml**
-
- - hosts: switch1
- roles:
- - dellemc.os6.os6_xstp
-
-**Run**
-
- ansible-playbook -i hosts switch1.yml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml
deleted file mode 100644
index 92da22dee..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os6.os6_xstp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml
deleted file mode 100644
index 03d5fa49b..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os6.os6_xstp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml
deleted file mode 100644
index dd1200bea..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os6_xstp role facilitates the configuration of STP attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os6
-
- galaxy_tags:
- - networking
- - dell
- - dellemc
- - emc
- - os6
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml
deleted file mode 100644
index 7fe379cf8..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os6
-
- - name: "Generating xSTP configuration for os6"
- template:
- src: os6_xstp.j2
- dest: "{{ build_dir }}/xstp6_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6") and ((os6_cfg_generate | default('False')) | bool)
-# notify: save config os6
- register: generate_output
-
- - name: "Provisioning xSTP configuration for os6"
- dellemc.os6.os6_config:
- src: os6_xstp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os6.os6")
-# notify: save config os6
- register: output \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2 b/ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2
deleted file mode 100644
index 2c6f482ff..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/templates/os6_xstp.j2
+++ /dev/null
@@ -1,129 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{##################################################
-PURPOSE: Configure xSTP commands for os6 devices
-
-os6_xstp:
- type: stp
- enable: true
- stp:
- bridge_priority: 4096
- pvst:
- vlan:
- - range_or_id: 10
- bridge_priority: 4096
- state: present
- mstp:
- mstp_instances:
- - number: 1
- vlans: 10,12
- vlans_state: present
- bridge_priority: 4096
- intf:
- Te1/1/8:
- edge_port: true
-
-#####################################################}
-{% if os6_xstp is defined and os6_xstp %}
-{% set xstp_vars = os6_xstp %}
-{% if xstp_vars.type is defined and xstp_vars.type %}
- {% if xstp_vars.enable is defined %}
- {% if xstp_vars.enable %}
-spanning-tree mode {{ xstp_vars.type }}
- {% else %}
-no spanning-tree
- {% endif %}
- {% endif %}
-{% endif %}
-{% if xstp_vars.stp is defined and xstp_vars.stp %}
- {% set val = xstp_vars.stp %}
- {% if val.bridge_priority is defined %}
- {% if val.bridge_priority == 0 or val.bridge_priority %}
-spanning-tree priority {{ val.bridge_priority }}
- {% else %}
-no spanning-tree priority
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.pvst is defined and xstp_vars.pvst %}
- {% set val = xstp_vars.pvst %}
- {% if val.vlan is defined and val.vlan %}
- {% for vlan in val.vlan %}
- {% if vlan.range_or_id is defined and vlan.range_or_id %}
- {% if "-" in (vlan.range_or_id|string) %}
- {% set vlan_start_end = (vlan.range_or_id|string).split("-") %}
- {% set vlans = [] %}
- {% for id in range(vlan_start_end[0]|int,vlan_start_end[1]|int+1) %}
- {{ vlans.append(id) }}
- {% endfor %}
- {% else %}
- {% set vlans = (vlan.range_or_id|string).split(",") %}
- {% endif %}
- {% for vlanid in vlans %}
- {% if vlan.state is defined and vlan.state == "absent" %}
- {% if vlan.bridge_priority is defined %}
- {% if not vlan.bridge_priority %}
-no spanning-tree vlan {{ vlanid}} priority
- {% endif %}
- {% endif %}
- {% else %}
- {% if vlan.bridge_priority is defined %}
- {% if vlan.bridge_priority == 0 or vlan.bridge_priority %}
-spanning-tree vlan {{ vlanid }} priority {{ vlan.bridge_priority }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
-{% if xstp_vars.mstp is defined and xstp_vars.mstp %}
- {% set val = xstp_vars.mstp %}
- {% if val.mstp_instances is defined and val.mstp_instances %}
- {% for instance in val.mstp_instances %}
- {% if instance.number is defined and instance.number %}
- {% if instance.bridge_priority is defined %}
- {% if instance.bridge_priority == 0 or instance.bridge_priority %}
-spanning-tree mst {{ instance.number }} priority {{ instance.bridge_priority }}
- {% else %}
-no spanning-tree mst {{ instance.number }} priority
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if val.mstp_instances is defined and val.mstp_instances %}
-spanning-tree mst configuration
- {% for instance in val.mstp_instances %}
- {% if instance.number is defined and instance.number %}
- {% if instance.vlans is defined and instance.vlans %}
- {% set vlans = (instance.vlans|string).split(",") %}
- {% for vlan in vlans %}
- {% if instance.vlans_state is defined and instance.vlans_state == "absent" %}
-instance {{ instance.number }} remove vlan {{ vlan }}
- {% else %}
-instance {{ instance.number }} add vlan {{ vlan }}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
-exit
- {% endif %}
-{% endif %}
-{% if xstp_vars.intf is defined and xstp_vars.intf %}
- {% for intr in xstp_vars.intf.keys() %}
- {% set intf_vars = xstp_vars.intf[intr] %}
-interface {{ intr }}
- {% if intf_vars.edge_port is defined %}
- {% if not intf_vars.edge_port %}
-no spanning-tree portfast
- {% else %}
-spanning-tree portfast
- {% endif %}
-exit
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endif %}
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml
deleted file mode 100644
index 2980eb659..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/inventory.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-switch1 ansible_host=100.94.51.40 ansible_network_os="dellemc.os6.os6"
-switch2 ansible_host=100.94.52.38 ansible_network_os="dellemc.os6.os6"
-
-[os6switches]
-switch1
-switch2 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml
deleted file mode 100644
index 5d11b69a9..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/main.os6.yaml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-# vars file for dellemc.os6.os6_xstp,
-# below gives a sample configuration
-# Sample variables for OS6 device
-
-os6_xstp:
- type: stp
- enable: true
- stp:
- bridge_priority: 4096
- pvst:
- vlan:
- - range_or_id: 10
- bridge_priority: 4096
- state: present
- mstp:
- mstp_instances:
- - number: 1
- vlans: 10,12
- bridge_priority: 4096
- vlans_state: present
- intf:
- Te1/0/5:
- edge_port: true \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml b/ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml
deleted file mode 100644
index 4efc0f332..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: os6switches
- connection: network_cli
- roles:
- - dellemc.os6.os6_xstp
diff --git a/ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml b/ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml
deleted file mode 100644
index bd62f2a5a..000000000
--- a/ansible_collections/dellemc/os6/roles/os6_xstp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os6.os6_xstp
diff --git a/ansible_collections/dellemc/os6/tests/.gitignore b/ansible_collections/dellemc/os6/tests/.gitignore
deleted file mode 100644
index ea1472ec1..000000000
--- a/ansible_collections/dellemc/os6/tests/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-output/
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml
deleted file mode 100644
index 4cf68f174..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/cli.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ item }}"
- with_items: "{{ test_items }}" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml
deleted file mode 100644
index d4898c29b..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml
deleted file mode 100644
index 95770c6ab..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/bad_operator.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- debug: msg="START cli/bad_operator.yaml"
-
-- name: test bad operator
- os6_command:
- commands:
- - show version
- - show interfaces GigabitEthernet 1/0/1
- wait_for:
- - "result[0] contains 'Description: Foo'"
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- debug: msg="END cli/bad_operator.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml
deleted file mode 100644
index dd0f7a78f..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/contains.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- debug: msg="START cli/contains.yaml"
-
-- name: test contains operator
- os6_command:
- commands:
- - show version
- - show interfaces GigabitEthernet 1/0/1
- wait_for:
- - "result[0] contains 5"
- - "result[1] contains Access"
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- debug: msg="END cli/contains.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml
deleted file mode 100644
index 493196df7..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/invalid.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- debug: msg="START cli/invalid.yaml"
-
-- name: run invalid command
- os6_command:
- commands: ['show foo']
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed"
-
-- name: run commands that include invalid command
- os6_command:
- commands:
- - show version
- - show foo
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed"
-
-- debug: msg="END cli/invalid.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml
deleted file mode 100644
index 8a87d5da5..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/output.yaml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- debug: msg="START cli/output.yaml"
-
-- name: get output for single command
- os6_command:
- commands: ['show version']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- name: get output for multiple commands
- os6_command:
- commands:
- - show version
- - show interfaces
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
- - "result.stdout | length == 2"
-
-- debug: msg="END cli/output.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml
deleted file mode 100644
index f1ea515d6..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_command/os6_command/tests/cli/timeout.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- debug: msg="START cli/timeout.yaml"
-
-- name: test bad condition
- os6_command:
- commands:
- - show version
- wait_for:
- - "result[0] contains bad_value_string"
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- debug: msg="END cli/timeout.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml
deleted file mode 100644
index 4cf68f174..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/cli.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ item }}"
- with_items: "{{ test_items }}" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml
deleted file mode 100644
index d4898c29b..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml
deleted file mode 100644
index 20d81a3df..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/backup.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- debug: msg="START cli/backup.yaml"
-
-- name: setup
- os6_config:
- commands:
- - no interface loopback 62
- provider: "{{ cli }}"
- ignore_errors: yes
-
-- name: collect any backup files
- find:
- paths: "{{ role_path }}/backup"
- pattern: "{{ inventory_hostname }}_config*"
- register: backup_files
- delegate_to: localhost
-
-- name: delete backup files
- file:
- path: "{{ item.path }}"
- state: absent
- with_items: "{{ backup_files.files }}"
-
-- name: configure device with config
- os6_config:
- src: basic/config.j2
- backup: yes
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "result.updates is defined"
-
-- name: collect any backup files
- find:
- paths: "{{ role_path }}/backup"
- pattern: "{{ inventory_hostname }}_config*"
- register: backup_files
- delegate_to: localhost
-
-- assert:
- that:
- - "backup_files.files is defined"
-
-- name: teardown
- os6_config:
- commands:
- - no interface loopback 62
- provider: "{{ cli }}"
-
-- debug: msg="END cli/backup.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml
deleted file mode 100644
index f4b1d0b66..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/basic.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-- debug: msg="START cli/basic.yaml"
-
-- name: setup
- os6_config:
- commands:
- - no interface loopback 62
- provider: "{{ cli }}"
- ignore_errors: yes
-
-- name: configure device with config
- os6_config:
- src: basic/config.j2
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "result.updates is defined"
-
-- name: check device with config
- os6_config:
- src: basic/config.j2
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.updates is defined"
-
-- name: teardown
- os6_config:
- commands:
- - no interface loopback 62
- provider: "{{ cli }}"
-- debug: msg="END cli/basic.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml
deleted file mode 100644
index 9969a9516..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/defaults.yaml
+++ /dev/null
@@ -1,43 +0,0 @@
----
-- debug: msg="START cli/defaults.yaml"
-
-- name: setup
- os6_config:
- commands:
- - no interface loopback 63
- provider: "{{ cli }}"
- ignore_errors: yes
-
-- name: configure device with defaults included
- os6_config:
- src: defaults/config.j2
- provider: "{{ cli }}"
- register: result
-
-- debug: var=result
-
-- assert:
- that:
- - "result.changed == true"
- - "result.updates is defined"
-
-- name: check device with defaults included
- os6_config:
- src: defaults/config.j2
- provider: "{{ cli }}"
- register: result
-
-- debug: var=result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.updates is defined"
-
-- name: teardown
- os6_config:
- commands:
- - no interface loopback 63
- provider: "{{ cli }}"
-
-- debug: msg="END cli/defaults.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml
deleted file mode 100644
index 37c2c4b2b..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/force.yaml
+++ /dev/null
@@ -1,41 +0,0 @@
----
-- debug: msg="START cli/force.yaml"
-
-- name: setup
- os6_config:
- commands:
- - interface loopback 62
- provider: "{{ cli }}"
- ignore_errors: yes
-
-- name: configure device with config
- os6_config:
- src: basic/config.j2
- provider: "{{ cli }}"
- match: none
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "result.updates is defined"
-
-- name: check device with config
- os6_config:
- src: basic/config.j2
- provider: "{{ cli }}"
- match: none
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "result.updates is defined"
-
-- name: teardown
- os6_config:
- commands:
- - no interface loopback 62
- provider: "{{ cli }}"
-
-- debug: msg="END cli/force.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml
deleted file mode 100644
index b978e8b3b..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- debug: msg="START cli/sublevel.yaml"
-
-- name: setup test
- os6_config:
- lines:
- - 'no ip access-list test'
- provider: "{{ cli }}"
- match: none
-
-- name: configure sub level command
- os6_config:
- lines: ['1000 permit every log']
- parents: ['ip access-list test']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'ip access-list test' in result.updates"
- - "'1000 permit every log' in result.updates"
-
-- name: configure sub level command idempotent check
- os6_config:
- lines: ['1000 permit every log']
- parents: ['ip access-list test']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os6_config:
- lines:
- - 'no ip access-list test'
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/sublevel.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml
deleted file mode 100644
index db47989fa..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_block.yaml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- debug: msg="START cli/sublevel_block.yaml"
-
-- name: setup
- os6_config:
- lines:
- - permit ip 1.1.1.1 0.0.0.0 any log
- - permit ip 2.2.2.2 0.0.0.0 any log
- - permit ip 3.3.3.3 0.0.0.0 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- after: ['exit']
- provider: "{{ cli }}"
- match: none
-
-- name: configure sub level command using block replace
- os6_config:
- lines:
- - 1000 permit ip 1.1.1.1 0.0.0.0 any log
- - 1010 permit ip 2.2.2.2 0.0.0.0 any log
- - 1020 permit ip 3.3.3.3 0.0.0.0 any log
- - 1030 permit ip 4.4.4.4 0.0.0.0 any log
- parents: ['ip access-list test']
- replace: block
- after: ['exit']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'ip access-list test' in result.updates"
- - "'1000 permit ip 1.1.1.1 0.0.0.0 any log' in result.updates"
- - "'1010 permit ip 2.2.2.2 0.0.0.0 any log' in result.updates"
- - "'1020 permit ip 3.3.3.3 0.0.0.0 any log' in result.updates"
- - "'1030 permit ip 4.4.4.4 0.0.0.0 any log' in result.updates"
-
-- name: check sub level command using block replace
- os6_config:
- lines:
- - 1000 permit ip 1.1.1.1 0.0.0.0 any log
- - 1010 permit ip 2.2.2.2 0.0.0.0 any log
- - 1020 permit ip 3.3.3.3 0.0.0.0 any log
- - 1030 permit ip 4.4.4.4 0.0.0.0 any log
- parents: ['ip access-list test']
- replace: block
- after: ['exit']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os6_config:
- lines:
- - no ip access-list test
- match: none
- provider: "{{ cli }}"
-
-- debug: msg="END cli/sublevel_block.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml
deleted file mode 100644
index bafe24c59..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_exact.yaml
+++ /dev/null
@@ -1,66 +0,0 @@
----
-- debug: msg="START cli/sublevel_exact.yaml"
-
-- name: setup
- os6_config:
- lines:
- - permit ip 1.1.1.1 0.0.0.0 any log
- - permit ip 2.2.2.2 0.0.0.0 any log
- - permit ip 3.3.3.3 0.0.0.0 any log
- - permit ip 4.4.4.4 0.0.0.0 any log
- - permit ip 5.5.5.5 0.0.0.0 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- after: ['exit']
- provider: "{{ cli }}"
- match: none
-
-- name: configure sub level command using exact match
- os6_config:
- lines:
- - 1000 permit ip 1.1.1.1 0.0.0.0 any log
- - 1010 permit ip 2.2.2.2 0.0.0.0 any log
- - 1020 permit ip 3.3.3.3 0.0.0.0 any log
- - 1030 permit ip 4.4.4.4 0.0.0.0 any log
- parents: ['ip access-list test']
- after: ['exit']
- match: exact
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'ip access-list test' in result.updates"
- - "'1000 permit ip 1.1.1.1 0.0.0.0 any log' in result.updates"
- - "'1010 permit ip 2.2.2.2 0.0.0.0 any log' in result.updates"
- - "'1020 permit ip 3.3.3.3 0.0.0.0 any log' in result.updates"
- - "'1030 permit ip 4.4.4.4 0.0.0.0 any log' in result.updates"
- - "'1040 permit ip 5.5.5.5 0.0.0.0 any log' not in result.updates"
-
-- name: check sub level command using exact match
- os6_config:
- lines:
- - 1000 permit ip 1.1.1.1 0.0.0.0 any log
- - 1010 permit ip 2.2.2.2 0.0.0.0 any log
- - 1020 permit ip 3.3.3.3 0.0.0.0 any log
- - 1030 permit ip 4.4.4.4 0.0.0.0 any log
- - 1040 permit ip 5.5.5.5 0.0.0.0 any log
- parents: ['ip access-list test']
- after: ['exit']
- match: exact
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os6_config:
- lines:
- - no ip access-list test
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/sublevel_exact.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml
deleted file mode 100644
index 51049e5e0..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/sublevel_strict.yaml
+++ /dev/null
@@ -1,63 +0,0 @@
----
-- debug: msg="START cli/sublevel_strict.yaml"
-
-- name: setup
- os6_config:
- lines:
- - permit ip 1.1.1.1 0.0.0.0 any log
- - permit ip 2.2.2.2 0.0.0.0 any log
- - permit ip 3.3.3.3 0.0.0.0 any log
- - permit ip 4.4.4.4 0.0.0.0 any log
- - permit ip 5.5.5.5 0.0.0.0 any log
- parents: ['ip access-list test']
- before: ['no ip access-list test']
- after: ['exit']
- provider: "{{ cli }}"
- match: none
-
-- name: configure sub level command using strict match
- os6_config:
- lines:
- - 1000 permit ip 1.1.1.1 0.0.0.0 any log
- - 1010 permit ip 2.2.2.2 0.0.0.0 any log
- - 1020 permit ip 3.3.3.3 0.0.0.0 any log
- - 1030 permit ip 4.4.4.4 0.0.0.0 any log
- parents: ['ip access-list test']
- match: strict
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: check sub level command using strict match
- os6_config:
- lines:
- - 1000 permit ip 1.1.1.1 0.0.0.0 any log
- - 1010 permit ip 3.3.3.3 0.0.0.0 any log
- - 1020 permit ip 2.2.2.2 0.0.0.0 any log
- parents: ['ip access-list test']
- after: ['exit']
- match: strict
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'ip access-list test' in result.updates"
- - "'1000 permit ip 1.1.1.1 0.0.0.0 any log' not in result.updates"
- - "'1020 permit ip 2.2.2.2 0.0.0.0 any log' in result.updates"
- - "'1010 permit ip 3.3.3.3 0.0.0.0 any log' in result.updates"
- - "'1030 permit ip 4.4.4.4 0.0.0.0 any log' not in result.updates"
- - "'1040 permit ip 5.5.5.5 0.0.0.0 any log' not in result.updates"
-
-- name: teardown
- os6_config:
- lines:
- - no ip access-list test
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/sublevel_strict.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml
deleted file mode 100644
index 36cdb9a41..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- debug: msg="START cli/toplevel.yaml"
-
-- name: setup
- os6_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- match: none
-
-- name: configure top level command
- os6_config:
- lines: ['hostname foo']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
-
-- name: configure top level command idempotent check
- os6_config:
- lines: ['hostname "foo"']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os6_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/toplevel.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml
deleted file mode 100644
index 287bdb9a3..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_after.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- debug: msg="START cli/toplevel_after.yaml"
-
-- name: setup
- os6_config:
- lines:
- - "snmp-server contact ansible"
- - "hostname {{ inventory_hostname }}"
- provider: "{{ cli }}"
- match: none
-
-- name: configure top level command with after
- os6_config:
- lines: ['hostname foo']
- after: ['snmp-server contact bar']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
- - "'snmp-server contact bar' in result.updates"
-
-- name: configure top level command with after idempotent check
- os6_config:
- lines: ['hostname "foo"']
- after: ['snmp-server contact foo']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os6_config:
- lines:
- - "no snmp-server contact"
- - "hostname {{ inventory_hostname }}"
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/toplevel_after.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml
deleted file mode 100644
index d058abfb7..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_before.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- debug: msg="START cli/toplevel_before.yaml"
-
-- name: setup
- os6_config:
- lines:
- - "snmp-server contact ansible"
- - "hostname {{ inventory_hostname }}"
- provider: "{{ cli }}"
- match: none
-
-- name: configure top level command with before
- os6_config:
- lines: ['hostname foo']
- before: ['snmp-server contact bar']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
- - "'snmp-server contact bar' in result.updates"
-
-- name: configure top level command with before idempotent check
- os6_config:
- lines: ['hostname "foo"']
- before: ['snmp-server contact foo']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os6_config:
- lines:
- - "no snmp-server contact"
- - "hostname {{ inventory_hostname }}"
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/toplevel_before.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml
deleted file mode 100644
index d529e8d11..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_config/os6_config/tests/cli/toplevel_nonidempotent.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- debug: msg="START cli/toplevel_nonidempotent.yaml"
-
-- name: setup
- os6_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- match: none
-
-- name: configure top level command
- os6_config:
- lines: ['hostname foo']
- provider: "{{ cli }}"
- match: strict
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
-
-- name: configure top level command idempotent check
- os6_config:
- lines: ['hostname foo']
- provider: "{{ cli }}"
- match: strict
- register: result
-
-- assert:
- that:
- - "result.changed == true"
-
-- name: teardown
- os6_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/toplevel_nonidempotent.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml
deleted file mode 100644
index 7152815d7..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/cli.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact:
- test_items: "{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ item }}"
- with_items: "{{ test_items }}" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml
deleted file mode 100644
index d4898c29b..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml b/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml
deleted file mode 100644
index 1834f7b1e..000000000
--- a/ansible_collections/dellemc/os6/tests/integration/targets/os6_facts/os6_facts/tests/cli/facts.yaml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-- debug: msg="START cli/facts.yaml"
-
-- name: test all facts
- os6_facts:
- gather_subset:
- - all
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts is defined"
-
-- name: test all facts except hardware
- os6_facts:
- gather_subset:
- - "!hardware"
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts.ansible_net_memfree_mb is not defined"
-
-- name: test interface facts
- os6_facts:
- gather_subset:
- - interfaces
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts.ansible_net_interfaces is defined"
- - "result.ansible_facts.ansible_net_memfree_mb is not defined"
-
-
-- debug: msg="END cli/facts.yaml"
diff --git a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt
deleted file mode 100644
index 6945f1c26..000000000
--- a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.10.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-plugins/action/os6.py action-plugin-docs
-plugins/modules/os6_config.py validate-modules:parameter-list-no-elements
-plugins/modules/os6_facts.py validate-modules:parameter-list-no-elements
-plugins/modules/os6_command.py validate-modules:parameter-list-no-elements
diff --git a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt
deleted file mode 100644
index 91049edca..000000000
--- a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.11.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-plugins/action/os6.py action-plugin-docs
-plugins/modules/os6_config.py validate-modules:parameter-list-no-elements
-plugins/modules/os6_facts.py validate-modules:parameter-list-no-elements
-plugins/modules/os6_command.py validate-modules:parameter-list-no-elements \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt
deleted file mode 100644
index 99f52d2e9..000000000
--- a/ansible_collections/dellemc/os6/tests/sanity/ignore-2.9.txt
+++ /dev/null
@@ -1 +0,0 @@
-plugins/action/os6.py action-plugin-docs \ No newline at end of file
diff --git a/ansible_collections/dellemc/os6/tests/sanity/requirements.txt b/ansible_collections/dellemc/os6/tests/sanity/requirements.txt
deleted file mode 100644
index 3e3a96692..000000000
--- a/ansible_collections/dellemc/os6/tests/sanity/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-packaging # needed for update-bundled and changelog
-sphinx ; python_version >= '3.5' # docs build requires python 3+
-sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+
-straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg
deleted file mode 100644
index a8ed721c8..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_config.cfg
+++ /dev/null
@@ -1,16 +0,0 @@
-!
-hostname router
-exit
-!
-interface Te1/0/1
-description "test_string"
-exit
-!
-interface Te1/0/2
-no shutdown
-exit
-!
-interface Te1/0/9
-switchport access vlan 2
-exit
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg
deleted file mode 100644
index 70d5f6653..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/os6_config_src.cfg
+++ /dev/null
@@ -1,7 +0,0 @@
-!
-hostname foo
-exit
-!
-interface Te1/0/2
-shutdown
-exit
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces
deleted file mode 100644
index f6aede901..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces
+++ /dev/null
@@ -1,41 +0,0 @@
-Interface Name................................. Te1/0/1
-SOC Hardware Info.............................. BCM56842_A1
-Link Status.................................... Up /None
-Keepalive Enabled.............................. FALSE
-Err-disable Cause.............................. None
-VLAN Membership Mode........................... Trunk Mode
-VLAN Membership................................ (1),2-4096
-MTU Size....................................... 1518
-Port Mode [Duplex]............................. Full
-Port Speed..................................... 1000
-Link Debounce Flaps............................ 0
-Auto-Negotation Status......................... Auto
-Burned MAC Address............................. F8B1.565B.615E
-L3 MAC Address................................. F8B1.565B.615F
-Sample Load Interval........................... 300
-Received Input Rate Bits/Sec................... 0
-Received Input Rate Packets/Sec................ 0
-Transmitted Input Rate Bits/Sec................ 440
-Transmitted Input Rate Packets/Sec : .......... 0
-Total Packets Received Without Errors.......... 0
-Unicast Packets Received....................... 0
-Multicast Packets Received..................... 0
-Broadcast Packets Received..................... 0
-Total Packets Received with MAC Errors......... 0
-Jabbers Received............................... 0
-Fragments/Undersize Received................... 0
-Alignment Errors............................... 0
-FCS Errors..................................... 0
-Overruns....................................... 0
-Total Received Packets Not Forwarded........... 0
-Total Packets Transmitted Successfully......... 381302
-Unicast Packets Transmitted.................... 1
-Multicast Packets Transmitted.................. 351645
-Broadcast Packets Transmitted.................. 29656
-Transmit Packets Discarded..................... 0
-Total Transmit Errors.......................... 0
-Total Transmit Packets Discarded............... 0
-Single Collision Frames........................ 0
-Multiple Collision Frames...................... 0
-Excessive Collision Frames..................... 0
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status
deleted file mode 100644
index 28defda61..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_status
+++ /dev/null
@@ -1,48 +0,0 @@
-Port Description Duplex Speed Neg Link Flow M VLAN
- State Ctrl
---------- --------------- ------ ------- ---- ------ ----- -- -------------------
-Te1/0/1 connected to sp Full 1000 Auto Up Off T (1),2-4096
-Te1/0/2 to_NIC_1 Full 1000 Auto Up Off A 99
-Te1/0/3 N/A Unknown Auto Down Off A 1
-Te1/0/4 N/A Unknown Auto Down Off A 1
-Te1/0/5 N/A Unknown Auto Down Off A 1
-Te1/0/6 N/A Unknown Auto Down Off A 1
-Te1/0/7 N/A Unknown Auto Down Off A 1
-Te1/0/8 N/A Unknown Auto Down Off A 1
-Te1/0/9 N/A Unknown Auto Down Off A 2
-Te1/0/10 N/A Unknown Auto Down Off A 1
-Te1/0/11 N/A Unknown Auto Down Off A 1
-Te1/0/12 N/A Unknown Auto Down Off A 1
-Te1/0/13 N/A Unknown Auto Down Off A 1
-Te1/0/14 N/A Unknown Auto Down Off A 1
-Te1/0/15 N/A Unknown Auto Down Off A 1
-Te1/0/16 N/A Unknown Auto Down Off A 1
-Te1/0/17 N/A Unknown Auto Down Off A 1
-Te1/0/18 N/A Unknown Auto Down Off A 1
-Te1/0/19 N/A Unknown Auto Down Off A 1
-Te1/0/20 N/A Unknown Auto Down Off A 1
-Te1/0/21 N/A Unknown Auto Down Off A 1
-Te1/0/22 N/A Unknown Auto Down Off A 100
-Te1/0/23 N/A Unknown Auto Down Off A 1
-Te1/0/24 N/A Unknown Auto Down Off A 1
-Fo1/1/1 N/A N/A N/A Detach N/A
-Fo1/1/2 Full 40000 Off Down Off A 1
-Te1/1/1 N/A N/A N/A Detach N/A
-Te1/1/2 N/A N/A N/A Detach N/A
-Te1/1/3 N/A N/A N/A Detach N/A
-Te1/1/4 N/A N/A N/A Detach N/A
-Te1/1/5 N/A N/A N/A Detach N/A
-Te1/1/6 N/A N/A N/A Detach N/A
-Te1/1/7 N/A N/A N/A Detach N/A
-Te1/1/8 N/A N/A N/A Detach N/A
-
-Oob Type Link
- State
---- ------------------------------ -----
-oob Out-Of-Band Up
-
-
-Port Description Link M VLAN
-Channel State
-------- ------------------------------ ------- -- -------------------
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties
deleted file mode 100644
index 976f45a82..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_interfaces_transceiver_properties
+++ /dev/null
@@ -1,6 +0,0 @@
-Yes: Dell Qualified No: Not Qualified
-N/A : Not Applicable
-Port Type Media Serial Number Dell Qualified
---------- ------- --------------------- --------------------- --------------
-
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int
deleted file mode 100644
index 043ee2cc2..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_ip_int
+++ /dev/null
@@ -1,15 +0,0 @@
-Default Gateway................................ 0.0.0.0
-L3 MAC Address................................. F8B1.565B.615F
-
-Routing Interfaces:
-
-Interface State IP Address IP Mask Method
----------- ----- --------------- --------------- -------
-Vl1 Down 0.0.0.0 0.0.0.0 None
-Vl2 Up 0.0.0.0 0.0.0.0 DHCP
-Vl99 Up 10.99.1.2 255.255.0.0 Manual
-Vl100 Up 3.3.3.3 255.255.255.0 Manual
-Vl999 Up 10.250.1.2 255.255.255.0 Manual
-Vl1010 Up 10.1.1.1 255.255.255.0 Manual
-Vl1681 Up 192.168.100.1 255.255.255.0 Manual
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp
deleted file mode 100644
index be89c415b..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp
+++ /dev/null
@@ -1,11 +0,0 @@
-LLDP Global Configuration
-
-
-Transmit Interval............................ 30 seconds
-
-Transmit Hold Multiplier..................... 4
-
-Reinit Delay................................. 2 seconds
-
-Notification Interval........................ 5 seconds
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all
deleted file mode 100644
index 2a22f444f..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_lldp_remote-device_all
+++ /dev/null
@@ -1,10 +0,0 @@
-LLDP Remote Device Summary
-
-Local
-Interface RemID Chassis ID Port ID System Name
---------- ------- ------------------- ----------------- -----------------
-Te1/0/5 14 F8:B1:56:70:49:38 Gi1/0/5 MAA-N2048-6884
-Te1/0/6 15 F8:B1:56:70:49:38 Gi1/0/6 MAA-N2048-6884
-
-
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu
deleted file mode 100644
index 426576938..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_memory_cpu
+++ /dev/null
@@ -1,3 +0,0 @@
-Total Memory................................... 1723232 KBytes
-Available Memory Space......................... 638144 KBytes
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config
deleted file mode 100644
index b589c2968..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config
+++ /dev/null
@@ -1,124 +0,0 @@
-!Current Configuration:
-!System Description "Dell Networking N4064F, 6.3.3.10, Linux 3.7.10-e54850e7"
-!System Software Version 6.3.3.10
-!Cut-through mode is configured as disabled
-!
-configure
-hostname "os6"
-slot 1/0 5 ! Dell Networking N4064F
-slot 1/1 8 ! Dell 10GBase-T Card
-stack
-member 1 4 ! N4064F
-exit
-interface out-of-band
-ip address 10.16.148.73 255.255.0.0 10.16.144.254
-exit
-no logging console
-interface vlan 1
-ip address dhcp
-exit
-no passwords min-length
-username "admin" password 21232f297a57a5a743894a0e4a801fc3 privilege 1 encrypted
-line telnet
-exec-timeout 0
-exit
-ip ssh server
-application install SupportAssist auto-restart start-on-boot
-!
-interface Te1/0/1
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/2
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/3
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/4
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/5
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/6
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/7
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/8
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/9
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/10
-no switchport port-security violation protect
-exit
-!
-interface Te1/0/11
-no switchport port-security violation protect
-exit
-!
-interface port-channel 1
-no switchport port-security violation protect
-exit
-!
-interface port-channel 2
-no switchport port-security violation protect
-exit
-!
-interface port-channel 3
-no switchport port-security violation protect
-exit
-!
-interface port-channel 4
-no switchport port-security violation protect
-exit
-!
-interface port-channel 5
-no switchport port-security violation protect
-exit
-!
-snmp-server enable traps dvmrp
-snmp-server enable traps pim
-no snmp-server enable traps vrrp
-no snmp-server enable traps acl
-snmp-server enable traps captive-portal
-snmp-server enable traps captive-portal client-auth-failure
-snmp-server enable traps captive-portal client-connect
-snmp-server enable traps captive-portal client-db-full
-snmp-server enable traps captive-portal client-disconnect
-router bgp 11
-bgp router-id 1.1.1.1
-maximum-paths 2
-maximum-paths ibgp 2
-network 101.1.2.0 mask 255.255.255.0
-template peer MUX_HNV_ACCESS
-remote-as 64918
-exit
-neighbor 10.10.234.16 remote-as 64818
-neighbor 10.10.234.16 default-originate
-neighbor 10.10.234.16 timers 2 5
-neighbor 2001:4898:5808:ffa2::1 remote-as 64818
-neighbor 2001:4898:5808:ffa2::1 default-originate
-neighbor 2001:4898:5808:ffa2::1 timers 2 4
-address-family ipv6
-network 2001:4898:5808:ffa0::/126
-redistribute connected
-exit
-exit
-enable password c4f25f005187e9a85ad6480d3507a541 encrypted
-openflow
-exit
-eula-consent support-assist reject
-exit
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname
deleted file mode 100644
index 2015aaf9a..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_running-config__include_hostname
+++ /dev/null
@@ -1,3 +0,0 @@
-hostname "os6_sw1"
-
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version
deleted file mode 100644
index 37c58e8b0..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/fixtures/show_version
+++ /dev/null
@@ -1,17 +0,0 @@
-Machine Description............... Dell Networking Switch
-System Model ID................... N4032
-Machine Type...................... Dell Networking N4032
-Serial Number..................... CN04G4FP282984AI0097A01
-Manufacturer...................... 0xbc00
-Burned In MAC Address............. F8B1.565B.615C
-System Object ID.................. 1.3.6.1.4.1.674.10895.3042
-CPU Version....................... XLP308H-B2
-SOC Version....................... BCM56842_A1
-HW Version........................ 3
-CPLD Version...................... 17
-
-unit active backup current-active next-active
----- ----------- ----------- -------------- --------------
-1 6.3.3.7 6.3.2.7 6.3.3.7 6.3.3.7
-
-
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py
deleted file mode 100644
index 4f8cb8c98..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/os6_module.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import json
-
-from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
-
-
-fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
-fixture_data = {}
-
-
-def load_fixture(name):
- path = os.path.join(fixture_path, name)
-
- if path in fixture_data:
- return fixture_data[path]
-
- with open(path) as f:
- data = f.read()
-
- try:
- data = json.loads(data)
- except Exception:
- pass
-
- fixture_data[path] = data
- return data
-
-
-class TestDellos6Module(ModuleTestCase):
-
- def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
-
- self.load_fixtures(commands)
-
- if failed:
- result = self.failed()
- self.assertTrue(result['failed'], result)
- else:
- result = self.changed(changed)
- self.assertEqual(result['changed'], changed, result)
-
- if commands is not None:
- if sort:
- self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
- else:
- self.assertEqual(commands, result['updates'], result['updates'])
-
- return result
-
- def failed(self):
- with self.assertRaises(AnsibleFailJson) as exc:
- self.module.main()
-
- result = exc.exception.args[0]
- self.assertTrue(result['failed'], result)
- return result
-
- def changed(self, changed=False):
- with self.assertRaises(AnsibleExitJson) as exc:
- self.module.main()
-
- result = exc.exception.args[0]
- self.assertEqual(result['changed'], changed, result)
- return result
-
- def load_fixtures(self, commands=None):
- pass
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py
deleted file mode 100644
index b1f3f23fb..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_command.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.compat.tests.mock import patch
-from ansible_collections.dellemc.os6.plugins.modules import os6_command
-from units.modules.utils import set_module_args
-from .os6_module import TestDellos6Module, load_fixture
-
-
-class TestDellos6CommandModule(TestDellos6Module):
-
- module = os6_command
-
- def setUp(self):
- super(TestDellos6CommandModule, self).setUp()
-
- self.mock_run_commands = patch('ansible.modules.network.os6.os6_command.run_commands')
- self.run_commands = self.mock_run_commands.start()
-
- def tearDown(self):
- super(TestDellos6CommandModule, self).tearDown()
- self.mock_run_commands.stop()
-
- def load_fixtures(self, commands=None):
-
- def load_from_file(*args, **kwargs):
- module, commands = args
- output = list()
-
- for item in commands:
- try:
- obj = json.loads(item['command'])
- command = obj['command']
- except ValueError:
- command = item['command']
- filename = str(command).replace(' ', '_')
- output.append(load_fixture(filename))
- return output
-
- self.run_commands.side_effect = load_from_file
-
- def test_os6_command_simple(self):
- set_module_args(dict(commands=['show version']))
- result = self.execute_module()
- self.assertEqual(len(result['stdout']), 1)
- self.assertTrue(result['stdout'][0].startswith('Machine Description'))
-
- def test_os6_command_multiple(self):
- set_module_args(dict(commands=['show version', 'show version']))
- result = self.execute_module()
- self.assertEqual(len(result['stdout']), 2)
- self.assertTrue(result['stdout'][0].startswith('Machine Description'))
-
- def test_os6_command_wait_for(self):
- wait_for = 'result[0] contains "Machine Description"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for))
- self.execute_module()
-
- def test_os6_command_wait_for_fails(self):
- wait_for = 'result[0] contains "test string"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for))
- self.execute_module(failed=True)
- self.assertEqual(self.run_commands.call_count, 10)
-
- def test_os6_command_retries(self):
- wait_for = 'result[0] contains "test string"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
- self.execute_module(failed=True)
- self.assertEqual(self.run_commands.call_count, 2)
-
- def test_os6_command_match_any(self):
- wait_for = ['result[0] contains "Machine Description"',
- 'result[0] contains "test string"']
- set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
- self.execute_module()
-
- def test_os6_command_match_all(self):
- wait_for = ['result[0] contains "Machine Description"',
- 'result[0] contains "Dell Networking"']
- set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
- self.execute_module()
-
- def test_os6_command_match_all_failure(self):
- wait_for = ['result[0] contains "Machine Description"',
- 'result[0] contains "test string"']
- commands = ['show version', 'show version']
- set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
- self.execute_module(failed=True)
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py
deleted file mode 100644
index 1d2f60eb3..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_config.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.compat.tests.mock import patch
-from ansible_collections.dellemc.os6.plugins.modules import os6_config
-from units.modules.utils import set_module_args
-from .os6_module import TestDellos6Module, load_fixture
-
-
-class TestDellos6ConfigModule(TestDellos6Module):
-
- module = os6_config
-
- def setUp(self):
- super(TestDellos6ConfigModule, self).setUp()
-
- self.mock_get_config = patch('ansible.modules.network.os6.os6_config.get_config')
- self.get_config = self.mock_get_config.start()
-
- self.mock_load_config = patch('ansible.modules.network.os6.os6_config.load_config')
- self.load_config = self.mock_load_config.start()
-
- self.mock_run_commands = patch('ansible.modules.network.os6.os6_config.run_commands')
- self.run_commands = self.mock_run_commands.start()
-
- def tearDown(self):
- super(TestDellos6ConfigModule, self).tearDown()
- self.mock_get_config.stop()
- self.mock_load_config.stop()
- self.mock_run_commands.stop()
-
- def load_fixtures(self, commands=None):
- config_file = 'os6_config_config.cfg'
- self.get_config.return_value = load_fixture(config_file)
- self.load_config.return_value = None
-
- def test_os6_config_unchanged(self):
- src = load_fixture('os6_config_config.cfg')
- set_module_args(dict(src=src))
- self.execute_module()
-
- def test_os6_config_src(self):
- src = load_fixture('os6_config_src.cfg')
- set_module_args(dict(src=src))
- commands = ['hostname foo', 'exit', 'interface Te1/0/2', 'shutdown', 'exit']
- self.execute_module(changed=True, commands=commands)
-
- def test_os6_config_backup(self):
- set_module_args(dict(backup=True))
- result = self.execute_module()
- self.assertIn('__backup__', result)
-
- def test_os6_config_save(self):
- set_module_args(dict(save=True))
- self.execute_module(changed=True)
- self.assertEqual(self.run_commands.call_count, 1)
- self.assertEqual(self.get_config.call_count, 0)
- self.assertEqual(self.load_config.call_count, 0)
- args = self.run_commands.call_args[0][1]
- self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0])
-# self.assertIn('copy running-config startup-config\r', args)
-
- def test_os6_config_lines_wo_parents(self):
- set_module_args(dict(lines=['hostname foo']))
- commands = ['hostname foo']
- self.execute_module(changed=True, commands=commands)
-
- def test_os6_config_lines_w_parents(self):
- set_module_args(dict(lines=['description "teest"', 'exit'], parents=['interface Te1/0/2']))
- commands = ['interface Te1/0/2', 'description "teest"', 'exit']
- self.execute_module(changed=True, commands=commands)
-
- def test_os6_config_before(self):
- set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar']))
- commands = ['snmp-server contact bar', 'hostname foo']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os6_config_after(self):
- set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar']))
- commands = ['hostname foo', 'snmp-server contact bar']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os6_config_before_after_no_change(self):
- set_module_args(dict(lines=['hostname router'],
- before=['snmp-server contact bar'],
- after=['snmp-server location chennai']))
- self.execute_module()
-
- def test_os6_config_config(self):
- config = 'hostname localhost'
- set_module_args(dict(lines=['hostname router'], config=config))
- commands = ['hostname router']
- self.execute_module(changed=True, commands=commands)
-
- def test_os6_config_replace_block(self):
- lines = ['description test string', 'shutdown']
- parents = ['interface Te1/0/2']
- set_module_args(dict(lines=lines, replace='block', parents=parents))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands)
-
- def test_os6_config_match_none(self):
- lines = ['hostname router']
- set_module_args(dict(lines=lines, match='none'))
- self.execute_module(changed=True, commands=lines)
-
-    def test_os6_config_match_none_parents(self):
- lines = ['description test string', 'shutdown']
- parents = ['interface Te1/0/2']
- set_module_args(dict(lines=lines, parents=parents, match='none'))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os6_config_match_strict(self):
- lines = ['description "test_string"',
- 'shutdown']
- parents = ['interface Te1/0/1']
- set_module_args(dict(lines=lines, parents=parents, match='strict'))
- commands = parents + ['shutdown']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os6_config_match_exact(self):
- lines = ['description test_string', 'shutdown']
- parents = ['interface Te1/0/1']
- set_module_args(dict(lines=lines, parents=parents, match='exact'))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands, sort=False)
diff --git a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py b/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py
deleted file mode 100644
index ace3a8a8e..000000000
--- a/ansible_collections/dellemc/os6/tests/unit/modules/network/os6/test_os6_facts.py
+++ /dev/null
@@ -1,105 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from unittest.mock import patch
-from units.modules.utils import set_module_args
-from .os6_module import TestDellos6Module, load_fixture
-from ansible_collections.dellemc.os6.plugins.modules import os6_facts
-
-
-class TestDellos6Facts(TestDellos6Module):
-
- module = os6_facts
-
- def setUp(self):
- super(TestDellos6Facts, self).setUp()
-
- self.mock_run_command = patch(
- 'ansible_collections.dellemc.os6.plugins.modules.os6_facts.run_commands')
- self.run_command = self.mock_run_command.start()
-
- def tearDown(self):
- super(TestDellos6Facts, self).tearDown()
-
- self.mock_run_command.stop()
-
- def load_fixtures(self, commands=None):
-
- def load_from_file(*args, **kwargs):
- module, commands = args
- output = list()
-
- for item in commands:
- try:
- obj = json.loads(item)
- command = obj['command']
- except ValueError:
- command = item
- if '|' in command:
- command = str(command).replace('|', '')
- filename = str(command).replace(' ', '_')
- filename = filename.replace('/', '7')
- output.append(load_fixture(filename))
- return output
-
- self.run_command.side_effect = load_from_file
-
- def test_os6_facts_gather_subset_default(self):
- set_module_args(dict())
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('"os6_sw1"', ansible_facts['ansible_net_hostname'])
- self.assertIn('Te1/0/1', ansible_facts['ansible_net_interfaces'].keys())
- self.assertEquals(1682, ansible_facts['ansible_net_memtotal_mb'])
- self.assertEquals(623, ansible_facts['ansible_net_memfree_mb'])
-
- def test_os6_facts_gather_subset_config(self):
- set_module_args({'gather_subset': 'config'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('"os6_sw1"', ansible_facts['ansible_net_hostname'])
- self.assertIn('ansible_net_config', ansible_facts)
-
- def test_os6_facts_gather_subset_hardware(self):
- set_module_args({'gather_subset': 'hardware'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
- self.assertEqual(1682, ansible_facts['ansible_net_memtotal_mb'])
- self.assertEqual(623, ansible_facts['ansible_net_memfree_mb'])
-
- def test_os6_facts_gather_subset_interfaces(self):
- set_module_args({'gather_subset': 'interfaces'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('Te1/0/1', ansible_facts['ansible_net_interfaces'].keys())
- self.assertEqual(['Te1/0/5', 'Te1/0/6'], sorted(ansible_facts['ansible_net_neighbors'].keys()))
- self.assertIn('ansible_net_interfaces', ansible_facts)
diff --git a/ansible_collections/dellemc/os9/.ansible-lint b/ansible_collections/dellemc/os9/.ansible-lint
deleted file mode 100644
index d8c4900d7..000000000
--- a/ansible_collections/dellemc/os9/.ansible-lint
+++ /dev/null
@@ -1,2 +0,0 @@
-skip_list:
- - '208'
diff --git a/ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml
deleted file mode 100644
index 6834c54a2..000000000
--- a/ansible_collections/dellemc/os9/.github/workflows/ansible-test.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-name: CI
-on:
-- pull_request
-
-jobs:
- sanity:
- name: Sanity (${{ matrix.ansible }})
- strategy:
- matrix:
- ansible:
- - stable-2.10
- - devel
- runs-on: ubuntu-latest
- steps:
-
- - name: Check out code
- uses: actions/checkout@v1
- with:
- path: ansible_collections/dellemc/os9
-
- - name: Set up Python 3.6
- uses: actions/setup-python@v1
- with:
- python-version: 3.6
-
- - name: Install ansible-base (${{ matrix.ansible }})
- run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check
-
- - name: Install ansible_collections.ansible.netcommon
- run: ansible-galaxy collection install ansible.netcommon -p ../../
-
- - name: Run sanity tests
- run: ansible-test sanity --docker -v --color --python 3.6
diff --git a/ansible_collections/dellemc/os9/.gitignore b/ansible_collections/dellemc/os9/.gitignore
deleted file mode 100644
index c6fc14ad0..000000000
--- a/ansible_collections/dellemc/os9/.gitignore
+++ /dev/null
@@ -1,387 +0,0 @@
-
-# Created by https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-# Edit at https://www.gitignore.io/?templates=git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
-
-### dotenv ###
-.env
-
-### Emacs ###
-# -*- mode: gitignore; -*-
-*~
-\#*\#
-/.emacs.desktop
-/.emacs.desktop.lock
-*.elc
-auto-save-list
-tramp
-.\#*
-
-# Org-mode
-.org-id-locations
-*_archive
-
-# flymake-mode
-*_flymake.*
-
-# eshell files
-/eshell/history
-/eshell/lastdir
-
-# elpa packages
-/elpa/
-
-# reftex files
-*.rel
-
-# AUCTeX auto folder
-/auto/
-
-# cask packages
-.cask/
-dist/
-
-# Flycheck
-flycheck_*.el
-
-# server auth directory
-/server/
-
-# projectiles files
-.projectile
-
-# directory configuration
-.dir-locals.el
-
-# network security
-/network-security.data
-
-
-### Git ###
-# Created by git for backups. To disable backups in Git:
-# $ git config --global mergetool.keepBackup false
-*.orig
-
-# Created by git when using merge tools for conflicts
-*.BACKUP.*
-*.BASE.*
-*.LOCAL.*
-*.REMOTE.*
-*_BACKUP_*.txt
-*_BASE_*.txt
-*_LOCAL_*.txt
-*_REMOTE_*.txt
-
-#!! ERROR: jupyternotebook is undefined. Use list command to see defined gitignore types !!#
-
-### Linux ###
-
-# temporary files which can be created if a process still has a handle open of a deleted file
-.fuse_hidden*
-
-# KDE directory preferences
-.directory
-
-# Linux trash folder which might appear on any partition or disk
-.Trash-*
-
-# .nfs files are created when an open file is removed but is still being accessed
-.nfs*
-
-### PyCharm+all ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-.idea/**/workspace.xml
-.idea/**/tasks.xml
-.idea/**/usage.statistics.xml
-.idea/**/dictionaries
-.idea/**/shelf
-
-# Generated files
-.idea/**/contentModel.xml
-
-# Sensitive or high-churn files
-.idea/**/dataSources/
-.idea/**/dataSources.ids
-.idea/**/dataSources.local.xml
-.idea/**/sqlDataSources.xml
-.idea/**/dynamic.xml
-.idea/**/uiDesigner.xml
-.idea/**/dbnavigator.xml
-
-# Gradle
-.idea/**/gradle.xml
-.idea/**/libraries
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-cmake-build-*/
-
-# Mongo Explorer plugin
-.idea/**/mongoSettings.xml
-
-# File-based project format
-*.iws
-
-# IntelliJ
-out/
-
-# mpeltonen/sbt-idea plugin
-.idea_modules/
-
-# JIRA plugin
-atlassian-ide-plugin.xml
-
-# Cursive Clojure plugin
-.idea/replstate.xml
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-com_crashlytics_export_strings.xml
-crashlytics.properties
-crashlytics-build.properties
-fabric.properties
-
-# Editor-based Rest Client
-.idea/httpRequests
-
-# Android studio 3.1+ serialized cache file
-.idea/caches/build_file_checksums.ser
-
-### PyCharm+all Patch ###
-# Ignores the whole .idea folder and all .iml files
-# See https://github.com/joeblau/gitignore.io/issues/186 and https://github.com/joeblau/gitignore.io/issues/360
-
-.idea/
-
-# Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-249601023
-
-*.iml
-modules.xml
-.idea/misc.xml
-*.ipr
-
-# Sonarlint plugin
-.idea/sonarlint
-
-### pydev ###
-.pydevproject
-
-### Python ###
-# Byte-compiled / optimized / DLL files
-__pycache__/
-*.py[cod]
-*$py.class
-
-# C extensions
-*.so
-
-# Distribution / packaging
-.Python
-build/
-develop-eggs/
-downloads/
-eggs/
-.eggs/
-lib/
-lib64/
-parts/
-sdist/
-var/
-wheels/
-pip-wheel-metadata/
-share/python-wheels/
-*.egg-info/
-.installed.cfg
-*.egg
-MANIFEST
-
-# PyInstaller
-# Usually these files are written by a python script from a template
-# before PyInstaller builds the exe, so as to inject date/other infos into it.
-*.manifest
-*.spec
-
-# Installer logs
-pip-log.txt
-pip-delete-this-directory.txt
-
-# Unit test / coverage reports
-htmlcov/
-.tox/
-.nox/
-.coverage
-.coverage.*
-.cache
-nosetests.xml
-coverage.xml
-*.cover
-.hypothesis/
-.pytest_cache/
-
-# Translations
-*.mo
-*.pot
-
-# Scrapy stuff:
-.scrapy
-
-# Sphinx documentation
-docs/_build/
-
-# PyBuilder
-target/
-
-# pyenv
-.python-version
-
-# pipenv
-# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
-# However, in case of collaboration, if having platform-specific dependencies or dependencies
-# having no cross-platform support, pipenv may install dependencies that don't work, or not
-# install all needed dependencies.
-#Pipfile.lock
-
-# celery beat schedule file
-celerybeat-schedule
-
-# SageMath parsed files
-*.sage.py
-
-# Spyder project settings
-.spyderproject
-.spyproject
-
-# Rope project settings
-.ropeproject
-
-# Mr Developer
-.mr.developer.cfg
-.project
-
-# mkdocs documentation
-/site
-
-# mypy
-.mypy_cache/
-.dmypy.json
-dmypy.json
-
-# Pyre type checker
-.pyre/
-
-### Vim ###
-# Swap
-[._]*.s[a-v][a-z]
-[._]*.sw[a-p]
-[._]s[a-rt-v][a-z]
-[._]ss[a-gi-z]
-[._]sw[a-p]
-
-# Session
-Session.vim
-Sessionx.vim
-
-# Temporary
-.netrwhist
-# Auto-generated tag files
-tags
-# Persistent undo
-[._]*.un~
-
-### WebStorm ###
-# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio and WebStorm
-# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
-
-# User-specific stuff
-
-# Generated files
-
-# Sensitive or high-churn files
-
-# Gradle
-
-# Gradle and Maven with auto-import
-# When using Gradle or Maven with auto-import, you should exclude module files,
-# since they will be recreated, and may cause churn. Uncomment if using
-# auto-import.
-# .idea/modules.xml
-# .idea/*.iml
-# .idea/modules
-# *.iml
-# *.ipr
-
-# CMake
-
-# Mongo Explorer plugin
-
-# File-based project format
-
-# IntelliJ
-
-# mpeltonen/sbt-idea plugin
-
-# JIRA plugin
-
-# Cursive Clojure plugin
-
-# Crashlytics plugin (for Android Studio and IntelliJ)
-
-# Editor-based Rest Client
-
-# Android studio 3.1+ serialized cache file
-
-### WebStorm Patch ###
-# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
-
-# *.iml
-# modules.xml
-# .idea/misc.xml
-# *.ipr
-
-# Sonarlint plugin
-.idea/**/sonarlint/
-
-# SonarQube Plugin
-.idea/**/sonarIssues.xml
-
-# Markdown Navigator plugin
-.idea/**/markdown-navigator.xml
-.idea/**/markdown-navigator/
-
-### Windows ###
-# Windows thumbnail cache files
-Thumbs.db
-Thumbs.db:encryptable
-ehthumbs.db
-ehthumbs_vista.db
-
-# Dump file
-*.stackdump
-
-# Folder config file
-[Dd]esktop.ini
-
-# Recycle Bin used on file shares
-$RECYCLE.BIN/
-
-# Windows Installer files
-*.cab
-*.msi
-*.msix
-*.msm
-*.msp
-
-# Windows shortcuts
-*.lnk
-
-# End of https://www.gitignore.io/api/git,linux,pydev,python,windows,pycharm+all,jupyternotebook,vim,webstorm,emacs,dotenv
diff --git a/ansible_collections/dellemc/os9/COPYING b/ansible_collections/dellemc/os9/COPYING
deleted file mode 100644
index 10926e87f..000000000
--- a/ansible_collections/dellemc/os9/COPYING
+++ /dev/null
@@ -1,675 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (C) <year> <name of author>
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <http://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (C) <year> <name of author>
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<http://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<http://www.gnu.org/philosophy/why-not-lgpl.html>.
-
diff --git a/ansible_collections/dellemc/os9/FILES.json b/ansible_collections/dellemc/os9/FILES.json
deleted file mode 100644
index de5777d50..000000000
--- a/ansible_collections/dellemc/os9/FILES.json
+++ /dev/null
@@ -1,3953 +0,0 @@
-{
- "files": [
- {
- "format": 1,
- "ftype": "dir",
- "chksum_sha256": null,
- "name": ".",
- "chksum_type": null
- },
- {
- "ftype": "file",
- "chksum_sha256": "0c29a1ae51505d7a5d1e7f80c5abac708f68c44c5bd96fc94f0afff2408daeca",
- "name": ".ansible-lint",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/sanity",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "47b57717c6b01630d3628ebfd0288cb961d6c1ae43d050656ff40cca0c136831",
- "name": "tests/sanity/ignore-2.9.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c8a4ac4bfdef88e75d6e748e35a42fb4915947dfa2b7dd788626fd829600e014",
- "name": "tests/sanity/requirements.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "aa689a5caa0c4c0d15e13cc42590037dd2a70c8663d961b7d890b345cc175a99",
- "name": "tests/sanity/ignore-2.10.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "aa689a5caa0c4c0d15e13cc42590037dd2a70c8663d961b7d890b345cc175a99",
- "name": "tests/sanity/ignore-2.11.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_command",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_command/os9_command",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_command/os9_command/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7fec66f82d7fc43d56da0eea8b53394eba52f26bf8f7059f12ea9703503b562f",
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a1082ade2b6b3b60448649536e311691e159519934fad93cd473b334e07a01f9",
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli/contains",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bafdebd96db17a954899808696fa2e3d38ba09b03114638ada75f49a96acb588",
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli/output",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "29d2545c7c7de45bbc802b34e167797f5a9ff85cd8456ed30c2d4fe00cf80cb7",
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli/timeout",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cedd8e1102bc85d0a77a8e752253d2dd42276dc672b0d5eb8e51ce5011dc15a0",
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli/invalid",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "431660d4177c8289f53aa0487bd9195b0de7e1ed944bee9e09c665bd532cc8bb",
- "name": "tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_command/os9_command/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c67b69d54f5ebc1de087ed737f6a0e4119d6f045229f64b7cbd1971e4d5eb14f",
- "name": "tests/integration/targets/os9_command/os9_command/defaults/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_command/os9_command/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8b99fcd12715fcec0cba3d649252a55c86f4710650f9f2e0fdab0bb958fb88f1",
- "name": "tests/integration/targets/os9_command/os9_command/tasks/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "81ae4136ca3d879f645bc323268dd5af5a89467b0d776010965374f56ef07eb0",
- "name": "tests/integration/targets/os9_command/os9_command/tasks/cli.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "tests/integration/targets/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_facts",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_facts/os9_facts",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_facts/os9_facts/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_facts/os9_facts/tests/cli",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2a1b00378f0b59a8b92d07b174826617ce1dde761f2f98cb0627737ca2895171",
- "name": "tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_facts/os9_facts/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "name": "tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_facts/os9_facts/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47",
- "name": "tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2267f2038f66e2be89d7a6c63ffdb80801f457c22193408b10bae86c3144670e",
- "name": "tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_config",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_config/os9_config",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_config/os9_config/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_config/os9_config/tests/cli",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "71ff9b108a1e14b50a2cb5e71de55580c9a2a345cf53385391e95a359310508d",
- "name": "tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3dcd20cbd7aa3ff27003ccf2feed4bdf6d5bb86f11438772fd73c89c9a1955f3",
- "name": "tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "608a1218cafc5851413641f44f2109add4c0977d42b9e4a1b795bf89906a7155",
- "name": "tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_config/os9_config/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b6cec8117492a3110c2e9066aa77a54abd2b9774cea08d60eb42b01c51c3e032",
- "name": "tests/integration/targets/os9_config/os9_config/defaults/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/integration/targets/os9_config/os9_config/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ac196e55bc80cba08a1cadef71a151160e493ce9cb00cb3ae12e88cf83664c47",
- "name": "tests/integration/targets/os9_config/os9_config/tasks/main.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2267f2038f66e2be89d7a6c63ffdb80801f457c22193408b10bae86c3144670e",
- "name": "tests/integration/targets/os9_config/os9_config/tasks/cli.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b5726d3ec9335a09c124469eca039523847a6b0f08a083efaefd002b83326600",
- "name": "tests/.gitignore",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules/network",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules/network/os9",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d44b86de2cf6f1bc939fc743cf76dbec128e7b78ba872c2c8b6a9399c0acf3b5",
- "name": "tests/unit/modules/network/os9/test_os9_config.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "tests/unit/modules/network/os9/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3987235f717a4104ac08ba54efdaf0917d4f70566176728717ca382e3de74856",
- "name": "tests/unit/modules/network/os9/os9_module.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f776673004a00a68326154ede112ae99897541044cd83dc0162783c81019f050",
- "name": "tests/unit/modules/network/os9/test_os9_command.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "tests/unit/modules/network/os9/fixtures",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e6feab0f1d65c9a751171208547a21763d80b4f3589893bf3c9a175d7b31e483",
- "name": "tests/unit/modules/network/os9/fixtures/show_ipv6_interface",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "125417e5d4d2deca2272f11e4c5124579741cff5e35fdb749b63696e87e87d0b",
- "name": "tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5fb0e1ad78714da4423b9c9a7ebd2938b0050febc7b10d32246a9afe1981ade7",
- "name": "tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5a252359d7d8aaf0c632dcdfdce095984764c593871f6acb005f94dbcfa16aff",
- "name": "tests/unit/modules/network/os9/fixtures/show_inventory",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d09c902762898fca26cae465dfa5d091d8cb255892c07e75b0462d7046cb5544",
- "name": "tests/unit/modules/network/os9/fixtures/show_interfaces",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "182cb0ed4d2624e815ba3aca306283100e279b7b01305a771ac3dc9962839514",
- "name": "tests/unit/modules/network/os9/fixtures/os9_config_src.cfg",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "baf72eb01ee429ea7599874957dd1398b25da53212c61f667b214b3bf2615fc9",
- "name": "tests/unit/modules/network/os9/fixtures/show_version",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d895db603a8e35ece016395993b860fef4d8c04d3f5e316083203858592d338a",
- "name": "tests/unit/modules/network/os9/fixtures/show_file-systems",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8e44f65026c6a56109e9e3ba30e6b9bfb866dedd3b475beb37841479d5e010f7",
- "name": "tests/unit/modules/network/os9/fixtures/show_running-config",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "885553e6317b9e9da78f7230ba38e410b1ec7fe548d852d3d19c044bf29bfaa5",
- "name": "tests/unit/modules/network/os9/fixtures/os9_config_config.cfg",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "315f7a32efeddc96e388092d8af26a4e6bd220a29e8f78dcfaf8ffed471c7861",
- "name": "tests/unit/modules/network/os9/fixtures/show_memory__except_Processor",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2401872bfd39a36e786461d82e8672887258ba81a4e477c44ebd223ddaa8ba2d",
- "name": "tests/unit/modules/network/os9/test_os9_facts.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "21e22101c2afb46bb6a916f2bf3df69eb5023903506152bbf6f0669e831a422c",
- "name": "meta/runtime.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": ".github",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": ".github/workflows",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a72707b6d3f3810741fa9784222d3c56f48472b77aba15222ae780c652262eac",
- "name": ".github/workflows/ansible-test.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/module_utils",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/module_utils/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/module_utils/network",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/module_utils/network/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "78158444072c124ca00b4cfee2471cc5ac35d0ac23f55063665afad9c700831d",
- "name": "plugins/module_utils/network/os9.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/action",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/action/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2252271fdb4a68288f44c2a8d31c3693bca0141960ba8c5a1c01ab7a12147ba1",
- "name": "plugins/action/os9.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/terminal",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/terminal/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cd7498d518117883d4ace3f39e8045f0023fe9a2c62bcc8277f35d35a0a87ad7",
- "name": "plugins/terminal/os9.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/cliconf",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/cliconf/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "452eb0a83fa4a5adf85ca49e0c7b9f51e298d3b2bcb3a25009fc670fa4b3ecd7",
- "name": "plugins/cliconf/os9.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/doc_fragments",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/doc_fragments/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d3bc65aabc3b22af8323623fc19dbb48fd65252b0505fa20bf7ac7e9b8171f33",
- "name": "plugins/doc_fragments/os9.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "plugins/modules",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "88e781b42ea96e44fa81d08347c70f57139dfbb46c741879280f2b904638b29c",
- "name": "plugins/modules/os9_facts.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "49b93b6c6e7ddbd76a1b1c89b8775062ec9bea4e67209cef7238585794c6cbbf",
- "name": "plugins/modules/os9_config.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
- "name": "plugins/modules/__init__.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d1297f0656ee49abe886cf9ab77871a79b1f7196d51bc3d0a53aee675a0c8652",
- "name": "plugins/modules/os9_command.py",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0ae0485a5bd37a63e63603596417e4eb0e653334fa6c7f932ca3a0e85d4af227",
- "name": "COPYING",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "258e4be3cfda40797fe734b375b6f94c110c9a9bebce196fedce319a457ce720",
- "name": ".gitignore",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "changelogs",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "70a7fbafc6e1175acdf70896d266f6e7c3cdaf7b6d786a6ccfd6bc6d84e46bae",
- "name": "changelogs/config.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e05e09bd169818f68582946673a13a370158c2539a52a6a4b16e5ddf68e32668",
- "name": "changelogs/changelog.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "349741eaeee3c184691fa268cae80843cdb9e7a8a4400ef1869d05d6f795bb87",
- "name": "changelogs/CHANGELOG.rst",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "docs",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e81d64f1de9c3270f1f56d5ce253a45dd18e3fd85917cd72607ca597127d1f71",
- "name": "docs/os9_dns.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "19771f0031148029ab0b196b35580c440fd07884c3abe6b8b86b27aefed11732",
- "name": "docs/os9_aaa.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b4a5d8c724dfaa01e7b4a2a17ca38d3dd5f1bca9ce695666fcd829d0330f296b",
- "name": "docs/os9_ntp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9aa3a01ae4c5a30649015623999722c1ee2d7dc555177cc80b676608afe6c4d0",
- "name": "docs/roles.rst",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2ff6ab9e62f00e60c6c321476e3de23db595faf1f57b4b4d03df4ca27702b2b9",
- "name": "docs/os9_xstp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7c1e34c1ae9189e3c0e9092a75f867ea5b4ae0c19e273f57d30a84d59418d1bb",
- "name": "docs/os9_vlan.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d4ef7e561975060c0f8480f7ec0e7ca042ae250930eb4aaa2fe89ff1d5c935a6",
- "name": "docs/os9_acl.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d1d39e1bd43a44fd8ea455be74fdfcf42732e5775916716399599402075523bf",
- "name": "docs/os9_vrrp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "087ff2922aec2a9911d67440527e81e0ad1ea1fc079776f95e84badba9039843",
- "name": "docs/os9_vrf.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "dd43845c1a91be5e8323914ab1aee26275dc2362fbf4520f8c9d31952e628be3",
- "name": "docs/os9_interface.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b19735d8dc20ee8d5b79ec6f25244d5bf513ae93c0ec72cb130018a991e9d409",
- "name": "docs/os9_prefix_list.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "858d5c07058ee5d90bdf5022439e788a9bc1e2daad0bfb7ce522edff282598a6",
- "name": "docs/os9_lag.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b72d204f8dedd27d2988ecf0d53589a6547205774be69f6b298561f743b9b252",
- "name": "docs/os9_ecmp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "19c514de5c3e29a0f885abb6da52fff413eb9078b9ef1fe87253705e2f919ad8",
- "name": "docs/os9_lldp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a770aa97b66a8c49a948bf4ebd6dbec672b77b6c4c9e80d05973580b1ff13b12",
- "name": "docs/os9_system.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3aac3568b9d528d747b0b59acec0b71f473c8d7c5254452b4eb44c5f8131dc8d",
- "name": "docs/os9_dcb.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "440f5dfc57184ae41e73988b49e0391e678c1eabb534eb1a527fcbadc6ee1b76",
- "name": "docs/os9_sflow.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "33fd4184f2648fcc0e69c524b3797a5013afabd703eb4a3ec40ee3abc1436939",
- "name": "docs/os9_bgp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d000c615ec8d3366f408fc731d001055d85dd22912f064440e1331e0a86a94fb",
- "name": "docs/os9_copy_config.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7b5fa6361cabee40c802df10c63dea98b38ff1221c0a9846f5823908d9689190",
- "name": "docs/os9_snmp.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "88842853681202a3e8b4cc0712310dad87eba8c8f5b24f76fff101e93e7cd714",
- "name": "docs/os9_vlt.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c1bf779e16b26773779b3cff23111b14c76913556729e019af7f1091d8fbb235",
- "name": "docs/os9_users.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "fc49c2852e7bf0c11fa863341dd38dae4de133630a8de2af96c4348f52284c34",
- "name": "docs/os9_logging.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "23e9d4c2fd8d62be40f9ddbfdac92465f4682886c2b1dd073f37097197f705d9",
- "name": "README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks/clos_fabric_ebgp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks/clos_fabric_ebgp/group_vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8dfc56fa05d702dbc40efa71a43a4148cb1fabde2d61c27fdc5b652f96b47e7c",
- "name": "playbooks/clos_fabric_ebgp/group_vars/all",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bc5ab983e078c9a5a8cd262b0379d890817589e29e324cf4ded2e2d87e157da6",
- "name": "playbooks/clos_fabric_ebgp/group_vars/spine.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "playbooks/clos_fabric_ebgp/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "855a81733f763088f5326ac9cb210284edfab2e21d0a0886764c007c6878194d",
- "name": "playbooks/clos_fabric_ebgp/datacenter.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ba9189063a69b9774a2aded0a7d4c2d7c36e9270bc5354bd21e62d146df6d881",
- "name": "playbooks/clos_fabric_ebgp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "playbooks/clos_fabric_ebgp/host_vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0686aa905934de49d4403b0d8fbb27fb68dfac45043db2a509ce827e58c321db",
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6ad88e8d68755ee79d86f572586bb3b2bea037e8699d52acd699fe1584abc488",
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d1f8d83e73880a72c0206133da575b0fd416c12919d2a85b628534de88ce5009",
- "name": "playbooks/clos_fabric_ebgp/host_vars/spine2.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "776939597c809d002ee820c5ed7f776df294a9bfe6be72b37e5aa4ee53512360",
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9c189687de2087d635359151f3c3d4f07bea2caed813e889ab55b462c9067326",
- "name": "playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "18e827e2380304c8065de68d8db4b7808eb351b86601611610ca3340e6a42844",
- "name": "playbooks/clos_fabric_ebgp/host_vars/spine1.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "328e38eb6243f1b7180e79b06a29b292fe36b3bfd9e47c3393f84e37393a79ad",
- "name": "roles/os9_vrf/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_vrf/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_vrf/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "82c2dacec1c7e99ae63ebf20fafe0c16105959699e02239c5d579c963cc695d2",
- "name": "roles/os9_vrf/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9d09f75a83662192bce545b4d03c876f1db00e79f9867e7e2875f765fd648cc5",
- "name": "roles/os9_vrf/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a9d07572f5aaa73ea15a375fdbb317a443d10a524b63ab28fe4edb67f8d273dd",
- "name": "roles/os9_vrf/templates/os9_vrf.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4791fe8f67d0b722e1af57aea187cde857443730901432f6f15da82d285474fc",
- "name": "roles/os9_vrf/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e223f46d113fa7925d4c5bd9218810c1f241fe944302f2effc0e8728e3ef4f80",
- "name": "roles/os9_vrf/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "af5620af058efa0916111f46705ab43205edef3bef05542d6da325ae47f2c120",
- "name": "roles/os9_vrf/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0e4a9b4572288830796b6833d2053b416d6243cdfd0969b7f4b54f2d9e8622c5",
- "name": "roles/os9_vrf/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrf/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "83170116a0504aba73704308942df782d7c6e342c0828ef9387342f4b0b3d079",
- "name": "roles/os9_vrf/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a9f076780aa43672910f9aede19c73abd0c7b8e017167ad2c483b0d4d1c58636",
- "name": "roles/os9_sflow/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_sflow/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_sflow/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a5fe3e746e1bc1b090970dea0271d25b6fadb8054efcd6250c1a8b49a59f697a",
- "name": "roles/os9_sflow/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "62be668d4bddb7e6708bcdd68b931ab71a3976867c66eca68965425326096669",
- "name": "roles/os9_sflow/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "82704f4f253f0d64ec1e43a38b38802c549256b3acd65417e7dfed1e98c4ae0e",
- "name": "roles/os9_sflow/templates/os9_sflow.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "47cceedfd12b18197b1eb1d691e0e2e46f3a33b8d4ddda5c180720dd781fc29b",
- "name": "roles/os9_sflow/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f0d4a18cc0ead67b6d26be2af0b2c7bffa0fcb82bf57b357f2823af5b237a5ec",
- "name": "roles/os9_sflow/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c63b06d76b54b76a2b8221e04a2514b8a691679e799813345ee17a5cf7453341",
- "name": "roles/os9_sflow/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b42edf22664bd7d9f44f6309b13faa376c197d95ced795193dda8ef8710de8fa",
- "name": "roles/os9_sflow/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_sflow/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9be297ba0a1e316c69f0738d824bbfdce133f2edec183a22b750c25824f63879",
- "name": "roles/os9_sflow/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e8ce2a6e7602821f89d3e8394dc3bfc8828df82be2512c63cbb442717b271a1b",
- "name": "roles/os9_lag/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_lag/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_lag/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c551da3ea192239ad7cc7ed48f7e05cece38c80c9e1388fc673c349efd57acd4",
- "name": "roles/os9_lag/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "47bfa1c48ee4cbeda5082df0d5d2ddaf3c0d5e2c8b6f55bb842cf07f425de331",
- "name": "roles/os9_lag/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ee73098cbce745d8c9ef24794e73123c6c334a3a7b4eb8a8640e842a970e38ba",
- "name": "roles/os9_lag/templates/os9_lag.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "01259b7234aa137d0d3873560d93fff1a2863472575dfb3c5386f9c2b5b6d395",
- "name": "roles/os9_lag/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4757240d44c3b63d143c289627b337375f6b012d3df063bdfe3f3b75193e99c0",
- "name": "roles/os9_lag/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ac08af08a2a8a3674ec86d5372661cfcd2cb2b59d22a92940f9193c5eef37897",
- "name": "roles/os9_lag/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "459917ce56ac381180085455a66a6404ccc38b88ea59d5437fadf7884d177cc6",
- "name": "roles/os9_lag/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lag/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e5e65e1af8aef78cd7ae46197210ef94d1800b2a7a0df70aa88efe338d6554b0",
- "name": "roles/os9_lag/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4399f312689744d712656ba35d0e6380b22ec594527097aed5b5c542fb9959df",
- "name": "roles/os9_bgp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_bgp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_bgp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6d8c0035b353b80ca85fe611ba5b38216a74a00176619fadfa3a1ad9f31c647f",
- "name": "roles/os9_bgp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cc84666798d17dae32dc929df414e0c170005b751c6f73c55cf1082d223c2e1a",
- "name": "roles/os9_bgp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "402b54dfddd140ce7a39f22e52de134f82d10ee941beadfa0f1dd8b759fdbff1",
- "name": "roles/os9_bgp/templates/os9_bgp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ddff5a7d8ae16b2005878b50e0c58be34a1adab7ef9549aff528bbd8914d7ff9",
- "name": "roles/os9_bgp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e3bf13b2ab4c0bbc6dbf1c8317167024b9b242749a1fa358c8ffe28e58d95a64",
- "name": "roles/os9_bgp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "794a1ca7042ebadf2e8dbd7eb696fec5cbc982453536654022f8348d9dd63ad4",
- "name": "roles/os9_bgp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4b8eecc34bbb888dd110c345c6c469afb5f4ffcc5f0dedb8539b905c3df6e1e0",
- "name": "roles/os9_bgp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_bgp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1c4af2285f60dcab13f5fdb4b2012dc924ad19df54fd56c8a3c5dc5c681a2af4",
- "name": "roles/os9_bgp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5a6bec4eb07c0b5b105cf47a4e1d6a300a54a3c3705a01512755c86e11e8a6d0",
- "name": "roles/os9_lldp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_lldp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_lldp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bacbc3d73ed0149d3539ff2a367127f439e4a746d419b14d6827ae8aa71ed1bb",
- "name": "roles/os9_lldp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2cccaad903053c4c7fb5306f2efc55c380add6c53af6219a533fd10fd26f42be",
- "name": "roles/os9_lldp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "581c3f712c0f682b8e794768040cf2b87623f871984f618ebbc8c168e409329d",
- "name": "roles/os9_lldp/templates/os9_lldp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a3c75160f8505b7530dd5f0a75ffb020ee40bd42b907cfb53f9a77d9e471d08a",
- "name": "roles/os9_lldp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1f37d26dc0302e65ab7e6c64ead0fbde3a9af90300b41c50833d8fdd4afcbc11",
- "name": "roles/os9_lldp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c07e9a3a9fa8e7ff8da33649e1e9dd70fb2946b56e1361c58f9f9183a006fcc5",
- "name": "roles/os9_lldp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "62abbb6384e7e70179a32796857731fe713246323b7b3b09cb26bb8bdf217f3f",
- "name": "roles/os9_lldp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_lldp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0e73bad322e68692969dd78a67a40a04e46405aa6101b7340ff3975dac554a1a",
- "name": "roles/os9_lldp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "41505d1144bcec0a736a30bb7a675387edefcd3f43786c11642facd88debc46a",
- "name": "roles/os9_aaa/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_aaa/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_aaa/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "46e62e6fdc007a49b8cd1bbb496047742c2d3634756addf6dee3222ada757f72",
- "name": "roles/os9_aaa/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "67a167f69545c2e18aefed05f7278dd991363af0a58b655e760212a72c5bf2ce",
- "name": "roles/os9_aaa/tests/main.os6.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d88597d2c9fef832b4c003757838c420f71fcb54c060deacbd753af87d46a333",
- "name": "roles/os9_aaa/templates/os9_aaa.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a31f22a0a47898e35b56b9c5aa4f6b9fd7e36829809aebace774b9ecdc31f39b",
- "name": "roles/os9_aaa/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ee4fc0e75c9bd541ce4202e29bb73201b79ae69c899abc65a802dd7769fdc5b4",
- "name": "roles/os9_aaa/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "526df525101ddda741aea64cffc40cdd740376739703639e71e45073311c6274",
- "name": "roles/os9_aaa/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3f916612c2b5d2833a97330e40f2075e952c62a0c8e24b023fa2c2415d09785b",
- "name": "roles/os9_aaa/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_aaa/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d4f4ed469715df92037e7560cf7fb49044c246a4824b5a378a7747b16d1d5fcc",
- "name": "roles/os9_aaa/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8b3be39873d380cac2259251db29c1ee0a27896283bc8d73e1b8fc9c6fa845d7",
- "name": "roles/os9_system/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_system/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_system/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6781ea8f4437f79de909fb4035678cda7a57661f6e2de823148fd6031ee5b354",
- "name": "roles/os9_system/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "80de0d1be31759897d13f274a29d667cc91900c51c3ea8461b0a13d6b53ec7e2",
- "name": "roles/os9_system/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8ba71352c4c8293f02164dad4c1d5e3b84667ba877d826bb7519522437e38b6f",
- "name": "roles/os9_system/templates/os9_system.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5af0a107d425ac7e7a6eabfb6244e036828c3e1e7fab4c7ebfc0019a80351c6e",
- "name": "roles/os9_system/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2ccff3e3162348757f00101ccb0890cf12bd823b19cdb9a24ecd9ee6aa1cfc4a",
- "name": "roles/os9_system/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9d32045c65e6b80d9cf45d4f17537caef70d61ee59ccc5005262889e3a40fd59",
- "name": "roles/os9_system/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5c0a32bf0ea6008ed276ee38ead2c7d5e5354f669244f9129024c28339214ae5",
- "name": "roles/os9_system/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_system/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "36fdd262d9e3162ff3da7d48de1dbe3267e8a1b5ee8bd6ddd29199e00c885e49",
- "name": "roles/os9_system/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "55e506ce13e2f09e9e92329a0c636f1828a4aa20bb55a3f8f03d3405a02d2527",
- "name": "roles/os9_prefix_list/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_prefix_list/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_prefix_list/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a977ff9d2a922d16bec06e5f95a81ef6fcaf996db42697d8102ce705f1e9d2ef",
- "name": "roles/os9_prefix_list/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5a0ba2151e2353f32777b08b3e87d7747ba486e3582909e4942fc09b24444ff3",
- "name": "roles/os9_prefix_list/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a1151c7962365507462e93f65755b778aed85ad023f606d7bfa1324c2fdb1416",
- "name": "roles/os9_prefix_list/templates/os9_prefix_list.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "462f14295d91855188b42ea296e20728f58000fd97ac57cb3e98d0d93fc8342f",
- "name": "roles/os9_prefix_list/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9aecebe122a66572a519557baca9944627a8c5ae508fc79846d530cc536985c5",
- "name": "roles/os9_prefix_list/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "271f9da4247e0b36a3566d5c344e3c5c64b528c60dfe0d6077ed008fc22ee1c7",
- "name": "roles/os9_prefix_list/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "992f92fcc74006e970557f814cd0146bbe6d2cefe1403791e2ebb7bb6eb51ad5",
- "name": "roles/os9_prefix_list/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_prefix_list/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "212f31f556f2327e0479895aaeecb66bb21eb804e96a3c0d01c82476fdbca0a7",
- "name": "roles/os9_prefix_list/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f63e7892ea088a676c9cf647f8be96b6163ee798d7a5241a8cc9851f28007ddc",
- "name": "roles/os9_snmp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_snmp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_snmp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e470344c03bee986ef193b921efaf0af420e7d1b4d0d921ef9961f4142ec189f",
- "name": "roles/os9_snmp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "92fab363287fae218e2c8c9df27430c9024c0053a5d604777459089d40a33545",
- "name": "roles/os9_snmp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "964ea7367c2996d49f9630e066c501d55fd429f60fc69c6546a4d3dc8212b622",
- "name": "roles/os9_snmp/templates/os9_snmp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bb161a86e7babdd18253b1038616a326f03636a06536b475addb506c118281a4",
- "name": "roles/os9_snmp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "be39ed1562711ed3b2437dea16f758d8bf517ab8a791446a4635e75b22bfbe21",
- "name": "roles/os9_snmp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3ffe2fe6323ea23a7b1b036865798e9f56616752411fb44c1c48a90e74f6ed62",
- "name": "roles/os9_snmp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7389af4080c0b463f08c67954423343121aa7c8ce9b99dacd944d88a590dc63a",
- "name": "roles/os9_snmp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_snmp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "35f6eb0c04481e0ccfc6a0eb73eba0a790d441d56c5a5d79b0c2b246c4e14e8b",
- "name": "roles/os9_snmp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2b38e87edc4d3f68932d5bf099ed5fd8ea70d93d704306d3ed049d40e37f0ca9",
- "name": "roles/os9_vlt/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_vlt/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_vlt/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "132284efd21362e11003eef8bd1d7c459cb7b6784b33c32032d0a47114c6317f",
- "name": "roles/os9_vlt/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "344709014b6edcfe10171bffac2bcbba099ac82c8370bd26d8baf4147f4b8ee7",
- "name": "roles/os9_vlt/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "26098b1dfe90103b9b5e94d4777a2597c22b857dfcb54bf482c57c2432524f3e",
- "name": "roles/os9_vlt/templates/os9_vlt.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8ee5cb1918cf36de31fe6c7c7fa937fb7b96f9e09e8a7cc21ee785143e8d2db3",
- "name": "roles/os9_vlt/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b69245df09bbc3acf580b4bde39e5beda7de8427640510d66f7d15ddcc35fbb5",
- "name": "roles/os9_vlt/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "affc8b0508a1d8811daec7893c9b0cb674a30820b09f317da0b0141abb1156c2",
- "name": "roles/os9_vlt/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0f9dbde4e5e8fd21bbe18d8166c97829e32ce4672256901aebea6a55966865e1",
- "name": "roles/os9_vlt/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlt/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "07578fbd5595dce89ce08b8b53a777fbf5533be6e08d8e8db05a362e4b1b3b48",
- "name": "roles/os9_vlt/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "01ee1a797920557b814cf3554b45e74495243698d99d028df1800b325ae50483",
- "name": "roles/os9_copy_config/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_copy_config/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_copy_config/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cb3f91d54eee30e53e35b2b99905f70f169ed549fd78909d3dac2defc9ed8d3b",
- "name": "roles/os9_copy_config/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1c90e7372af3606aff65055fd7a2f3be9b5c4dc4b38c17596d36beca0e164066",
- "name": "roles/os9_copy_config/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5b9620be68039a5077ed58d7e1e044114af95a5c10d58b37f21efd1479d6ed55",
- "name": "roles/os9_copy_config/templates/os9_copy_config.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1dd056b18de3a4536562c829ed620d26b9656b967f2d3f721a3db296ed492739",
- "name": "roles/os9_copy_config/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d9b355a31983ad5f1acdeed96b95782584d363020a4f143b3118fd22c6c99125",
- "name": "roles/os9_copy_config/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "77723ba59cb770095cd7c17ae3b98e14236b5175a0b93a43b7b1aaa2a16971ce",
- "name": "roles/os9_copy_config/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d4f41f5def27074e2fe886235868e8a3fcd6fdf629f7e0ae9d0b4671b4bf64a4",
- "name": "roles/os9_copy_config/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_copy_config/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "84527ffd86c46f3b873c6eb41c4ab3ec683a0705c4b283f1fe93921c60b1c685",
- "name": "roles/os9_copy_config/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "789d2d2eaf9f1dea06ec0d24120f98cdc8efd8df706d217b8eef9394c9af4df6",
- "name": "roles/os9_ntp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_ntp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_ntp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e15445ca6c63e463b3d16958cafd1b5487250d972a96b81499a8a638b1f54515",
- "name": "roles/os9_ntp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "17efc759f3a2873ac54a378e6193a9bdbc27625fadc9f6648ac9cb8375c76379",
- "name": "roles/os9_ntp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ba6f6614f85e9de9725547367940e861552b47d37617be4b97dd78545314cd81",
- "name": "roles/os9_ntp/templates/os9_ntp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1147c842c5f03689beb4084aa81ab416f8a8623c361e32a2f0033e0876ab7af4",
- "name": "roles/os9_ntp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cc74d3f42e5a4f026b4e7abdc6816a024e30704cf83436258091e82677f71a28",
- "name": "roles/os9_ntp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5d4b91ee81601f3ad72554958ae19c16ff85f748e756415eaa713da36fae664f",
- "name": "roles/os9_ntp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "53b60c8aec93aca48d04eb336f7d6356933245223674d3121a47a2146b21c93f",
- "name": "roles/os9_ntp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ntp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b242fdd811c59913a23a4e78fcfe8e83f3b263843c7dee460158630f542fed1b",
- "name": "roles/os9_ntp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a718ec989264c1e6e7baf0aee663e100f3af2fe558ec05ea5715329c3b5b5d9a",
- "name": "roles/os9_logging/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_logging/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_logging/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "697e597bf75d342ba4f3e60eb499e15bf4d092ad701b6684f9c5babd16da4dfb",
- "name": "roles/os9_logging/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5af7cd9b0d509543a80873d15bc44225648be398d2c8be6353e4dcd0bdcf7ba2",
- "name": "roles/os9_logging/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "23247f4a92c1f63e5bd036b930112a0d24e5cd06318ab78ae84c2238b24a30ce",
- "name": "roles/os9_logging/templates/os9_logging.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ba0c438ab0fb041c432db690eb4d391f67ea7760763be98a203fbc4a56bc5173",
- "name": "roles/os9_logging/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6fa31d2a1e3412f56aa2390f96d1b8876fb433764ffbf2f0dd4930ddc1f67646",
- "name": "roles/os9_logging/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "df022a69119f4daf35333b84e4977bc3917504ae1ad258ee88a581f2d1b8fa71",
- "name": "roles/os9_logging/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a0db5e2a60ed9d1f8ad1f73d05ba11f6f4e8be95aea985c152a8f94ed3969bc3",
- "name": "roles/os9_logging/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_logging/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "177872b6c9f2d6a1c9bdf150b3b4a77072c4af8ab76c8c304305fdd5d4fdb0c9",
- "name": "roles/os9_logging/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0b4f9c65d055ecdedd62f295d6582d792ee3023ef75203e131379180ca595b5f",
- "name": "roles/os9_ecmp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_ecmp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_ecmp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2887f736ae9837d946286a7d3f37f2e344af45cbb6642d8f67bc250475628873",
- "name": "roles/os9_ecmp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0131fa28520315b169b620453dcc86c2fe369ae1843e605ca3ef160aac191192",
- "name": "roles/os9_ecmp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b50beb983ed6d26e3a55e5f1226dae4bbe32fd8993191e2c5be7444b27d56590",
- "name": "roles/os9_ecmp/templates/os9_ecmp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3403ca7b7b2dddecd069d176ccdc743262e0d9d4d1b71cbf55d08e3b9bd412f5",
- "name": "roles/os9_ecmp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3ab88739f9e5506e3f03af1983e5889d465d6a8ec6fd4822ae1dd65abd58f718",
- "name": "roles/os9_ecmp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ed8f4d75b1440fd46a57314230bf6b4e7940715fd6f06550e2eb348897a70d58",
- "name": "roles/os9_ecmp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "af442ee7c7c9e2a752861ff664e25a342b0f1e798b22b0a3361dc4d91a1f81c6",
- "name": "roles/os9_ecmp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_ecmp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2e33cb744140789803c982297776a734828e9b1b0a369a3f64687c67ba1a2b4e",
- "name": "roles/os9_ecmp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b6dee9f01529a3945a1ef44678bd3ce2063c7b57359e72fac7526a4941ca61af",
- "name": "roles/os9_dcb/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_dcb/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_dcb/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1879ecba3e800b09be3ccb02e2f506a5255fa7d05e6f4147ab821aa13d4d309a",
- "name": "roles/os9_dcb/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2fd0382a4e49d7b34e4cdc1646bbae302aa4c71edadd5001bf7ea8ab3a4d4863",
- "name": "roles/os9_dcb/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9132818aba64211491e368f6ce466d20daf8f41188425bae13bd2afe8c14fc45",
- "name": "roles/os9_dcb/templates/os9_dcb.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "93d73f08eb7f5cddf5e3fde83039d0af6a7e7b1dc20da90bfc5bb79d68599829",
- "name": "roles/os9_dcb/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8c65780c8a45b662cd6da234e12087283032cace0000763f245997a40ddf4cb1",
- "name": "roles/os9_dcb/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d3fbc607d9d5654176b138ad3a3b8860bdd02efaee80857cc2c340fc47e012a3",
- "name": "roles/os9_dcb/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f2a1463ec0a20576475fccaea3587b7c0021d64f9db67cb57bfd6bff1d97472d",
- "name": "roles/os9_dcb/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dcb/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8c219c522f0f0e34e311993513c54c403a7f14fd5e7a46f796bde3211bcad04e",
- "name": "roles/os9_dcb/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e14262571f041338ffb6ed5287842619eefc504e9a365ab0c5a2706733b59d97",
- "name": "roles/os9_vrrp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_vrrp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_vrrp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "06c7945a53e3756ed9904678f7948c988a134b2e31367fd2ee061675ded0c28f",
- "name": "roles/os9_vrrp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "43c0e775030d8eceabbe51458243c9a6d4b8ac31ce327e790adb3537ac1a6dcb",
- "name": "roles/os9_vrrp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "da5f553bf203e8bf1261a6c39f54474e6383f7fb198ce4525e27bb245f56f4d0",
- "name": "roles/os9_vrrp/templates/os9_vrrp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "de4793fd6e3d27ceddcbc42a52f03ab2bfbfaadf09ca9f64384f1d4466b7e739",
- "name": "roles/os9_vrrp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "8ad50a040d16c979338d20bc027c505d74973999fce4309cfb5effadbb48f2b5",
- "name": "roles/os9_vrrp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6af7dbc1efceae78d4e59fbcc1a3dda1a2042742e41f15f446c5c14541f14307",
- "name": "roles/os9_vrrp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d8bf1cbccecd2adca7d94456a1c682629720860e4ac71e8c53cc41b2f3d7265b",
- "name": "roles/os9_vrrp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vrrp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c6c091fd261666fcb155aa3f53d435c8bce4f7afcc176124b7b8e3414d5357ea",
- "name": "roles/os9_vrrp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "564947fa6f512be5164882ea2e712e74644f07952416745a3bec43d003222d09",
- "name": "roles/os9_users/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_users/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_users/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2ef47d1df27ce87233757f2ac53dd95216fab5c1fba3a192c4840f81de19c91a",
- "name": "roles/os9_users/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d6abe4a2dfa4ee140d104db11dfffcc20292e3dd7f946711bda115f922d3ac94",
- "name": "roles/os9_users/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5acd0a378bc82e6c64d5e7491df5d0f6267e3d43fceedad591eddcb36acb5dac",
- "name": "roles/os9_users/templates/os9_users.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b0979140519185b665b020d642c2bb243524e1a9a22ad8dd3d73d653ae96f951",
- "name": "roles/os9_users/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f342917d465c3eb791bb41253bf90355047d6362f20d198b110f8a419d9e49a3",
- "name": "roles/os9_users/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "61b015f4b27447519bcb59a438f438657bdba3089f542b1e663421875f21e210",
- "name": "roles/os9_users/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "867fecb1d2f46cbef060883395f455fe9945458e38b16f7da343749ba2a66414",
- "name": "roles/os9_users/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_users/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "1eada97cb8000ff2dc0d71168b591225e75b5643ca1d21d6ee4e5ba092b7b424",
- "name": "roles/os9_users/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "06cb55093c7919c9b833928237314e617b3454c2e1f883d1fbd1042c08ba3b8c",
- "name": "roles/os9_interface/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_interface/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_interface/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e9cad0fc65504bd7c7833ed3e9851563cd8de34e546ca88a4b628e54c84b3687",
- "name": "roles/os9_interface/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bd827c9440da10af0908feb9fc80e9aee4050a858cba6f5d91b9d5506d4a4b44",
- "name": "roles/os9_interface/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9f7b85676c476ea75a38bf9bd637592856684c5b791d2121777ba6bb0ef35aa0",
- "name": "roles/os9_interface/templates/os9_interface.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "15ad26d58ac0d83592e74a9c7ed2ca686f3e6941ff3b8b7bf674fa4a74b90ad8",
- "name": "roles/os9_interface/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f6669df8a0566976a72f43f5250f73d97c797362855aa6471aad87f2a2669fd5",
- "name": "roles/os9_interface/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c1974d2c55eef43d63c133aca494e7272abdf73233bf1d5da6933bdab6078386",
- "name": "roles/os9_interface/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "c6e2d02450c180c4b6046f3c8c1dd6182596cfd6e7c5b2ec8bc55ffebe02410d",
- "name": "roles/os9_interface/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_interface/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4b256fa702eac321dd120c22381ebaf74fef91ad8cd1c846566afbb8e82a3fcf",
- "name": "roles/os9_interface/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7f06b39a0d22144098a7436d20589fe60993b26fd57c3b30fb8b995351a026ec",
- "name": "roles/os9_xstp/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_xstp/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_xstp/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "20f351bb4c6447e74fd6f694724b9c9880d5820f257b9cf017aab1b9357a22b3",
- "name": "roles/os9_xstp/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "60a16999f83102fc0a60b1dc62c3d8da3d82cd925231be9a84dd582a05367961",
- "name": "roles/os9_xstp/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "518f9666f56eee0374a345c8d66e95f89a6b54b8d437aaa4647e01b7f86317c4",
- "name": "roles/os9_xstp/templates/os9_xstp.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ce35411b14512701202f5951dcfbec195650af9fddc89690594f08ba3a0889fe",
- "name": "roles/os9_xstp/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b9c4e648b0f71e05a139a0a60e31834db399a4271420c8d015195d27d788eb92",
- "name": "roles/os9_xstp/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e4acb30fac87a104e4acd23307b9d56243561919b56f13263734e4935aa724ef",
- "name": "roles/os9_xstp/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "601f927b7dd50f6e6d4172e0b5bd895b16285bb81e3eec25571aa218cea59958",
- "name": "roles/os9_xstp/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_xstp/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "0457d3e39544589ca342c73424ef9af18f01ea64ea7ee3bddf7f70d0c06c3148",
- "name": "roles/os9_xstp/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d563505ac5f9e86cd31c8858f8d1f3280d0120df01d4b7cdbc294e32040c8963",
- "name": "roles/os9_dns/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_dns/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_dns/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "91fb7feb04a43a6e745068a54b5399ab757cefdaf59fa6fd1e58bf046ae72997",
- "name": "roles/os9_dns/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "17decf3eec309eb00c6836a4beedc8072a61befd67eec9c5972bca2a99ebc941",
- "name": "roles/os9_dns/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ec177955fd27a81e864b82af4914c04e29ad1f5459cfb1fff6fcb213afb45f17",
- "name": "roles/os9_dns/templates/os9_dns.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "6590574613a0d22d15672f3156f7aefb865b2543f7e7e3b2273e30f5a416b77a",
- "name": "roles/os9_dns/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ed942b5afe9a98c6c297aa61685e7a2e02f14591bf97e0b88baec7d4bedba10e",
- "name": "roles/os9_dns/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2e032670738e7258a61d77e685beb840702a2eacfdbef6ec3af3aca5fc945386",
- "name": "roles/os9_dns/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "7f760f65685d6c512fbceeacabae0a50e3628efe3120973750c1addd1c4ff1d3",
- "name": "roles/os9_dns/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_dns/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f3ad5e14b372d852332ad5869bf7872dec83160a5fb227a84d63d8d9f06708b3",
- "name": "roles/os9_dns/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "cbbbd29017700d1dd2c88752c27a8cd1c49ec80a58ce65552bd220cf6130ae75",
- "name": "roles/os9_vlan/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_vlan/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_vlan/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "9f9ef9b08b8e36ed3ba0179873751eecefabae0ffd664f351d22227d00fa9e0c",
- "name": "roles/os9_vlan/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4018361707fab5667b43805963b21dfa1f8752ceb9a3dbecd52f65066811dce1",
- "name": "roles/os9_vlan/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e8291586b745585ed2bc488e9aae24d989b95b9cc1fcd6cf450059ce63b82cb6",
- "name": "roles/os9_vlan/templates/os9_vlan.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "d82b31e71f2000b209fc6a91dff8ee36448138c7a38524c5c5424b285a3604d9",
- "name": "roles/os9_vlan/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "a5a44d45dc61f9efabc3f6fae37c33cb55bcab7f62a460c75f4f3fff42598ca2",
- "name": "roles/os9_vlan/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "24b34bd5b0f9c8738728e8a3b937c89e462ea902c5288ac764a5da0d3a96f457",
- "name": "roles/os9_vlan/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "3abddd14991d068ad364bd2e46437fff7398f71d0d7cf92b9a10b3dca5cbd9ff",
- "name": "roles/os9_vlan/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_vlan/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "bfb597d511246cac6886b4639f4fa5858df32b4a49235b07cb12e6bb965b8684",
- "name": "roles/os9_vlan/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/handlers",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "41505d1144bcec0a736a30bb7a675387edefcd3f43786c11642facd88debc46a",
- "name": "roles/os9_acl/handlers/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "5883eb4c7ece1931e1b25a9071bcbe50c1eaab7b9a38d2515e1b4be29e630fb3",
- "name": "roles/os9_acl/LICENSE",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/tests",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "f5b68d8f1801c40577f9acc356e0c9a8a08ac91ddbd4f3d82158a808fd9d9770",
- "name": "roles/os9_acl/tests/inventory.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "b66014feff883af307b5b3284a1da62a647dfb3bf47dba57c109e85ee247e456",
- "name": "roles/os9_acl/tests/main.os9.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "2c34b288af25aa54fde46bae5baa21fedeb5cf7f0644a7e7ebd35b2900b14452",
- "name": "roles/os9_acl/tests/test.yaml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/templates",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ebad1f9043572155dc792e70fc54da5a9de28edd93de8bb16aa6a419b403f168",
- "name": "roles/os9_acl/templates/os9_acl.j2",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/meta",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "e06f7c300f06675328ca857b16b2d9752a508680b384975fba89792ca1575eaa",
- "name": "roles/os9_acl/meta/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/vars",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "ef238bf54c409cb4adce143ce09d457e00c60f6c59b6d7e1aee588ed6151cc7f",
- "name": "roles/os9_acl/vars/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "40edb7024f466d237d8d32f1f5085e48359a3e178c035bc0fd0430b58e84990b",
- "name": "roles/os9_acl/README.md",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/defaults",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "4206de44d73c0016a24aa118927f90e4de3099612b386424eed651b33a28ad50",
- "name": "roles/os9_acl/defaults/main.yml",
- "chksum_type": "sha256",
- "format": 1
- },
- {
- "ftype": "dir",
- "chksum_sha256": null,
- "name": "roles/os9_acl/tasks",
- "chksum_type": null,
- "format": 1
- },
- {
- "ftype": "file",
- "chksum_sha256": "82b059098882a03cf7486e95d81a275c6d1cb89050e1330264864068d515a256",
- "name": "roles/os9_acl/tasks/main.yml",
- "chksum_type": "sha256",
- "format": 1
- }
- ],
- "format": 1
-}
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/LICENSE b/ansible_collections/dellemc/os9/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/MANIFEST.json b/ansible_collections/dellemc/os9/MANIFEST.json
deleted file mode 100644
index 546d3f173..000000000
--- a/ansible_collections/dellemc/os9/MANIFEST.json
+++ /dev/null
@@ -1,37 +0,0 @@
-{
- "collection_info": {
- "description": "Ansible Network Collections for Dell EMC OS9",
- "repository": "https://github.com/ansible-collections/dellemc.os9",
- "tags": [
- "dell",
- "dellemc",
- "os9",
- "emc",
- "networking"
- ],
- "dependencies": {
- "ansible.netcommon": ">=1.0.0"
- },
- "authors": [
- "Senthil Ganesan Ganesan <Senthil_Kumar_Ganesa@Dell.com>",
- "Komal Patil <Komal_uttamrao_Patil@dell.com>"
- ],
- "issues": "https://github.com/ansible-collections/dellemc.os9/issues",
- "name": "os9",
- "license": [],
- "documentation": "https://github.com/ansible-collections/dellemc.os9/tree/master/docs",
- "namespace": "dellemc",
- "version": "1.0.4",
- "readme": "README.md",
- "license_file": "LICENSE",
- "homepage": "https://github.com/ansible-collections/dellemc.os9"
- },
- "file_manifest_file": {
- "format": 1,
- "ftype": "file",
- "chksum_sha256": "7666ea3d71e26d56fcb8d90a06ea28c4e634ea9a3618f96e7ab99cd276ea67fb",
- "name": "FILES.json",
- "chksum_type": "sha256"
- },
- "format": 1
-} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/README.md b/ansible_collections/dellemc/os9/README.md
deleted file mode 100644
index 0ed967442..000000000
--- a/ansible_collections/dellemc/os9/README.md
+++ /dev/null
@@ -1,96 +0,0 @@
-# Ansible Network Collection for Dell EMC OS9
-
-## Collection contents
-
-This collection includes the Ansible modules, plugins and roles needed to provision and manage Dell EMC PowerSwitch platforms running Dell EMC OS9. Sample playbooks and documentation are also included to show how the collection can be used.
-
-### Collection core modules
-
-- **os9_command.py** — Run commands on devices running OS9
-
-- **os9_config.py** — Manage configuration sections on devices running OS9
-
-- **os9_facts.py** — Collect facts from devices running OS9
-
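-These modules can be used directly in playbook tasks. The following is a minimal sketch of a task that runs a show command with os9_command (the module name and its `commands` argument are as listed above; the registered variable name is illustrative):
-
-```
-- name: Gather version information from an OS9 device
-  dellemc.os9.os9_command:
-    commands:
-      - show version
-  register: show_version_output
-```
-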
-### Collection roles
-
-These roles facilitate the provisioning and administration of devices running Dell EMC OS9. There are 22 roles available that provide comprehensive coverage of most OS9 resources, including os9_aaa, os9_bgp, and os9_ecmp. The documentation for each role is available at [OS9 roles](https://github.com/ansible-collections/dellemc.os9/blob/master/docs/roles.rst).
-
-### Sample use case playbooks
-
-This collection includes the following sample playbooks that illustrate end-to-end use cases:
-
-- [CLOS Fabric](https://github.com/ansible-collections/dellemc.os9/blob/master/playbooks/clos_fabric_ebgp/README.md) — Example playbook to build a Layer 3 Leaf-Spine fabric
-
-## Installation
-
-Use this command to install the latest version of the OS9 collection from Ansible Galaxy:
-
-```
- ansible-galaxy collection install dellemc.os9
-
-```
-
-To install a version within a specific range, specify a version range identifier. For example, to install the most recent version that is greater than or equal to 1.0.0 and less than 2.0.0:
-
-```
- ansible-galaxy collection install 'dellemc.os9:>=1.0.0,<2.0.0'
-
-```
-
-## Version compatibility
-
-* Ansible version 2.10 or higher
-* Python 2.7 or Python 3.5 and later
-
-> **NOTE**: For Ansible versions lower than 2.10, use the legacy [dellos9 modules](https://ansible-dellos-docs.readthedocs.io/en/latest/modules.html#os9-modules) and [dellos roles](https://ansible-dellos-docs.readthedocs.io/en/latest/roles.html).
-
-
-## Sample playbook
-
-**playbook.yaml**
-
-```
-- hosts: os9_switches
- connection: network_cli
- collections:
- - dellemc.os9
- roles:
- - os9_vlan
-```
-
-**host_vars/os9_sw1.yaml**
-
-```
-hostname: os9_sw1
-# Parameters for connection type network_cli
-ansible_ssh_user: xxxx
-ansible_ssh_pass: xxxx
-ansible_network_os: dellemc.os9.os9
-
-# Create vlan100 and delete vlan888
-os9_vlan:
- vlan 100:
- description: "Blue"
- state: present
- vlan 888:
- state: absent
-
-```
-
-**inventory.yaml**
-
-```
-[os9_sw1]
-os9_sw1 ansible_host=100.104.28.119
-
-[os9_sw2]
-os9_sw2 ansible_host=100.104.28.118
-
-[os9_switches:children]
-os9_sw1
-os9_sw2
-
-```
-
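-With the playbook, host variables, and inventory files above in place, the playbook can be run against the inventory with `ansible-playbook -i inventory.yaml playbook.yaml` (file names as used in this example).
-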
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst b/ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst
deleted file mode 100644
index e91c0ed13..000000000
--- a/ansible_collections/dellemc/os9/changelogs/CHANGELOG.rst
+++ /dev/null
@@ -1,76 +0,0 @@
-======================================================================
-Ansible Network Collection for Dell EMC OS9 Release Notes
-======================================================================
-
-.. contents:: Topics
-
-v1.0.4
-======
-
-Release Summary
----------------
-
-- Fixed sanity error found during the sanity test of automation hub upload
-- Fixed an issue in using a list of strings for the commands argument of the os9_command module (https://github.com/ansible-collections/dellemc.os9/issues/15)
-
-v1.0.3
-======
-
-Release Summary
----------------
-
-Added bug fixes for bugs found during System Test.
-
-v1.0.2
-======
-
-Release Summary
----------------
-
-Added changelogs.
-
-v1.0.1
-======
-
-Release Summary
----------------
-
-Updated documentation review comments.
-
-v1.0.0
-======
-
-New Modules
------------
-
-- os9_command - Run commands on devices running Dell EMC os9.
-- os9_config - Manage configuration on devices running os9.
-- os9_facts - Collect facts from devices running os9.
-
-New Roles
----------
-
-- os9_aaa - Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server.
-- os9_acl - Facilitates the configuration of Access Control lists.
-- os9_bgp - Facilitates the configuration of border gateway protocol (BGP) attributes.
-- os9_copy_config - This role pushes the backup running configuration into an os9 device.
-- os9_dcb - Facilitates the configuration of data center bridging (DCB).
-- os9_dns - Facilitates the configuration of domain name service (DNS).
-- os9_ecmp - Facilitates the configuration of equal cost multi-path (ECMP) for IPv4.
-- os9_interface - Facilitates the configuration of interface attributes.
-- os9_lag - Facilitates the configuration of link aggregation group (LAG) attributes.
-- os9_lldp - Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level.
-- os9_logging - Facilitates the configuration of global logging attributes and logging servers.
-- os9_ntp - Facilitates the configuration of network time protocol (NTP) attributes.
-- os9_prefix_list - Facilitates the configuration of IP prefix-list.
-- os9_sflow - Facilitates the configuration of global and interface level sFlow attributes.
-- os9_snmp - Facilitates the configuration of global SNMP attributes.
-- os9_system - Facilitates the configuration of hostname and hashing algorithm.
-- os9_users - Facilitates the configuration of global system user attributes.
-- os9_vlan - Facilitates the configuration of virtual LAN (VLAN) attributes.
-- os9_vlt - Facilitates the configuration of virtual link trunking (VLT).
-- os9_vrf - Facilitates the configuration of virtual routing and forwarding (VRF).
-- os9_vrrp - Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes.
-- os9_xstp - Facilitates the configuration of xSTP attributes.
-
-\(c) 2020 Dell Inc. or its subsidiaries. All Rights Reserved.
diff --git a/ansible_collections/dellemc/os9/changelogs/changelog.yaml b/ansible_collections/dellemc/os9/changelogs/changelog.yaml
deleted file mode 100644
index 82ea60c2e..000000000
--- a/ansible_collections/dellemc/os9/changelogs/changelog.yaml
+++ /dev/null
@@ -1,107 +0,0 @@
-ancestor: null
-releases:
- 1.0.0:
- modules:
- - description: Run commands on devices running Dell EMC os9.
- name: os9_command
- namespace: ''
- - description: Manage configuration on devices running os9.
- name: os9_config
- namespace: ''
- - description: Collect facts from devices running os9.
- name: os9_facts
- namespace: ''
- roles:
- - description: Facilitates the configuration of Authentication Authorization and Accounting (AAA), TACACS and RADIUS server.
- name: os9_aaa
- namespace: ''
- - description: Facilitates the configuration of Access Control lists.
- name: os9_acl
- namespace: ''
- - description: Facilitates the configuration of border gateway protocol (BGP) attributes.
- name: os9_bgp
- namespace: ''
- - description: This role pushes the backup running configuration into an OS9 device.
- name: os9_copy_config
- namespace: ''
- - description: Facilitates the configuration of data center bridging (DCB).
- name: os9_dcb
- namespace: ''
- - description: Facilitates the configuration of domain name service (DNS).
- name: os9_dns
- namespace: ''
- - description: Facilitates the configuration of equal cost multi-path (ECMP) for IPv4.
- name: os9_ecmp
- namespace: ''
- - description: Facilitates the configuration of interface attributes.
- name: os9_interface
- namespace: ''
- - description: Facilitates the configuration of link aggregation group (LAG) attributes.
- name: os9_lag
- namespace: ''
- - description: Facilitates the configuration of link layer discovery protocol (LLDP) attributes at global and interface level.
- name: os9_lldp
- namespace: ''
- - description: Facilitates the configuration of global logging attributes and logging servers.
- name: os9_logging
- namespace: ''
- - description: Facilitates the configuration of network time protocol (NTP) attributes.
- name: os9_ntp
- namespace: ''
- - description: Facilitates the configuration of IP prefix-list.
- name: os9_prefix_list
- namespace: ''
- - description: Facilitates the configuration of global and interface level sFlow attributes.
- name: os9_sflow
- namespace: ''
- - description: Facilitates the configuration of global SNMP attributes.
- name: os9_snmp
- namespace: ''
- - description: Facilitates the configuration of hostname and hashing algorithm.
- name: os9_system
- namespace: ''
- - description: Facilitates the configuration of global system user attributes.
- name: os9_users
- namespace: ''
- - description: Facilitates the configuration of virtual LAN (VLAN) attributes.
- name: os9_vlan
- namespace: ''
- - description: Facilitates the configuration of virtual link trunking (VLT).
- name: os9_vlt
- namespace: ''
- - description: Facilitates the configuration of virtual routing and forwarding (VRF).
- name: os9_vrf
- namespace: ''
- - description: Facilitates the configuration of virtual router redundancy protocol (VRRP) attributes.
- name: os9_vrrp
- namespace: ''
- - description: Facilitates the configuration of xSTP attributes.
- name: os9_xstp
- namespace: ''
- release_date: '2020-07-31'
- 1.0.1:
- changes:
- release_summary: Updated documentation review comments
- fragments:
- - 1.0.1.yaml
- release_date: '2020-08-04'
- 1.0.2:
- changes:
- release_summary: Added changelogs.
- fragments:
- - 1.0.2.yaml
- release_date: '2020-08-18'
- 1.0.3:
- changes:
- release_summary: Added bug fixes for bugs found during System Test.
- fragments:
- - 1.0.3.yaml
- release_date: '2020-10-09'
- 1.0.4:
- changes:
- bugfixes:
- - Fixed sanity error found during the sanity test of automation hub upload
- - Fixed an issue in using a list of strings for the commands argument of the os9_command module (https://github.com/ansible-collections/dellemc.os9/issues/15)
- fragments:
- - 1.0.4.yaml
- release_date: '2021-02-15' \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/changelogs/config.yaml b/ansible_collections/dellemc/os9/changelogs/config.yaml
deleted file mode 100644
index d536811c3..000000000
--- a/ansible_collections/dellemc/os9/changelogs/config.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-changelog_filename_template: CHANGELOG.rst
-changelog_filename_version_depth: 0
-changes_file: changelog.yaml
-changes_format: combined
-keep_fragments: false
-mention_ancestor: true
-new_plugins_after_name: removed_features
-notesdir: fragments
-prelude_section_name: release_summary
-prelude_section_title: Release Summary
-flatmap: true
-sections:
-- - major_changes
- - Major Changes
-- - minor_changes
- - Minor Changes
-- - breaking_changes
- - Breaking Changes / Porting Guide
-- - deprecated_features
- - Deprecated Features
-- - removed_features
- - Removed Features (previously deprecated)
-- - security_fixes
- - Security Fixes
-- - bugfixes
- - Bugfixes
-- - known_issues
- - Known Issues
-title: Ansible Network Collection for Dell EMC OS9
-trivial_section_name: trivial
diff --git a/ansible_collections/dellemc/os9/docs/os9_aaa.md b/ansible_collections/dellemc/os9/docs/os9_aaa.md
deleted file mode 100644
index 276bb7667..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_aaa.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_aaa/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_acl.md b/ansible_collections/dellemc/os9/docs/os9_acl.md
deleted file mode 100644
index e2a0fe662..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_acl.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_acl/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_bgp.md b/ansible_collections/dellemc/os9/docs/os9_bgp.md
deleted file mode 100644
index dc6b754cd..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_bgp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_bgp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_copy_config.md b/ansible_collections/dellemc/os9/docs/os9_copy_config.md
deleted file mode 100644
index d9342bc2a..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_copy_config.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_copy_config/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_dcb.md b/ansible_collections/dellemc/os9/docs/os9_dcb.md
deleted file mode 100644
index 7199f1f64..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_dcb.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_dcb/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_dns.md b/ansible_collections/dellemc/os9/docs/os9_dns.md
deleted file mode 100644
index 011470824..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_dns.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_dns/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_ecmp.md b/ansible_collections/dellemc/os9/docs/os9_ecmp.md
deleted file mode 100644
index b3281d802..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_ecmp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_ecmp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_interface.md b/ansible_collections/dellemc/os9/docs/os9_interface.md
deleted file mode 100644
index 1fc7f06e1..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_interface.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_interface/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_lag.md b/ansible_collections/dellemc/os9/docs/os9_lag.md
deleted file mode 100644
index 3621b7a1f..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_lag.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_lag/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_lldp.md b/ansible_collections/dellemc/os9/docs/os9_lldp.md
deleted file mode 100644
index 619667acc..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_lldp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_lldp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_logging.md b/ansible_collections/dellemc/os9/docs/os9_logging.md
deleted file mode 100644
index eb996e012..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_logging.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_logging/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_ntp.md b/ansible_collections/dellemc/os9/docs/os9_ntp.md
deleted file mode 100644
index 6e6800f07..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_ntp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_ntp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_prefix_list.md b/ansible_collections/dellemc/os9/docs/os9_prefix_list.md
deleted file mode 100644
index 53760cec9..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_prefix_list.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_prefix_list/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_sflow.md b/ansible_collections/dellemc/os9/docs/os9_sflow.md
deleted file mode 100644
index 293434466..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_sflow.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_sflow/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_snmp.md b/ansible_collections/dellemc/os9/docs/os9_snmp.md
deleted file mode 100644
index c698c4a73..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_snmp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_snmp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_system.md b/ansible_collections/dellemc/os9/docs/os9_system.md
deleted file mode 100644
index 350df9ae7..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_system.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_system/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_users.md b/ansible_collections/dellemc/os9/docs/os9_users.md
deleted file mode 100644
index 893584bba..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_users.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_users/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_vlan.md b/ansible_collections/dellemc/os9/docs/os9_vlan.md
deleted file mode 100644
index 62b37f32c..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_vlan.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_vlan/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_vlt.md b/ansible_collections/dellemc/os9/docs/os9_vlt.md
deleted file mode 100644
index e492ad7a1..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_vlt.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_vlt/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_vrf.md b/ansible_collections/dellemc/os9/docs/os9_vrf.md
deleted file mode 100644
index 3c1c4488f..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_vrf.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_vrf/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_vrrp.md b/ansible_collections/dellemc/os9/docs/os9_vrrp.md
deleted file mode 100644
index 3ba26515d..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_vrrp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_vrrp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/os9_xstp.md b/ansible_collections/dellemc/os9/docs/os9_xstp.md
deleted file mode 100644
index 85137fd47..000000000
--- a/ansible_collections/dellemc/os9/docs/os9_xstp.md
+++ /dev/null
@@ -1 +0,0 @@
-../roles/os9_xstp/README.md \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/docs/roles.rst b/ansible_collections/dellemc/os9/docs/roles.rst
deleted file mode 100644
index b87901d9c..000000000
--- a/ansible_collections/dellemc/os9/docs/roles.rst
+++ /dev/null
@@ -1,136 +0,0 @@
-##############################################################
-Ansible Network Collection Roles for Dell EMC OS9
-##############################################################
-
-The roles facilitate provisioning of devices running Dell EMC OS9. This document describes each of the roles.
-
-AAA role
---------
-
-The `os9_aaa <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_aaa/README.md>`_ role facilitates the configuration of authentication, authorization, and accounting (AAA). It supports the configuration of TACACS and RADIUS servers and AAA.
-
-
-ACL role
---------
-
-The `os9_acl <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_acl/README.md>`_ role facilitates the configuration of an access control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to line terminals.
-
-
-BGP role
---------
-
-The `os9_bgp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_bgp/README.md>`_ role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path.
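-
-A minimal sketch of the host variables this role consumes, reusing the attribute names from the sample CLOS fabric playbook shipped with this collection (the ASN, router ID, and neighbor values are illustrative only)::
-
-    os9_bgp:
-      asn: 64801
-      router_id: 100.0.2.1
-      neighbor:
-        - type: ipv4
-          remote_asn: 64901
-          ip: 100.1.1.1
-          admin: up
-          state: present
-      state: present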
-
-
-Copy configuration role
------------------------
-
-The `os9_copy_config <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_copy_config/README.md>`_ role pushes the backup running configuration into a device. This role merges the configuration in the template file with the running configuration of the Dell EMC Networking OS9 device.
-
-
-DCB role
---------
-
-The `os9_dcb <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_dcb/README.md>`_ role facilitates the configuration of data center bridging (DCB). It supports the configuration of the DCB map and the DCB buffer, and assigns them to interfaces.
-
-
-DNS role
---------
-
-The `os9_dns <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_dns/README.md>`_ role facilitates the configuration of domain name service (DNS).
-
-
-ECMP role
----------
-
-The `os9_ecmp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_ecmp/README.md>`_ role facilitates the configuration of equal cost multi-path (ECMP). It supports the configuration of ECMP for IPv4.
-
-
-Interface role
---------------
-
-The `os9_interface <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_interface/README.md>`_ role facilitates the configuration of interface attributes. It supports the configuration of administrative state, description, MTU, IP address, IP helper, suppress_ra and port mode.
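-
-A hedged sketch of the host variables this role consumes, mirroring the interface attributes used in the sample CLOS fabric playbook in this collection (the interface name, MTU, and addresses are illustrative)::
-
-    os9_interface:
-      TenGigabitEthernet 0/0:
-        desc: "Connected to Spine 1"
-        mtu: 9216
-        admin: up
-        switchport: False
-        ip_and_mask: 100.1.1.2/24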
-
-
-LAG role
---------
-
-The `os9_lag <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_lag/README.md>`_ role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type (static/dynamic) and minimum required link.
-
-
-LLDP role
----------
-
-The `os9_lldp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_lldp/README.md>`_ role facilitates the configuration of link layer discovery protocol (LLDP) attributes at the global and interface levels. This role supports the configuration of hello, mode, multiplier, advertise TLVs, management interface, FCoE, and iSCSI at the global and interface levels.
-
-
-Logging role
-------------
-
-The `os9_logging <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_logging/README.md>`_ role facilitates the configuration of global logging attributes, and supports the configuration of logging servers.
-
-
-NTP role
---------
-
-The `os9_ntp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_ntp/README.md>`_ role facilitates the configuration of network time protocol attributes.
-
-
-Prefix-list role
-----------------
-
-The `os9_prefix_list <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_prefix_list/README.md>`_ role facilitates the configuration of IP prefix-lists and assigns prefix-lists to line terminals.
-
-
-sFlow role
-----------
-
-The `os9_sflow <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_sflow/README.md>`_ role facilitates the configuration of global and interface-level sFlow attributes. It supports the configuration of sFlow collectors at the global level, enabling or disabling sFlow, and specifying the sFlow polling interval, sample rate, max datagram size, and so on at both the interface and global levels.
-
-
-SNMP role
----------
-
-The `os9_snmp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_snmp/README.md>`_ role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes such as users, groups, communities, location, and traps.
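-
-A minimal sketch of the host variables this role consumes, based on the os9_snmp block in the sample playbook group variables included in this collection (the community name and access mode are illustrative)::
-
-    os9_snmp:
-      snmp_community:
-        - name: public
-          access_mode: ro
-          state: present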
-
-
-System role
------------
-
-The `os9_system <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_system/README.md>`_ role facilitates the configuration of global system attributes. This role specifically enables configuration of the hostname and the enable password for OS9. It also supports the configuration of the management route, hash algorithm, clock, line terminal, banner, and reload type.
-
-
-Users role
-----------
-
-The `os9_users <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_users/README.md>`_ role facilitates the configuration of global system user attributes. This role supports the configuration of CLI users.
-
-
-VLAN role
----------
-
-The `os9_vlan <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_vlan/README.md>`_ role facilitates configuring virtual LAN (VLAN) attributes. This role supports the creation and deletion of a VLAN and its member ports.
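-
-A hedged sketch of the host variables this role consumes, reusing the os9_vlan example from the collection README (the VLAN IDs and description are illustrative)::
-
-    os9_vlan:
-      vlan 100:
-        description: "Blue"
-        state: present
-      vlan 888:
-        state: absent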
-
-
-VLT role
---------
-
-The `os9_vlt <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_vlt/README.md>`_ role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology.
-
-
-VRF role
---------
-
-The `os9_vrf <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_vrf/README.md>`_ role facilitates the configuration of basic virtual routing and forwarding (VRF), which helps partition a physical router into multiple virtual routers.
-
-
-VRRP role
----------
-
-The `os9_vrrp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_vrrp/README.md>`_ role facilitates configuration of virtual router redundancy protocol (VRRP) attributes. This role supports the creation of VRRP groups for interfaces, and setting the VRRP group attributes.
-
-
-xSTP role
----------
-
-The `os9_xstp <https://github.com/ansible-collections/dellemc.os9/blob/master/roles/os9_xstp/README.md>`_ role facilitates the configuration of xSTP attributes. This role supports multiple versions of spanning-tree protocol: spanning-tree protocol (STP), rapid spanning-tree protocol (RSTP), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). This role supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LANs (VLANs) to instances.
diff --git a/ansible_collections/dellemc/os9/meta/runtime.yml b/ansible_collections/dellemc/os9/meta/runtime.yml
deleted file mode 100644
index ad1562588..000000000
--- a/ansible_collections/dellemc/os9/meta/runtime.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-plugin_routing:
- action:
- os9_config:
- redirect: dellemc.os9.os9
- os9_command:
- redirect: dellemc.os9.os9
- os9_facts:
- redirect: dellemc.os9.os9
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md
deleted file mode 100644
index 410147db9..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/README.md
+++ /dev/null
@@ -1,35 +0,0 @@
-# Provision CLOS fabric using the Ansible collection for Dell EMC OS9
-
-This example describes how to use Ansible to build a CLOS fabric with Dell EMC PowerSwitch platforms running Dell EMC OS9. The sample topology is a two-tier CLOS fabric with two spines and four leaves connected in a mesh. eBGP runs between the two tiers. All spine switches have the same AS number, and each leaf switch has a unique AS number. All AS numbers used are private.
-
-For application load-balancing purposes, the same prefix is advertised from multiple leaf switches, using the _BGP multipath relax_ feature.
-
-![CLOS FABRIC Topology](https://ansible-dellos-docs.readthedocs.io/en/latest/_images/topo.png)
-
-## Create a simple Ansible playbook
-
-**1**. Create an inventory file called `inventory.yaml`, then specify the device IP address.
-
-**2**. Create a group variable file called `group_vars/all`, then define credentials and SNMP variables.
-
-**3**. Create a group variable file called `group_vars/spine.yaml`, then define credentials, hostname, and BGP neighbors of spine group.
-
-**4**. Create a host variable file called `host_vars/spine1.yaml`, then define the host, credentials, and transport.
-
-**5**. Create a host variable file called `host_vars/spine2.yaml`, then define the host, credentials, and transport.
-
-**6**. Create a host variable file called `host_vars/leaf1.yaml`, then define the host, credentials, and transport.
-
-**7**. Create a host variable file called `host_vars/leaf2.yaml`, then define the host, credentials, and transport.
-
-**8**. Create a host variable file called `host_vars/leaf3.yaml`, then define the host, credentials, and transport.
-
-**9**. Create a host variable file called `host_vars/leaf4.yaml`, then define the host, credentials, and transport.
-
-**10**. Create a playbook called `datacenter.yaml`.
-
-**11**. Run the playbook.
-
- ansible-playbook -i inventory.yaml datacenter.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved. \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml
deleted file mode 100644
index f17ebd143..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/datacenter.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- hosts: datacenter
- gather_facts: no
- connection: network_cli
- collections:
- - dellemc.os9
- roles:
- - os9_interface
- - os9_bgp
- - os9_snmp
- - os9_system
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all
deleted file mode 100644
index c3e4398b6..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/all
+++ /dev/null
@@ -1,10 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-build_dir: ../tmp/tmp_os9
-
-os9_snmp:
- snmp_community:
- - name: public
- access_mode: ro
- state: present
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml
deleted file mode 100644
index 175687251..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/group_vars/spine.yaml
+++ /dev/null
@@ -1,64 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-
-os9_system:
- hostname: "{{ spine_hostname }}"
-
-os9_bgp:
- asn: 64901
- router_id: "{{ bgp_router_id }}"
- best_path:
- as_path: ignore
- as_path_state: present
- med:
- - attribute: confed
- state: present
- neighbor:
- - type: ipv4
- remote_asn: "{{ bgp_neigh1_remote_asn }}"
- ip: "{{ bgp_neigh1_ip }}"
- admin: up
- state: present
- - type: ipv4
- remote_asn: "{{ bgp_neigh2_remote_asn }}"
- ip: "{{ bgp_neigh2_ip }}"
- admin: up
- state: present
- - type: ipv4
- remote_asn: "{{ bgp_neigh3_remote_asn }}"
- ip: "{{ bgp_neigh3_ip }}"
- admin: up
- state: present
- - type: ipv4
- remote_asn: "{{ bgp_neigh4_remote_asn }}"
- ip: "{{ bgp_neigh4_ip }}"
- admin: up
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh5_remote_asn }}"
- ip: "{{ bgp_neigh5_ip }}"
- admin: up
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh6_remote_asn }}"
- ip: "{{ bgp_neigh6_ip }}"
- admin: up
- address_family:
- - type: ipv4
- activate: false
- state: present
- - type: ipv6
- activate: true
- state: present
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh7_remote_asn }}"
- ip: "{{ bgp_neigh7_ip }}"
- admin: up
- state: present
- - type: ipv6
- remote_asn: "{{ bgp_neigh8_remote_asn }}"
- ip: "{{ bgp_neigh8_ip }}"
- admin: up
- state: present
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml
deleted file mode 100644
index 2244418ef..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf1.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-leaf_hostname: "leaf-1"
-os9_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: xor1
- state: present
-os9_interface:
- TenGigabitEthernet 0/0:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.1.2/24
- ipv6_and_mask: 2001:100:1:1::2/64
- state_ipv6: present
- TenGigabitEthernet 0/1:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.1.2/24
- ipv6_and_mask: 2001:100:2:1::2/64
- state_ipv6: present
-os9_bgp:
- asn: 64801
- router_id: 100.0.2.1
- best_path:
- as_path: ignore
- as_path_state: present
- med:
- - attribute: confed
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.1.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.1.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:1::1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:1::1
- admin: up
- state: present
- state: present
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml
deleted file mode 100644
index 2e5cc580d..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf2.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-hostname: leaf2
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-leaf_hostname: "leaf-2"
-os9_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: xor1
- state: present
-os9_interface:
- TenGigabitEthernet 0/0:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.17.2/24
- ipv6_and_mask: 2001:100:1:11::2/64
- state_ipv6: present
- TenGigabitEthernet 0/1:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.17.2/24
- ipv6_and_mask: 2001:100:2:11::2/64
-os9_bgp:
- asn: 64802
- router_id: 100.0.2.2
- best_path:
- as_path: ignore
- as_path_state: present
- med:
- - attribute: confed
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.18.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.17.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.17.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:11::1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:11::1
- admin: up
- state: present
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml
deleted file mode 100644
index f14f44e0c..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf3.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
-hostname: leaf3
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-leaf_hostname: "leaf-3"
-os9_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: xor1
- state: present
-os9_interface:
- TenGigabitEthernet 0/0:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.33.2/24
- ipv6_and_mask: 2001:100:1:21::2/64
- state_ipv6: present
- TenGigabitEthernet 0/1:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.33.2/24
- ipv6_and_mask: 2001:100:2:21::2/64
-os9_bgp:
- asn: 64803
- router_id: 100.0.2.3
- best_path:
- as_path: ignore
- as_path_state: present
- med:
- - attribute: confed
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.33.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.33.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:21::1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:22::1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:21::1
- admin: up
- state: present
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml
deleted file mode 100644
index 9fc8ca87d..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/leaf4.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-hostname: leaf4
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-leaf_hostname: "leaf-4"
-os9_system:
- hostname: "{{ leaf_hostname }}"
- hash_algo:
- algo:
- - name: ecmp
- mode: xor1
- state: present
-os9_interface:
- TenGigabitEthernet 0/0:
- desc: "Connected to Spine 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.49.2/24
- ipv6_and_mask: 2001:100:1:31::2/64
- state_ipv6: present
- TenGigabitEthernet 0/1:
- desc: "Connected to Spine 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.49.2/24
- ipv6_and_mask: 2001:100:2:31::2/64
- state_ipv6: present
-os9_bgp:
- asn: 64804
- router_id: 100.0.2.4
- best_path:
- as_path: ignore
- as_path_state: present
- med:
- - attribute: confed
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 64901
- ip: 100.1.49.1
- admin: up
- state: present
- - type: ipv4
- remote_asn: 64901
- ip: 100.2.49.1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:1:31::1
- admin: up
- state: present
- - type: ipv6
- remote_asn: 64901
- ip: 2001:100:2:31::1
- admin: up
- state: present
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml
deleted file mode 100644
index 9967d338b..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine1.yaml
+++ /dev/null
@@ -1,61 +0,0 @@
-hostname: spine1
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-spine_hostname: "spine-1"
-
-os9_interface:
- TenGigabitEthernet 0/2:
- desc: "Connected to leaf 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.1.1/24
- ipv6_and_mask: 2001:100:1:1::1/64
- state_ipv6: present
- TenGigabitEthernet 0/3:
- desc: "Connected to leaf 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.33.1/24
- ipv6_and_mask: 2001:100:1:21::1/64
- state_ipv6: present
- TenGigabitEthernet 0/4:
- desc: "Connected to leaf 3"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.17.1/24
- ipv6_and_mask: 2001:100:1:11::1/64
- state_ipv6: present
- TenGigabitEthernet 0/5:
- desc: "Connected to leaf 4"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.1.49.1/24
- ipv6_and_mask: 2001:100:1:31::1/64
- state_ipv6: present
-
-bgp_router_id: "100.0.1.1"
-bgp_neigh1_remote_asn: 64801
-bgp_neigh1_ip: "100.1.1.2"
-bgp_neigh2_remote_asn: 64803
-bgp_neigh2_ip: "100.1.33.2"
-bgp_neigh3_remote_asn: 64802
-bgp_neigh3_ip: "100.1.17.2"
-bgp_neigh4_remote_asn: 64804
-bgp_neigh4_ip: "100.1.49.2"
-bgp_neigh5_remote_asn: 64801
-bgp_neigh5_ip: "2001:100:1:1::2"
-bgp_neigh6_remote_asn: 64802
-bgp_neigh6_ip: "2001:100:1:11::2"
-bgp_neigh7_remote_asn: 64803
-bgp_neigh7_ip: "2001:100:1:21::2"
-bgp_neigh8_remote_asn: 64804
-bgp_neigh8_ip: "2001:100:1:31::2"
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml
deleted file mode 100644
index 218d6478a..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/host_vars/spine2.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
-hostname: spine2
-ansible_ssh_user: xxxxx
-ansible_ssh_pass: xxxxx
-ansible_network_os: dellemc.os9.os9
-spine_hostname: "spine-2"
-os9_interface:
- TenGigabitEthernet 0/6:
- desc: "Connected to leaf 1"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.1.1/24
- ipv6_and_mask: 2001:100:2:1::1/64
- state_ipv6: present
- TenGigabitEthernet 0/7:
- desc: "Connected to leaf 2"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.17.1/24
- ipv6_and_mask: 2001:100:2:11::1/64
- state_ipv6: present
- TenGigabitEthernet 0/8:
- desc: "Connected to leaf 3"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.33.1/24
- ipv6_and_mask: 2001:100:2:21::1/64
- state_ipv6: present
- TenGigabitEthernet 0/9:
- desc: "Connected to leaf 4"
- mtu: 9216
- portmode:
- admin: up
- switchport: False
- ip_and_mask: 100.2.49.1/24
- ipv6_and_mask: 2001:100:2:31::1/64
- state_ipv6: present
-
-bgp_router_id: "100.0.1.2"
-bgp_neigh1_remote_asn: 64801
-bgp_neigh1_ip: "100.2.1.2"
-bgp_neigh2_remote_asn: 64802
-bgp_neigh2_ip: "100.2.33.2"
-bgp_neigh3_remote_asn: 64803
-bgp_neigh3_ip: "100.2.17.2"
-bgp_neigh4_remote_asn: 64804
-bgp_neigh4_ip: "100.2.49.2"
-bgp_neigh5_remote_asn: 64801
-bgp_neigh5_ip: "2001:100:2:1::2"
-bgp_neigh6_remote_asn: 64802
-bgp_neigh6_ip: "2001:100:2:11::2"
-bgp_neigh7_remote_asn: 64803
-bgp_neigh7_ip: "2001:100:2:21::2"
-bgp_neigh8_remote_asn: 64804
-bgp_neigh8_ip: "2001:100:2:31::2"
diff --git a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml b/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/playbooks/clos_fabric_ebgp/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/plugins/action/os9.py b/ansible_collections/dellemc/os9/plugins/action/os9.py
deleted file mode 100644
index 0cbfa910c..000000000
--- a/ansible_collections/dellemc/os9/plugins/action/os9.py
+++ /dev/null
@@ -1,95 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# Copyright (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import sys
-import copy
-
-from ansible import constants as C
-from ansible.module_utils._text import to_text
-from ansible.module_utils.connection import Connection
-from ansible_collections.ansible.netcommon.plugins.action.network import ActionModule as ActionNetworkModule
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import load_provider
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_provider_spec
-from ansible.utils.display import Display
-
-display = Display()
-
-
-class ActionModule(ActionNetworkModule):
-
- def run(self, tmp=None, task_vars=None):
- del tmp # tmp no longer has any effect
-
- module_name = self._task.action.split('.')[-1]
- self._config_module = True if module_name == 'os9_config' else False
- socket_path = None
- persistent_connection = self._play_context.connection.split('.')[-1]
-
- if persistent_connection == 'network_cli':
- provider = self._task.args.get('provider', {})
- if any(provider.values()):
- display.warning('provider is unnecessary when using network_cli and will be ignored')
- del self._task.args['provider']
- elif self._play_context.connection == 'local':
- provider = load_provider(os9_provider_spec, self._task.args)
- pc = copy.deepcopy(self._play_context)
- pc.connection = 'network_cli'
- pc.network_os = 'dellemc.os9.os9'
- pc.remote_addr = provider['host'] or self._play_context.remote_addr
- pc.port = int(provider['port'] or self._play_context.port or 22)
- pc.remote_user = provider['username'] or self._play_context.connection_user
- pc.password = provider['password'] or self._play_context.password
- pc.private_key_file = provider['ssh_keyfile'] or self._play_context.private_key_file
- command_timeout = int(provider['timeout'] or C.PERSISTENT_COMMAND_TIMEOUT)
- pc.become = provider['authorize'] or False
- if pc.become:
- pc.become_method = 'enable'
- pc.become_pass = provider['auth_pass']
-
- display.vvv('using connection plugin %s' % pc.connection, pc.remote_addr)
- connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)
- connection.set_options(direct={'persistent_command_timeout': command_timeout})
-
- socket_path = connection.run()
- display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
- if not socket_path:
- return {'failed': True,
- 'msg': 'unable to open shell. Please see: ' +
- 'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}
-
- task_vars['ansible_socket'] = socket_path
-
- # make sure we are in the right cli context which should be
- # enable mode and not config module
- if socket_path is None:
- socket_path = self._connection.socket_path
-
- conn = Connection(socket_path)
- out = conn.get_prompt()
- while to_text(out, errors='surrogate_then_replace').strip().endswith(')#'):
- display.vvvv('wrong context, sending exit to device', self._play_context.remote_addr)
- conn.send_command('exit')
- out = conn.get_prompt()
-
- result = super(ActionModule, self).run(task_vars=task_vars)
- return result
diff --git a/ansible_collections/dellemc/os9/plugins/cliconf/os9.py b/ansible_collections/dellemc/os9/plugins/cliconf/os9.py
deleted file mode 100644
index 95334bfdb..000000000
--- a/ansible_collections/dellemc/os9/plugins/cliconf/os9.py
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-DOCUMENTATION = """
----
-cliconf: os9
-short_description: Use os9 cliconf to run command on Dell OS9 platform
-description:
- - This os9 plugin provides low level abstraction apis for
- sending and receiving CLI commands from Dell OS9 network devices.
-"""
-
-import re
-import json
-
-from itertools import chain
-
-from ansible.module_utils._text import to_bytes, to_text
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list
-from ansible.plugins.cliconf import CliconfBase, enable_mode
-
-
-class Cliconf(CliconfBase):
-
- def get_device_info(self):
- device_info = {}
-
- device_info['network_os'] = 'dellemc.os9.os9'
- reply = self.get('show version')
- data = to_text(reply, errors='surrogate_or_strict').strip()
-
- match = re.search(r'Software Version (\S+)', data)
- if match:
- device_info['network_os_version'] = match.group(1)
-
- match = re.search(r'System Type (\S+)', data, re.M)
- if match:
- device_info['network_os_model'] = match.group(1)
-
- reply = self.get('show running-config | grep hostname')
- data = to_text(reply, errors='surrogate_or_strict').strip()
- match = re.search(r'^hostname (.+)', data, re.M)
- if match:
- device_info['network_os_hostname'] = match.group(1)
-
- return device_info
-
- @enable_mode
- def get_config(self, source='running', format='text', flags=None):
- if source not in ('running', 'startup'):
- return self.invalid_params("fetching configuration from %s is not supported" % source)
-        if source == 'running':
-            cmd = 'show running-config'
-        else:
-            cmd = 'show startup-config'
- return self.send_command(cmd)
-
- @enable_mode
- def edit_config(self, command):
- for cmd in chain(['configure terminal'], to_list(command), ['end']):
- self.send_command(cmd)
-
- def get(self, command, prompt=None, answer=None, sendonly=False, newline=True, check_all=False):
- return self.send_command(command=command, prompt=prompt, answer=answer, sendonly=sendonly, newline=newline, check_all=check_all)
-
- def get_capabilities(self):
- result = super(Cliconf, self).get_capabilities()
- return json.dumps(result)
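For context, module code does not instantiate this Cliconf class directly; its methods are reached through the persistent connection proxy. A minimal sketch, assuming the task runs over a network_cli connection so that module._socket_path is set (function names here are illustrative):

    # Sketch only: calling the os9 cliconf methods via the connection proxy.
    from ansible.module_utils.connection import Connection

    def fetch_startup_config(module):
        conn = Connection(module._socket_path)    # RPC proxy to the cliconf plugin
        return conn.get_config(source='startup')  # dispatched to Cliconf.get_config

    def push_hostname(module, hostname):
        # dispatched to Cliconf.edit_config, which wraps the commands in
        # 'configure terminal' ... 'end'
        conn = Connection(module._socket_path)
        conn.edit_config(['hostname %s' % hostname])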
diff --git a/ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py b/ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py
deleted file mode 100644
index 35ec6725a..000000000
--- a/ansible_collections/dellemc/os9/plugins/doc_fragments/os9.py
+++ /dev/null
@@ -1,73 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Peter Sprygada <psprygada@ansible.com>
-# Copyright: (c) 2020, Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-class ModuleDocFragment(object):
-
- # Standard files documentation fragment
- DOCUMENTATION = r'''
-options:
- provider:
- description:
- - A dict object containing connection details.
- type: dict
- suboptions:
- host:
- description:
- - Specifies the DNS host name or address for connecting to the remote
- device over the specified transport. The value of host is used as
- the destination address for the transport.
- type: str
- port:
- description:
- - Specifies the port to use when building the connection to the remote
- device.
- type: int
- username:
- description:
- - User to authenticate the SSH session to the remote device. If the
- value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_USERNAME) will be used instead.
- type: str
- password:
- description:
- - Password to authenticate the SSH session to the remote device. If the
- value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_PASSWORD) will be used instead.
- type: str
- ssh_keyfile:
- description:
- - Path to an ssh key used to authenticate the SSH session to the remote
- device. If the value is not specified in the task, the value of
- environment variable C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
- type: path
- timeout:
- description:
- - Specifies idle timeout (in seconds) for the connection. Useful if the
- console freezes before continuing. For example when saving
- configurations.
- type: int
- authorize:
- description:
- - Instructs the module to enter privileged mode on the remote device before
- sending any commands. If not specified, the device will attempt to execute
- all commands in non-privileged mode. If the value is not specified in the
- task, the value of environment variable C(ANSIBLE_NET_AUTHORIZE) will be
- used instead.
- type: bool
- auth_pass:
- description:
- - Specifies the password to use if required to enter privileged mode on the
- remote device. If I(authorize) is false, then this argument does nothing.
- If the value is not specified in the task, the value of environment variable
- C(ANSIBLE_NET_AUTH_PASS) will be used instead.
- type: str
-notes:
- - For more information on using Ansible to manage Dell EMC Network devices see U(https://www.ansible.com/ansible-dell-networking).
-'''
diff --git a/ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py b/ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py
deleted file mode 100644
index 14c777734..000000000
--- a/ansible_collections/dellemc/os9/plugins/module_utils/network/os9.py
+++ /dev/null
@@ -1,146 +0,0 @@
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# (c) 2020 Red Hat, Inc
-#
-# Copyright (c) 2020 Dell Inc.
-#
-# This code is part of Ansible, but is an independent component.
-# This particular file snippet, and this file snippet only, is BSD licensed.
-# Modules you write using this snippet, which is embedded dynamically by Ansible
-# still belong to the author of the module, and may assign their own license
-# to the complete work.
-#
-# Redistribution and use in source and binary forms, with or without modification,
-# are permitted provided that the following conditions are met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above copyright notice,
-# this list of conditions and the following disclaimer in the documentation
-# and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
-# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
-# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-import re
-
-from ansible.module_utils._text import to_text
-from ansible.module_utils.basic import env_fallback
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import to_list, ComplexList
-from ansible.module_utils.connection import exec_command
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, ConfigLine
-
-_DEVICE_CONFIGS = {}
-
-WARNING_PROMPTS_RE = [
- r"[\r\n]?\[confirm yes/no\]:\s?$",
- r"[\r\n]?\[y/n\]:\s?$",
- r"[\r\n]?\[yes/no\]:\s?$"
-]
-
-os9_provider_spec = {
- 'host': dict(),
- 'port': dict(type='int'),
- 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])),
- 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True),
- 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'),
- 'authorize': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTHORIZE']), type='bool'),
- 'auth_pass': dict(fallback=(env_fallback, ['ANSIBLE_NET_AUTH_PASS']), no_log=True),
- 'timeout': dict(type='int'),
-}
-os9_argument_spec = {
- 'provider': dict(type='dict', options=os9_provider_spec),
-}
-
-
-def check_args(module, warnings):
- pass
-
-
-def get_config(module, flags=None):
- flags = [] if flags is None else flags
-
- cmd = 'show running-config '
- cmd += ' '.join(flags)
- cmd = cmd.strip()
-
- try:
- return _DEVICE_CONFIGS[cmd]
- except KeyError:
- rc, out, err = exec_command(module, cmd)
- if rc != 0:
- module.fail_json(msg='unable to retrieve current config', stderr=to_text(err, errors='surrogate_or_strict'))
- cfg = to_text(out, errors='surrogate_or_strict').strip()
- _DEVICE_CONFIGS[cmd] = cfg
- return cfg
-
-
-def to_commands(module, commands):
- spec = {
- 'command': dict(key=True),
- 'prompt': dict(),
- 'answer': dict()
- }
- transform = ComplexList(spec, module)
- return transform(commands)
-
-
-def run_commands(module, commands, check_rc=True):
- responses = list()
- commands = to_commands(module, to_list(commands))
- for cmd in commands:
- cmd = module.jsonify(cmd)
- rc, out, err = exec_command(module, cmd)
- if check_rc and rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), rc=rc)
- responses.append(to_text(out, errors='surrogate_or_strict'))
- return responses
-
-
-def load_config(module, commands):
- rc, out, err = exec_command(module, 'configure terminal')
- if rc != 0:
- module.fail_json(msg='unable to enter configuration mode', err=to_text(err, errors='surrogate_or_strict'))
-
- for command in to_list(commands):
- if command == 'end':
- continue
- rc, out, err = exec_command(module, command)
- if rc != 0:
- module.fail_json(msg=to_text(err, errors='surrogate_or_strict'), command=command, rc=rc)
-
- exec_command(module, 'end')
-
-
-def get_sublevel_config(running_config, module):
- contents = list()
- current_config_contents = list()
- running_config = NetworkConfig(contents=running_config, indent=1)
- obj = running_config.get_object(module.params['parents'])
- if obj:
- contents = obj.children
- contents[:0] = module.params['parents']
-
- indent = 0
- for c in contents:
- if isinstance(c, str):
- current_config_contents.append(c.rjust(len(c) + indent, ' '))
- if isinstance(c, ConfigLine):
- current_config_contents.append(c.raw)
- indent = 1
- sublevel_config = '\n'.join(current_config_contents)
-
- return sublevel_config
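A minimal sketch of a custom module built on the helpers above (os9_argument_spec, run_commands, load_config); the module body, hostname, and commands it sends are illustrative only:

    # Sketch only: a toy module consuming the os9 module_utils helpers.
    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import (
        os9_argument_spec, run_commands, load_config)

    def main():
        module = AnsibleModule(argument_spec=dict(os9_argument_spec),
                               supports_check_mode=True)
        version = run_commands(module, ['show version'])[0]
        if not module.check_mode:
            # load_config enters config mode, pushes the lines, then sends 'end'
            load_config(module, ['hostname os9-demo'])
        module.exit_json(changed=True, version_output=version)

    if __name__ == '__main__':
        main()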
diff --git a/ansible_collections/dellemc/os9/plugins/modules/os9_command.py b/ansible_collections/dellemc/os9/plugins/modules/os9_command.py
deleted file mode 100644
index 20e3cc581..000000000
--- a/ansible_collections/dellemc/os9/plugins/modules/os9_command.py
+++ /dev/null
@@ -1,232 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-
-# Copyright: (c) 2020, Peter Sprygada <psprygada@ansible.com>
-# Copyright: (c) 2020, Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-DOCUMENTATION = """
----
-module: os9_command
-author: "Dhivya P (@dhivyap)"
-short_description: Run commands on remote devices running Dell OS9
-description:
- - Sends arbitrary commands to a Dell OS9 node and returns the results
- read from the device. This module includes an
- argument that will cause the module to wait for a specific condition
- before returning or timing out if the condition is not met.
- - This module does not support running commands in configuration mode.
-    Please use M(dellemc.os9.os9_config) to configure Dell OS9 devices.
-extends_documentation_fragment: dellemc.os9.os9
-options:
- commands:
- description:
- - List of commands to send to the remote os9 device over the
- configured provider. The resulting output from the command
- is returned. If the I(wait_for) argument is provided, the
-        module does not return until the condition is satisfied or
-        the number of retries has expired.
- type: list
- required: true
- wait_for:
- description:
- - List of conditions to evaluate against the output of the
- command. The task will wait for each condition to be true
- before moving forward. If the conditional is not true
- within the configured number of I(retries), the task fails.
- See examples.
- type: list
- elements: str
- match:
- description:
- - The I(match) argument is used in conjunction with the
- I(wait_for) argument to specify the match policy. Valid
- values are C(all) or C(any). If the value is set to C(all)
- then all conditionals in the wait_for must be satisfied. If
- the value is set to C(any) then only one of the values must be
- satisfied.
- type: str
- default: all
- choices: [ all, any ]
- retries:
- description:
-      - Specifies the number of times a command should be retried
-        before it is considered failed. The command is run on the
- target device every retry and evaluated against the
- I(wait_for) conditions.
- type: int
- default: 10
- interval:
- description:
- - Configures the interval in seconds to wait between retries
- of the command. If the command does not pass the specified
- conditions, the interval indicates how long to wait before
- trying the command again.
- type: int
- default: 1
-notes:
- - This module requires Dell OS9 version 9.10.0.1P13 or above.
-  - This module requires an increase of the SSH connection rate limit.
-    Use the command I(ip ssh connection-rate-limit 60) to configure it.
-    This can also be done with the M(os9_config) module.
-"""
-
-EXAMPLES = """
-tasks:
- - name: run show version on remote devices
- os9_command:
- commands: show version
- - name: run show version and check to see if output contains OS9
- os9_command:
- commands: show version
- wait_for: result[0] contains OS9
- - name: run multiple commands on remote nodes
- os9_command:
- commands:
- - show version
- - show interfaces
- - name: run multiple commands and evaluate the output
- os9_command:
- commands:
- - show version
- - show interfaces
- wait_for:
- - result[0] contains OS9
- - result[1] contains Loopback
-"""
-
-RETURN = """
-stdout:
- description: The set of responses from the commands
- returned: always apart from low level errors (such as action plugin)
- type: list
- sample: ['...', '...']
-stdout_lines:
- description: The value of stdout split into a list
- returned: always apart from low level errors (such as action plugin)
- type: list
- sample: [['...', '...'], ['...'], ['...']]
-failed_conditions:
- description: The list of conditionals that have failed
- returned: failed
- type: list
- sample: ['...', '...']
-warnings:
- description: The list of warnings (if any) generated by module based on arguments
- returned: always
- type: list
- sample: ['...', '...']
-"""
-import time
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import run_commands
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_argument_spec, check_args
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import ComplexList
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional
-from ansible.module_utils.six import string_types
-
-
-def to_lines(stdout):
- for item in stdout:
- if isinstance(item, string_types):
- item = str(item).split('\n')
- yield item
-
-
-def parse_commands(module, warnings):
- command = ComplexList(dict(
- command=dict(key=True),
- prompt=dict(),
- answer=dict()
- ), module)
- commands = command(module.params['commands'])
- for index, item in enumerate(commands):
- if module.check_mode and not item['command'].startswith('show'):
- warnings.append(
- 'only show commands are supported when using check mode, not '
- 'executing `%s`' % item['command']
- )
- elif item['command'].startswith('conf'):
- module.fail_json(
- msg='os9_command does not support running config mode '
- 'commands. Please use os9_config instead'
- )
- return commands
-
-
-def main():
- """main entry point for module execution
- """
- argument_spec = dict(
- # { command: <str>, prompt: <str>, response: <str> }
- commands=dict(type='list', required=True),
-
- wait_for=dict(type='list', elements='str'),
- match=dict(default='all', choices=['all', 'any']),
-
- retries=dict(default=10, type='int'),
- interval=dict(default=1, type='int')
- )
-
- argument_spec.update(os9_argument_spec)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- result = {'changed': False}
-
- warnings = list()
- check_args(module, warnings)
- commands = parse_commands(module, warnings)
- result['warnings'] = warnings
-
- wait_for = module.params['wait_for'] or list()
- conditionals = [Conditional(c) for c in wait_for]
-
- retries = module.params['retries']
- interval = module.params['interval']
- match = module.params['match']
-
- while retries > 0:
- responses = run_commands(module, commands)
-
- for item in list(conditionals):
- if item(responses):
- if match == 'any':
- conditionals = list()
- break
- conditionals.remove(item)
-
- if not conditionals:
- break
-
- time.sleep(interval)
- retries -= 1
-
- if conditionals:
- failed_conditions = [item.raw for item in conditionals]
- msg = 'One or more conditional statements have not been satisfied'
- module.fail_json(msg=msg, failed_conditions=failed_conditions)
-
- result.update({
- 'changed': False,
- 'stdout': responses,
- 'stdout_lines': list(to_lines(responses))
- })
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
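The wait_for handling above delegates to netcommon's Conditional class. A small standalone sketch of how one conditional string is evaluated against command output (the sample output text is made up):

    # Sketch only: evaluating a wait_for expression against command output.
    from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.parsing import Conditional

    responses = ['Dell EMC Networking OS9\nSoftware Version: 9.14(0.0)']
    cond = Conditional('result[0] contains OS9')
    print(cond(responses))   # True once the condition is satisfied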
diff --git a/ansible_collections/dellemc/os9/plugins/modules/os9_config.py b/ansible_collections/dellemc/os9/plugins/modules/os9_config.py
deleted file mode 100644
index a6d20ed00..000000000
--- a/ansible_collections/dellemc/os9/plugins/modules/os9_config.py
+++ /dev/null
@@ -1,350 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# Copyright (c) 2020 Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: os9_config
-author: "Dhivya P (@dhivyap)"
-short_description: Manage Dell EMC Networking OS9 configuration sections
-description:
- - OS9 configurations use a simple block indent file syntax
- for segmenting configuration into sections. This module provides
- an implementation for working with OS9 configuration sections in
- a deterministic way.
-extends_documentation_fragment: dellemc.os9.os9
-options:
- lines:
- description:
- - The ordered set of commands that should be configured in the
- section. The commands must be the exact same commands as found
- in the device running-config. Be sure to note the configuration
- command syntax as some commands are automatically modified by the
- device config parser. This argument is mutually exclusive with I(src).
- type: list
- aliases: ['commands']
- parents:
- description:
- - The ordered set of parents that uniquely identify the section or hierarchy
- the commands should be checked against. If the parents argument
- is omitted, the commands are checked against the set of top
- level or global commands.
- type: list
- src:
- description:
- - Specifies the source path to the file that contains the configuration
- or configuration template to load. The path to the source file can
- either be the full path on the Ansible control host or a relative
- path from the playbook or role root directory. This argument is
- mutually exclusive with I(lines).
- type: path
- before:
- description:
- - The ordered set of commands to push on to the command stack if
- a change needs to be made. This allows the playbook designer
- the opportunity to perform configuration commands prior to pushing
- any changes without affecting how the set of commands are matched
- against the system.
- type: list
- after:
- description:
- - The ordered set of commands to append to the end of the command
- stack if a change needs to be made. Just like with I(before) this
- allows the playbook designer to append a set of commands to be
- executed after the command set.
- type: list
- match:
- description:
- - Instructs the module on the way to perform the matching of
- the set of commands against the current device config. If
- match is set to I(line), commands are matched line by line. If
- match is set to I(strict), command lines are matched with respect
- to position. If match is set to I(exact), command lines
- must be an equal match. Finally, if match is set to I(none), the
- module will not attempt to compare the source configuration with
- the running configuration on the remote device.
- type: str
- default: line
- choices: ['line', 'strict', 'exact', 'none']
- replace:
- description:
- - Instructs the module on the way to perform the configuration
- on the device. If the replace argument is set to I(line) then
- the modified lines are pushed to the device in configuration
- mode. If the replace argument is set to I(block) then the entire
- command block is pushed to the device in configuration mode if any
- line is not correct.
- type: str
- default: line
- choices: ['line', 'block']
- update:
- description:
- - The I(update) argument controls how the configuration statements
- are processed on the remote device. Valid choices for the I(update)
- argument are I(merge) and I(check). When you set this argument to
- I(merge), the configuration changes merge with the current
- device running configuration. When you set this argument to I(check)
- the configuration updates are determined but not actually configured
- on the remote device.
- type: str
- default: merge
- choices: ['merge', 'check']
- save:
- description:
-      - The C(save) argument instructs the module to save the
-        running-config to the startup-config at the conclusion of the
-        module run. If check mode is specified, this argument is ignored.
- type: bool
- default: no
- config:
- description:
- - The module, by default, will connect to the remote device and
- retrieve the current running-config to use as a base for comparing
- against the contents of source. There are times when it is not
- desirable to have the task get the current running-config for
- every task in a playbook. The I(config) argument allows the
- implementer to pass in the configuration to use as the base
- config for comparison.
- type: str
- backup:
- description:
- - This argument will cause the module to create a full backup of
- the current C(running-config) from the remote device before any
- changes are made. If the C(backup_options) value is not given,
- the backup file is written to the C(backup) folder in the playbook
- root directory. If the directory does not exist, it is created.
- type: bool
- default: 'no'
- backup_options:
- description:
- - This is a dict object containing configurable options related to backup file path.
- The value of this option is read only when C(backup) is set to I(yes), if C(backup) is set
- to I(no) this option will be silently ignored.
- suboptions:
- filename:
- description:
-          - The filename to be used to store the backup configuration. If the
-            filename is not given, it is generated based on the hostname, current
-            time, and date, in the format <hostname>_config.<current-date>@<current-time>
- type: str
- dir_path:
- description:
-          - This option provides the path, ending with the directory name, in which
-            the backup configuration file will be stored. If the directory does not
-            exist, it is created first, and the filename is either the value of
-            C(filename) or the default filename described in the C(filename) option.
-            If no path value is given, a I(backup) directory is created in the current
-            working directory and the backup configuration is copied to C(filename)
-            within that I(backup) directory.
- type: path
- type: dict
-notes:
- - This module requires Dell OS9 version 9.10.0.1P13 or above.
-  - This module requires an increase of the SSH connection rate limit.
-    Use the command I(ip ssh connection-rate-limit 60) to configure it.
-    This can also be done with the M(os9_config) module.
-"""
-
-EXAMPLES = """
-- os9_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
-- os9_config:
- lines:
- - 10 permit ip host 1.1.1.1 any log
- - 20 permit ip host 2.2.2.2 any log
- - 30 permit ip host 3.3.3.3 any log
- - 40 permit ip host 4.4.4.4 any log
- - 50 permit ip host 5.5.5.5 any log
- parents: ['ip access-list extended test']
- before: ['no ip access-list extended test']
- match: exact
-- os9_config:
- lines:
- - 10 permit ip host 1.1.1.1 any log
- - 20 permit ip host 2.2.2.2 any log
- - 30 permit ip host 3.3.3.3 any log
- - 40 permit ip host 4.4.4.4 any log
- parents: ['ip access-list extended test']
- before: ['no ip access-list extended test']
- replace: block
-- os9_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- backup: yes
- backup_options:
- filename: backup.cfg
- dir_path: /home/user
-"""
-
-RETURN = """
-updates:
- description: The set of commands that will be pushed to the remote device.
- returned: always
- type: list
- sample: ['hostname foo', 'router bgp 1', 'bgp router-id 1.1.1.1']
-commands:
- description: The set of commands that will be pushed to the remote device
- returned: always
- type: list
- sample: ['hostname foo', 'router bgp 1', 'bgp router-id 1.1.1.1']
-saved:
- description: Returns whether the configuration is saved to the startup
- configuration or not.
- returned: When not check_mode.
- type: bool
- sample: True
-backup_path:
- description: The full path to the backup file
- returned: when backup is yes
- type: str
- sample: /playbooks/ansible/backup/os9_config.2016-07-16@22:28:34
-"""
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import get_config, get_sublevel_config
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_argument_spec, check_args
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import load_config, run_commands
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import WARNING_PROMPTS_RE
-from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps
-
-
-def get_candidate(module):
- candidate = NetworkConfig(indent=1)
- if module.params['src']:
- candidate.load(module.params['src'])
- elif module.params['lines']:
- parents = module.params['parents'] or list()
- commands = module.params['lines'][0]
- if (isinstance(commands, dict)) and (isinstance(commands['command'], list)):
- candidate.add(commands['command'], parents=parents)
- elif (isinstance(commands, dict)) and (isinstance(commands['command'], str)):
- candidate.add([commands['command']], parents=parents)
- else:
- candidate.add(module.params['lines'], parents=parents)
- return candidate
-
-
-def get_running_config(module):
- contents = module.params['config']
- if not contents:
- contents = get_config(module)
- return contents
-
-
-def main():
-
- backup_spec = dict(
- filename=dict(),
- dir_path=dict(type='path')
- )
- argument_spec = dict(
- lines=dict(aliases=['commands'], type='list'),
- parents=dict(type='list'),
-
- src=dict(type='path'),
-
- before=dict(type='list'),
- after=dict(type='list'),
-
- match=dict(default='line',
- choices=['line', 'strict', 'exact', 'none']),
- replace=dict(default='line', choices=['line', 'block']),
-
- update=dict(choices=['merge', 'check'], default='merge'),
- save=dict(type='bool', default=False),
- config=dict(),
- backup=dict(type='bool', default=False),
- backup_options=dict(type='dict', options=backup_spec)
- )
-
- argument_spec.update(os9_argument_spec)
-
- mutually_exclusive = [('lines', 'src'),
- ('parents', 'src')]
- module = AnsibleModule(argument_spec=argument_spec,
- mutually_exclusive=mutually_exclusive,
- supports_check_mode=True)
-
- parents = module.params['parents'] or list()
-
- match = module.params['match']
- replace = module.params['replace']
-
- warnings = list()
- check_args(module, warnings)
-
- result = dict(changed=False, saved=False, warnings=warnings)
-
- candidate = get_candidate(module)
-
- if module.params['backup']:
- if not module.check_mode:
- result['__backup__'] = get_config(module)
- commands = list()
-
- if any((module.params['lines'], module.params['src'])):
- if match != 'none':
- config = get_running_config(module)
- if parents:
- contents = get_sublevel_config(config, module)
- config = NetworkConfig(contents=contents, indent=1)
- else:
- config = NetworkConfig(contents=config, indent=1)
- configobjs = candidate.difference(config, match=match, replace=replace)
- else:
- configobjs = candidate.items
-
- if configobjs:
- commands = dumps(configobjs, 'commands')
- if ((isinstance(module.params['lines'], list)) and
- (isinstance(module.params['lines'][0], dict)) and
- set(['prompt', 'answer']).issubset(module.params['lines'][0])):
-
- cmd = {'command': commands,
- 'prompt': module.params['lines'][0]['prompt'],
- 'answer': module.params['lines'][0]['answer']}
- commands = [module.jsonify(cmd)]
- else:
- commands = commands.split('\n')
-
- if module.params['before']:
- commands[:0] = module.params['before']
-
- if module.params['after']:
- commands.extend(module.params['after'])
-
- if not module.check_mode and module.params['update'] == 'merge':
- load_config(module, commands)
-
- result['changed'] = True
- result['commands'] = commands
- result['updates'] = commands
-
- if module.params['save']:
- result['changed'] = True
- if not module.check_mode:
- cmd = {'command': 'copy running-config startup-config',
- 'prompt': r'\[confirm yes/no\]:\s?$', 'answer': 'yes'}
- run_commands(module, [cmd])
- result['saved'] = True
- else:
-            module.warn('Skipping command `copy running-config startup-config` '
-                        'due to check_mode. Configuration not copied to '
- 'non-volatile storage')
-
- module.exit_json(**result)
-
-
-if __name__ == '__main__':
- main()
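The candidate/running diff above is computed by netcommon's NetworkConfig. A minimal standalone sketch, using a made-up running configuration, of how only the missing lines end up in the command list that would be pushed:

    # Sketch only: candidate vs. running diff as performed by os9_config.
    from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.config import NetworkConfig, dumps

    running = NetworkConfig(indent=1, contents='\n'.join([
        'ip access-list extended test',
        ' 10 permit ip host 1.1.1.1 any log',
    ]))

    candidate = NetworkConfig(indent=1)
    candidate.add(['10 permit ip host 1.1.1.1 any log',
                   '20 permit ip host 2.2.2.2 any log'],
                  parents=['ip access-list extended test'])

    configobjs = candidate.difference(running, match='line', replace='line')
    print(dumps(configobjs, 'commands').split('\n'))
    # only the missing entry (the '20 permit ...' line and its parent) remains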
diff --git a/ansible_collections/dellemc/os9/plugins/modules/os9_facts.py b/ansible_collections/dellemc/os9/plugins/modules/os9_facts.py
deleted file mode 100644
index fe04afc04..000000000
--- a/ansible_collections/dellemc/os9/plugins/modules/os9_facts.py
+++ /dev/null
@@ -1,578 +0,0 @@
-#!/usr/bin/python
-#
-# (c) 2020 Peter Sprygada, <psprygada@ansible.com>
-# Copyright (c) 2020 Dell Inc.
-# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
-
-from __future__ import absolute_import, division, print_function
-__metaclass__ = type
-
-
-ANSIBLE_METADATA = {'metadata_version': '1.1',
- 'status': ['preview'],
- 'supported_by': 'community'}
-
-
-DOCUMENTATION = """
----
-module: os9_facts
-author: "Dhivya P (@dhivyap)"
-short_description: Collect facts from remote devices running Dell EMC Networking OS9
-description:
- - Collects a base set of device facts from a remote device that
- is running OS9. This module prepends all of the
- base network fact keys with C(ansible_net_<fact>). The facts
- module will always collect a base set of facts from the device
- and can enable or disable collection of additional facts.
-extends_documentation_fragment: dellemc.os9.os9
-options:
- gather_subset:
- description:
- - When supplied, this argument will restrict the facts collected
- to a given subset. Possible values for this argument include
-        all, hardware, config, and interfaces. A list of values can be
-        specified to include a larger subset. Values can also be used
-        with an initial C(!) to specify that a specific subset should
-        not be collected.
- type: list
- default: [ '!config' ]
-notes:
- - This module requires OS9 version 9.10.0.1P13 or above.
- - This module requires an increase of the SSH connection rate limit.
- Use the following command I(ip ssh connection-rate-limit 60)
-    to configure the same. This can also be done with the M(os9_config) module.
-"""
-
-EXAMPLES = """
-# Collect all facts from the device
-- os9_facts:
- gather_subset: all
-# Collect only the config and default facts
-- os9_facts:
- gather_subset:
- - config
-# Do not collect hardware facts
-- os9_facts:
- gather_subset:
- - "!hardware"
-"""
-
-RETURN = """
-ansible_net_gather_subset:
- description: The list of fact subsets collected from the device
- returned: always
- type: list
-# default
-ansible_net_model:
- description: The model name returned from the device
- returned: always
- type: str
-ansible_net_serialnum:
- description: The serial number of the remote device
- returned: always
- type: str
-ansible_net_servicetags:
- description: The servicetags from remote device
- returned: always
- type: list
-ansible_net_version:
- description: The operating system version running on the remote device
- returned: always
- type: str
-ansible_net_hostname:
- description: The configured hostname of the device
- returned: always
- type: str
-ansible_net_image:
- description: The image file the device is running
- returned: always
- type: str
-# hardware
-ansible_net_filesystems:
- description: All file system names available on the device
- returned: when hardware is configured
- type: list
-ansible_net_memfree_mb:
-  description: The available free memory on the remote device in MB
-  returned: when hardware is configured
-  type: int
-ansible_net_memtotal_mb:
-  description: The total memory on the remote device in MB
- returned: when hardware is configured
- type: int
-# config
-ansible_net_config:
- description: The current active config from the device
- returned: when config is configured
- type: str
-# interfaces
-ansible_net_all_ipv4_addresses:
- description: All IPv4 addresses configured on the device
- returned: when interfaces is configured
- type: list
-ansible_net_all_ipv6_addresses:
- description: All IPv6 addresses configured on the device
- returned: when interfaces is configured
- type: list
-ansible_net_interfaces:
- description: A hash of all interfaces running on the system
- returned: when interfaces is configured
- type: dict
-ansible_net_neighbors:
- description: The list of LLDP neighbors from the remote device
- returned: when interfaces is configured
- type: dict
-"""
-import re
-try:
- from itertools import izip
-except ImportError:
- izip = zip
-
-from ansible.module_utils.basic import AnsibleModule
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import run_commands
-from ansible_collections.dellemc.os9.plugins.module_utils.network.os9 import os9_argument_spec, check_args
-from ansible.module_utils.six import iteritems
-
-
-class FactsBase(object):
-
- COMMANDS = list()
-
- def __init__(self, module):
- self.module = module
- self.facts = dict()
- self.responses = None
-
- def populate(self):
- self.responses = run_commands(self.module, self.COMMANDS, check_rc=False)
-
- def run(self, cmd):
- return run_commands(self.module, cmd, check_rc=False)
-
-
-class Default(FactsBase):
-
- COMMANDS = [
- 'show version',
- 'show inventory',
- 'show running-config | grep hostname'
- ]
-
- def populate(self):
- super(Default, self).populate()
- data = self.responses[0]
- self.facts['version'] = self.parse_version(data)
- self.facts['model'] = self.parse_model(data)
- self.facts['image'] = self.parse_image(data)
-
- data = self.responses[1]
- self.facts['serialnum'] = self.parse_serialnum(data)
- self.facts['servicetags'] = self.parse_servicetags(data)
-
- data = self.responses[2]
- self.facts['hostname'] = self.parse_hostname(data)
-
- def parse_version(self, data):
- match = re.search(r'Software Version:\s*(.+)', data)
- if match:
- return match.group(1)
-
- def parse_hostname(self, data):
- match = re.search(r'^hostname (.+)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_model(self, data):
- match = re.search(r'^System Type:\s*(.+)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_image(self, data):
- match = re.search(r'image file is "(.+)"', data)
- if match:
- return match.group(1)
-
- def parse_serialnum(self, data):
- for line in data.split('\n'):
- if line.startswith('*'):
- match = re.search(
- r'\s+(\S+)\s+(\S+)\s+(\S+)\s+(\S+)', line, re.M)
- if match:
- return match.group(3)
-
- def parse_servicetags(self, data):
- tags = []
- for line in data.split('\n'):
- match = re.match(r'\**\s+[0-9]+\s+.*(\b[A-Z0-9]{7}\b)', line)
- if match:
- tags.append(match.group(1))
- return tags
-
-
-class Hardware(FactsBase):
-
- COMMANDS = [
- 'show file-systems',
- 'show memory | except Processor'
- ]
-
- def populate(self):
- super(Hardware, self).populate()
- data = self.responses[0]
- self.facts['filesystems'] = self.parse_filesystems(data)
-
- data = self.responses[1]
- match = re.findall(r'\s(\d+)\s', data)
- if match:
- self.facts['memtotal_mb'] = int(match[0]) // 1024
- self.facts['memfree_mb'] = int(match[2]) // 1024
-
- def parse_filesystems(self, data):
- return re.findall(r'\s(\S+):$', data, re.M)
-
-
-class Config(FactsBase):
-
- COMMANDS = ['show running-config']
-
- def populate(self):
- super(Config, self).populate()
- self.facts['config'] = self.responses[0]
-
-
-class Interfaces(FactsBase):
-
- COMMANDS = [
- 'show interfaces',
- 'show ipv6 interface',
- 'show lldp neighbors detail',
- 'show inventory'
- ]
-
- def populate(self):
- super(Interfaces, self).populate()
- self.facts['all_ipv4_addresses'] = list()
- self.facts['all_ipv6_addresses'] = list()
-
- data = self.responses[0]
- interfaces = self.parse_interfaces(data)
-
- for key in list(interfaces.keys()):
- if "ManagementEthernet" in key:
- temp_parsed = interfaces[key]
- del interfaces[key]
- interfaces.update(self.parse_mgmt_interfaces(temp_parsed))
-
- for key in list(interfaces.keys()):
- if "Vlan" in key:
- temp_parsed = interfaces[key]
- del interfaces[key]
- interfaces.update(self.parse_vlan_interfaces(temp_parsed))
-
- self.facts['interfaces'] = self.populate_interfaces(interfaces)
-
- data = self.responses[1]
- if len(data) > 0:
- data = self.parse_ipv6_interfaces(data)
- self.populate_ipv6_interfaces(data)
-
- data = self.responses[3]
- if 'LLDP' in self.get_protocol_list(data):
- neighbors = self.responses[2]
- self.facts['neighbors'] = self.parse_neighbors(neighbors)
-
- def get_protocol_list(self, data):
- start = False
- protocol_list = list()
- for line in data.split('\n'):
- match = re.search(r'Software Protocol Configured\s*', line)
- if match:
- start = True
- continue
- if start:
- line = line.strip()
- if line.isalnum():
- protocol_list.append(line)
- return protocol_list
-
- def populate_interfaces(self, interfaces):
- facts = dict()
- for key, value in interfaces.items():
- intf = dict()
- intf['description'] = self.parse_description(value)
- intf['macaddress'] = self.parse_macaddress(value)
- ipv4 = self.parse_ipv4(value)
- intf['ipv4'] = self.parse_ipv4(value)
- if ipv4:
- self.add_ip_address(ipv4['address'], 'ipv4')
-
- intf['mtu'] = self.parse_mtu(value)
- intf['bandwidth'] = self.parse_bandwidth(value)
- intf['mediatype'] = self.parse_mediatype(value)
- intf['duplex'] = self.parse_duplex(value)
- intf['lineprotocol'] = self.parse_lineprotocol(value)
- intf['operstatus'] = self.parse_operstatus(value)
- intf['type'] = self.parse_type(value)
-
- facts[key] = intf
- return facts
-
- def populate_ipv6_interfaces(self, data):
- for key, value in data.items():
- if key in self.facts['interfaces']:
- self.facts['interfaces'][key]['ipv6'] = list()
- addresses = re.findall(r'\s+(.+), subnet', value, re.M)
- subnets = re.findall(r', subnet is (\S+)', value, re.M)
- for addr, subnet in izip(addresses, subnets):
- ipv6 = dict(address=addr.strip(), subnet=subnet.strip())
- self.add_ip_address(addr.strip(), 'ipv6')
- self.facts['interfaces'][key]['ipv6'].append(ipv6)
-
- def add_ip_address(self, address, family):
- if family == 'ipv4':
- self.facts['all_ipv4_addresses'].append(address)
- else:
- self.facts['all_ipv6_addresses'].append(address)
-
- def parse_neighbors(self, neighbors):
- facts = dict()
-
- for entry in neighbors.split(
- '========================================================================'):
- if entry == '':
- continue
-
- intf = self.parse_lldp_intf(entry)
- if intf not in facts:
- facts[intf] = list()
- fact = dict()
- fact['host'] = self.parse_lldp_host(entry)
- fact['port'] = self.parse_lldp_port(entry)
- facts[intf].append(fact)
- return facts
-
- def parse_interfaces(self, data):
- parsed = dict()
- newline_count = 0
- interface_start = True
-
- for line in data.split('\n'):
- if interface_start:
- newline_count = 0
- if len(line) == 0:
- newline_count += 1
- if newline_count == 2:
- interface_start = True
- else:
- match = re.match(r'^(\S+) (\S+)', line)
- if match and interface_start:
- interface_start = False
- key = match.group(0)
- parsed[key] = line
- else:
- parsed[key] += '\n%s' % line
- return parsed
-
- def parse_mgmt_interfaces(self, data):
- parsed = dict()
- interface_start = True
- for line in data.split('\n'):
- match = re.match(r'^(\S+) (\S+)', line)
- if "Time since" in line:
- interface_start = True
- parsed[key] += '\n%s' % line
- elif match and interface_start:
- interface_start = False
- key = match.group(0)
- parsed[key] = line
- else:
- parsed[key] += '\n%s' % line
- return parsed
-
- def parse_vlan_interfaces(self, data):
- parsed = dict()
- interface_start = True
- line_before_end = False
- for line in data.split('\n'):
- match = re.match(r'^(\S+) (\S+)', line)
- match_endline = re.match(r'^\s*\d+ packets, \d+ bytes$', line)
-
- if "Output Statistics" in line:
- line_before_end = True
- parsed[key] += '\n%s' % line
- elif match_endline and line_before_end:
- line_before_end = False
- interface_start = True
- parsed[key] += '\n%s' % line
- elif match and interface_start:
- interface_start = False
- key = match.group(0)
- parsed[key] = line
- else:
- parsed[key] += '\n%s' % line
- return parsed
-
- def parse_ipv6_interfaces(self, data):
- parsed = dict()
- for line in data.split('\n'):
- if len(line) == 0:
- continue
- if line[0] == ' ':
- parsed[key] += '\n%s' % line
- else:
- match = re.match(r'^(\S+) (\S+)', line)
- if match:
- key = match.group(0)
- parsed[key] = line
- return parsed
-
- def parse_description(self, data):
- match = re.search(r'Description: (.+)$', data, re.M)
- if match:
- return match.group(1)
-
- def parse_macaddress(self, data):
- match = re.search(r'address is (\S+)', data)
- if match:
- if match.group(1) != "not":
- return match.group(1)
-
- def parse_ipv4(self, data):
- match = re.search(r'Internet address is (\S+)', data)
- if match:
- if match.group(1) != "not":
- addr, masklen = match.group(1).split('/')
- return dict(address=addr, masklen=int(masklen))
-
- def parse_mtu(self, data):
- match = re.search(r'MTU (\d+)', data)
- if match:
- return int(match.group(1))
-
- def parse_bandwidth(self, data):
- match = re.search(r'LineSpeed (\d+)', data)
- if match:
- return int(match.group(1))
-
- def parse_duplex(self, data):
- match = re.search(r'(\w+) duplex', data, re.M)
- if match:
- return match.group(1)
-
- def parse_mediatype(self, data):
- media = re.search(r'(.+) media present, (.+)', data, re.M)
- if media:
- match = re.search(r'type is (.+)$', media.group(0), re.M)
- return match.group(1)
-
- def parse_type(self, data):
- match = re.search(r'Hardware is (.+),', data, re.M)
- if match:
- return match.group(1)
-
- def parse_lineprotocol(self, data):
- match = re.search(r'line protocol is (\w+[ ]?\w*)\(?.*\)?$', data, re.M)
- if match:
- return match.group(1)
-
- def parse_operstatus(self, data):
- match = re.search(r'^(?:.+) is (.+),', data, re.M)
- if match:
- return match.group(1)
-
- def parse_lldp_intf(self, data):
- match = re.search(r'^\sLocal Interface (\S+\s\S+)', data, re.M)
- if match:
- return match.group(1)
-
- def parse_lldp_host(self, data):
- match = re.search(r'Remote System Name: (.+)$', data, re.M)
- if match:
- return match.group(1)
-
- def parse_lldp_port(self, data):
- match = re.search(r'Remote Port ID: (.+)$', data, re.M)
- if match:
- return match.group(1)
-
-
-FACT_SUBSETS = dict(
- default=Default,
- hardware=Hardware,
- interfaces=Interfaces,
- config=Config,
-)
-
-VALID_SUBSETS = frozenset(FACT_SUBSETS.keys())
-
-
-def main():
- """main entry point for module execution
- """
- argument_spec = dict(
- gather_subset=dict(default=['!config'], type='list')
- )
-
- argument_spec.update(os9_argument_spec)
-
- module = AnsibleModule(argument_spec=argument_spec,
- supports_check_mode=True)
-
- gather_subset = module.params['gather_subset']
-
- runable_subsets = set()
- exclude_subsets = set()
-
- for subset in gather_subset:
- if subset == 'all':
- runable_subsets.update(VALID_SUBSETS)
- continue
-
- if subset.startswith('!'):
- subset = subset[1:]
- if subset == 'all':
- exclude_subsets.update(VALID_SUBSETS)
- continue
- exclude = True
- else:
- exclude = False
-
- if subset not in VALID_SUBSETS:
- module.fail_json(msg='Bad subset')
-
- if exclude:
- exclude_subsets.add(subset)
- else:
- runable_subsets.add(subset)
-
- if not runable_subsets:
- runable_subsets.update(VALID_SUBSETS)
-
- runable_subsets.difference_update(exclude_subsets)
- runable_subsets.add('default')
-
- facts = dict()
- facts['gather_subset'] = list(runable_subsets)
-
- instances = list()
- for key in runable_subsets:
- instances.append(FACT_SUBSETS[key](module))
-
- for inst in instances:
- inst.populate()
- facts.update(inst.facts)
-
- ansible_facts = dict()
- for key, value in iteritems(facts):
- key = 'ansible_net_%s' % key
- ansible_facts[key] = value
-
- warnings = list()
- check_args(module, warnings)
-
- module.exit_json(ansible_facts=ansible_facts, warnings=warnings)
-
-
-if __name__ == '__main__':
- main()
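The fact parsers above are plain regular expressions applied to show-command output. A small standalone sketch with made-up "show version" text illustrates the kind of extraction performed:

    # Sketch only: regex-based fact extraction as done by the Default facts class.
    import re

    SHOW_VERSION = """
    Dell EMC Networking OS9
    Software Version: 9.14(0.0)
    System Type: S4048-ON
    """

    facts = {}
    match = re.search(r'Software Version:\s*(.+)', SHOW_VERSION)
    if match:
        facts['version'] = match.group(1)
    match = re.search(r'System Type:\s*(.+)', SHOW_VERSION)
    if match:
        facts['model'] = match.group(1)
    print(facts)   # {'version': '9.14(0.0)', 'model': 'S4048-ON'}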
diff --git a/ansible_collections/dellemc/os9/plugins/terminal/os9.py b/ansible_collections/dellemc/os9/plugins/terminal/os9.py
deleted file mode 100644
index e0052fc46..000000000
--- a/ansible_collections/dellemc/os9/plugins/terminal/os9.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# Copyright (c) 2020 Dell Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-#
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import re
-import json
-
-from ansible.module_utils._text import to_text, to_bytes
-from ansible.plugins.terminal import TerminalBase
-from ansible.errors import AnsibleConnectionFailure
-
-
-class TerminalModule(TerminalBase):
-
- terminal_stdout_re = [
- re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
- re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
- ]
-
- terminal_stderr_re = [
- re.compile(br"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*\n"),
- re.compile(br"% ?Bad secret"),
- re.compile(br"invalid input", re.I),
- re.compile(br"(?:incomplete|ambiguous) command", re.I),
- re.compile(br"connection timed out", re.I),
- re.compile(br"'[^']' +returned error code: ?\d+"),
- ]
-
- terminal_initial_prompt = br"\[y/n\]:"
-
- terminal_initial_answer = b"y"
-
- def on_open_shell(self):
- try:
- self._exec_cli_command(b'terminal length 0')
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to set terminal parameters')
-
- def on_become(self, passwd=None):
- if self._get_prompt().endswith(b'#'):
- return
-
- cmd = {u'command': u'enable'}
- if passwd:
- cmd[u'prompt'] = to_text(r"[\r\n]?password: $", errors='surrogate_or_strict')
- cmd[u'answer'] = passwd
-
- try:
- self._exec_cli_command(to_bytes(json.dumps(cmd), errors='surrogate_or_strict'))
- except AnsibleConnectionFailure:
- raise AnsibleConnectionFailure('unable to elevate privilege to enable mode')
-
- def on_unbecome(self):
- prompt = self._get_prompt()
- if prompt is None:
- # if prompt is None most likely the terminal is hung up at a prompt
- return
-
- if prompt.strip().endswith(b')#'):
- self._exec_cli_command(b'end')
- self._exec_cli_command(b'disable')
-
- elif prompt.endswith(b'#'):
- self._exec_cli_command(b'disable')
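The prompt and error patterns above are byte regexes evaluated against terminal output. A quick standalone sketch checking two of them against made-up prompt and error strings:

    # Sketch only: exercising the terminal prompt/error patterns above.
    import re

    stdout_re = re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$")
    stderr_re = re.compile(br"% ?Error: (?:(?!\bdoes not exist\b)(?!\balready exists\b)(?!\bHost not found\b)(?!\bnot active\b).)*\n")

    print(bool(stdout_re.search(b"OS9#")))                      # True: enable-mode prompt
    print(bool(stdout_re.search(b"OS9(conf)#")))                # True: config-mode prompt
    print(bool(stderr_re.search(b"% Error: Invalid input\n")))  # True: treated as an error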
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE b/ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/README.md b/ansible_collections/dellemc/os9/roles/os9_aaa/README.md
deleted file mode 100644
index 849950900..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/README.md
+++ /dev/null
@@ -1,331 +0,0 @@
-AAA role
-========
-
-This role facilitates the configuration of authentication, authorization, and accounting (AAA), and supports the configuration of RADIUS and TACACS servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The AAA role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_aaa keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``radius_server`` | dictionary | Configures the RADIUS server (see ``radius_server.*``) | os9 |
-| ``radius_server.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os9 |
-| ``radius_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *radius_server.key* is 7 or 0 | os9 |
-| ``radius_server.retransmit`` | integer | Configures the number of retransmissions | os9 |
-| ``radius_server.timeout`` | integer | Configures the timeout for retransmissions | os9 |
-| ``radius_server.deadtime`` | integer | Configures the server dead time | os9 |
-| ``radius_server.group`` | dictionary | Configures the RADIUS servers group (see ``group.*``) | os9 |
-| ``group.name`` | string (required) | Configures the group name of the RADIUS servers | os9 |
-| ``group.host`` | dictionary | Configures the RADIUS server host in the group (see ``host.*``) | os9 |
-| ``host.ip`` | string | Configures the RADIUS server host address in the group | os9 |
-| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key | os9 |
-| ``host.key_string`` | string: 7,0 | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 |
-| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 |
-| ``host.timeout`` | integer | Configures the timeout for retransmissions | os9 |
-| ``host.state`` | string: present,absent | Removes the host from group of RADIUS server if set to absent | os9 |
-| ``group.vrf`` | dictionary | Configures the VRF for RADIUS servers in the group (see ``vrf.*``) | os9 |
-| ``vrf.vrf_name`` | string (required) | Configures the name of VRF for the RADIUS server group | os9 |
-| ``vrf.source_intf`` | integer | Configures the source interface for outgoing packets from servers in the group | os9 |
-| ``vrf.state`` | string: present,absent | Removes the VRF from group of RADIUS servers if set to absent | os9 |
-| ``group.state`` | string: present,absent | Removes the RADIUS server group if set to absent | os9 |
-| ``radius_server.host`` | dictionary | Configures the RADIUS server host (see ``host.*``) | os9 |
-| ``host.ip`` | string | Configures the RADIUS server host address | os9 |
-| ``host.key`` | string (required); 0,7,LINE | Configures the authentication key | os9 |
-| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 |
-| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 |
-| ``host.timeout`` | integer | Configures timeout for retransmissions | os9 |
-| ``host.state`` | string: present,absent | Removes the RADIUS server host if set to absent | os9 |
-| ``auth.key`` | string (required); 0,7,LINE | Configures the authentication key | os9 |
-| ``tacacs_server`` | dictionary | Configures the TACACS server (see ``tacacs_server.*``)| os9 |
-| ``tacacs_server.key`` | string (required): 0,7,LINE | Configures the authentication key for TACACS server | os9 |
-| ``tacacs_server.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *tacacs_server.key* is 7 or 0 | os9 |
-| ``tacacs_server.group`` | dictionary | Configures the group of TACACS servers (see ``group.*``) | os9 |
-| ``group.name`` | string (required) | Configures the group name of the TACACS servers | os9 |
-| ``group.host`` | dictionary | Configures the TACACS server host in the group (see ``host.*``) | os9 |
-| ``host.ip`` | string | Configures the TACACS server host address in the group | os9 |
-| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key of the TACACS server host | os9 |
-| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 |
-| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 |
-| ``host.timeout`` | integer | Configures timeout for retransmissions | os9 |
-| ``host.state`` | string: present,absent | Removes the host from group of TACACS server if set to absent | os9 |
-| ``group.vrf`` | dictionary | Configures VRF for TACACS servers in the group (see ``vrf.*``) | os9 |
-| ``vrf.vrf_name`` | string (required) | Configures the name of VRF for TACACS server group | os9 |
-| ``vrf.source_intf`` | integer | Configures source interface for outgoing packets from servers in the group | os9 |
-| ``vrf.state`` | string: present,absent | Removes the VRF from group of TACACS server if set to absent | os9 |
-| ``group.state`` | string: present,absent | Removes the TACACS server group if set to absent | os9 |
-| ``tacacs_server.host`` | dictionary | Configures the TACACS server host (see ``host.*``) | os9 |
-| ``host.ip`` | string | Configures the TACACS server host address | os9 |
-| ``host.key`` | string (required): 0,7,LINE | Configures the authentication key | os9 |
-| ``host.key_string`` | string | Configures the user key string; variable takes the hidden user key string if value is 7; variable takes the unencrypted user key (clear-text) if value is 0; variable supported only if *host.key* is 7 or 0 | os9 |
-| ``host.retransmit`` | integer | Configures the number of retransmissions | os9 |
-| ``host.auth_port`` | integer | Configures the authentication port (0 to 65535) | os9 |
-| ``host.timeout`` | integer | Configures the timeout for retransmissions | os9 |
-| ``host.state`` | string: present,absent | Removes the TACACS server host if set to absent | os9 |
-| ``aaa_accounting`` | dictionary | Configures accounting parameters (see ``aaa_accounting.*``) | os9 |
-| ``aaa_accounting.commands`` | list | Configures accounting for EXEC (shell) and config commands (see ``commands.*``) | os9 |
-| ``commands.enable_level`` | integer | Configures enable level for accounting of commands | os9 |
-| ``commands.role_name`` | string | Configures user role for accounting of commands; variable is mutually exclusive with ``enable_level`` | os9 |
-| ``commands.accounting_list_name`` | string | Configures named accounting list for commands | os9 |
-| ``commands.no_accounting`` | boolean | Configures no accounting of commands | os9 |
-| ``commands.record_option`` | string: start-stop,stop-only,wait-start | Configures options to record data | os9 |
-| ``commands.state`` | string: present,absent | Removes the named accounting list for the commands if set to absent | os9 |
-| ``aaa_accounting.exec`` | list | Configures accounting for EXEC (shell) commands (see ``exec.*``) | os9 |
-| ``exec.accounting_list_name`` | string | Configures named accounting list for EXEC (shell) commands | os9 |
-| ``exec.no_accounting`` | boolean | Configures no accounting of EXEC (shell) commands | os9 |
-| ``exec.record_option`` | string: start-stop,stop-only,wait-start | Configures options to record data | os9 |
-| ``exec.state`` | string: present,absent | Removes the named accounting list for the EXEC (shell) commands if set to absent | os9 |
-| ``aaa_accounting.suppress`` | boolean | Suppresses accounting for users with NULL username | os9|
-| ``aaa_accounting.dot1x`` | string: none,start-stop,stop-only,wait-start | Configures accounting for dot1x events | os9 |
-| ``aaa_accounting.rest`` | string:none,start-stop,stop-only,wait-start | Configures accounting for REST interface events | os9 |
-| ``aaa_authorization`` | dictionary | Configures authorization parameters (see ``aaa_authorization.*``) | os9 |
-| ``aaa_authorization.commands`` | list | Configures authorization for EXEC (shell) and config commands (see ``commands.*``)| os9 |
-| ``commands.enable_level`` | integer | Configures enable level for authorization of commands | os9 |
-| ``commands.role_name`` | string | Configures user role for authorization of commands; mutually exclusive with ``enable_level`` | os9 |
-| ``commands.authorization_list_name`` | string | Configures named authorization list for commands | os9 |
-| ``commands.authorization_method`` | string: none | Configures no authorization of commands | os9 |
-| ``commands.use_data`` | string: local,tacacs+ | Configures data used for authorization | os9 |
-| ``commands.state`` | string: present,absent | Removes the named authorization list for the commands if set to absent | os9 |
-| ``aaa_authorization.config_commands`` | boolean | Configures authorization for configuration mode commands | os9 |
-| ``aaa_authorization.role_only`` | boolean | Configures validation of authentication mode for user role | os9 |
-| ``aaa_authorization.exec`` | list | Configures authorization for EXEC (shell) commands (see ``exec.*``) | os9 |
-| ``exec.authorization_list_name`` | string | Configures named authorization list for EXEC (shell) commands | os9 |
-| ``exec.authorization_method`` | string: none | Configures no authorization of EXEC (shell) commands | os9 |
-| ``exec.use_data`` | string: local,tacacs+ | Configures data used for authorization | os9 |
-| ``exec.state`` | string: present,absent | Removes the named authorization list for the EXEC (shell) commands if set to absent | os9 |
-| ``aaa_authorization.network`` | string: none,radius,ias | Configures authorization for network events | os9 |
-| ``aaa_authentication`` | dictionary | Configures authentication parameters (see ``aaa_authentication.*``) | os9 |
-| ``aaa_radius`` | dictionary | Configures AAA for RADIUS group of servers (see ``aaa_radius.*``) | os9 |
-| ``aaa_radius.group`` | string | Configures name of the RADIUS group of servers for AAA | os9 |
-| ``aaa_radius.auth_method`` | string: pap,mschapv2 | Configures authentication method of RADIUS group of servers for AAA | os9 |
-| ``aaa_tacacs`` | dictionary | Configures AAA for TACACS group of servers (see ``aaa_tacacs.*``) | os9 |
-| ``aaa_tacacs.group`` | string | Configures name of the TACACS group of servers for AAA | os9 |
-| ``aaa_authentication.auth_list`` | list | Configures named authentication list for hosts (see ``auth_list.*``) | os9 |
-| ``auth_list.name`` | string | Configures named authentication list | os9 |
-| ``auth_list.login_or_enable`` | string: enable,login | Configures authentication list for login or enable | os9 |
-| ``auth_list.server`` | string: radius,tacacs+ | Configures AAA to use this list of all server hosts | os9 |
-| ``auth_list.use_password`` | string: line,local,enable,none | Configures password to use for authentication | os9 |
-| ``auth_list.state`` | string: present,absent | Removes the named authentication list if set to absent | os9 |
-| ``aaa_authentication.dot1x`` | string: none,radius,ias | Configures authentication for dot1x events | os9 |
-| ``line_terminal`` | dictionary | Configures the terminal line (see ``line_terminal.*``) | os9 |
-| ``line_terminal.<terminal>`` | dictionary | Configures the primary or virtual terminal line (see ``<terminal>.*``); value can be console <line_number>, vty <line_number> | os9 |
-| ``<terminal>.authorization`` | dictionary | Configures authorization parameters of line terminal (see ``authorization.*``) | os9 |
-| ``authorization.commands`` | list | Configures authorization for EXEC (shell) and config commands (see ``commands.*``) | os9 |
-| ``commands.enable_level`` | integer | Configures enable level for authorization of commands at line terminal | os9 |
-| ``commands.role_name`` | string | Configures user role for authorization of commands at line terminal; mutually exclusive with `enable_level` | os9 |
-| ``commands.authorization_list_name`` | string | Configures named authorization list for commands | os9 |
-| ``commands.state`` | string: present,absent | Removes the authorization of commands from line terminal if set to absent | os9 |
-| ``authorization.exec`` | list | Configures authorization for EXEC (shell) commands at line terminal (see ``exec.*``) | os9 |
-| ``exec.authorization_list_name`` | string | Configures named authorization list for EXEC (shell) commands | os9 |
-| ``exec.state`` | string: present,absent | Removes the authorization of EXEC (shell) from line terminal if set to absent | os9 |
-| ``<terminal>.accounting`` | dictionary | Configures accounting parameters of line terminal (see ``accounting.*``) | os9 |
-| ``accounting.commands`` | list | Configures accounting for EXEC (shell) and config commands (see ``commands.*``) | os9 |
-| ``commands.enable_level`` | integer | Configures enable level for accounting of commands at line terminal | os9|
-| ``commands.role_name`` | string | Configures user role for accounting of commands at line terminal; mutually exclusive with ``enable_level`` | os9 |
-| ``commands.accounting_list_name`` | string | Configures named accounting list for commands | os9 |
-| ``commands.state`` | string: present,absent | Removes the accounting of commands from line terminal if set to absent | os9|
-| ``accounting.exec`` | list | Configures accounting for EXEC (shell) commands at line terminal (see ``exec.*``) | os9 |
-| ``exec.accounting_list_name`` | string | Configures named accounting list for EXEC (shell) commands | os9 |
-| ``exec.state`` | string: present,absent | Removes the accounting of EXEC (shell) from line terminal if set to absent | os9 |
-| ``<terminal>.authentication`` | dictionary | Configures authentication parameters of line terminal (see ``authentication.*``) | os9 |
-| ``authentication.enable`` | string | Configures the authentication list for privilege-level password authentication | os9 |
-| ``authentication.login`` | string | Configures the authentication list for password checking | os9 |
-| ``client.ip`` | string | Configures the client IP for the radius server | os9 |
-| ``client.key`` | string (required): 0,7,LINE | Configures the authentication key for the RADIUS server | os9 |
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
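The role-variable conventions described above can be exercised with a very small variable block. A minimal sketch (hypothetical host address and key values) that negates a single RADIUS server host by setting its state to absent, while leaving the rest of the AAA configuration untouched:

    os9_aaa:
      radius_server:
        host:
          - ip: 10.0.0.10
            key: 0
            key_string: myradiuskey
            state: absent
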
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
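Connection variables can also be supplied inline in the inventory rather than in *host_vars* or *group_vars*. A minimal sketch with hypothetical address and credentials:

    leaf1 ansible_host=10.0.0.32 ansible_network_os=dellemc.os9.os9 ansible_ssh_user=admin ansible_ssh_pass=admin ansible_become=yes
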
-Example playbook
-----------------
-
-This example uses the *os9_aaa* role to configure AAA for RADIUS and TACACS servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default it is set to false. The example writes a simple playbook that only references the *os9_aaa* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_aaa:
- radius_server:
- key: radius
- retransmit: 5
- timeout: 40
- deadtime: 2300
- group:
- - name: RADIUS
- host:
- - ip: 2001:4898:f0:f09b::1002
- key: 0
- key_string: aaaa
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- vrf:
- vrf_name: test
- source_intf: fortyGigE 1/2
- state: absent
- state: present
- host:
- - ip: 2001:4898:f0:f09b::1002
- key: xxx
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- tacacs_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa
- group:
- - name: TACACS
- host:
- - ip: 2001:4898:f0:f09b::1000
- key: 0
- key_string: aaa
- retransmit: 6
- auth_port: 3
- timeout: 2
- state: present
- vrf:
- vrf_name: tes
- source_intf: fortyGigE 1/3
- state: present
- state: present
- host:
- - ip: 2001:4898:f0:f09b::1000
- key: 0
- key_string: aa
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- aaa_accounting:
- commands:
- - enable_level: 2
- accounting_list_name: aa
- record_option: start-stop
- state: present
- - role_name: netadmin
- accounting_list_name: aa
- no_accounting: none
- suppress: True
- exec:
- - accounting_list_name: aaa
- no_accounting: true
- state: present
- dot1x: none
- rest: none
- aaa_authorization:
- commands:
- - enable_level: 2
- authorization_list_name: aa
- use_data: local
- state: present
- - role_name: netadmin
- authorization_list_name: aa
- authorization_method: none
- use_data: local
- config_commands: True
- role_only:
- exec:
- - authorization_list_name: aaa
- authorization_method: if-authenticated
- use_data: local
- state: present
- aaa_authentication:
- auth_list:
- - name: default
- login_or_enable: login
- server: radius
- use_password: local
- state: present
- - name: console
- server: tacacs+
- login_or_enable: login
- use_password: local
- aaa_radius:
- group: RADIUS
- auth_method: pap
- aaa_tacacs:
- group: TACACS
- line_terminal:
- vty 0:
- authorization:
- commands:
- - enable_level: 2
- authorization_list_name: aa
- state: present
- - role_name: netadmin
- authorization_list_name: aa
- state: present
- exec:
- - authorization_list_name: aa
- state: present
- accounting:
- commands:
- - enable_level: 2
- accounting_list_name: aa
- state: present
- - role_name: netadmin
- accounting_list_name: aa
- state: absent
- exec:
- accounting_list_name: aa
- state: present
- authentication:
- enable:
- login: console
-
-**Simple playbook to set up system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_aaa
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
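To additionally generate the configuration commands as a .part file under *build_dir*, enable `os9_cfg_generate` for the run. A minimal sketch, assuming the inventory and variables above:

    ansible-playbook -i hosts leaf.yaml -e "os9_cfg_generate=true"
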
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml
deleted file mode 100644
index 8fce00350..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/defaults/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# defaults file for dellemc.os9.os9_aaa
-attribute_type:
- mandatory: mandatory
- on_for_login_auth: on-for-login-auth
- include_in_access_req: include-in-access-req
- mac: "mac format"
- mac_ietf: "mac format ietf"
- mac_ietf_lower_case: "mac format ietf lower-case"
- mac_ietf_upper_case: "mac format ietf upper-case"
- mac_legacy: "mac format legacy"
- mac_legacy_lower_case: "mac format legacy lower-case"
- mac_legacy_upper_case: "mac format legacy upper-case"
- mac_unformatted: "mac format unformatted"
- mac_unformatted_lower_case: "mac format unformatted lower-case"
- mac_unformatted_upper_case: "mac format unformatted upper-case" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml
deleted file mode 100644
index ad771c4fa..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_aaa \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml
deleted file mode 100644
index 2f94f923f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
-    The os9_aaa role facilitates the configuration of Authentication, Authorization, and Accounting (AAA) attributes
-    on devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml
deleted file mode 100644
index 5ffba7b6b..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os9
-
- - name: "Generating AAA configuration for os9"
- template:
- src: os9_aaa.j2
- dest: "{{ build_dir }}/aaa9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning AAA configuration for os9"
- dellemc.os9.os9_config:
- src: os9_aaa.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2 b/ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2
deleted file mode 100644
index 0d4aa9f4a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/templates/os9_aaa.j2
+++ /dev/null
@@ -1,680 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure AAA commands for os9 Devices
-os9_aaa:
- tacacs_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa
- group:
- - name: TACACS
- host:
- - ip: 2001:4898:f0:f09b::1000
- key: 0
- key_string: aaa
- auth_port: 3
- timeout: 2
- state: present
- vrf:
- vrf_name: test
- source_intf: fortyGigE 1/2
- state: present
- state: present
- host:
- - ip: 2001:4898:f0:f09b::1000
- key: 0
- key_string: aaa
- auth_port: 3
- timeout: 2
- state: present
- radius_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fb
- retransmit: 5
- timeout: 10
- deadtime: 2000
- group:
- - name: Radius
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- key_string: aaa
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- vrf:
- vrf_name: test
- source_intf: fortyGigE 1/3
- state: present
- state: present
- host:
- - ip: 2001:4898:f0:f09b::1001
- key: 0
- key_string: aaa
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- aaa_accounting:
- commands:
- - enable_level: 2
- accounting_list_name: aa
- no_accounting: true
- record_option: start-stop
- state: present
- suppress: True
- exec:
- - accounting_list_name: aaa
- no_accounting: true
- state: present
- dot1x: none
- rest: none
- aaa_authorization:
- commands:
- - enable_level: 2
- authorization_list_name: aa
- use_data: local
- state: present
- - role_name: netadmin
- authorization_list_name: aa
- authorization_method: none
- use_data: local
- config_commands: True
- role_only: True
- exec:
- - authorization_list_name: aaa
- authorization_method: if-authenticated
- use_data: local
- state: present
- aaa_radius:
- group: RADIUS
- auth_method: pap
- aaa_tacacs:
- group: TACACS
- aaa_authentication:
- auth_list:
- - name: default
- login_or_enable: login
- server: tacacs+
- use_password: local
- state: present
- - name: console
- server: radius
- login_or_enable: login
- use_password: local
- line_terminal:
- vty 0:
- authorization:
- commands:
- - enable_level: 2
- authorization_list_name: aa
- state: present
- - enable_level: 2
- authorization_list_name: aa
- state: present
- exec:
- - authorization_list_name: aa
- state: present
- accounting:
- commands:
- - enable_level: 2
- accounting_list_name: aa
- state: present
- - enable_level: 2
- accounting_list_name: aa
- state: present
- exec:
- - accounting_list_name: aa
- state: present
- authentication:
- enable: aa
- login: console
-##################################################}
-{% if os9_aaa is defined and os9_aaa %}
-{% for key in os9_aaa.keys() %}
- {% set aaa_vars = os9_aaa[key] %}
- {% if key == "tacacs_server" %}
- {% set server = "tacacs-server" %}
- {% endif %}
- {% if key == "radius_server" %}
- {% set server = "radius-server" %}
- {% endif %}
- {% if server is defined and server %}
- {% if aaa_vars %}
- {% set item = aaa_vars %}
- {% if item.retransmit is defined %}
- {% if item.retransmit %}
-{{ server }} retransmit {{ item.retransmit }}
- {% else %}
-no {{ server }} retransmit
- {% endif %}
- {% endif %}
- {% if item.timeout is defined %}
- {% if item.timeout %}
-{{ server }} timeout {{ item.timeout }}
- {% else %}
-no {{ server }} timeout
- {% endif %}
- {% endif %}
- {% if item.deadtime is defined %}
- {% if item.deadtime %}
-{{ server }} deadtime {{ item.deadtime }}
- {% else %}
-no {{ server }} deadtime
- {% endif %}
- {% endif %}
- {% if item.key is defined %}
- {% if item.key == 0 or item.key == 7 %}
- {% if item.key_string is defined and item.key_string%}
-{{ server }} key {{ item.key }} {{ item.key_string }}
- {% endif %}
- {% elif item.key %}
-{{ server }} key {{ item.key }}
- {% else %}
-no {{ server }} key
- {% endif %}
- {% endif %}
- {% if item.host is defined and item.host %}
- {% for hostlist in item.host %}
- {% if hostlist.ip is defined and hostlist.ip %}
- {% if hostlist.state is defined and hostlist.state == "absent" %}
- {% if (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7) ) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) %}
-no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
-no {{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7) )%}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) %}
-no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }}
- {% else %}
-no {{ server }} host {{ hostlist.ip }}
- {% endif %}
- {% else %}
- {% if (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key== 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) %}
-{{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
-{{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server"%}
-{{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7))%}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) %}
-{{ server }} host {{ hostlist.ip }} key {{ hostlist.key }}
- {% else %}
-{{ server }} host {{ hostlist.ip }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if item.group is defined and item.group %}
- {% for groupitem in item.group %}
- {% if groupitem.name is defined and groupitem.name %}
- {% if groupitem.state is defined and groupitem.state == "absent" %}
-no {{ server }} group {{ groupitem.name }}
- {% else %}
-{{ server }} group {{ groupitem.name }}
- {% if groupitem.host is defined and groupitem.host %}
- {% for hostlist in groupitem.host %}
- {% if hostlist.ip is defined and hostlist.ip %}
- {% if hostlist.state is defined and hostlist.state == "absent" %}
- {% if (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) %}
- no {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- no {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- no {{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) %}
- no {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }}
- {% else %}
- no {{ server }} host {{ hostlist.ip }}
- {% endif %}
- {% else %}
- {% if (hostlist.key is defined and (hostlist.key== 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) and (hostlist.timeout is defined and hostlist.timeout) %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) and (hostlist.key_string is defined and hostlist.key_string) %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.key is defined and hostlist.key) and (hostlist.timeout is defined and hostlist.timeout) %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) and (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) and (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server" %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.timeout is defined and hostlist.timeout) %}
- {{ server }} host {{ hostlist.ip }} timeout {{ hostlist.timeout }}
- {% elif (hostlist.auth_port is defined and hostlist.auth_port) %}
- {% if server == "radius-server" %}{%set port = "auth-port" %}{%else %}{% set port = "port" %}{% endif %}
- {{ server }} host {{ hostlist.ip }} {{ port }} {{ hostlist.auth_port }}
- {% elif (hostlist.retransmit is defined and hostlist.retransmit) and server == "radius-server"%}
- {{ server }} host {{ hostlist.ip }} retransmit {{ hostlist.retransmit }}
- {% elif (hostlist.key is defined and (hostlist.key == 0 or hostlist.key == 7)) %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }} {{ hostlist.key_string }}
- {% elif (hostlist.key is defined and hostlist.key) %}
- {{ server }} host {{ hostlist.ip }} key {{ hostlist.key }}
- {% else %}
- {{ server }} host {{ hostlist.ip }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if groupitem.vrf is defined and groupitem.vrf %}
- {% if groupitem.vrf.vrf_name is defined and groupitem.vrf.vrf_name %}
- {% if groupitem.vrf.state is defined and groupitem.vrf.state == "absent" %}
- no {{ server }} vrf {{ groupitem.vrf.vrf_name }}
- {% else %}
- {% if groupitem.vrf.source_intf is defined and groupitem.vrf.source_intf %}
- {{ server }} vrf {{ groupitem.vrf.vrf_name }} source-interface {{ groupitem.vrf.source_intf }}
- {% else %}
- {{ server }} vrf {{ groupitem.vrf.vrf_name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
-
- {% if os9_aaa.aaa_accounting is defined and os9_aaa.aaa_accounting %}
- {% set aaa_accounting = os9_aaa.aaa_accounting %}
- {% if aaa_accounting.suppress is defined %}
- {% if aaa_accounting.suppress %}
-aaa accounting suppress null-username
- {% else %}
-no aaa accounting suppress null-username
- {% endif %}
- {% endif %}
- {% if aaa_accounting.dot1x is defined %}
- {% if aaa_accounting.dot1x == "none" %}
-aaa accounting dot1x default none
- {% elif aaa_accounting.dot1x %}
-aaa accounting dot1x default {{ aaa_accounting.dot1x }} tacacs+
- {% else %}
-no aaa accounting dot1x default
- {% endif %}
- {% endif %}
- {% if aaa_accounting.rest is defined %}
- {% if aaa_accounting.rest == "none" %}
-aaa accounting rest default none
- {% elif aaa_accounting.rest %}
-aaa accounting rest default {{ aaa_accounting.rest }} tacacs+
- {% else %}
-no aaa accounting rest default
- {% endif %}
- {% endif %}
- {% if aaa_accounting.exec is defined and aaa_accounting.exec %}
- {% for command in aaa_accounting.exec %}
- {% if command.accounting_list_name is defined and command.accounting_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa accounting exec {{ command.accounting_list_name }}
- {% else %}
- {% if command.record_option is defined and command.record_option %}
-aaa accounting exec {{ command.accounting_list_name }} {{ command.record_option }} tacacs+
- {% elif command.no_accounting is defined and command.no_accounting %}
-aaa accounting exec {{ command.accounting_list_name }} none
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if aaa_accounting.commands is defined and aaa_accounting.commands %}
- {% for command in aaa_accounting.commands %}
- {% if command.enable_level is defined and command.enable_level %}
- {% if command.accounting_list_name is defined and command.accounting_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa accounting commands {{ command.enable_level }} {{ command.accounting_list_name }}
- {% else %}
- {% if command.record_option is defined and command.record_option %}
-aaa accounting commands {{ command.enable_level }} {{ command.accounting_list_name }} {{ command.record_option }} tacacs+
- {% elif command.no_accounting is defined and command.no_accounting %}
-aaa accounting commands {{ command.enable_level }} {{ command.accounting_list_name }} none
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif command.role_name is defined and command.role_name %}
- {% if command.accounting_list_name is defined and command.accounting_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa accounting commands role {{ command.role_name }} {{ command.accounting_list_name }}
- {% else %}
- {% if command.record_option is defined and command.record_option %}
-aaa accounting commands role {{ command.role_name }} {{ command.accounting_list_name }} {{ command.record_option }} tacacs+
- {% elif command.no_accounting is defined and command.no_accounting %}
-aaa accounting commands role {{ command.role_name }} {{ command.accounting_list_name }} none
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if os9_aaa.aaa_authorization is defined and os9_aaa.aaa_authorization %}
- {% set aaa_authorization = os9_aaa.aaa_authorization %}
- {% if aaa_authorization.config_commands is defined %}
- {% if aaa_authorization.config_commands %}
-aaa authorization config-commands
- {% else %}
-no aaa authorization config-commands
- {% endif %}
- {% endif %}
- {% if aaa_authorization.role_only is defined %}
- {% if aaa_authorization.role_only %}
-aaa authorization role-only
- {% else %}
-no aaa authorization role-only
- {% endif %}
- {% endif %}
- {% if aaa_authorization.exec is defined and aaa_authorization.exec %}
- {% for command in aaa_authorization.exec %}
- {% if command.authorization_list_name is defined and command.authorization_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa authorization exec {{ command.authorization_list_name }}
- {% else %}
- {% if command.use_data is defined and command.use_data %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization exec {{ command.authorization_list_name }} {{ command.use_data }} {{ command.authorization_method }}
- {% else %}
-aaa authorization exec {{ command.authorization_list_name }} {{ command.use_data }}
- {% endif %}
- {% else %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization exec {{ command.authorization_list_name }} {{ command.authorization_method }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if aaa_authorization.commands is defined and aaa_authorization.commands %}
- {% for command in aaa_authorization.commands %}
- {% if command.enable_level is defined and command.enable_level %}
- {% if command.authorization_list_name is defined and command.authorization_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }}
- {% else %}
- {% if command.use_data is defined and command.use_data %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} {{ command.use_data }} {{ command.authorization_method }}
- {% else %}
-aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} {{ command.use_data }}
- {% endif %}
- {% else %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization commands {{ command.enable_level }} {{ command.authorization_list_name }} {{ command.authorization_method }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif command.role_name is defined and command.role_name %}
- {% if command.authorization_list_name is defined and command.authorization_list_name %}
- {% if command.state is defined and command.state == "absent" %}
-no aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }}
- {% else %}
- {% if command.use_data is defined and command.use_data %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }} {{ command.use_data }} {{ command.authorization_method }}
- {% else %}
-aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }} {{ command.use_data }}
- {% endif %}
- {% else %}
- {% if command.authorization_method is defined and command.authorization_method %}
-aaa authorization commands role {{ command.role_name }} {{ command.authorization_list_name }} {{ command.authorization_method }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if os9_aaa.aaa_radius is defined and os9_aaa.aaa_radius %}
- {% if os9_aaa.aaa_radius.group is defined %}
- {% if os9_aaa.aaa_radius.group %}
-aaa radius group {{ os9_aaa.aaa_radius.group }}
- {% else %}
-no aaa radius group
- {% endif %}
- {% endif %}
- {% if os9_aaa.aaa_radius.auth_method is defined %}
- {% if os9_aaa.aaa_radius.auth_method %}
-aaa radius auth-method {{ os9_aaa.aaa_radius.auth_method }}
- {% else %}
-no aaa radius auth-method
- {% endif %}
- {% endif %}
- {% endif %}
- {% if os9_aaa.aaa_tacacs is defined and os9_aaa.aaa_tacacs %}
- {% if os9_aaa.aaa_tacacs.group is defined %}
- {% if os9_aaa.aaa_tacacs.group %}
-aaa tacacs group {{ os9_aaa.aaa_tacacs.group }}
- {% else %}
-no aaa tacacs group
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if os9_aaa.aaa_authentication is defined and os9_aaa.aaa_authentication %}
- {% if os9_aaa.aaa_authentication.auth_list is defined and os9_aaa.aaa_authentication.auth_list %}
- {% for auth_list in os9_aaa.aaa_authentication.auth_list %}
- {% if auth_list.login_or_enable is defined and auth_list.login_or_enable %}
- {% if auth_list.name is defined and auth_list.name %}
- {% if auth_list.state is defined and auth_list.state == "absent" %}
-no aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }}
- {% else %}
- {% if auth_list.server is defined and auth_list.server %}
- {% if auth_list.use_password is defined and auth_list.use_password %}
-aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} {{ auth_list.server }} {{ auth_list.use_password }}
- {% else %}
-aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} {{ auth_list.server }}
- {% endif %}
- {% else %}
- {% if auth_list.use_password is defined and auth_list.use_password %}
-aaa authentication {{ auth_list.login_or_enable }} {{ auth_list.name }} {{ auth_list.use_password }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if os9_aaa.line_terminal is defined and os9_aaa.line_terminal %}
- {% for terminal in os9_aaa.line_terminal.keys() %}
- {% set terminal_vars = os9_aaa.line_terminal[terminal] %}
-line {{ terminal }}
- {% if terminal_vars.authorization is defined and terminal_vars.authorization %}
- {% if terminal_vars.authorization.commands is defined and terminal_vars.authorization.commands %}
- {% for commands in terminal_vars.authorization.commands %}
- {% if commands.enable_level is defined and commands.enable_level %}
- {% if commands.state is defined and commands.state == "absent" %}
- no authorization commands {{ commands.enable_level }}
- {% else %}
- {% if commands.authorization_list_name is defined and commands.authorization_list_name %}
- authorization commands {{ commands.enable_level }} {{ commands.authorization_list_name }}
- {% endif %}
- {% endif %}
- {% elif commands.role_name is defined and commands.role_name %}
- {% if commands.state is defined and commands.state == "absent" %}
- no authorization commands role {{ commands.role_name }}
- {% else %}
- {% if commands.authorization_list_name is defined and commands.authorization_list_name %}
- authorization commands role {{ commands.role_name }} {{ commands.authorization_list_name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if terminal_vars.authorization.exec is defined and terminal_vars.authorization.exec %}
- {% set exec = terminal_vars.authorization.exec %}
- {% if exec.state is defined and exec.state == "absent" %}
- no authorization exec
- {% else %}
- {% if exec.authorization_list_name is defined and exec.authorization_list_name %}
- authorization exec {{ exec.authorization_list_name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if terminal_vars.accounting is defined and terminal_vars.accounting %}
- {% if terminal_vars.accounting.commands is defined and terminal_vars.accounting.commands %}
- {% for commands in terminal_vars.accounting.commands %}
- {% if commands.enable_level is defined and commands.enable_level %}
- {% if commands.state is defined and commands.state == "absent" %}
- no accounting commands {{ commands.enable_level }}
- {% else %}
- {% if commands.accounting_list_name is defined and commands.accounting_list_name %}
- accounting commands {{ commands.enable_level }} {{ commands.accounting_list_name }}
- {% endif %}
- {% endif %}
- {% elif commands.role_name is defined and commands.role_name %}
- {% if commands.state is defined and commands.state == "absent" %}
- no accounting commands role {{ commands.role_name }}
- {% else %}
- {% if commands.accounting_list_name is defined and commands.accounting_list_name %}
- accounting commands role {{ commands.role_name }} {{ commands.accounting_list_name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if terminal_vars.accounting.exec is defined and terminal_vars.accounting.exec %}
- {% set exec = terminal_vars.accounting.exec %}
- {% if exec.state is defined and exec.state == "absent" %}
- no accounting exec
- {% else %}
- {% if exec.accounting_list_name is defined and exec.accounting_list_name %}
- accounting exec {{ exec.accounting_list_name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if terminal_vars.authentication is defined and terminal_vars.authentication %}
- {% if terminal_vars.authentication.enable is defined %}
- {% if terminal_vars.authentication.enable %}
- enable authentication {{ terminal_vars.authentication.enable }}
- {% else %}
- no enable authentication
- {% endif %}
- {% endif %}
- {% if terminal_vars.authentication.login is defined %}
- {% if terminal_vars.authentication.login %}
- login authentication {{ terminal_vars.authentication.login }}
- {% else %}
- no login authentication
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %}
\ No newline at end of file
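
For orientation, the following is a rough sketch of what the deleted template above would render for a small subset of the sample variables documented at the top of the file. The host address and key string are illustrative placeholders, not values shipped with the role:

  os9_aaa:
    tacacs_server:
      key: 7
      key_string: abcd1234
      host:
        - ip: 10.0.0.5
          timeout: 2
          state: present
    aaa_authentication:
      auth_list:
        - name: default
          login_or_enable: login
          server: tacacs+
          use_password: local
          state: present

  # approximate rendered configuration:
  tacacs-server key 7 abcd1234
  tacacs-server host 10.0.0.5 timeout 2
  aaa authentication login default tacacs+ local
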
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml
deleted file mode 100644
index b4e871b81..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/main.os6.yaml
+++ /dev/null
@@ -1,133 +0,0 @@
----
-# vars file for dellemc.os9.os9_aaa
-# The following is a sample configuration
-# Sample variables for OS9 device
-os9_aaa:
- radius_server:
- key: radius
- retransmit: 5
- timeout: 40
- deadtime: 2300
- group:
- - name: RADIUS
- host:
- - ip: 2001:4898:f0:f09b::1002
- key: 0
- key_string: aaaa
- retransmit: 5
- auth_port: 3
- timeout: 2
- state: present
- vrf:
- vrf_name: test
- source_intf: fortyGigE 1/2
- state: absent
- state: present
- host:
- - ip: 10.1.1.1
- key: 0
- key_string: aaa
- retransmit: 6
- auth_port: 3
- timeout: 2
- state: present
- tacacs_server:
- key: 7
- key_string: 9ea8ec421c2e2e5bec757f44205015f6d81e83a4f0aa52fa
- group:
- - name: TACACS
- host:
- - ip: 2001:4898:f0:f09b::1000
- key: 0
- key_string: aaa
- auth_port: 3
- timeout: 2
- state: present
- vrf:
- vrf_name: tes
- source_intf: fortyGigE 1/3
- state: present
- state: present
- host:
- - ip: 2001:4898:f0:f09b::1000
- key: 0
- key_string: aaa
- auth_port: 3
- timeout: 2
- state: present
- aaa_accounting:
- commands:
- - enable_level: 2
- accounting_list_name: aa
- record_option: start-stop
- state: present
- - role_name: netadmin
- accounting_list_name: aa
- no_accounting: none
- suppress: True
- exec:
- - accounting_list_name: aaa
- no_accounting: true
- state: present
- dot1x: none
- rest: none
- aaa_authorization:
- commands:
- - enable_level: 2
- authorization_list_name: aa
- use_data: local
- state: present
- - role_name: netadmin
- authorization_list_name: aa
- authorization_method: none
- use_data: local
- config_commands: True
- role_only:
- exec:
- - authorization_list_name: aaa
- authorization_method: if-authenticated
- use_data: local
- state: present
- line_terminal:
- vty 0:
- authorization:
- commands:
- - enable_level: 2
- authorization_list_name: aa
- state: present
- - role_name: netadmin
- authorization_list_name: aa
- state: present
- exec:
- - authorization_list_name: aa
- state: present
- accounting:
- commands:
- - enable_level: 2
- accounting_list_name: aa
- state: present
- - role_name: netadmin
- accounting_list_name: aa
- state: absent
- exec:
- accounting_list_name: aa
- state: present
- authentication:
- enable:
- login: console
- aaa_radius:
- group: RADIUS
- auth_method: pap
- aaa_tacacs:
- group: TACACS
- aaa_authentication:
- auth_list:
- - name: default
- login_or_enable: login
- server: tacacs+
- use_password: local
- state: present
- - name: console
- server: radius
- login_or_enable: login
- use_password: local
\ No newline at end of file
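
In practice, variables like the deleted sample above would be supplied through group_vars or host_vars for the inventory hosts rather than kept under the role's tests directory. A minimal sketch, assuming a hypothetical group_vars/datacenter.yaml and reusing values from the sample:

  os9_aaa:
    aaa_radius:
      group: RADIUS
      auth_method: pap
    aaa_tacacs:
      group: TACACS
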
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml
deleted file mode 100644
index e99880ca2..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_aaa
\ No newline at end of file
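
The deleted playbook above targets the datacenter group defined in the inventory file removed earlier in this diff. A sketch of how such a test run would have been invoked, assuming it is launched from the role's tests directory and that ansible_network_os and device credentials are provided elsewhere (for example in group_vars):

  ansible-playbook -i inventory.yaml test.yaml
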
diff --git a/ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml
deleted file mode 100644
index e198e3ed6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_aaa/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_aaa
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/LICENSE b/ansible_collections/dellemc/os9/roles/os9_acl/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/README.md b/ansible_collections/dellemc/os9/roles/os9_acl/README.md
deleted file mode 100644
index 52ef4c552..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/README.md
+++ /dev/null
@@ -1,134 +0,0 @@
-ACL role
-========
-
-This role facilitates the configuration of an access-control list (ACL). It supports the configuration of different types of ACLs (standard and extended) for both IPv4 and IPv6, and assigns the access-class to the line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The ACL role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable (see the sketch after this list)
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
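-A minimal sketch of these conventions (hypothetical ACL names; the full key reference follows below):
-
-    os9_acl:
-      - type: ipv4
-        name: ssh-only
-        extended: true
-        state: absent          # absent removes the whole ACL
-      - type: ipv4
-        name: mgmt-acl
-        extended: true
-        description:           # an empty value removes the configured description
-        state: present
-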
-**os9_acl keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4, ipv6, mac | Configures the L3 (IPv4/IPv6) or L2 (MAC) access-control list | os9 |
-| ``name`` | string (required) | Configures the name of the access-control list | os9 |
-| ``description`` | string | Configures the description of the access-control list | os9 |
-| ``remark`` | list | Configures the ACL remark (see ``remark.*``) | os9 |
-| ``remark.number`` | integer (required) | Configures the remark sequence number | os9 |
-| ``remark.description`` | string | Configures the remark description | os9 |
-| ``remark.state`` | string: absent,present\* | Deletes the configured remark for an ACL entry if set to absent | os9 |
-| ``extended`` | boolean: true,false | Configures an extended ACL type if set to true; configures a standard ACL if set to false | os9 |
-| ``entries`` | list | Configures ACL rules (see ``entries.*``) | os9 |
-| ``entries.number`` | integer (required) | Specifies the sequence number of the ACL rule | os9 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true; specifies to reject packets if set to false | os9 |
-| ``entries.protocol`` | string (required) | Specifies the type of protocol or the protocol number to filter | os9 |
-| ``entries.source`` | string (required) | Specifies the source address to match in the packets | os9 |
-| ``entries.src_condition`` | string | Specifies the condition to filter packets from the source address; ignored if MAC | os9 |
-| ``entries.destination`` | string (required) | Specifies the destination address to match in the packets | os9 |
-| ``entries.dest_condition`` | string | Specifies the condition to filter packets to the destination address | os9 |
-| ``entries.other_options`` | string | Specifies the other options applied on packets (count, log, order, monitor, and so on) | os9 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the ACL if set to absent | os9 |
-| ``stage_ingress`` | list | Configures ingress ACL to the interface (see ``stage_ingress.*``) | os9 |
-| ``stage_ingress.name`` | string (required) | Configures the ingress ACL filter to the interface with this interface name | os9 |
-| ``stage_ingress.state`` | string: absent,present\* | Deletes the configured ACL from the interface if set to absent | os9 |
-| ``stage_ingress.seq_number`` | integer | Configures the sequence number (greater than 0) to rank precedence for this interface and direction | os9 |
-| ``stage_egress`` | list | Configures egress ACL to the interface (see ``stage_egress.*``) | os9 |
-| ``stage_egress.name`` | string (required) | Configures the egress ACL filter to the interface with this interface name | os9 |
-| ``stage_egress.state`` | string: absent,present\* | Deletes the configured egress ACL from the interface if set to absent | os9 |
-| ``lineterminal`` | list | Configures the terminal to apply the ACL (see ``lineterminal.*``) | os9 |
-| ``lineterminal.line`` | string (required) | Configures access-class on the line terminal | os9 |
-| ``lineterminal.state`` | string: absent,present\* | Deletes the access-class from line terminal if set to absent | os9 |
-| ``state`` | string: absent,present\* | Deletes the ACL if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_acl* role to configure different types of ACLs (standard and extended) for both IPv4 and IPv6 and assigns the access-class to the line terminals. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default it is set to false. This example writes a simple playbook that only references the *os9_acl* role.
-
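-Offline generation can be enabled by setting the flag alongside *build_dir* in *host_vars* or *group_vars* (a minimal sketch reusing the *build_dir* path from the sample below):
-
-    os9_cfg_generate: true
-    build_dir: ../temp/os9
-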
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_acl:
- - type: ipv4
- name: ssh-only
- description: ipv4acl
- extended: true
- remark:
- - number: 5
- description: "ipv4remark"
- entries:
- - number: 5
- permit: true
- protocol: tcp
- source: any
- src_condition: ack
- destination: any
- dest_condition: eq 22
- other_options: count
- state: present
- stage_ingress:
- - name: fortyGigE 1/28
- state: present
- stage_egress:
- - name: fortyGigE 1/28
- state: present
- lineterminal:
- - line: vty 1
- state: present
- - line: vty 2
- state: absent
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_acl
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml
deleted file mode 100644
index 7c196010c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_acl \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml
deleted file mode 100644
index ad771c4fa..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_acl \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml
deleted file mode 100644
index dd418f870..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_acl role facilitates the configuration of access control list (ACL) attributes in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml
deleted file mode 100644
index 53612e5ec..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-#tasks file for os9
- - name: "Generating ACL configuration for os9"
- template:
- src: os9_acl.j2
- dest: "{{ build_dir }}/acl9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning ACL configuration for os9"
- dellemc.os9.os9_config:
- src: os9_acl.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2 b/ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2
deleted file mode 100644
index b47a1c2c9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/templates/os9_acl.j2
+++ /dev/null
@@ -1,277 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-
-Purpose:
-Configure ACL commands for os9 devices
-
-os9_acl:
- - name: ssh-only
- type: ipv4
- description: acl
- extended: true
- remark:
- - number: 1
- description: helloworld
- state: present
- entries:
- - number: 10
- permit: true
- protocol: tcp
- source: any
- destination: any
- src_condition: eq 22
- dest_condition: ack
- other_options: count
- state: present
- stage_ingress:
- - name: fortyGigE 1/8
- state: present
- - name: fortyGigE 1/9
- state: present
- stage_egress:
- - name: fortyGigE 1/19
- state: present
- lineterminal:
- - line: vty 0
- state: present
- - line: vty 1
- state: present
- state: present
- - name: ipv6-ssh-only
- type: ipv6
- entries:
- - number: 10
- permit: true
- protocol: ipv6
- source: 2001:4898::/32
- destination: any
- - number: 20
- permit: true
- protocol: tcp
- source: any
- src_condition: ack
- destination: any
- - number: 40
- permit: true
- protocol: tcp
- source: any
- destination: any
- state: present
- lineterminal:
- - line: vty 0
- state: present
- - line: vty 1
- state: present
-#####################################}
-{% if os9_acl is defined and os9_acl %}
- {% for val in os9_acl
- %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.type is defined and val.type == "ipv4" %}
- {% if val.extended is defined and val.extended %}
-no ip access-list extended {{ val.name }}
- {% else %}
-no ip access-list standard {{ val.name }}
- {% endif %}
- {% elif val.type is defined and val.type == "ipv6" %}
-no ipv6 access-list {{ val.name }}
- {% elif val.type is defined and val.type == "mac" %}
- {% if val.extended is defined and val.extended %}
-no mac access-list extended {{ val.name }}
- {% else %}
-no mac access-list standard {{ val.name }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if val.type is defined and val.type == "ipv4" %}
- {% if val.extended is defined and val.extended %}
-ip access-list extended {{ val.name }}
- {% else %}
-ip access-list standard {{ val.name }}
- {% endif %}
- {% elif val.type is defined and val.type == "ipv6" %}
-ipv6 access-list {{ val.name }}
- {% elif val.type is defined and val.type == "mac" %}
- {% if val.extended is defined and val.extended %}
-mac access-list extended {{ val.name }}
- {% else %}
-mac access-list standard {{ val.name }}
- {% endif %}
- {% endif %}
- {% if val.description is defined %}
- {% if val.description %}
- description {{ val.description }}
- {% else %}
- no description a
- {% endif %}
- {% endif %}
- {% if val.remark is defined and val.remark %}
- {% for remark in val.remark %}
- {% if remark.number is defined and remark.number %}
- {% if remark.state is defined and remark.state == "absent" %}
- no remark {{ remark.number }}
- {% else %}
- {% if remark.description is defined and remark.description %}
- remark {{ remark.number }} {{ remark.description }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if val.entries is defined and val.entries %}
- {% for rule in val.entries %}
- {% if rule.number is defined and rule.number %}
- {% if rule.state is defined and rule.state == "absent" %}
- no seq {{ rule.number }}
- {% else %}
- {% if rule.permit is defined %}
- {% if rule.permit %}
- {% set is_permit = "permit" %}
- {% else %}
- {% set is_permit = "deny" %}
- {% endif %}
- {% if val.type is defined and val.type == "mac" %}
- {% if rule.source is defined and rule.source %}
- {% if rule.destination is defined and rule.destination %}
- {% if rule.other_options is defined and rule.other_options %}
- {% if rule.other_options == "log" %}
- {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %}
- {% else %}
- {% set other_options = rule.other_options %}
- {% endif %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.source }} {{ rule.destination }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.source }} {{ rule.destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- {% if rule.protocol is defined and rule.protocol %}
- {% if rule.source is defined and rule.source %}
- {% if rule.destination is defined and rule.destination %}
- {% if rule.src_condition is defined and rule.src_condition %}
- {% if rule.dest_condition is defined and rule.dest_condition %}
- {% if rule.other_options is defined and rule.other_options %}
- {% if rule.other_options == "log" %}
- {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %}
- {% else %}
- {% set other_options = rule.other_options %}
- {% endif %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ rule.dest_condition }}
- {% endif %}
- {% else %}
- {% if rule.other_options is defined and rule.other_options %}
- {% if rule.other_options == "log" %}
- {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %}
- {% else %}
- {% set other_options = rule.other_options %}
- {% endif %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.src_condition }} {{ rule.destination }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if rule.dest_condition is defined and rule.dest_condition %}
- {% if rule.other_options is defined and rule.other_options %}
- {% if rule.other_options == "log" %}
- {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %}
- {% else %}
- {% set other_options = rule.other_options %}
- {% endif %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ rule.dest_condition }}
- {% endif %}
- {% else %}
- {% if rule.other_options is defined and rule.other_options %}
- {% if rule.other_options == "log" %}
- {% set other_options = rule.other_options + ' threshold-in-msgs 10 interval 5' %}
- {% else %}
- {% set other_options = rule.other_options %}
- {% endif %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }} {{ other_options }}
- {% else %}
- seq {{ rule.number }} {{ is_permit }} {{ rule.protocol }} {{ rule.source }} {{ rule.destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if val.lineterminal is defined and val.lineterminal %}
- {% if val.type is defined and not val.type == "mac" %}
- {% for vty in val.lineterminal %}
- {% if vty.line is defined and vty.line %}
-line {{ vty.line }}
- {% if vty.state is defined and vty.state == "absent" %}
- no access-class {{ val.name }} {{ val.type }}
- {% else %}
- access-class {{ val.name }} {{ val.type }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if val.stage_ingress is defined and val.stage_ingress %}
- {% for intf in val.stage_ingress %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- no mac access-group {{ val.name }} in
- {% else %}
- no ip access-group {{ val.name }} in
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- mac access-group {{ val.name }} in
- {% else %}
- ip access-group {{ val.name }} in
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if val.stage_egress is defined and val.stage_egress %}
- {% for intf in val.stage_egress %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- no mac access-group {{ val.name }} out
- {% else %}
- no ip access-group {{ val.name }} out
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if val.type is defined and val.type == "mac" %}
- mac access-group {{ val.name }} out
- {% else %}
- ip access-group {{ val.name }} out
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml
deleted file mode 100644
index 9f083bb9c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/tests/main.os9.yaml
+++ /dev/null
@@ -1,88 +0,0 @@
----
-# vars file for dellemc.os9.os9_acl,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_acl:
- - name: ssh-only-mac
- type: mac
- description: macacl
- extended: true
- remark:
- - number: 1
- description: mac
- state: present
- entries:
- - number: 5
- permit: true
- protocol: tcp
- source: any
- destination: any
- dest_condition: eq 2
- other_options: count
- state: present
- - number: 6
- permit: false
- protocol: tcp
- source: bb:bb:bb:bb:bb:bb ff:ff:ff:ff:ff:ff
- destination: any
- dest_condition: log
- state: present
- stage_ingress:
- - name: fortyGigE 1/28
- state: present
- - name: fortyGigE 1/27
- state: present
- stage_egress:
- - name: fortyGigE 1/28
- state: present
- lineterminal:
- - line: vty 1
- state: present
- - line: vty 2
- state: absent
- - line: vty 3
- state: present
- - name: ipv6-ssh-only
- type: ipv6
- description: ipv6acl
- remark:
- - number: 1
- description: ipv6
- entries:
- - number: 10
- permit: true
- protocol: ipv6
- source: 2001:4898::/32
- destination: any
- - number: 20
- permit: true
- protocol: tcp
- source: any
- src_condition: eq 2
- destination: 2404:f801::/32
- - number: 30
- permit: true
- protocol: tcp
- source: any
- destination: 2a01:110::/31
- dest_condition: ack
- - number: 40
- permit: true
- protocol: tcp
- source: any
- destination: any
- stage_ingress:
- - name: fortyGigE 1/26
- state: present
- - name: fortyGigE 1/27
- state: present
- stage_egress:
- - name: fortyGigE 1/26
- state: present
- lineterminal:
- - line: vty 0
- state: absent
- - line: vty 1
- - line: vty 2
- - line: vty 3
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml
deleted file mode 100644
index dbe56bcd9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_acl \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml
deleted file mode 100644
index 95a393630..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_acl/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_acl \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/README.md b/ansible_collections/dellemc/os9/roles/os9_bgp/README.md
deleted file mode 100644
index c8f580bcd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/README.md
+++ /dev/null
@@ -1,224 +0,0 @@
-BGP role
-========
-
-This role facilitates the configuration of border gateway protocol (BGP) attributes. It supports the configuration of router ID, networks, neighbors, and maximum path. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The BGP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_bgp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``asn`` | string (required) | Configures the autonomous system (AS) number of the local BGP instance | os9 |
-| ``router_id`` | string | Configures the IP address of the local BGP router instance | os9 |
-| ``graceful_restart`` | boolean | Configures graceful restart capability | os9 |
-| ``graceful_restart.state`` | string: absent,present\* | Removes graceful restart capability if set to absent | os9 |
-| ``maxpath_ibgp`` | integer | Configures the maximum number of paths to forward packets through iBGP (1 to 64; default 1) | os9 |
-| ``maxpath_ebgp`` | integer | Configures the maximum number of paths to forward packets through eBGP (1 to 64; default 1) | os9 |
-| ``best_path`` | list | Configures the default best-path selection (see ``best_path.*``) | os9 |
-| ``best_path.as_path`` | string (required): ignore,multipath-relax | Configures the AS path used for the best-path computation | os9 |
-| ``best_path.as_path_state`` | string: absent,present\* | Deletes the AS path configuration if set to absent | os9 |
-| ``best_path.ignore_router_id`` | boolean: true,false | Ignores the router identifier in best-path computation if set to true | os9 |
-| ``best_path.med`` | list | Configures the MED attribute (see ``med.*``) | os9 |
-| ``med.attribute`` | string (required): confed,missing-as-best | Configures the MED attribute used for the best-path computation | os9 |
-| ``med.state`` | string: absent,present\* | Deletes the MED attribute if set to absent | os9 |
-| ``ipv4_network`` | list | Configures IPv4 BGP networks (see ``ipv4_network.*``) | os9 |
-| ``ipv4_network.address`` | string (required) | Configures the IPv4 address of the BGP network (A.B.C.D/E format) | os9 |
-| ``ipv4_network.state`` | string: absent,present\* | Deletes an IPv4 BGP network if set to absent | os9 |
-| ``ipv6_network`` | list | Configures an IPv6 BGP network (see ``ipv6_network.*``) | os9 |
-| ``ipv6_network.address`` | string (required) | Configures the IPv6 address of the BGP network (2001:4898:5808:ffa2::1/126 format) | os9 |
-| ``ipv6_network.state`` | string: absent,present\* | Deletes an IPv6 BGP network if set to absent | os9 |
-| ``neighbor`` | list | Configures IPv4 BGP neighbors (see ``neighbor.*``) | os9 |
-| ``neighbor.ip`` | string (required) | Configures the IPv4 address of the BGP neighbor (10.1.1.1) | os9 |
-| ``neighbor.interface`` | string | Configures the BGP neighbor interface details | |
-| ``neighbor.name`` | string (required) | Configures the BGP peer-group with this name; supported only when the neighbor is a peer group; mutually exclusive with *neighbor.ip* | os9 |
-| ``neighbor.type`` | string (required): ipv4,ipv6,peergroup | Specifies the BGP neighbor type | os9 |
-| ``neighbor.remote_asn`` | string (required) | Configures the remote AS number of the BGP neighbor | os9 |
-| ``neighbor.remote_asn_state`` | string: absent,present\* | Deletes the remote AS number from the peer group if set to absent; supported only when *neighbor.type* is "peergroup" | os9 |
-| ``neighbor.timer`` | string | Configures neighbor timers (<int> <int>); for example, 5 10, where 5 is the keepalive interval and 10 is the hold time | os9 |
-| ``neighbor.default_originate`` | boolean: true, false\* | Configures default originate routes to the BGP neighbor | os9 |
-| ``neighbor.peergroup`` | string | Configures neighbor to BGP peer-group (configured peer-group name) | os9 |
-| ``neighbor.peergroup_state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor from the peer-group if set to absent | os9 |
-| ``neighbor.distribute_list`` | list | Configures the distribute list to filter networks from routing updates (see ``distribute_list.*``) | os9 |
-| ``distribute_list.in`` | string | Configures the name of the prefix-list to filter incoming packets | os9 |
-| ``distribute_list.in_state`` | string: absent,present\* | Deletes the filter at incoming packets if set to absent | os9 |
-| ``distribute_list.out`` | string | Configures the name of the prefix-list to filter outgoing packets | os9 |
-| ``distribute_list.out_state`` | string: absent,present\* | Deletes the filter at outgoing packets if set to absent | os9 |
-| ``neighbor.admin`` | string: up,down | Configures the administrative state of the neighbor | os9 |
-| ``neighbor.adv_interval`` | integer | Configures the advertisement interval of the neighbor | os9 |
-| ``neighbor.fall_over`` | string: absent,present | Configures session fall-over on peer-route loss | os9 |
-| ``neighbor.sender_loop_detect`` | boolean: true,false | Enables or disables sender-side loop detection for the neighbor | os9 |
-| ``neighbor.src_loopback`` | integer | Configures the source loopback interface for routing packets | os9 |
-| ``neighbor.src_loopback_state`` | string: absent,present\* | Deletes the source for routing packets if set to absent | os9 |
-| ``neighbor.ebgp_multihop`` | integer | Configures the maximum-hop count value allowed in eBGP neighbors that are not directly connected (default 255) | os9 |
-| ``neighbor.passive`` | boolean: true,false\* | Configures the passive BGP peer group; supported only when neighbor is a peer-group | os9 |
-| ``neighbor.subnet`` | string (required) | Configures the passive BGP neighbor to this subnet; required together with the *neighbor.passive* key for os9 devices | os9 |
-| ``neighbor.subnet_state`` | string: absent,present\* | Deletes the subnet range set for dynamic IPv4 BGP neighbor if set to absent | os9 |
-| ``neighbor.limit`` | integer | Configures the maximum dynamic peer count (key is required together with ``neighbor.subnet``) | |
-| ``neighbor.bfd`` | boolean | Enables BFD for the neighbor | |
-| ``neighbor.state`` | string: absent,present\* | Deletes the IPv4 BGP neighbor if set to absent | os9 |
-| ``redistribute`` | list | Configures the redistribute list to get information from other routing protocols (see ``redistribute.*``) | os9 |
-| ``redistribute.route_type`` | string (required): static,connected | Configures the name of the routing protocol to redistribute | os9 |
-| ``redistribute.route_map_name`` | string | Configures the route-map to redistribute | os9 |
-| ``redistribute.route_map`` | string: absent,present\* | Deletes the route-map to redistribute if set to absent | os9 |
-| ``redistribute.address_type`` | string (required): ipv4,ipv6 | Configures the address type of IPv4 or IPv6 routes | os9 |
-| ``redistribute.state`` | string: absent,present\* | Deletes the redistribution information if set to absent | os9 |
-| ``state`` | string: absent,present\* | Deletes the local router BGP instance if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_bgp* role to configure the BGP network and neighbors. The example creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os9_bgp* role; the sample host_vars shown below is for os9.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_bgp:
- asn: 11
- router_id: 192.168.3.100
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- graceful_restart: true
- best_path:
- as_path: ignore
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-best
- state: present
- ipv4_network:
- - address: 102.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - ip: 192.168.10.2
- type: ipv4
- remote_asn: 12
- timer: 5 10
- adv_interval: 40
- fall_over: present
- default_originate: False
- peergroup: per
- peergroup_state: present
- sender_loop_detect: false
- src_loopback: 1
- src_loopback_state: present
- distribute_list:
- in: aa
- in_state: present
- ebgp_multihop: 25
- admin: up
- state: present
- - ip: 2001:4898:5808:ffa2::1
- type: ipv6
- remote_asn: 14
- peergroup: per
- peergroup_state: present
- distribute_list:
- in: aa
- in_state: present
- src_loopback: 0
- src_loopback_state: present
- ebgp_multihop: 255
- admin: up
- state: present
- - name: peer1
- type: peergroup
- remote_asn: 14
- distribute_list:
- in: an
- in_state: present
- out: bb
- out_state: present
- passive: True
- subnet: 10.128.4.192/27
- subnet_state: present
- state: present
- - ip: 172.20.12.1
- description: O_site2-spine1
- type: ipv4
- remote_asn: 64640
- fall_over: present
- ebgp_multihop: 4
- src_loopback: 1
- adv_interval: 1
- timer: 3 9
- send_community:
- - type: extended
- address_family:
- - type: ipv4
- activate: falsesrc_loopback
- state: present
- - type: l2vpn
- activate: true
- state: present
- admin: up
- state: present
- redistribute:
- - route_type: static
- route_map_name: aa
- state: present
- address_type: ipv4
- - route_type: connected
- address_type: ipv6
- state: present
- state: present
-
-**Simple playbook to configure BGP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_bgp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml
deleted file mode 100644
index 0063029c0..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml
deleted file mode 100644
index 385a5f7a6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml
deleted file mode 100644
index 5db413c33..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_bgp role facilitates the configuration of BGP attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml
deleted file mode 100644
index 46f84c4c2..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating BGP configuration for os9"
- template:
- src: os9_bgp.j2
- dest: "{{ build_dir }}/bgp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning BGP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_bgp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2 b/ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2
deleted file mode 100644
index 4bc679121..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/templates/os9_bgp.j2
+++ /dev/null
@@ -1,351 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{##########################################
-Purpose:
-Configure BGP commands for os9 Devices
-os9_bgp:
- asn: 12
- router_id:
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- best_path:
- as_path: ignore
- as_path_state: present
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- ipv4_network:
- - address: 101.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- neighbor:
- - type: ipv4
- remote_asn: 11
- ip: 192.168.11.1
- admin: up
- sender_loop_detect: false
- src_loopback: 0
- src_loopback_state: present
- ebgp_multihop: 255
- distribute_list:
- in: aa
- in_state: present
- out: aa
- out_state: present
- state: present
- - type: ipv6
- remote_asn: 14
- ip: 2001:4898:5808:ffa2::1
- sender_loop_detect: false
- src_loopback: 0
- src_loopback_state: present
- state: present
- - type: peer_group
- name: peer1
- remote_asn: 6
- subnet: 10.128.3.192/27
- subnet_state: present
- admin: up
- default_originate: true
- sender_loop_detect: false
- src_loopback: 1
- src_loopback_state: present
- ebgp_multihop: 255
- state: present
- redistribute:
- - route_type: static
- state: present
- state: present
-################################}
-{% if os9_bgp is defined and os9_bgp %}
-{% set bgp_vars = os9_bgp %}
-
-{% if bgp_vars.asn is defined and bgp_vars.asn %}
- {% if bgp_vars.state is defined and bgp_vars.state == "absent" %}
-no router bgp {{ bgp_vars.asn }}
- {% else %}
-{# Add Feature to the switch #}
-router bgp {{ bgp_vars.asn }}
- {% if bgp_vars.router_id is defined %}
- {% if bgp_vars.router_id %}
- bgp router-id {{ bgp_vars.router_id }}
- {% else %}
- no bgp router-id
- {% endif %}
- {% endif %}
-
- {% if bgp_vars.maxpath_ebgp is defined %}
- {% if bgp_vars.maxpath_ebgp %}
- maximum-paths ebgp {{ bgp_vars.maxpath_ebgp }}
- {% else %}
- no maximum-paths ebgp
- {% endif %}
- {% endif %}
-
- {% if bgp_vars.maxpath_ibgp is defined %}
- {% if bgp_vars.maxpath_ibgp %}
- maximum-paths ibgp {{ bgp_vars.maxpath_ibgp }}
- {% else %}
- no maximum-paths ibgp
- {% endif %}
- {% endif %}
-
- {% if bgp_vars.graceful_restart is defined and bgp_vars.graceful_restart %}
- {% if bgp_vars.graceful_restart.state is defined and bgp_vars.graceful_restart.state == "present" %}
- bgp graceful-restart
- {% else %}
- no bgp graceful-restart
- {% endif %}
- {% endif %}
-
- {% if bgp_vars.best_path is defined and bgp_vars.best_path %}
- {% if bgp_vars.best_path.as_path is defined and bgp_vars.best_path.as_path %}
- {% if bgp_vars.best_path.as_path_state is defined and bgp_vars.best_path.as_path_state == "absent" %}
- no bgp bestpath as-path {{ bgp_vars.best_path.as_path }}
- {% else %}
- bgp bestpath as-path {{ bgp_vars.best_path.as_path }}
- {% endif %}
- {% endif %}
- {% if bgp_vars.best_path.ignore_router_id is defined %}
- {% if bgp_vars.best_path.ignore_router_id %}
- bgp bestpath router-id ignore
- {% else %}
- no bgp bestpath router-id ignore
- {% endif %}
- {% endif %}
- {% if bgp_vars.best_path.med is defined and bgp_vars.best_path.med %}
- {% for med in bgp_vars.best_path.med %}
- {% if med.attribute is defined and med.attribute %}
- {% if med.state is defined and med.state == "absent" %}
- no bgp bestpath med {{ med.attribute }}
- {% else %}
- bgp bestpath med {{ med.attribute }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if bgp_vars.ipv4_network is defined and bgp_vars.ipv4_network %}
- {% for net in bgp_vars.ipv4_network %}
- {# remove BGP network announcement #}
- {% if net.address is defined and net.address %}
- {% if net.state is defined and net.state == "absent" %}
- no network {{ net.address }}
-{# Add BGP network announcement #}
- {% else %}
- network {{ net.address }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if bgp_vars.ipv6_network is defined and bgp_vars.ipv6_network %}
- address-family ipv6 unicast
- {% for net in bgp_vars.ipv6_network %}
- {% if net.address is defined and net.address %}
- {% if net.state is defined and net.state == "absent" %}
- no network {{ net.address }}
- {% else %}
- network {{ net.address }}
- {% endif %}
- {% endif %}
- {% endfor %}
- exit-address-family
- {% endif %}
-
- {% if bgp_vars.neighbor is defined and bgp_vars.neighbor %}
- {% for neighbor in bgp_vars.neighbor %}
- {% if neighbor.type is defined %}
- {% if neighbor.type == "ipv4" or neighbor.type =="ipv6" %}
- {% if neighbor.ip is defined and neighbor.ip %}
- {% set tag_or_ip = neighbor.ip %}
- {% if neighbor.remote_asn is defined and neighbor.remote_asn %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
- no neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }}
- {% if neighbor.peergroup is defined and neighbor.peergroup %}
- {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %}
- no neighbor {{ tag_or_ip }} peer-group {{ neighbor.peergroup }}
- {% endif %}
- {% endif %}
- {% if neighbor.type == "ipv6" %}
- address-family ipv6 unicast
- no neighbor {{ tag_or_ip }} activate
- exit-address-family
- {% endif %}
- {% else %}
- neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }}
- {% if neighbor.peergroup is defined and neighbor.peergroup %}
- {% if neighbor.peergroup_state is defined and neighbor.peergroup_state == "absent" %}
- no neighbor {{ tag_or_ip }} peer-group {{ neighbor.peergroup }}
- {% else %}
- neighbor {{ tag_or_ip }} peer-group {{ neighbor.peergroup }}
- {% endif %}
- {% endif %}
- {% if neighbor.type == "ipv6" %}
- address-family ipv6 unicast
- neighbor {{ tag_or_ip }} activate
- exit-address-family
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif neighbor.type == "peergroup" %}
- {% if neighbor.name is defined and neighbor.name %}
- {% set tag_or_ip = neighbor.name %}
- {% if neighbor.state is defined and neighbor.state == "absent" %}
- no neighbor {{ tag_or_ip }} peer-group
- {% else %}
- {% if neighbor.passive is defined and neighbor.passive %}
- neighbor {{ tag_or_ip }} peer-group passive
- {% if neighbor.subnet is defined and neighbor.subnet %}
- {% if neighbor.subnet_state is defined and neighbor.subnet_state == "absent" %}
- no neighbor {{ tag_or_ip }} subnet {{ neighbor.subnet }}
- {% else %}
- neighbor {{ tag_or_ip }} subnet {{ neighbor.subnet }}
- {% endif %}
- {% endif %}
- {% else %}
- neighbor {{ tag_or_ip }} peer-group
- {% endif %}
- {% if neighbor.remote_asn is defined and neighbor.remote_asn %}
- {% if neighbor.remote_asn_state is defined and neighbor.remote_asn_state == "absent" %}
- no neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }}
- {% else %}
- neighbor {{ tag_or_ip }} remote-as {{ neighbor.remote_asn }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if tag_or_ip is defined and tag_or_ip %}
- {% if (neighbor.state is not defined) or (neighbor.state is defined and not neighbor.state == "absent") %}
- {% if neighbor.timer is defined %}
- {% if neighbor.timer %}
- neighbor {{ tag_or_ip }} timers {{ neighbor.timer }}
- {% else %}
- no neighbor {{ tag_or_ip }} timers
- {% endif %}
- {% endif %}
- {% if neighbor.default_originate is defined %}
- {% if neighbor.default_originate %}
- neighbor {{ tag_or_ip }} default-originate
- {% else %}
- no neighbor {{ tag_or_ip }} default-originate
- {% endif %}
- {% endif %}
- {% if neighbor.sender_loop_detect is defined %}
- {% if neighbor.sender_loop_detect %}
- neighbor {{ tag_or_ip }} sender-side-loop-detection
- {% else %}
- no neighbor {{ tag_or_ip }} sender-side-loop-detection
- {% endif %}
- {% endif %}
- {% if neighbor.src_loopback is defined and neighbor.src_loopback|int(-1) != -1 %}
- {% if neighbor.src_loopback_state is defined and neighbor.src_loopback_state == "absent" %}
- no neighbor {{ tag_or_ip }} update-source Loopback {{neighbor.src_loopback }}
- {% else %}
- neighbor {{ tag_or_ip }} update-source Loopback {{ neighbor.src_loopback }}
- {% endif %}
- {% endif %}
- {% if neighbor.ebgp_multihop is defined %}
- {% if neighbor.ebgp_multihop %}
- neighbor {{ tag_or_ip }} ebgp-multihop {{ neighbor.ebgp_multihop }}
- {% else %}
- no neighbor {{ tag_or_ip }} ebgp-multihop
- {% endif %}
- {% endif %}
- {% if neighbor.distribute_list is defined and neighbor.distribute_list %}
- {% if neighbor.distribute_list.in is defined and neighbor.distribute_list.in %}
- {% if neighbor.distribute_list.in_state is defined and neighbor.distribute_list.in_state == "absent" %}
- no neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.in }} in
- {% else %}
- neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.in }} in
- {% endif %}
- {% endif %}
- {% if neighbor.distribute_list.out is defined and neighbor.distribute_list.out %}
- {% if neighbor.distribute_list.out_state is defined and neighbor.distribute_list.out_state == "absent" %}
- no neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.out }} out
- {% else %}
- neighbor {{ tag_or_ip }} distribute-list {{ neighbor.distribute_list.out }} out
- {% endif %}
- {% endif %}
- {% endif %}
- {% if neighbor.admin is defined and (neighbor.admin == "up" or neighbor.admin == "present") %}
- neighbor {{ tag_or_ip }} no shutdown
- {% else %}
- neighbor {{ tag_or_ip }} shutdown
- {% endif %}
- {% if neighbor.adv_interval is defined %}
- {% if neighbor.adv_interval %}
- neighbor {{ tag_or_ip }} advertisement-interval {{ neighbor.adv_interval }}
- {% else %}
- no neighbor {{ tag_or_ip }} advertisement-interval
- {% endif %}
- {% endif %}
- {% if neighbor.fall_over is defined and neighbor.fall_over == "present" %}
- neighbor {{ tag_or_ip }} fall-over
- {% elif neighbor.fall_over is defined and neighbor.fall_over == "absent" %}
- no neighbor {{ tag_or_ip }} fall-over
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if bgp_vars.redistribute is defined and bgp_vars.redistribute %}
- {% for route in bgp_vars.redistribute %}
- {% if route.route_type is defined and route.route_type %}
- {% if route.address_type is defined and route.address_type %}
- {% if route.address_type == "ipv6" %}
- address-family {{ route.address_type }} unicast
- {% if route.state is defined and route.state == "absent" %}
- no redistribute {{ route.route_type }}
- {% else %}
- {% if route.route_map is defined %}
- {% if route.route_map == "present" %}
- {% if route.route_map_name is defined and route.route_map_name %}
- redistribute {{ route.route_type }} route-map {{ route.route_map_name }}
- {% else %}
- redistribute {{ route.route_type }}
- {% endif %}
- {% else %}
- {% if route.route_map_name is defined and route.route_map_name %}
- no redistribute {{ route.route_type }} route-map {{ route.route_map_name }}
- {% endif %}
- {% endif %}
- {% else %}
- redistribute {{ route.route_type }}
- {% endif %}
- {% endif %}
- exit-address-family
- {% else %}
- {% if route.state is defined and route.state == "absent" %}
- no redistribute {{ route.route_type }}
- {% else %}
- {% if route.route_map is defined %}
- {% if route.route_map == "present" %}
- {% if route.route_map_name is defined and route.route_map_name %}
- redistribute {{ route.route_type }} route-map {{ route.route_map_name }}
- {% else %}
- redistribute {{ route.route_type }}
- {% endif %}
- {% else %}
- {% if route.route_map_name is defined and route.route_map_name %}
- no redistribute {{ route.route_type }} route-map {{ route.route_map_name }}
- {% endif %}
- {% endif %}
- redistribute {{ route.route_type }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
-{% endif %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml
deleted file mode 100644
index ed00565bc..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/main.os9.yaml
+++ /dev/null
@@ -1,97 +0,0 @@
----
-# vars file for dellemc.os9.os9_bgp,
-# below gives a sample configuration
-# Sample variables for OS9 device
- os9_bgp:
- asn: 11
- router_id: 192.168.3.100
- maxpath_ibgp: 2
- maxpath_ebgp: 2
- best_path:
- as_path: ignore
- as_path_state: absent
- ignore_router_id: true
- med:
- - attribute: confed
- state: present
- - attribute: missing-as-best
- state: present
- ipv4_network:
- - address: 102.1.1.0/30
- state: present
- ipv6_network:
- - address: "2001:4898:5808:ffa0::/126"
- state: present
- - address: "2001:4898:5808:ffa1::/126"
- state: present
- neighbor:
- - name: per
- type: peergroup
- remote_asn: 12
- remote_asn_state: absent
- default_originate: False
- src_loopback: 0
- src_loopback_state: present
- ebgp_multihop: 255
- state: present
-
- - name: peer1
- type: peergroup
- remote_asn: 14
- distribute_list:
- in: an
- in_state: present
- out: bb
- out_state: present
- passive: True
- subnet: 10.128.4.192/27
- state: present
-
- - ip: 192.168.10.2
- type: ipv4
- remote_asn: 12
- timer: 5 10
- default_originate: False
- peergroup: per
- peergroup_state: present
- distribute_list:
- in: aa
- in_state: present
- admin: up
- state: present
-
- - ip: 192.168.13.3
- type: ipv4
- remote_asn: 13
- sender_loop_detect: false
- src_loopback: 1
- src_loopback_state: present
- distribute_list:
- in: aa
- in_state: present
- out: aa
- out_state: present
- ebgp_multihop: 25
- state: present
-
- - ip: 2001:4898:5808:ffa2::1
- type: ipv6
- remote_asn: 14
- peergroup: per
- peergroup_state: present
- distribute_list:
- in: aa
- in_state: present
- src_loopback: 0
- src_loopback_state: present
- ebgp_multihop: 255
- admin: up
- state: present
- redistribute:
- - route_type: static
- state: present
- address_type: ipv4
- - route_type: connected
- address_type: ipv6
- state: present
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml
deleted file mode 100644
index 15511b4d1..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml
deleted file mode 100644
index 3482e5cc7..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_bgp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_bgp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE b/ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/README.md b/ansible_collections/dellemc/os9/roles/os9_copy_config/README.md
deleted file mode 100644
index c74cf1c6e..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-Copy-config role
-================
-
-This role is used to push the backup running configuration into a Dell EMC OS9 device and to merge the configuration in the template file with the running configuration of the device. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The copy-config role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- No predefined variables are part of this role
-- Use *host_vars* or *group_vars* as part of the template file
-- Configuration file is host-specific
-- Copy the host-specific configuration to the respective file under the template directory in *<host_name>.j2* format
-- Variables and values are case-sensitive
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_copy_config* role to push the configuration file into the device. It creates a *hosts* file with the switch details and corresponding variables. It writes a simple playbook that only references the *os9_copy_config* role. By including the role, you automatically get access to all of the tasks that push the configuration file.
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
-
-    # Define this variable here; it is applied in the Jinja template below for each host
-    os9_bgp:
-      asn: 64801
-
-**Sample roles/os9_copy_config/templates/leaf1.j2**
-
- ! Leaf1 BGP profile on Dell OS9 switch
- snmp-server community public ro
- hash-algorithm ecmp crc
- !
- interface ethernet1/1/1:1
- no switchport
- ip address 100.1.1.2/24
- ipv6 address 2001:100:1:1::2/64
- mtu 9216
- no shutdown
- !
- interface ethernet1/1/9:1
- no switchport
- ip address 100.2.1.2/24
- ipv6 address 2001:100:2:1::2/64
- mtu 9216
- no shutdown
- !
- router bgp {{ os9_bgp.asn }}
- bestpath as-path multipath-relax
- bestpath med missing-as-worst
- router-id 100.0.2.1
- !
- address-family ipv4 unicast
- !
- address-family ipv6 unicast
- !
- neighbor 100.1.1.1
- remote-as 64901
- no shutdown
- !
- neighbor 100.2.1.1
- remote-as 64901
- no shutdown
- !
- neighbor 2001:100:1:1::1
- remote-as 64901
- no shutdown
- !
- address-family ipv4 unicast
- no activate
- exit
- !
- address-family ipv6 unicast
- activate
- exit
- !
- neighbor 2001:100:2:1::1
- remote-as 64901
- no shutdown
- !
- address-family ipv4 unicast
- no activate
- exit
- !
- address-family ipv6 unicast
- activate
- exit
- !
-
-**Simple playbook to push the configuration file to the device — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_copy_config
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
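-
-To validate the playbook before pushing configuration to the device, the same invocation can be exercised with standard `ansible-playbook` options; check mode applies only to the extent that the underlying module supports it:
-
-    ansible-playbook -i hosts leaf.yaml --syntax-check
-    ansible-playbook -i hosts leaf.yaml --check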
-
-(c) 2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml
deleted file mode 100644
index 7f5279464..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_copy_config \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml
deleted file mode 100644
index 69e9baf74..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_copy_config \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml
deleted file mode 100644
index d27c18368..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
-    This role is used to push the backup running configuration into the device and
-    to merge the configuration in the template file with the running configuration of the device.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml
deleted file mode 100644
index 682a6e9f0..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/tasks/main.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-# tasks file for dellemc.os9.os9_copy_config
- - name: "Merge the config file to running configuration for OS9"
- dellemc.os9.os9_config:
- src: "{{ hostname }}.j2"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
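-# The commented-out notify above refers to a "save config os9" handler that this role
-# does not define. A minimal sketch of such a handler (hypothetical, intended for
-# handlers/main.yml and assuming the os9_config module's save option) could be:
-#   # Save the running configuration to startup after the merge
-#   - name: save config os9
-#     dellemc.os9.os9_config:
-#       save: yes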
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2 b/ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2
deleted file mode 100644
index bb0e16e7f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/templates/os9_copy_config.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-! Version 10.3.0E
-! Last configuration change at May 09 21:47:35 2017
-! \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml
deleted file mode 100644
index 73b314ff7..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/main.os9.yaml
+++ /dev/null
@@ -1 +0,0 @@
---- \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml
deleted file mode 100644
index e2fb514ea..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os9.os9_copy_config
diff --git a/ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml
deleted file mode 100644
index 21269beb7..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_copy_config/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_copy_config
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE b/ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/README.md b/ansible_collections/dellemc/os9/roles/os9_dcb/README.md
deleted file mode 100644
index 20f1c0207..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/README.md
+++ /dev/null
@@ -1,133 +0,0 @@
-DCB role
-========
-
-This role facilitates the configuration of data center bridging (DCB). It supports the configuration of the DCB map and the DCB buffer, and assigns them to interfaces. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The DCB role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable, which can take `dellemc.os9.os9` as its value
-- If `os9_cfg_generate` is set to true, generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable (see the sketch after this list)
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
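-
-For instance, a minimal sketch of the negation behavior, using only the `dcb_map` keys documented in the table below: setting `state: absent` on a DCB map deletes that map from the configuration.
-
-    os9_dcb:
-      dcb_map:
-        - name: test
-          state: absent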
-
-**os9_dcb keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``dcb_enable`` | boolean: true,false | Enables/disables DCB | os9 |
-| ``dcb_map`` | list | Configures the DCB map (see ``dcb_map.*``) | os9 |
-| ``dcb_map.name`` | string (required) | Configures the DCB map name | os9 |
-| ``dcb_map.priority_group`` | list | Configures the priority-group for the DCB map (see ``priority_group.*``) | os9 |
-| ``priority_group.pgid`` | integer (required): 0-7 | Configures the priority-group ID | os9 |
-| ``priority_group.bandwidth`` | integer (required) | Configures the bandwidth percentage for the priority-group | os9 |
-| ``priority_group.pfc`` | boolean: true,false (required) | Configures PFC on/off for the priorities in the priority-group | os9 |
-| ``priority_group.state`` | string: absent,present\* | Deletes the priority-group of the DCB map if set to absent | os9 |
-| ``dcb_map.priority_pgid`` |string (required) | Configures priority to priority-group mapping; value is the PGID of priority groups separated by a space (1 1 2 2 3 3 3 4) | os9 |
-| ``dcb_map.intf`` | list | Configures the DCB map to the interface (see ``intf.*``) | os9 |
-| ``intf.name`` | string (required) | Configures the DCB map to the interface with this interface name | os9 |
-| ``intf.state`` | string: absent,present\* | Deletes the DCB map from the interface if set to absent | os9 |
-| ``dcb_map.state`` | string: absent,present\* | Deletes the DCB map if set to absent | os9 |
-| ``dcb_buffer`` | list | Configures the DCB buffer profile (see ``dcb_buffer.*``) | os9 |
-| ``dcb_buffer.name`` | string (required) | Configures the DCB buffer profile name | os9 |
-| ``dcb_buffer.description`` | string (required) | Configures a description about the DCB buffer profile | os9 |
-| ``dcb_buffer.priority_params`` | list | Configures priority flow-control buffer parameters (see ``priority_params.*``)| os9 |
-| ``priority_params.pgid`` | integer (required): 0-7 | Specifies the priority-group ID | os9 |
-| ``priority_params.buffer_size`` | integer (required) | Configures the ingress buffer size (in KB) of the DCB buffer profile | os9 |
-| ``priority_params.pause`` | integer | Configures the buffer limit (in KB) for pausing | os9 |
-| ``priority_params.resume`` | integer | Configures buffer offset limit (in KB) for resume | os9 |
-| ``priority_params.state`` | string: absent,present\* | Deletes the priority flow parameters of the DCB buffer if set to absent | os9 |
-| ``dcb_buffer.intf`` | list | Configures the DCB buffer to the interface (see ``intf.*``) | os9 |
-| ``intf.name`` | string (required) | Configures the DCB buffer to the interface with this interface name | os9 |
-| ``intf.state`` | string: absent,present\* | Deletes the DCB buffer from the interface if set to absent | os9 |
-| ``dcb_buffer.state`` | string: absent,present\* | Deletes the DCB buffer profile if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_dcb* role to completely configure DCB map and DCB buffer profiles and assign them to interfaces. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default this variable is set to false. This example writes a simple playbook that only references the *os9_dcb* role; a command-line override of `os9_cfg_generate` is sketched after the run command below.
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_dcb:
- dcb_map:
- - name: test
- priority_pgid: 0 0 0 3 3 3 3 0
- priority_group:
- - pgid: 0
- bandwidth: 20
- pfc: true
- state: present
- - pgid: 3
- bandwidth: 80
- pfc: true
- state: present
- intf:
- - name: fortyGigE 1/8
- state: present
- - name: fortyGigE 1/9
- state: present
- state: present
- dcb_buffer:
- - name: buffer
- description:
- priority_params:
- - pgid: 0
- buffer_size: 5550
- pause: 40
- resume: 40
- state: present
- intf:
- - name: fortyGigE 1/8
- state: present
- state: present
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
-     - dellemc.os9.os9_dcb
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
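-
-To also write the generated configuration commands to a *.part* file under the *build_dir* path, the `os9_cfg_generate` variable (false by default) could, as a sketch, be overridden on the command line:
-
-    ansible-playbook -i hosts leaf.yaml -e "os9_cfg_generate=true"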
-
-(c) 2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml
deleted file mode 100644
index 58a954632..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_dcb \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml
deleted file mode 100644
index 476aabf04..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_dcb \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml
deleted file mode 100644
index b76457b6f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os9_dcb role facilitates the configuration of Data Center Bridging (DCB) attributes in devices
- running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml
deleted file mode 100644
index cc1b44fea..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating DCB configuration for os9"
- template:
- src: os9_dcb.j2
- dest: "{{ build_dir }}/dcb9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning DCB configuration for os9"
- dellemc.os9.os9_config:
- src: os9_dcb.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2 b/ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2
deleted file mode 100644
index be654009c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/templates/os9_dcb.j2
+++ /dev/null
@@ -1,216 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure DCB commands for os9 Devices
-os9_dcb:
- dcb_enable: true
- dcb_map:
- - name: test
- priority_pgid: 0 0 0 3 3 3 0 3
- priority_group:
- - pgid: 0
- bandwidth: 20
- pfc: true
- state: present
- - pgid: 3
- bandwidth: 20
- pfc: true
- state: present
- intf:
- - name: fortyGigE 1/8
- state: present
- - name: fortyGigE 1/9
- state: present
- state: present
- dcb_buffer:
- - name: buffer
- description:
- priority_params:
- - pgid: 0
- buffer_size: 5550
- pause: 40
- resume: 40
- state: present
- intf:
- - name: fortyGigE 1/6
- state: present
- state: present
-################################}
-{% if os9_dcb is defined and os9_dcb %}
-{% set dcb_vars = os9_dcb %}
-{% if dcb_vars.dcb_enable is defined %}
- {% if dcb_vars.dcb_enable %}
-dcb enable
- {% else %}
-no dcb enable
- {% endif %}
-{% endif %}
-{% if dcb_vars.dcb_map is defined and dcb_vars.dcb_map %}
- {% for map in dcb_vars.dcb_map %}
- {% if map.name is defined and map.name %}
- {% if map.state is defined and map.state == "absent" %}
- {% if map.intf is defined and map.intf %}
- {% for intf in map.intf %}
- {% if intf.state is defined and intf.state == "absent" %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- no dcb-map {{ map.name }}
- exit
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-no dcb-map {{ map.name }}
- {% else %}
-dcb-map {{ map.name }}
- {% set pgid_set = {'value': False} %}
- {% if map.priority_group is defined and map.priority_group %}
- {% for group in map.priority_group %}
- {% if group.pgid is defined and group.pgid >= 0 %}
- {% if group.state is defined and group.state == "absent" %}
- {% if not pgid_set['value'] %}
- {% if map.priority_pgid is defined %}
- {% if pgid_set.update({'value': True}) %} {% endif %}
- {% if map.priority_pgid %}
- priority-pgid {{ map.priority_pgid }}
- {% else %}
- no priority-pgid
- {% endif %}
- {% endif %}
- {% endif %}
- no priority-group {{ group.pgid }}
- {% else %}
- {% if group.bandwidth is defined and group.bandwidth %}
- {% if group.pfc is defined %}
- {% if group.pfc %}
- priority-group {{ group.pgid }} bandwidth {{ group.bandwidth }} pfc on
- {% else %}
- priority-group {{ group.pgid }} bandwidth {{ group.bandwidth }} pfc off
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if not pgid_set['value'] %}
- {% if map.priority_pgid is defined %}
- {% if map.priority_pgid %}
- priority-pgid {{ map.priority_pgid }}
- {% else %}
- no priority-pgid
- {% endif %}
- {% endif %}
- {% endif %}
- {% if map.intf is defined and map.intf %}
- {% for intf in map.intf %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if intf.state is defined and intf.state == "absent" %}
- no dcb-map {{ map.name }}
- {% else %}
- dcb-map {{ map.name }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% if dcb_vars.dcb_buffer is defined and dcb_vars.dcb_buffer %}
- {% for buf in dcb_vars.dcb_buffer %}
- {% if buf.name is defined and buf.name %}
- {% if buf.state is defined and buf.state == "absent" %}
-no dcb-buffer-threshold {{ buf.name }}
- {% else %}
-dcb-buffer-threshold {{ buf.name }}
- {% if buf.description is defined and buf.description %}
- description {{ buf.description }}
- {% elif buf.description is defined and not buf.description %}
- no description
- {% endif %}
-
- {% if buf.priority_params is defined and buf.priority_params %}
- {% for params in buf.priority_params %}
- {% if params.pgid is defined and params.pgid >= 0 %}
- {% if params.state is defined and params.state == "absent" %}
- {% if params.buffer_size is defined and params.buffer_size %}
- {% if params.pause is defined and params.pause %}
- {% if params.resume is defined and params.resume %}
- no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }}
- {% else %}
- no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }}
- {% endif %}
- {% else %}
- {% if params.resume is defined and params.resume %}
- no priority {{ params.pgid }} buffer-size {{ params.buffer_size }} resume-offset {{ params.resume }}
- {% else %}
- no priority {{ params.pgid }} buffer-size {{ params.buffer_size }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if params.pause is defined and params.pause %}
- {% if params.resume is defined and params.resume %}
- no priority {{ params.pgid }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }}
- {% else %}
- no priority {{ params.pgid }} pause-threshold {{ params.pause }}
- {% endif %}
- {% else %}
- {% if params.resume is defined and params.resume %}
- no priority {{ params.pgid }} resume-offset {{ params.resume }}
- {% else %}
- no priority {{ params.pgid }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- {% if params.buffer_size is defined and params.buffer_size %}
- {% if params.pause is defined and params.pause %}
- {% if params.resume is defined and params.resume %}
- priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }}
- {% else %}
- priority {{ params.pgid }} buffer-size {{ params.buffer_size }} pause-threshold {{ params.pause }}
- {% endif %}
- {% else %}
- {% if params.resume is defined and params.resume %}
- priority {{ params.pgid }} buffer-size {{ params.buffer_size }} resume-offset {{ params.resume }}
- {% else %}
- priority {{ params.pgid }} buffer-size {{ params.buffer_size }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if params.pause is defined and params.pause %}
- {% if params.resume is defined and params.resume %}
- priority {{ params.pgid }} pause-threshold {{ params.pause }} resume-offset {{ params.resume }}
- {% else %}
- priority {{ params.pgid }} pause-threshold {{ params.pause }}
- {% endif %}
- {% else %}
- {% if params.resume is defined and params.resume %}
- priority {{ params.pgid }} resume-offset {{ params.resume }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if buf.intf is defined and buf.intf %}
- {% for intf in buf.intf %}
- {% if intf.name is defined and intf.name %}
-interface {{ intf.name }}
- {% if intf.state is defined and intf.state == "absent" %}
- no dcb-policy buffer-threshold {{ buf.name }}
- {% else %}
- dcb-policy buffer-threshold {{ buf.name }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml
deleted file mode 100644
index 4c19958ff..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/main.os9.yaml
+++ /dev/null
@@ -1,38 +0,0 @@
----
-# vars file for dellemc.os9.os9_dcb,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_dcb:
- dcb_enable: true
- dcb_map:
- - name: test
- priority_pgid: 0 0 0 3 0 0 0 0
- priority_group:
- - pgid: 0
- bandwidth: 50
- pfc: false
- state: present
- - pgid: 3
- bandwidth: 50
- pfc: true
- state: present
- intf:
- - name: fortyGigE 1/8
- state: absent
- - name: fortyGigE 1/9
- state: present
- dcb_buffer:
- - name: buffer
- description: testbuffer
- priority_params:
- - pgid: 0
- buffer_size: 70
- pause: 40
- resume: 40
- state: present
- intf:
- - name: fortyGigE 1/8
- state: present
- - name: fortyGigE 1/5
- state: present
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml
deleted file mode 100644
index ad59857af..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_dcb \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml
deleted file mode 100644
index dec87c76f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dcb/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_dcb \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/LICENSE b/ansible_collections/dellemc/os9/roles/os9_dns/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/README.md b/ansible_collections/dellemc/os9/roles/os9_dns/README.md
deleted file mode 100644
index ad97999f9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/README.md
+++ /dev/null
@@ -1,94 +0,0 @@
-DNS role
-========
-
-This role facilitates the configuration of the domain name service (DNS). This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The DNS role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as a value
-- If `os9_cfg_generate` is set to true, the role configuration commands are generated in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_dns keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``name_server`` | list | Configures the DNS name servers (see ``name_server.*``) | os9 |
-| ``name_server.domain_lookup`` | boolean | Enables or disables domain name lookup | os9 |
-| ``name_server.ip`` | list | Configures the name server IP | os9 |
-| ``name_server.vrf`` | list | Configures VRF for each IP | os9 |
-| ``name_server.state`` | string: absent,present\* | Deletes the name server IP if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
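-
-The following is a minimal sketch of the ``os9_dns`` variable built from the keys above (the IP addresses and VRF name are illustrative placeholders only); the first entry configures two name servers in a VRF, and the second removes a server by setting ``state: absent``:
-
-    os9_dns:
-      name_server:
-        - ip:
-          - 10.1.1.1
-          - 10.1.1.2
-          vrf:
-          - management
-        - ip:
-          - 10.2.2.2
-          state: absent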
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_dns* role to completely set up the DNS server configuration. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default it is set to false. The example writes a simple playbook that only references the *os9_dns* role; by including the role, you automatically get access to all of the tasks to configure DNS.
-
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_dns:
- domain_lookup: true
- name_server:
- - ip:
- - 1.1.1.1
- - 1.1.1.2
- vrf:
- - test
- - management
- state: absent
- - ip:
- - 2.2.2.2
- - ip:
- - 3.3.2.2
- state: absent
-
-**Simple playbook to setup DNS — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_dns
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
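-As a sketch (assuming the *build_dir* variable shown in the sample host_vars is set), the same playbook can also be run with `os9_cfg_generate` overridden on the command line so that the generated commands are additionally written to a .part file under *build_dir*:
-
-    ansible-playbook -i hosts leaf.yaml -e os9_cfg_generate=true
-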
-(c) 2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml
deleted file mode 100644
index a5b36f9eb..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_dns \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml
deleted file mode 100644
index 4e8ac24cb..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_dns \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml
deleted file mode 100644
index 75373c4d8..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_dns role facilitates the configuration of DNS attributes in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml
deleted file mode 100644
index f9a732c3e..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating DNS configuration for os9"
- template:
- src: os9_dns.j2
- dest: "{{ build_dir }}/dns9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning DNS configuration for os9"
- dellemc.os9.os9_config:
- src: os9_dns.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2 b/ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2
deleted file mode 100644
index 12f013af6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/templates/os9_dns.j2
+++ /dev/null
@@ -1,111 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-Purpose:
-Configure DNS commands for os9 devices
-os9_dns:
- domain_lookup: true
- domain_name: dns.search.name
- name_server:
- - ip:
- - 3.1.1.1
- - 3.1.1.2
- vrf:
- - test
- - test1
- - vrf:
- - test1
- state: absent
- - ip:
- - 2.2.2.2
- - ip:
- - 3.3.2.2
- state: absent
- domain_list:
- - vrf:
- - test
- - test1
- state: absent
- - name:
- - dname3
- - dname4
- - name:
- - dname5
- - dname6
- state: absent
- - name:
- - dname7
- - dname8
- vrf:
- - test
- - test1
-#####################################}
-{% if (os9_dns is defined and os9_dns) %}
- {% if os9_dns.domain_lookup is defined and os9_dns.domain_lookup == true %}
- ip domain-lookup
- {% elif os9_dns.domain_lookup is defined and os9_dns.domain_lookup == false %}
- no ip domain-lookup
- {% endif %}
- {% if os9_dns.domain_name is defined and os9_dns.domain_name %}
- ip domain-name {{ os9_dns.domain_name }}
- {% elif os9_dns.domain_name is defined and os9_dns.domain_name %}
- no ip domain-name {{ os9_dns.domain_name }}
- {% endif %}
- {% if (os9_dns.name_server is defined and os9_dns.name_server) %}
- {% for name_server in os9_dns.name_server %}
- {% set absent = "" %}
- {% if name_server.state is defined and name_server.state == "absent" %}
- {% set absent = "no " %}
- {% endif %}
-
- {% set vrf_name_list = name_server.vrf %}
- {% if (vrf_name_list is defined and vrf_name_list ) %}
- {% for vrf_name in vrf_name_list %}
- {% set ip_list = name_server.ip %}
- {% if (ip_list is defined and ip_list ) %}
- {% for ip_val in ip_list %}
- {{ absent }}ip name-server vrf {{ vrf_name }} {{ ip_val }}
- {% endfor %}
- {% elif name_server.state is defined and name_server.state == "absent"%}
- {{ absent }}ip name-server vrf {{ vrf_name }}
- {% endif %}
- {% endfor %}
- {% else %}
- {% set ip_list = name_server.ip %}
- {% if (ip_list is defined and ip_list ) %}
- {% for ip_val in ip_list %}
- {{ absent }}ip name-server {{ ip_val }}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if (os9_dns.domain_list is defined and os9_dns.domain_list) %}
- {% for domain in os9_dns.domain_list %}
- {% set absent = "" %}
- {% if domain.state is defined and domain.state == "absent" %}
- {% set absent = "no " %}
- {% endif %}
-
- {% set vrf_name_list = domain.vrf %}
- {% if (vrf_name_list is defined and vrf_name_list ) %}
- {% for vrf_name in vrf_name_list %}
- {% set name_list = domain.name %}
- {% if (name_list is defined and name_list ) %}
- {% for name_val in name_list %}
- {{ absent }}ip domain-list vrf {{ vrf_name }} {{ name_val }}
- {% endfor %}
- {% elif domain.state is defined and domain.state == "absent"%}
- {{ absent }}ip domain-list vrf {{ vrf_name }}
- {% endif %}
- {% endfor %}
- {% else %}
- {% set name_list = domain.name %}
- {% if (name_list is defined and name_list ) %}
- {% for name_val in name_list %}
- {{ absent }}ip domain-list {{ name_val }}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml
deleted file mode 100644
index 28efa0430..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/tests/main.os9.yaml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-# vars file for dellemc.os9.os9_dns,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_dns:
- domain_lookup: true
- name_server:
- - ip:
- - 3.1.1.1
- - 3.1.1.2
- vrf:
- - test
- - test1
- - vrf:
- - test1
- state: absent
- - ip:
- - 2.2.2.2
- state: absent
- - ip:
- - 3.3.2.2
- state: absent
- domain_list:
- - vrf:
- - test
- - test1
- state: absent
- - name:
- - dname3
- - dname4
- - name:
- - dname5
- - dname6
- state: absent
- - name:
- - dname7
- - dname8
- vrf:
- - test
- - test1 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml
deleted file mode 100644
index 87942483d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_dns \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml
deleted file mode 100644
index e5d083a00..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_dns/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_dns \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/README.md b/ansible_collections/dellemc/os9/roles/os9_ecmp/README.md
deleted file mode 100644
index 3c59d11d4..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-ECMP role
-=========
-
-This role facilitates the configuration of equal cost multi-path (ECMP), and it supports the configuration of ECMP for IPv4. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The ECMP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_ecmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``weighted_ecmp`` | boolean: true,false | Configures weighted ECMP | os9 |
-| ``ecmp_group_max_paths`` | integer | Configures the number of maximum-paths per ecmp-group | os9 |
-| ``ecmp_group_path_fallback`` | boolean: true,false | Configures ECMP group path management | os9 |
-| ``ecmp <group id>`` | dictionary | Configures ECMP group (see ``ecmp <group id>.*``) | os9 |
-| ``ecmp <group id>.interface`` | list | Configures interfaces in the ECMP group | os9 |
-| ``ecmp <group id>.link_bundle_monitor`` | boolean: true,false | Configures link-bundle monitoring | os9 |
-| ``ecmp <group id>.state`` | string: present\*,absent | Deletes the ECMP group if set to absent | os9 |
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_ecmp* role to configure ECMP for IPv4. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os9_ecmp* role. The sample *host_vars* is provided for OS9 only.
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_ecmp:
- ecmp 1:
- interface:
- - fortyGigE 1/49
- - fortyGigE 1/51
- link_bundle_monitor: true
- state: present
- weighted_ecmp: true
- ecmp_group_max_paths: 3
- ecmp_group_path_fallback: true
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_ecmp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml
deleted file mode 100644
index 8c84dde39..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_ecmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml
deleted file mode 100644
index 99b79b666..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_ecmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml
deleted file mode 100644
index 2f355abce..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_ecmp role facilitates the configuration of ECMP group attributes in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml
deleted file mode 100644
index 0ffec8d6e..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/tasks/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-# tasks file for os9
-
- - name: "Generating ECMP configuration for os9"
- template:
- src: os9_ecmp.j2
- dest: "{{ build_dir }}/ecmp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning ECMP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_ecmp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2 b/ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2
deleted file mode 100644
index 051764678..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/templates/os9_ecmp.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-Purpose:
-Configure ECMP commands for os9 devices
-os9_ecmp:
- weighted_ecmp: true
- ecmp_group_max_paths: 3
- ecmp_group_path_fallback: true
- ecmp 1:
- interface:
- - fortyGigE 1/49
- - fortyGigE 1/51
- link_bundle_monitor: true
- state: present
-#####################################}
-{% if os9_ecmp is defined and os9_ecmp %}
- {% if os9_ecmp.weighted_ecmp is defined %}
- {% if os9_ecmp.weighted_ecmp %}
-ip ecmp weighted
- {% else %}
-no ip ecmp weighted
- {% endif %}
- {% endif %}
- {% if os9_ecmp.ecmp_group_max_paths is defined %}
- {% if os9_ecmp.ecmp_group_max_paths %}
-ip ecmp-group maximum-paths {{ os9_ecmp.ecmp_group_max_paths }}
- {% else %}
-no ip ecmp-group maximum-paths 2
- {% endif %}
- {% endif %}
- {% if os9_ecmp.ecmp_group_path_fallback is defined %}
- {% if os9_ecmp.ecmp_group_path_fallback %}
-ip ecmp-group path-fallback
- {% else %}
-no ip ecmp-group path-fallback
- {% endif %}
- {% endif %}
-
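-  {# Keys containing a space (for example "ecmp 1") define ECMP groups; the second token of the key is used as the ecmp-group number #}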
- {% for key in os9_ecmp.keys() %}
- {% if " " in key %}
- {% set ecmp_vars = os9_ecmp[key] %}
- {% set group_num = key.split(" ") %}
- {% if ecmp_vars.state is defined and ecmp_vars.state == "absent" %}
-no ecmp-group {{ group_num[1] }}
- {% else %}
-ecmp-group {{ group_num[1] }}
- {% if ecmp_vars.interface is defined and ecmp_vars.interface %}
- {% for intf in ecmp_vars.interface %}
- interface {{ intf }}
- {% endfor %}
- {% endif %}
- {% if ecmp_vars.link_bundle_monitor is defined %}
- {% if ecmp_vars.link_bundle_monitor %}
- link-bundle-monitor enable
- {% else %}
- no link-bundle-monitor enable
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml
deleted file mode 100644
index 00bb8af60..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/main.os9.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-# vars file for dellemc.os9.os9_ecmp,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_ecmp:
- ecmp 1:
- interface:
- - fortyGigE 1/49
- - fortyGigE 1/51
- link_bundle_monitor: true
- state: present
- weighted_ecmp: true
- ecmp_group_max_paths: 3
- ecmp_group_path_fallback: true \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml
deleted file mode 100644
index 6c4fea5e6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_ecmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml
deleted file mode 100644
index 532506f05..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ecmp/vars/main.yml
+++ /dev/null
@@ -1,3 +0,0 @@
-
----
-# vars file for dellemc.os9.os9_ecmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/LICENSE b/ansible_collections/dellemc/os9/roles/os9_interface/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/README.md b/ansible_collections/dellemc/os9/roles/os9_interface/README.md
deleted file mode 100644
index 0597e069d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/README.md
+++ /dev/null
@@ -1,173 +0,0 @@
-Interface role
-==============
-
-This role facilitates the configuration of interface attributes. It supports the configuration of admin state, description, MTU, IP address, IP helper, suppress_ra and port mode. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The interface role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os9_interface` (dictionary) holds the interface names as dictionary keys; an interface name can correspond to any valid OS interface with its unique interface identifier (see the illustrative host_vars sketch after the keys table below)
-- For physical interfaces, the interface name must be in *<interfacename> <tuple>* format; for logical interfaces, the interface name must be in *<logical_interfacename> <id>* format; for example, a physical interface name can be *fortyGigE 1/1*
-- For interface ranges, the interface name must be in *range <interface_type> <node/slot/port[:subport]-node/slot/port[:subport]>* format
-- Logical interface names can be *vlan 1* or *port-channel 1*
-- Variables and values are case-sensitive
-
-> **NOTE**: Only define supported variables for the interface type. For example, do not define the *switchport* variable for a logical interface, and do not configure port mode when *switchport* is present in OS9 devices.
-
-**interface name keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``desc`` | string | Configures a single line interface description | os9 |
-| ``portmode`` | string | Configures the port mode (access, trunk, or hybrid) according to the device type | os9 |
-| ``switchport`` | boolean: true,false\* | Configures an interface in L2 mode | os9 |
-| ``admin`` | string: up,down\* | Configures the administrative state for the interface; configuring the value as administratively "up" enables the interface; configuring the value as administratively "down" disables the interface | os9 |
-| ``mtu`` | integer | Configures the MTU size for L2 and L3 interfaces (594 to 12000; 1280 to 65535 to set globally) | os9 |
-| ``fanout`` | string: dual, single, quad (os9); string: 10g-4x, 40g-1x, 25g-4x, 100g-1x, 50g-2x | Configures fanout to the appropriate value | os9 |
-| ``fanout_speed`` | string: 10G, 25G, 40G, 50G | Configures speed for the fanout port based on the fanout mode specified | os9 |
-| ``fanout_state`` | string: present, absent* | Configures the fanout mode to a port if state is set to present | os9 |
-| ``keepalive`` | boolean: true,false | Configures keepalive on the port if set to true | os9 |
-| ``speed`` | string:10,100,1000,auto | Configures interface speed parameters | os9 |
-| ``duplex`` | string: full,half | Configures interface duplex parameters | os9 |
-| ``auto_neg`` | boolean: true,false | Configures auto-negotiation mode if set to true | os9 |
-| ``cr4_auto_neg`` | boolean: true,false | Configures auto-negotiation mode on a CR4 interface type if set to true | os9 |
-| ``suppress_ra`` | string: present,absent | Configures IPv6 router advertisements if set to present | os9 |
-| ``ip_type_dynamic`` | boolean: true,false | Configures IP address DHCP if set to true (*ip_and_mask* is ignored if set to true) | os9 |
-| ``ipv6_type_dynamic`` | boolean: true,false | Configures an IPv6 address for DHCP if set to true (*ipv6_and_mask* is ignored if set to true) | os9 |
-| ``ipv6_autoconfig`` | boolean: true,false | Configures stateless configuration of IPv6 addresses if set to true (*ipv6_and_mask* is ignored if set to true) | os9 |
-| ``class_vendor_identifier`` | string: present,absent,string | Configures the vendor-class identifier without a user-defined string if set to present; configures a vendor-class identifier with a user-defined string when a string is specified; ignored when *ip_type_dynamic* is set to false | os9 |
-| ``option82`` | boolean: true,false\* | Configures option82 with the remote-id MAC address if *remote_id* is undefined; ignored when *ip_type_dynamic* is set to false | os9 |
-| ``remote_id`` |string: hostname,mac,string | Configures option82 with the specified *remote-id*; ignored when *option82* is set to false | os9 |
-| ``vrf`` | string | Configures the specified VRF to be associated to the interface | os9 |
-| ``min_ra`` | string | Configures RA minimum interval time period | os9 |
-| ``max_ra`` | string | Configures RA maximum interval time period | os9 |
-| ``ip_and_mask`` | string | Configures the specified IP address to the interface; configures the specified IP address to the interface VLAN on devices (192.168.11.1/24 format) | os9 |
-| ``ip_and_mask_secondary`` | string | Configures the specified IP address as a secondary address to the interface on os9 devices (192.168.11.2/24 format) | os9 |
-| ``ip_virtual_gateway_ip`` | string | Configures an anycast gateway IP address for a VxLAN virtual network | os9 |
-| ``secondary_ip_state`` | string: absent,present\* | Deletes the secondary IP address if set to absent | os9 |
-| ``ipv6_and_mask`` | string | Configures a specified IPv6 address to the interface; configures a specified IP address to the interface VLAN on devices (2001:4898:5808:ffa2::1/126 format) | os9 |
-| ``state_ipv6`` | string: absent,present\* | Deletes the IPv6 address if set to absent | os9 |
-| ``ipv6_reachabletime`` | integer | Configures the reachability time for IPv6 neighbor discovery (0 to 3600000) | os9 |
-| ``ip_helper`` | list | Configures DHCP server address objects (see ``ip_helper.*``) | os9 |
-| ``ip_helper.ip`` | string (required) | Configures the IPv4 address of the DHCP server (A.B.C.D format) | os9 |
-| ``ip_helper.state`` | string: absent,present\* | Deletes the IP helper address if set to absent | os9 |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | dellemc.os9.os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_interface* role to set up the description, MTU, admin status, portmode, and switchport details for an interface. The example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, this variable is set to false. The example writes a simple playbook that only references the *os9_interface* role.
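-
-One hedged way to enable this (illustrative only; it assumes the flag and path are used exactly as described above) is to define the flag alongside `build_dir` in *host_vars*, or to pass it at run time:
-
-    # host_vars/leaf3 (in addition to the connection variables shown below)
-    os9_cfg_generate: true
-    build_dir: ../temp/os9
-
-    # or on the command line
-    ansible-playbook -i hosts leaf.yaml -e "os9_cfg_generate=true"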
-
-**Sample hosts file**
-
- leaf3 ansible_host= <ip_address>
-
-**Sample host_vars/leaf3**
-
- hostname: "leaf3"
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_interface:
- TenGigabitEthernet 1/8:
- desc: "Connected to Spine1"
- portmode:
- switchport: False
- mtu: 2500
- admin: up
- auto_neg: true
- speed: auto
- duplex: full
- keepalive: true
- ipv6_and_mask: 2001:4898:5808:ffa2::5/126
- suppress_ra : present
- ip_type_dynamic: true
- ip_and_mask: 192.168.23.22/24
- class_vendor_identifier: present
- option82: true
- remote_id: hostname
- fortyGigE 1/9:
- desc: "Connected to Spine2"
- switchport: False
- mtu: 2500
- admin: up
- cr4_auto_neg: true
- ip_and_mask: 192.168.234.20/31
- ip_and_mask_secondary: "192.168.234.21/31"
- secondary_ip_state: present
- suppress_ra: absent
- ip_type_dynamic: false
- class_vendor_identifier: absent
- option82: true
- remote_id: hostname
- ipv6_and_mask: 2001:4898:5808:ffa2::9/126
- flowcontrol:
- mode: "receive"
- enable: "on"
- state: "present"
- vlan 100:
- mtu: 4096
- admin: down
- ip_and_mask:
- ipv6_and_mask: 2002:4898:5408:faaf::1/64
- suppress_ra: present
- state_ipv6: absent
- ip_helper:
- - ip: 10.0.0.36
- state: absent
- ipv6_reachabletime: 600000
- virtual-network 888:
- vrf: "green"
- desc: "virtual-network interface"
- ip_and_mask: "172.17.17.251/24"
- ip_virtual_gateway_ip: "172.17.17.1"
- admin: up
- vlan 20:
- suppress_ra: absent
- min_ra: 3
- max_ra: 4
- admin: up
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf3
- roles:
- - dellemc.os9.os9_interface
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml
deleted file mode 100644
index 7c8c24e04..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml
deleted file mode 100644
index 617eb3fcd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml
deleted file mode 100644
index ff7e1baaf..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_interface role facilitates the configuration of interface attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml
deleted file mode 100644
index 4301ea42d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating interface configuration for os9"
- template:
- src: os9_interface.j2
- dest: "{{ build_dir }}/intf9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os== "dellemc.os9.os9") and (os9_cfg_generate | default('False') | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning interface configuration for os9"
- dellemc.os9.os9_config:
- src: os9_interface.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2 b/ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2
deleted file mode 100644
index 2a98c850f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/templates/os9_interface.j2
+++ /dev/null
@@ -1,237 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{###################################################
-Purpose:
-Configure interface commands for os9 Devices.
-os9_interface:
- TenGigabitEthernet 1/36:
- desc: "OS9 intf"
- portmode: hybrid
- mtu: 2000
- switchport: False
- admin: up
- auto_neg: true
- keepalive: true
- ip_and_mask: "192.168.13.1/24"
- ip_and_mask_secondary: "192.168.14.1/24"
- secondary_ip_state: present
- suppress_ra: present
- ip_type_dynamic: true
- ipv6_and_mask: 2001:4898:5808:ffa2::9/126
- ipv6_reachabletime: 60000
- ip_helper:
- - ip: 10.0.0.33
- state: present
- class_vendor_identifier: present
- option82: true
- remote_id: hostname
- speed: auto
- duplex: half
- fortyGigE 1/1:
- fanout: single
- fanout_speed: 40G
- fanout_state: present
- fortyGigE 0/8:
- cr4_auto_neg: true
-####################################################}
-{% if os9_interface is defined and os9_interface %}
-{% for key in os9_interface.keys() %}
- {% set intf_vars = os9_interface[key] %}
- {% set intf = key.split(" ") %}
- {% set port = intf[1].split('/') %}
- {% if intf_vars.fanout is defined %}
- {% if intf_vars.fanout %}
- {% if intf_vars.fanout_state is defined and intf_vars.fanout_state == "present" %}
- {% if intf_vars.fanout_speed is defined and intf_vars.fanout_speed %}
-stack-unit {{ port[0] }} port {{ port[1] }} portmode {{ intf_vars.fanout}} speed {{ intf_vars.fanout_speed }} no-confirm
- {% else %}
-stack-unit {{ port[0] }} port {{ port[1] }} portmode {{ intf_vars.fanout }} no-confirm
- {% endif %}
- {% else %}
-no stack-unit {{ port[0] }} port {{ port[1] }} portmode {{ intf_vars.fanout }} no-confirm
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% for key in os9_interface.keys() %}
-{% set intf_vars = os9_interface[key] %}
-{% set intf = key.split(" ") %}
-{% set port = intf[1].split('/') %}
- {% if (intf_vars.fanout is defined and not intf_vars.fanout) or (intf_vars.fanout is not defined)%}
-interface {{ key }}
- {% if intf_vars.desc is defined %}
- {% if intf_vars.desc %}
- description {{ intf_vars.desc }}
- {% else %}
- no description
- {% endif %}
- {% endif %}
-
- {% if intf_vars.portmode is defined %}
- {% if intf_vars.switchport is defined and intf_vars.switchport == False %}
- no switchport
- {% endif %}
- {% if intf_vars.portmode %}
- portmode {{ intf_vars.portmode}}
- {% else %}
- no portmode hybrid
- {% endif %}
- {% endif %}
-
- {% if intf_vars.switchport is defined %}
- {% if intf_vars.switchport == True %}
- switchport
- {% endif %}
- {% if intf_vars.portmode is not defined %}
- {% if intf_vars.switchport is defined and intf_vars.switchport == False %}
- no switchport
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.mtu is defined %}
- {% if intf_vars.mtu %}
- mtu {{ intf_vars.mtu }}
- {% else %}
- no mtu
- {% endif %}
- {% endif %}
-
- {% if intf_vars.keepalive is defined %}
- {% if intf_vars.keepalive %}
- keepalive
- {% else %}
- no keepalive
- {% endif %}
- {% endif %}
-
- {% if intf_vars.speed is defined %}
- {% if intf_vars.speed %}
- speed {{ intf_vars.speed }}
- {% else %}
- no speed
- {% endif %}
- {% endif %}
-
- {% if intf_vars.duplex is defined %}
- {% if intf_vars.duplex %}
- duplex {{ intf_vars.duplex }}
- {% else %}
- no duplex
- {% endif %}
- {% endif %}
-
- {% if intf_vars.auto_neg is defined %}
- {% if intf_vars.auto_neg %}
- negotiation auto
- {% else %}
- no negotiation auto
- {% endif %}
- {% endif %}
-
- {% if intf_vars.cr4_auto_neg is defined %}
- {% if intf_vars.cr4_auto_neg %}
- intf-type cr4 autoneg
- {% else %}
- no intf-type cr4 autoneg
- {% endif %}
- {% endif %}
-
- {% if intf_vars.suppress_ra is defined %}
- {% if intf_vars.suppress_ra == "present" %}
- ipv6 nd suppress-ra
- {% else %}
- no ipv6 nd suppress-ra
- {% endif %}
- {% endif %}
-
- {% if intf_vars.ip_type_dynamic is defined and intf_vars.ip_type_dynamic %}
- {% if intf_vars.class_vendor_identifier is defined and intf_vars.class_vendor_identifier == "present" %}
- {% if intf_vars.option82 is defined and intf_vars.option82 %}
- {% if intf_vars.remote_id is defined and intf_vars.remote_id %}
- ip address dhcp vendor-class-identifier relay information-option remote-id {{ intf_vars.remote_id }}
- {% else %}
- ip address dhcp relay information-option vendor-class-identifier
- {% endif %}
- {% else %}
- ip address dhcp vendor-class-identifier
- {% endif %}
-
- {% elif intf_vars.class_vendor_identifier is defined and (intf_vars.class_vendor_identifier|length >1 and not intf_vars.class_vendor_identifier == "absent") %}
- {% if intf_vars.option82 is defined and intf_vars.option82 %}
- {% if intf_vars.remote_id is defined and intf_vars.remote_id %}
- ip address dhcp relay information-option remote-id {{ intf_vars.remote_id }} vendor-class-identifier {{ intf_vars.class_vendor_identifier }}
- {% else %}
- ip address dhcp relay information-option vendor-class-identifier {{ intf_vars.class_vendor_identifier }}
- {% endif %}
- {% else %}
- ip address dhcp vendor-class-identifier {{ intf_vars.class_vendor_identifier }}
- {% endif %}
-
- {% else %}
- {% if intf_vars.option82 is defined and intf_vars.option82 %}
- {% if intf_vars.remote_id is defined and intf_vars.remote_id %}
- ip address dhcp relay information-option remote-id {{ intf_vars.remote_id }}
- {% else %}
- ip address dhcp relay information-option
- {% endif %}
- {% else %}
- ip address dhcp
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf_vars.ip_and_mask is defined %}
- {% if intf_vars.ip_and_mask %}
- ip address {{ intf_vars.ip_and_mask }}
- {% else %}
- no ip address
- {% endif %}
- {% endif %}
- {% if intf_vars.ip_and_mask_secondary is defined and intf_vars.ip_and_mask_secondary %}
- {% if intf_vars.secondary_ip_state is defined and intf_vars.secondary_ip_state == "absent" %}
- no ip address {{ intf_vars.ip_and_mask_secondary }} secondary
- {% else %}
- ip address {{ intf_vars.ip_and_mask_secondary }} secondary
- {% endif %}
- {% endif %}
- {% endif %}
-
-
- {% if intf_vars.ipv6_and_mask is defined %}
- {% if intf_vars.ipv6_and_mask %}
- ipv6 address {{ intf_vars.ipv6_and_mask }}
- {% else %}
- no ipv6 address
- {% endif %}
- {% endif %}
-
- {% if intf_vars.ipv6_reachabletime is defined %}
- {% if intf_vars.ipv6_reachabletime %}
- ipv6 nd reachable-time {{ intf_vars.ipv6_reachabletime }}
- {% else %}
- no ipv6 nd reachable-time
- {% endif %}
- {% endif %}
-
- {% if intf_vars.ip_helper is defined and intf_vars.ip_helper %}
- {% for helper in intf_vars.ip_helper %}
- {% if helper.ip is defined and helper.ip %}
- {% if helper.state is defined and helper.state == "absent" %}
- no ip helper-address {{ helper.ip }}
- {% else %}
- ip helper-address {{ helper.ip }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if intf_vars.admin is defined %}
- {% if intf_vars.admin == "up" %}
- no shutdown
- {% elif intf_vars.admin == "down" %}
- shutdown
- {% endif %}
- {% endif %}
-
- {% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml
deleted file mode 100644
index 790233814..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/tests/main.os9.yaml
+++ /dev/null
@@ -1,50 +0,0 @@
----
-# vars file for dellemc.os9.os9_interface
-# Sample variables for OS9 device
-os9_interface:
- TenGigabitEthernet 1/3:
- desc: "Connected to Spine1"
- portmode:
- switchport: False
- suppress_ra: present
- mtu: 2500
- admin: up
- auto_neg: true
- keepalive: true
- speed: auto
- duplex: full
- ipv6_and_mask: 2001:4898:5808:ffa2::5/126
- ip_type_dynamic: true
- ip_and_mask: 192.168.23.22/24
- class_vendor_identifier: present
- option82: true
- remote_id: hostname
- fortyGigE 1/9:
- desc: "Connected to Spine2"
- switchport: False
- mtu: 2500
- admin: up
- ip_and_mask: 192.168.234.20/31
- ip_and_mask_secondary: "192.168.14.1/24"
- secondary_ip_state: present
- cr4_auto_neg: true
- keepalive: false
- ip_type_dynamic: false
- class_vendor_identifier: absent
- option82: true
- remote_id: hostname
- ipv6_and_mask: 2001:4898:5808:ffa2::9/126
- fortyGigE 1/12:
- fanout: single
- fanout_speed: 40G
- fanout_state: present
- Vlan 100:
- mtu: 4096
- admin: down
- ip_and_mask:
- ipv6_and_mask: 2002:4898:5408:faaf::1/64
- state_ipv6: absent
- ip_helper:
- - ip: 10.0.0.36
- state: absent
- ipv6_reachabletime: 600000 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml
deleted file mode 100644
index 7663d9365..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml
deleted file mode 100644
index 35ddaae4f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_interface/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_interface \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/LICENSE b/ansible_collections/dellemc/os9/roles/os9_lag/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/README.md b/ansible_collections/dellemc/os9/roles/os9_lag/README.md
deleted file mode 100644
index 19aa5f346..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/README.md
+++ /dev/null
@@ -1,110 +0,0 @@
-LAG role
-========
-
-This role facilitates the configuration of link aggregation group (LAG) attributes, and supports the creation and deletion of a LAG and its member ports. It also supports the configuration of an interface type as a static or dynamic LAG and the minimum number of required links. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The LAG role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- The *os9_lag* object drives the tasks in this role
-- `os9_lag` (dictionary) holds a dictionary keyed by the port-channel ID in `Po <ID>` format (1 to 4096)
-- The *hostname* variable corresponds to the name of the OS9 device and is used in the name of the generated configuration file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**port-channel ID keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string: static,dynamic | Configures the interface either as a static or dynamic LAG | os9 |
-| ``min_links`` | integer | Configures the minimum number of links in the LAG that must be in *operup* status (1 to 64 for os9) | os9 |
-| ``lacp`` | dictionary | Specifies LACP fast-switchover or long timeout options | os9 |
-| ``lacp.fast_switchover`` | boolean | Configures the fast-switchover option if set to true | os9 |
-| ``lacp.long_timeout`` | boolean | Configures the long-timeout option if set to true | os9 |
-| ``lacp_system_priority`` | integer | Configures the LACP system-priority value (1 to 65535) | os9 |
-| ``lacp_ungroup_vlt`` | boolean | Configures all VLT LACP members to be switchports if set to true | os9 |
-| ``lacp_ungroup`` | list | Specifies the list of port-channels to become switchports (see ``lacp_ungroup.*``) | os9 |
-| ``lacp_ungroup.port_channel`` | integer (required) | Specifies valid port-channel numbers | os9 |
-| ``lacp_ungroup.state`` | string: present,absent\* | Deletes the ungroup association if set to absent | os9 |
-| ``channel_members`` | list | Specifies the list of port members to be associated to the port-channel (see ``channel_members.*``) | os9 |
-| ``channel_members.port`` | string | Specifies valid os9 interface names to be configured as port-channel members | os9 |
-| ``channel_members.state`` | string: absent,present | Deletes the port member association if set to absent | os9 |
-| ``state`` | string: absent,present\* | Deletes the LAG corresponding to the port-channel ID if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
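-
-As a minimal sketch (with hypothetical port-channel IDs and interface names), the keys above could be combined like this to remove a member from one dynamic LAG and delete another LAG entirely:
-
-    os9_lag:
-      Po 10:
-        type: dynamic
-        channel_members:
-          - port: fortyGigE 1/6
-            state: absent
-        state: present
-      Po 20:
-        state: absent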
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_lag* role to set up a port-channel and configure its type, minimum links, LACP options, and channel members. Channel members can be configured for the port-channel in either static or dynamic mode. You can also delete the LAG using the port-channel ID, or delete the members associated with it. This example creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os9_lag* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_lag:
- Po 127:
- type: static
- min_links: 3
- lacp:
- long_timeout: true
- fast_switchover: true
- lacp_system_priority: 1
- lacp_ungroup_vlt: true
- lacp_ungroup:
- - port_channel: 1
- state: present
- channel_members:
- - port: fortyGigE 1/4
- state: present
- - port: fortyGigE 1/5
- state: present
- state: present
-
-**Simple playbook to set up system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_lag
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
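-
-Based on the *os9_lag.j2* template in this role, the sample variables above would be expected to render roughly the following commands into the generated .part file (a sketch of the expected output, not captured device output):
-
-    lacp system-priority 1
-    lacp ungroup member-independent vlt
-    lacp ungroup member-independent port-channel 1
-    interface Port-channel 127
-     minimum-links 3
-     lacp fast-switchover
-     lacp long-timeout
-     channel-member fortyGigE 1/4
-     channel-member fortyGigE 1/5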
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml
deleted file mode 100644
index bcfbb8974..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_lag
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml
deleted file mode 100644
index cddda15cf..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_lag
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml
deleted file mode 100644
index 2463cb893..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_lag role facilitates the configuration of LAG attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml
deleted file mode 100644
index b581b870c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating LAG configuration for os9"
- template:
- src: os9_lag.j2
- dest: "{{ build_dir }}/lag9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning LAG configuration for os9"
- dellemc.os9.os9_config:
- src: os9_lag.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2 b/ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2
deleted file mode 100644
index f708efc11..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/templates/os9_lag.j2
+++ /dev/null
@@ -1,114 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure LAG commands for os9 Devices.
-os9_lag:
- Po 1:
- type: static
- min_links: 3
- lacp:
- long_timeout: true
- fast_switchover: true
- lacp_system_priority: 2
- lacp_ungroup:
- - port_channel: 1
- state: present
- lacp_ungroup_vlt: true
- channel_members:
- - port: fortyGigE 0/4
- state: present
- state: present
- ###############################}
-{% if os9_lag is defined and os9_lag %}
-{% for key in os9_lag.keys() %}
-{% set channel_id = key.split(" ") %}
-{% set lag_vars = os9_lag[key] %}
-
- {% if lag_vars.lacp_system_priority is defined %}
- {% if lag_vars.lacp_system_priority %}
-lacp system-priority {{ lag_vars.lacp_system_priority }}
- {% else %}
-no lacp system-priority
- {% endif %}
- {% endif %}
-
- {% if lag_vars.lacp_ungroup_vlt is defined %}
- {% if lag_vars.lacp_ungroup_vlt %}
-lacp ungroup member-independent vlt
- {% else %}
-no lacp ungroup member-independent vlt
- {% endif %}
- {% endif %}
-
- {% if lag_vars.lacp_ungroup is defined %}
- {% if lag_vars.lacp_ungroup %}
- {% for port in lag_vars.lacp_ungroup %}
- {% if port.port_channel is defined and port.port_channel %}
- {% if port.state is defined and port.state == "absent" %}
-no lacp ungroup member-independent port-channel {{ port.port_channel }}
- {% else %}
-lacp ungroup member-independent port-channel {{ port.port_channel }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% if lag_vars.state is defined and lag_vars.state == "absent" %}
-no interface Port-channel {{ channel_id[1] }}
- {% else %}
-interface Port-channel {{ channel_id[1] }}
-
- {% if lag_vars.min_links is defined %}
- {% if lag_vars.min_links %}
- minimum-links {{ lag_vars.min_links }}
- {% else %}
- no minimum-links
- {% endif %}
- {% endif %}
-
- {% if lag_vars.lacp is defined and lag_vars.lacp %}
- {% if lag_vars.lacp.fast_switchover is defined %}
- {% if lag_vars.lacp.fast_switchover %}
- lacp fast-switchover
- {% else %}
- no lacp fast-switchover
- {% endif %}
- {% endif %}
- {% if lag_vars.lacp.long_timeout is defined %}
- {% if lag_vars.lacp.long_timeout %}
- lacp long-timeout
- {% else %}
- no lacp long-timeout
- {% endif %}
- {% endif %}
- {% endif %}
-
- {% if lag_vars.channel_members is defined %}
- {% for ports in lag_vars.channel_members %}
- {% if lag_vars.type is defined and lag_vars.type == "static" %}
- {% if ports.port is defined and ports.port %}
- {% if ports.state is defined and ports.state == "absent" %}
- no channel-member {{ ports.port }}
- {% else %}
- channel-member {{ ports.port }}
- {% endif %}
- {% endif %}
- {% elif lag_vars.type is defined and lag_vars.type == "dynamic" %}
- {% if ports.port is defined and ports.port %}
- {% if ports.state is defined and ports.state == "absent" %}
-interface {{ ports.port }}
- no port-channel-protocol LACP
- {% else %}
-interface {{ ports.port }}
- port-channel-protocol LACP
- port-channel {{ channel_id[1] }} mode active
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml
deleted file mode 100644
index cbb19bdda..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/tests/main.os9.yaml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-# vars file for dellemc.os9.os9_lag
-# Sample variables for os9 device
-os9_lag:
- Po 127:
- type: dynamic
- lacp:
- long_timeout: true
- fast_switchover: true
- lacp_ungroup_vlt: true
- lacp_system_priority: 1
- lacp_ungroup:
- - port_channel: 1
- state: present
- min_links: 3
- channel_members:
- - port: fortyGigE 1/4
- state: present
- - port: fortyGigE 1/8
- state: present
- state: present
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml
deleted file mode 100644
index 0f6729370..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_lag
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml
deleted file mode 100644
index cada8d7e3..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lag/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_lag
\ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/README.md b/ansible_collections/dellemc/os9/roles/os9_lldp/README.md
deleted file mode 100644
index 802adc68a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/README.md
+++ /dev/null
@@ -1,245 +0,0 @@
-LLDP role
-=========
-
-This role facilitates the configuration of link layer discovery protocol (LLDP) attributes at a global and interface level. It supports the configuration of the hello timer, mode, multiplier, TLV advertisement, management interface, FCoE, and iSCSI settings at both the global and interface levels. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The LLDP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_lldp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``global_lldp_state`` | string: absent,present | Deletes LLDP at a global level if set to absent | os9 |
-| ``enable`` | boolean | Enables or disables LLDP at a global level | os9 |
-| ``hello`` | integer | Configures the global LLDP hello interval (5 to 180) | os9 |
-| ``mode`` | string: rx,tx | Configures the global LLDP mode | os9 |
-| ``multiplier`` | integer | Configures the global LLDP multiplier (2 to 10) | os9 |
-| ``fcoe_priority_bits`` | integer | Configures priority bits for FCoE traffic (1 to FF) | os9 |
-| ``iscsi_priority_bits`` | integer | Configures priority bits for iSCSI traffic (1 to FF) | os9 |
-| ``dcbx`` | dictionary | Configures DCBx parameters at the global level (see ``dcbx.*``) | os9 |
-| ``dcbx.version`` | string | Configures the DCBx version | os9 |
-| ``advertise`` | dictionary | Configures LLDP-MED and TLV advertisement at the global level (see ``advertise.*``) | os9 |
-| ``advertise.dcbx_tlv`` | string | Configures DCBx TLVs advertisements | os9 |
-| ``advertise.dcbx_tlv_state`` | string: present,absent | Deletes DCBx TLVs advertisement if set to absent | os9 |
-| ``advertise.dcbx_appln_tlv`` | string | Configures DCBx application priority TLVs advertisement | os9 |
-| ``advertise.dcbx_appln_tlv_state`` | string: present,absent | Deletes DCBx application priority TLVs advertisement if set to absent | os9 |
-| ``advertise.dot1_tlv`` | dictionary | Configures 802.1 TLVs advertisement (see ``dot1_tlv.*``) | os9 |
-| ``dot1_tlv.port_tlv`` | dictionary | Configures 802.1 TLVs advertisement (see ``port_tlv.*``) | os9 |
-| ``port_tlv.protocol_vlan_id`` | boolean | Configures 802.1 port and protocol VLAN ID TLVs advertisement | os9 |
-| ``port_tlv.port_vlan_id`` | boolean | Configures 802.1 port VLAN ID TLVs advertisement | os9 |
-| ``dot1_tlv.vlan_tlv`` | dictionary | Configures 802.1 VLAN TLVs advertisement (see ``vlan_tlv.*``) | os9 |
-| ``vlan_tlv.vlan_range`` | string | Configures 802.1 VLAN name TLVs advertisement | os9 |
-| ``advertise.dot3_tlv`` | dictionary | Configures 802.3 TLVs advertisement (see ``dot3_tlv.*``) | os9 |
-| ``dot3_tlv.max_frame_size`` | boolean | Configures 802.3 maximum frame size TLVs advertisement | os9 |
-| ``advertise.port_descriptor`` | boolean | Configures global port descriptor advertisement | os9 |
-| ``advertise.management_tlv`` | string | Configures global management TLVs advertisement | os9 |
-| ``advertise.management_tlv_state`` | string: absent,present | Deletes global TLVs advertisement if set to absent | os9 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement (see ``med_tlv.*``) | os9 |
-| ``med.global_med`` | boolean | Configures global MED TLVs advertisement | os9 |
-| ``med.application`` | list | Configures global MED TLVs advertisement for an application (see ``application.*``) | os9 |
-| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os9 |
-| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement (1 to 4094) | os9 |
-| ``application.priority_tagged`` | boolean | Configures priority tagged for the application MED TLVs advertisement; mutually exclusive with *application.vlan_id* | os9 |
-| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement (0 to 7) | os9 |
-| ``application.code_point_value`` | integer | Configures differentiated services code point values for MED TLVs advertisement (0 to 63) | os9 |
-| ``med.location_identification`` | list | Configures MED location identification TLVs advertisement (see ``location_identification.*``) | os9 |
-| ``location_identification.loc_info`` | string | Configures location information for MED TLVs advertisement | os9 |
-| ``location_identification.value`` | string | Configures location information values | os9 |
-| ``location_identification.state`` | string: absent,present | Deletes the location information if set to absent | os9 |
-| ``management_interface`` | dictionary | Configures LLDP on the management interface (see ``management_interface.*``) | os9 |
-| ``management_interface.enable`` | boolean | Enables/disables LLDP on the management interface | os9 |
-| ``management_interface.hello`` | integer | Configures LLDP hello interval on the management interface (5 to 180) | os9 |
-| ``management_interface.mode`` | string: rx,tx | Configures LLDP mode on the management interface | os9 |
-| ``management_interface.multiplier`` | integer | Configures LLDP multiplier on the management interface (2 to 10) | os9 |
-| ``management_interface.advertise`` | dictionary | Configures TLV advertisement on the management interface (see ``advertise.*``) | os9 |
-| ``advertise.port_descriptor`` | boolean | Configures port descriptor advertisement on the management interface | os9 |
-| ``advertise.management_tlv`` | string | Configures management TLVs advertisement | os9 |
-| ``advertise.management_tlv_state`` | string: absent,present | Deletes management TLVs advertisement if set to absent | os9 |
-| ``local_interface`` | dictionary | Configures LLDP at the interface level (see ``local_interface.*``) | os9 |
-| ``local_interface.<interface name>`` | dictionary | Configures LLDP at the interface level (see ``interface name.*``) | os9 |
-| ``<interface name>.state`` | string: absent,present | Deletes LLDP at the interface level if set to absent | os9 |
-| ``<interface name>.enable`` | boolean | Enables or disables LLDP at the interface level | os9 |
-| ``<interface name>.hello`` | integer | Configures LLDP hello interval at the interface level (5 to 180) | os9 |
-| ``<interface name>.mode`` | string: rx,tx | Configures the LLDP mode at the interface level | os9 |
-| ``<interface name>.multiplier`` | integer | Configures LLDP multiplier at the interface level (2 to 10) | os9 |
-| ``<interface name>.dcbx`` | dictionary | Configures DCBx parameters at the interface level (see ``dcbx.*``) | os9 |
-| ``dcbx.version`` | string | Configures DCBx version at the interface level | os9 |
-| ``<interface name>.advertise`` | dictionary | Configures LLDP-MED TLV advertisement at the interface level (see ``advertise.*``) | os9 |
-| ``advertise.dcbx_tlv`` | string | Configures DCBx TLVs advertisement at the interface level | os9 |
-| ``advertise.dcbx_tlv_state`` | string: present,absent | Deletes interface level DCBx TLVs advertisement if set to absent | os9 |
-| ``advertise.dcbx_appln_tlv`` | string | Configures DCBx application priority TLVs advertisement at the interface level | os9 |
-| ``advertise.dcbx_appln_tlv_state`` | string: present,absent | Deletes interface level DCBx application priority TLVs advertisement if set to absent | os9 |
-| ``advertise.dot1_tlv`` | dictionary | Configures 802.1 TLVs advertisement at the interface level (see ``dot1_tlv.*``) | os9 |
-| ``dot1_tlv.port_tlv`` | dictionary | Configures 802.1 TLVs advertisement at the interface level (see ``port_tlv.*``) | os9 |
-| ``port_tlv.protocol_vlan_id`` | boolean | Configures 802.1 VLAN ID TLVs advertisement at the interface level | os9 |
-| ``port_tlv.port_vlan_id`` | boolean | Configures 802.1 VLAN ID TLVs advertisement at the interface level | os9 |
-| ``dot1_tlv.vlan_tlv`` | dictionary | Configures 802.1 VLAN TLVs advertisement at the interface level (see ``vlan_tlv.*``) | os9 |
-| ``vlan_tlv.vlan_range`` | string | Configures 802.1 VLAN name TLVs advertisement at the interface level | os9 |
-| ``advertise.dot3_tlv`` | dictionary | Configures 802.3 TLVs advertisement at the interface level (see ``dot3_tlv.*``) | os9 |
-| ``dot3_tlv.max_frame_size`` | boolean | Configures 802.3 maximum frame size TLVs advertisement at the interface level | os9 |
-| ``advertise.port_descriptor`` | boolean | Configures port descriptor advertisement at the interface level | os9 |
-| ``advertise.management_tlv`` | string | Configures TLVs advertisement at the interface level | os9 |
-| ``advertise.management_tlv_state`` | string: absent,present | Deletes TLVs advertisement at the interface level if set to absent | os9 |
-| ``advertise.med`` | dictionary | Configures MED TLVs advertisement at the interface level (see ``med.*``) | os9 |
-| ``med.global_med`` | boolean | Configures MED TLVs advertisement at the interface level | os9 |
-| ``med.application`` | list | Configures MED TLVs advertisement for the application at the interface level (see ``application.*``) | os9 |
-| ``application.name`` | string | Configures the application name for MED TLVs advertisement | os9 |
-| ``application.vlan_id`` | integer | Configures the VLAN ID for the application MED TLVs advertisement at the interface level (1 to 4094) | os9 |
-| ``application.priority_tagged`` | boolean | Configures priority tagged for the application MED TLVs advertisement at the interface level; mutually exclusive with *application.vlan_id* | os9 |
-| ``application.l2_priority`` | integer | Configures the L2 priority for the application MED TLVs advertisement at the interface level (0 to 7) | os9 |
-| ``application.code_point_value`` | integer | Configures differentiated services code point value for MED TLVs advertisement at the interface level (0 to 63) | os9 |
-| ``med.location_identification`` | list | Configures MED location identification TLVs advertisement at the interface level (see ``location_identification.*``) | os9 |
-| ``location_identification.loc_info`` | string | Configures location information for MED TLVs advertisement at the interface level | os9 |
-| ``location_identification.value`` | string | Configures the location information value for MED TLVs advertisement at the interface level | os9 |
-| ``location_identification.state`` | string: absent,present | Deletes the interface level MED location information if set to absent | os9 |
-
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device. |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
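-
-As an illustration only, these connection variables can also be grouped in an Ansible *group_vars* file rather than per-host *host_vars*; the group name and all values below are placeholders, not part of this role.
-
-    # group_vars/datacenter (hypothetical group name; replace placeholder values)
-    ansible_ssh_user: xxxxx
-    ansible_ssh_pass: xxxxx
-    ansible_become: yes
-    ansible_become_method: enable
-    ansible_network_os: dellemc.os9.os9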
-
-Example playbook
-----------------
-
-This example uses the *os9_lldp* role to configure the LLDP protocol. It creates a *hosts* file with the switch details and corresponding variables. The *hosts* file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a *.part* file in the *build_dir* path; by default, this variable is set to false. The example also includes a simple playbook that only references the *os9_lldp* role.
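-
-For instance, a minimal *host_vars* fragment that turns on configuration generation might look as follows; the *build_dir* value is only an example path.
-
-    os9_cfg_generate: true
-    build_dir: ../temp/os9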
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_lldp:
- global_lldp_state: present
- enable: false
- mode: rx
- multiplier: 3
- fcoe_priority_bits: 3
- iscsi_priority_bits: 3
- hello: 6
- dcbx:
- version: auto
- management_interface:
- hello: 7
- multiplier: 3
- mode: tx
- enable: true
- advertise:
- port_descriptor: false
- management_tlv: management-address system-capabilities
- management_tlv_state: absent
- advertise:
- dcbx_tlv: pfc
- dcbx_tlv_state: absent
- dcbx_appln_tlv: fcoe
- dcbx_appln_tlv_state:
- dot1_tlv:
- port_tlv:
- protocol_vlan_id: true
- port_vlan_id: true
- vlan_tlv:
- vlan_range: 2-4
- dot3_tlv:
- max_frame_size: false
- port_descriptor: false
- management_tlv: management-address system-capabilities
- management_tlv_state: absent
- med:
- global_med: true
- application:
- - name: "guest-voice"
- vlan_id: 2
- l2_priority: 3
- code_point_value: 4
- - name: voice
- priority_tagged: true
- l2_priority: 3
- code_point_value: 4
- location_identification:
- - loc_info: ecs-elin
- value: 12345678911
- state: present
- local_interface:
- fortyGigE 1/3:
- lldp_state: present
- enable: false
- mode: rx
- multiplier: 3
- hello: 8
- dcbx:
- version: auto
- advertise:
- dcbx_tlv: pfc
- dcbx_tlv_state: present
- dcbx_appln_tlv: fcoe
- dcbx_appln_tlv_state: absent
- dot1_tlv:
- port_tlv:
- protocol_vlan_id: true
- port_vlan_id: true
- vlan_tlv:
- vlan_range: 2-4
- state: present
- dot3_tlv:
- max_frame_size: true
- port_descriptor: true
- management_tlv: management-address system-capabilities
- management_tlv_state: absent
- med:
- application:
- - name: guest-voice
- vlan_id: 2
- l2_priority: 3
- code_point_value: 4
- - name: voice
- priority_tagged: true
- l2_priority: 3
- code_point_value: 4
- location_identification:
- - loc_info: ecs-elin
- value: 12345678911
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_lldp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
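-Ansible's check mode can also be used to preview the changes before applying them, provided the modules invoked by this role support it:
-
-    ansible-playbook -i hosts leaf.yaml --check
-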
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml
deleted file mode 100644
index 11d293a1d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_lldp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml
deleted file mode 100644
index 38e013e19..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_lldp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml
deleted file mode 100644
index 4b76193d9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os9_lldp role facilitates the configuration of Link Layer Discovery Protocol(LLDP) attributes in devices
- running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml
deleted file mode 100644
index 97e349146..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating LLDP configuration for os9"
- template:
- src: os9_lldp.j2
- dest: "{{ build_dir }}/lldp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning LLDP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_lldp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2 b/ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2
deleted file mode 100644
index 375fba74f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/templates/os9_lldp.j2
+++ /dev/null
@@ -1,514 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{###################################################
-Purpose:
-Configure LLDP commands for os9 Devices.
-
-os9_lldp:
- global_lldp_state: present
- enable: false
- mode: rx
- multiplier: 3
- iscsi_priority_bits: 3
- fcoe_priority_bits: 3
- hello: 8
- dcbx:
- version: auto
- management_interface:
- hello: 6
- multiplier: 3
- mode: tx
- enable: true
- advertise:
- port_descriptor: false
- management_tlv: management-address system-capabilities
- management_tlv_state: present
- advertise:
- dcbx_tlv: pfc
- dcbx_tlv_state: present
- dcbx_appln_tlv: fcoe
- dcbx_appln_tlv_state:
- dot1_tlv:
- port_tlv:
- protocol_vlan_id: true
- port_vlan_id: true
- vlan_tlv:
- vlan_range: 2-4
- state: present
- dot3_tlv:
- max_frame_size: true
- port_descriptor: true
- management_tlv: management-address system-capabilities
- management_tlv_state: present
- med:
- application:
- - name: guest-voice
- vlan_id: 2
- l2_priority: 3
- code_point_value: 4
- - name: voice
- priority_tagged: true
- l2_priority: 3
- code_point_value: 4
- location_identification:
- - loc_info: ecs-elin
- value: 12345678911
- local_interface:
- fortyGigE 1/3:
- lldp_state: present
- enable: false
- mode: rx
- multiplier: 3
- hello: 8
- dcbx:
- version: auto
- advertise:
- dcbx_tlv: pfc
- dcbx_tlv_state: present
- dcbx_appln_tlv: fcoe
- dcbx_appln_tlv_state:
- dot1_tlv:
- port_tlv:
- protocol_vlan_id: true
- port_vlan_id: true
- vlan_tlv:
- vlan_range: 2-4
- state: present
- dot3_tlv:
- max_frame_size: true
- port_descriptor: true
- management_tlv: management-address system-capabilities
- management_tlv_state: present
- med:
- application:
- - name: guest-voice
- vlan_id: 2
- l2_priority: 3
- code_point_value: 4
- - name: voice
- priority_tagged: true
- l2_priority: 3
- code_point_value: 4
- location_identification:
- - loc_info: ecs-elin
- value: 12345678911
-
-
-####################################################}
-{% if os9_lldp is defined and os9_lldp %}
- {% set global_state = [] %}
- {% if global_state.append(True) %}{% endif %}
-
- {% for key in os9_lldp.keys() %}
- {% set lldp_vars = os9_lldp[key] %}
- {% if key == "global_lldp_state" and lldp_vars == "absent" %}
-no protocol lldp
- {% if global_state.insert(False,0) %}{% endif %}
- {% endif %}
- {% endfor %}
-
-{% if global_state[0] %}
-protocol lldp
-{% endif %}
-
-{% for key in os9_lldp.keys() %}
-{% set lldp_vars = os9_lldp[key] %}
-{% if global_state[0] %}
-{% if key == "management_interface" %}
- management-interface
- {% if lldp_vars.hello is defined and lldp_vars.hello %}
- hello {{ lldp_vars.hello }}
- {% else %}
- no hello
- {% endif %}
- {% if lldp_vars.enable is defined and lldp_vars.enable %}
- no disable
- {% else %}
- disable
- {% endif %}
- {% if lldp_vars.mode is defined and lldp_vars.mode %}
- mode {{ lldp_vars.mode }}
- {% else %}
- no mode
- {% endif %}
- {% if lldp_vars.multiplier is defined and lldp_vars.multiplier %}
- multiplier {{ lldp_vars.multiplier }}
- {% else %}
- no multiplier
- {% endif %}
- {% if lldp_vars.advertise is defined and lldp_vars.advertise %}
- {% if lldp_vars.advertise.port_descriptor is defined %}
- {% if lldp_vars.advertise.port_descriptor %}
- advertise interface-port-desc
- {% else %}
- no advertise interface-port-desc
- {% endif %}
- {% endif %}
- {% if lldp_vars.advertise.management_tlv is defined and lldp_vars.advertise.management_tlv %}
- {% if lldp_vars.advertise.management_tlv_state is defined and lldp_vars.advertise.management_tlv_state == "absent" %}
- no advertise management-tlv {{ lldp_vars.advertise.management_tlv }}
- {% else %}
- advertise management-tlv {{ lldp_vars.advertise.management_tlv }}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endif %}
-{% if key == "enable" %}
- {% if lldp_vars %}
- no disable
- {% else %}
- disable
- {% endif %}
-{% endif %}
-{% if key == "fcoe_priority_bits" %}
- {% if lldp_vars %}
- fcoe priority-bits {{ lldp_vars }}
- {% else %}
- no fcoe priority-bits
- {% endif %}
-{% endif %}
-{% if key == "hello" %}
- {% if lldp_vars %}
- hello {{ lldp_vars }}
- {% else %}
- no hello
- {% endif %}
-{% endif %}
-{% if key == "mode" %}
- {% if lldp_vars %}
- mode {{ lldp_vars }}
- {% else %}
- no mode
- {% endif %}
-{% endif %}
-{% if key == "multiplier" %}
- {% if lldp_vars %}
- multiplier {{ lldp_vars }}
- {% else %}
- no multiplier
- {% endif %}
-{% endif %}
-{% if key == "iscsi_priority_bits" %}
- {% if lldp_vars %}
- iscsi priority-bits {{ lldp_vars }}
- {% else %}
- no iscsi priority-bits
- {% endif %}
-{% endif %}
-{% if key == "dcbx" %}
- {% if lldp_vars.version is defined and lldp_vars.version %}
- dcbx version {{ lldp_vars.version }}
- {% else %}
- no dcbx version
- {% endif %}
-{% endif %}
-{% if key == "advertise" %}
-{% if lldp_vars.management_tlv is defined and lldp_vars.management_tlv %}
- {% if lldp_vars.management_tlv_state is defined and lldp_vars.management_tlv_state == "absent" %}
- no advertise management-tlv {{ lldp_vars.management_tlv }}
- {% else %}
- advertise management-tlv {{ lldp_vars.management_tlv }}
- {% endif %}
-{% endif %}
-
-{% if lldp_vars.port_descriptor is defined %}
- {% if lldp_vars.port_descriptor %}
- advertise interface-port-desc
- {% else %}
- no advertise interface-port-desc
- {% endif %}
-{% endif %}
-
-{% if lldp_vars.med is defined and lldp_vars.med %}
- {% for med in lldp_vars.med.keys() %}
- {% set med_vars = lldp_vars.med[med] %}
- {% if med == "global_med" %}
- {% if med_vars %}
- advertise med
- {% else %}
- no advertise med
- {% endif %}
- {% endif %}
- {% if med == "location_identification" %}
- {% for loc in med_vars %}
- {% if loc.loc_info is defined and loc.loc_info %}
- {% if loc.value is defined and loc.value %}
- {% if loc.state is defined and loc.state == "absent" %}
- no advertise med location-identification {{ loc.loc_info }} {{ loc.value }}
- {% else %}
- advertise med location-identification {{ loc.loc_info }} {{ loc.value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if med == "application" %}
- {% for app in med_vars %}
- {% if app.name is defined and app.name %}
- {% if app.vlan_id is defined and app.vlan_id %}
- {% set vlan_or_tag = app.vlan_id %}
- {% elif app.priority_tagged is defined and app.priority_tagged %}
- {% set vlan_or_tag = "priority-tagged" %}
- {% endif %}
- {% if vlan_or_tag is defined and vlan_or_tag %}
- {% if app.l2_priority is defined and app.l2_priority %}
- {% if app.code_point_value is defined and app.code_point_value %}
- {% if app.state is defined and app.state == "absent" %}
- no advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }}
- {% else %}
- advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endfor %}
-{% endif %}
-
-{% if lldp_vars.dcbx_tlv is defined and lldp_vars.dcbx_tlv %}
- {% if lldp_vars.dcbx_tlv_state is defined and lldp_vars.dcbx_tlv_state == "absent" %}
- no advertise dcbx-tlv {{ lldp_vars.dcbx_tlv }}
- {% else %}
- advertise dcbx-tlv {{ lldp_vars.dcbx_tlv }}
- {% endif %}
-{% endif %}
-
-{% if lldp_vars.dcbx_appln_tlv is defined and lldp_vars.dcbx_appln_tlv %}
- {% if lldp_vars.dcbx_appln_tlv_state is defined and lldp_vars.dcbx_appln_tlv_state == "absent" %}
- no advertise dcbx-appln-tlv {{ lldp_vars.dcbx_appln_tlv }}
- {% else %}
- advertise dcbx-appln-tlv {{ lldp_vars.dcbx_appln_tlv }}
- {% endif %}
-{% endif %}
-
-{% if lldp_vars.dot3_tlv is defined and lldp_vars.dot3_tlv %}
- {% for dot3 in lldp_vars.dot3_tlv.keys() %}
- {% set dot3_vars = lldp_vars.dot3_tlv[dot3] %}
- {% if dot3 == "max_frame_size" %}
- {% if dot3_vars %}
- advertise dot3-tlv max-frame-size
- {% else %}
- no advertise dot3-tlv max-frame-size
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-
-{% if lldp_vars.dot1_tlv is defined and lldp_vars.dot1_tlv %}
- {% for dot1 in lldp_vars.dot1_tlv.keys() %}
- {% set dot1_vars = lldp_vars.dot1_tlv[dot1] %}
- {% if dot1 == "port_tlv" %}
- {% if dot1_vars.protocol_vlan_id is defined and dot1_vars.protocol_vlan_id %}
- {% if dot1_vars.port_vlan_id is defined %}
- {% if dot1_vars.port_vlan_id %}
- advertise dot1-tlv port-protocol-vlan-id port-vlan-id
- {% else %}
- advertise dot1-tlv port-protocol-vlan-id
- no advertise dot1-tlv port-vlan-id
- {% endif %}
- {% else %}
- advertise dot1-tlv port-protocol-vlan-id
- {% endif %}
- {% else %}
- {% if not dot1_vars.protocol_vlan_id %}
- no advertise dot1-tlv port-protocol-vlan-id
- {% endif %}
- {% if dot1_vars.port_vlan_id is defined %}
- {% if dot1_vars.port_vlan_id %}
- advertise dot1-tlv port-vlan-id
- {% else %}
- no advertise dot1-tlv port-vlan-id
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if dot1 == "vlan_tlv" %}
- {% if dot1_vars.vlan_range is defined and dot1_vars.vlan_range %}
- {% if dot1_vars.state is defined and dot1_vars.state == "absent" %}
- no advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }}
- {% else %}
- advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endif %}
-{% endif %}
-{% endfor %}
-{% endif %}
-{% if os9_lldp is defined and os9_lldp %}
-{% for key in os9_lldp.keys() %}
-{% set lldp_vars = os9_lldp[key] %}
-{% if key == "local_interface" %}
- {% for intf in lldp_vars.keys() %}
- {% set intf_vars = lldp_vars[intf] %}
-interface {{ intf }}
- {% if intf_vars.lldp_state is defined and intf_vars.lldp_state == "absent" %}
- no protocol lldp
- {% else %}
- protocol lldp
- {% if intf_vars.hello is defined and intf_vars.hello %}
- hello {{ intf_vars.hello }}
- {% else %}
- no hello
- {% endif %}
- {% if intf_vars.enable is defined and intf_vars.enable %}
- no disable
- {% else %}
- disable
- {% endif %}
- {% if intf_vars.mode is defined and intf_vars.mode %}
- mode {{ intf_vars.mode }}
- {% else %}
- no mode
- {% endif %}
- {% if intf_vars.multiplier is defined and intf_vars.multiplier %}
- multiplier {{ intf_vars.multiplier }}
- {% else %}
- no multiplier
- {% endif %}
- {% if intf_vars.dcbx is defined and intf_vars.dcbx %}
- {% if intf_vars.dcbx.version is defined and intf_vars.dcbx.version %}
- dcbx version {{ intf_vars.dcbx.version }}
- {% else %}
- no dcbx version
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise is defined and intf_vars.advertise %}
- {% if intf_vars.advertise.port_descriptor is defined %}
- {% if intf_vars.advertise.port_descriptor %}
- advertise interface-port-desc
- {% else %}
- no advertise interface-port-desc
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise.management_tlv is defined and intf_vars.advertise.management_tlv %}
- {% if intf_vars.advertise.management_tlv_state is defined and intf_vars.advertise.management_tlv_state == "absent" %}
- no advertise management-tlv {{ intf_vars.advertise.management_tlv }}
- {% else %}
- advertise management-tlv {{ intf_vars.advertise.management_tlv }}
- {% endif %}
- {% endif %}
- {% if intf_vars.advertise.med is defined and intf_vars.advertise.med %}
- {% for med in intf_vars.advertise.med.keys() %}
- {% set med_vars = intf_vars.advertise.med[med] %}
- {% if med == "global_med" %}
- {% if med_vars %}
- advertise med
- {% else %}
- no advertise med
- {% endif %}
- {% endif %}
- {% if med == "location_identification" %}
- {% for loc in med_vars %}
- {% if loc.loc_info is defined and loc.loc_info %}
- {% if loc.value is defined and loc.value %}
- {% if loc.state is defined and loc.state == "absent" %}
- no advertise med location-identification {{ loc.loc_info }} {{ loc.value }}
- {% else %}
- advertise med location-identification {{ loc.loc_info }} {{ loc.value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if med == "application" %}
- {% for app in med_vars %}
- {% if app.name is defined and app.name %}
- {% if app.vlan_id is defined and app.vlan_id %}
- {% set vlan_or_tag = app.vlan_id %}
- {% elif app.priority_tagged is defined and app.priority_tagged %}
- {% set vlan_or_tag = "priority-tagged" %}
- {% endif %}
- {% if vlan_or_tag is defined and vlan_or_tag %}
- {% if app.l2_priority is defined and app.l2_priority %}
- {% if app.code_point_value is defined and app.code_point_value %}
- {% if app.state is defined and app.state == "absent" %}
- no advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }}
- {% else %}
- advertise med {{ app.name }} {{ vlan_or_tag }} {{ app.l2_priority }} {{ app.code_point_value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if intf_vars.advertise.dcbx_tlv is defined and intf_vars.advertise.dcbx_tlv %}
- {% if intf_vars.advertise.dcbx_tlv_state is defined and intf_vars.advertise.dcbx_tlv_state == "absent" %}
- no advertise dcbx-tlv {{ intf_vars.advertise.dcbx_tlv }}
- {% else %}
- advertise dcbx-tlv {{ intf_vars.advertise.dcbx_tlv }}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.advertise.dcbx_appln_tlv is defined and intf_vars.advertise.dcbx_appln_tlv %}
- {% if intf_vars.advertise.dcbx_appln_tlv_state is defined and intf_vars.advertise.dcbx_appln_tlv_state == "absent" %}
- no advertise dcbx-appln-tlv {{ intf_vars.advertise.dcbx_appln_tlv }}
- {% else %}
- advertise dcbx-appln-tlv {{ intf_vars.advertise.dcbx_appln_tlv }}
- {% endif %}
- {% endif %}
-
- {% if intf_vars.advertise.dot3_tlv is defined and intf_vars.advertise.dot3_tlv %}
- {% for dot3 in intf_vars.advertise.dot3_tlv.keys() %}
- {% set dot3_vars = intf_vars.advertise.dot3_tlv[dot3] %}
- {% if dot3 == "max_frame_size" %}
- {% if dot3_vars %}
- advertise dot3-tlv max-frame-size
- {% else %}
- no advertise dot3-tlv max-frame-size
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if intf_vars.advertise.dot1_tlv is defined and intf_vars.advertise.dot1_tlv %}
- {% for dot1 in intf_vars.advertise.dot1_tlv.keys() %}
- {% set dot1_vars = intf_vars.advertise.dot1_tlv[dot1] %}
- {% if dot1 == "port_tlv" %}
- {% if dot1_vars.protocol_vlan_id is defined and dot1_vars.protocol_vlan_id %}
- {% if dot1_vars.port_vlan_id is defined %}
- {% if dot1_vars.port_vlan_id %}
- advertise dot1-tlv port-protocol-vlan-id port-vlan-id
- {% else %}
- advertise dot1-tlv port-protocol-vlan-id
- no advertise dot1-tlv port-vlan-id
- {% endif %}
- {% else %}
- advertise dot1-tlv port-protocol-vlan-id
- {% endif %}
- {% else %}
- {% if not dot1_vars.protocol_vlan_id %}
- no advertise dot1-tlv port-protocol-vlan-id
- {% endif %}
- {% if dot1_vars.port_vlan_id is defined %}
- {% if dot1_vars.port_vlan_id %}
- advertise dot1-tlv port-vlan-id
- {% else %}
- no advertise dot1-tlv port-vlan-id
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if dot1 == "vlan_tlv" %}
- {% if dot1_vars.vlan_range is defined and dot1_vars.vlan_range %}
- {% if dot1_vars.state is defined and dot1_vars.state == "absent" %}
- no advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }}
- {% else %}
- advertise dot1-tlv vlan-name vlan-id {{ dot1_vars.vlan_range }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml
deleted file mode 100644
index ab40de8de..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/main.os9.yaml
+++ /dev/null
@@ -1,94 +0,0 @@
----
-# vars file for dellemc.os9.os9_lldp,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_lldp:
- global_lldp_state: present
- enable: false
- mode: rx
- multiplier: 3
- fcoe_priority_bits: 3
- iscsi_priority_bits: 3
- hello: 6
- dcbx:
- version: auto
- management_interface:
- hello: 7
- multiplier: 3
- mode: tx
- enable: true
- advertise:
- port_descriptor: false
- management_tlv: management-address system-capabilities
- management_tlv_state: absent
- advertise:
- dcbx_tlv: pfc
- dcbx_tlv_state: absent
- dcbx_appln_tlv: fcoe
- dcbx_appln_tlv_state:
- dot1_tlv:
- port_tlv:
- protocol_vlan_id: true
- port_vlan_id: true
- vlan_tlv:
- vlan_range: 2-4
- dot3_tlv:
- max_frame_size: false
- port_descriptor: false
- management_tlv: management-address system-capabilities system-name
- management_tlv_state: present
- med:
- global_med: true
- application:
- - name: "guest-voice"
- vlan_id: 2
- l2_priority: 3
- code_point_value: 4
- - name: voice
- priority_tagged: true
- l2_priority: 3
- code_point_value: 4
- location_identification:
- - loc_info: ecs-elin
- value: 12345678911
- state: present
- local_interface:
- fortyGigE 1/3:
- lldp_state: present
- enable: false
- mode: rx
- multiplier: 3
- hello: 8
- dcbx:
- version: auto
- port_role: auto-upstream
- advertise:
- dcbx_tlv: pfc
- dcbx_tlv_state: present
- dcbx_appln_tlv: fcoe
- dcbx_appln_tlv_state:
- dot1_tlv:
- port_tlv:
- protocol_vlan_id: true
- port_vlan_id: true
- vlan_tlv:
- vlan_range: 2-4
- state: present
- dot3_tlv:
- max_frame_size: true
- port_descriptor: true
- management_tlv: management-address system-capabilities
- management_tlv_state: present
- med:
- application:
- - name: guest-voice
- vlan_id: 2
- l2_priority: 3
- code_point_value: 4
- - name: voice
- priority_tagged: true
- l2_priority: 3
- code_point_value: 4
- location_identification:
- - loc_info: ecs-elin
- value: 12345678911 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml
deleted file mode 100644
index 49901101d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/tests/test.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_lldp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml
deleted file mode 100644
index b10424eab..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_lldp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_lldp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/LICENSE b/ansible_collections/dellemc/os9/roles/os9_logging/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/README.md b/ansible_collections/dellemc/os9/roles/os9_logging/README.md
deleted file mode 100644
index ee10bbc99..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/README.md
+++ /dev/null
@@ -1,148 +0,0 @@
-Logging role
-============
-
-This role facilitates the configuration of global logging attributes, and it supports the configuration of logging servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The Logging role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If the `os9_cfg_generate` variable is set to true, it generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (a short sketch of both negation behaviors follows this list)
-- Variables and values are case-sensitive
-
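-The following is a minimal, hypothetical sketch of both negation behaviors (the server address and variable choice are illustrative only): setting `state: absent` removes a previously configured logging server, and leaving a value empty (for example, `monitor:`) generates the corresponding `no` form of the command.
-
-    os9_logging:
-      logging:
-        - ip: 4.4.4.4        # hypothetical server; removed because state is absent
-          state: absent
-      monitor:               # empty value generates "no logging monitor"
-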
-**os9_logging keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``logging`` | list | Configures the logging server (see ``logging.*``) | os9 |
-| ``logging.ip`` | string (required) | Configures the IPv4 address for the logging server (A.B.C.D format) | os9 |
-| ``logging.secure_port`` | integer | Sends log messages to the logging server over the specified TLS port; CA certificates must be installed before a TLS port can be configured | os9 |
-| ``logging.tcp_port`` | integer | Sends log messages to the logging server over the specified TCP port; applies only if *secure_port* is not defined | os9 |
-| ``logging.udp_port`` | integer | Sends log messages to the logging server over the specified UDP port; applies only if neither *tcp_port* nor *secure_port* is defined | os9 |
-| ``logging.vrf`` | dict | Specifies a VRF instance to be used to reach the host | os9 |
-| ``logging.vrf.name`` | string | Specifies the VRF name | os9 |
-| ``logging.vrf.secure_port`` | integer | Sends log messages over the specified TLS port within the VRF; CA certificates must be installed before a TLS port can be configured | os9 |
-| ``logging.vrf.tcp_port`` | integer | Sends log messages over the specified TCP port within the VRF; applies only if *secure_port* is not defined | os9 |
-| ``logging.vrf.udp_port`` | integer | Sends log messages over the specified UDP port within the VRF; applies only if neither *tcp_port* nor *secure_port* is defined | os9 |
-| ``logging.vrf.state`` | string: absent,present\* | Deletes VRF instance of the logging server if set to absent | os9 |
-| ``logging.state`` | string: absent,present\* | Deletes the logging server if set to absent | os9 |
-| ``buffer`` | integer | Specifies the buffered logging severity level (0 to 7) | os9 |
-| ``console`` | integer | Configures the console logging level (0 to 7) | os9 |
-| ``trap`` | integer | Configures the syslog server severity level (0 to 7) | os9 |
-| ``version`` | integer | Configures the syslog version (0/1) | os9 |
-| ``monitor`` | integer | Configures the terminal line logging level (0 to 7) | os9 |
-| ``history`` | integer | Configures the syslog history table (0 to 7) | os9 |
-| ``history_size`` | integer | Specifies the history table size | os9 |
-| ``on`` | boolean | Enables logging to all supported destinations if set to true | os9 |
-| ``extended`` | boolean | Enables extended logging if set to true | os9 |
-| ``coredump`` | dict | Configures coredump logging | os9 |
-| ``coredump.server`` | dict | Specifies all server details | os9 |
-| ``coredump.server.server_ip`` | string (required) | Specifies the IPv4/IPv6 address of the logging server | os9 |
-| ``coredump.server.username`` | string | Specifies the username to be configured | os9 |
-| ``coredump.server.password`` | string | Specifies the password to be configured | os9 |
-| ``coredump.server.state`` | string: present,absent\* | Deletes the coredump server if set to absent | os9 |
-| ``coredump.stackunit`` |dict | Specifies details for enabling a coredump on the stack-unit | os9 |
-| ``coredump.stackunit.all`` | boolean | Enables a coredump on all stack-units | os9 |
-| ``coredump.stackunit.unit_num`` | integer | Specifies the stack-unit number (0 to 5) | os9 |
-| ``coredump.stackunit.state`` | string: present,absent\*| Deletes the stack-unit coredump if set to absent | os9 |
-| ``source_interface`` | string | Configures the source interface for logging | os9 |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_logging* role to completely set up logging servers. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false.
-
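-For example, with `build_dir: ../temp/os9` and `hostname: leaf1` (the values used in the samples below), the generated commands would be written to a file such as the following, per the destination pattern in the role's tasks file:
-
-    ../temp/os9/logging9_leaf1.conf.part
-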
-**Sample hosts file**
-
- leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_logging:
- logging:
- - ip : 1.1.1.1
- state: present
- - ip: 2.2.2.2
- secure_port: 1025
- tcp_port: 1024
- udp_port: 2000
- state: present
- - ip: 3.3.3.3
- vrf:
- name: test
- secure_port: 1024
- tcp_port: 1025
- udp_port: 2000
- state: present
- secure_port: 1025
- tcp_port: 2000
- udp_port: 1025
- state: present
- buffer: 5
- console: 7
- trap: 5
- version: 5
- history: 4
- history_size: 3
- monitor: 5
- on: true
- extended: true
- coredump:
- server:
- server_ip: 2.2.2.2
- username: u1
- password: pwd
- state: present
- stackunit:
- all: true
- unit_num: 5
- state: present
- source_interface: "fortyGigE 1/9"
-
-**Simple playbook to set up logging (leaf.yaml)**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_logging
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml
deleted file mode 100644
index ef0a1c97f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_logging \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml
deleted file mode 100644
index 36b3d65aa..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_logging \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml
deleted file mode 100644
index e3895760a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_logging role facilitates the configuration of logging attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml
deleted file mode 100644
index 6e6497251..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating logging configuration for os9"
- template:
- src: os9_logging.j2
- dest: "{{ build_dir }}/logging9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning logging configuration for os9"
- dellemc.os9.os9_config:
- src: os9_logging.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2 b/ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2
deleted file mode 100644
index 024375651..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/templates/os9_logging.j2
+++ /dev/null
@@ -1,198 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure logging commands for os9 Devices
-os9_logging:
- logging:
- - ip : 1.1.1.1
- state: present
- - ip: 2.2.2.2
- secure_port: 1025
- tcp_port: 1024
- udp_port: 2000
- state: present
- - ip: 3.3.3.3
- vrf:
- name: test
- secure_port: 1024
- tcp_port: 1025
- udp_port: 2000
- state: present
- secure_port: 1025
- tcp_port: 2000
- udp_port: 1025
- state: present
- buffer: 6
- console: 7
- trap: 5
- version: 5
- history: 4
- history_size: 3
- monitor: 5
- on: true
- extended: true
- coredump:
- server:
- server_ip: 2.2.2.2
- username: u1
- password: pwd
- state: present
- stackunit:
- all: true
- unit_num: 5
- state: present
- source_interface: "fortyGigE 1/3"
-###################################################}
-{% if os9_logging is defined and os9_logging %}
-{% for key,value in os9_logging.items() %}
- {% if key == "buffer" %}
- {% if value %}
-logging buffered {{ value }}
- {% else %}
-no logging buffered
- {% endif %}
-
- {% elif key == "console" %}
- {% if value %}
-logging console {{ value }}
- {% else %}
-no logging console
- {% endif %}
-
- {% elif key == "monitor" %}
- {% if value %}
-logging monitor {{ value }}
- {% else %}
-no logging monitor
- {% endif %}
-
- {% elif key == "source_interface" %}
- {% if value %}
-logging source-interface {{ value }}
- {% else %}
-no logging source-interface
- {% endif %}
-
- {% elif key == "version" %}
- {% if value %}
-logging version {{ value }}
- {% else %}
-no logging version
- {% endif %}
-
- {% elif key == "history" %}
- {% if value %}
-logging history {{ value }}
- {% else %}
-no logging history
- {% endif %}
-
- {% elif key == "history_size" %}
- {% if value %}
-logging history size {{ value }}
- {% else %}
-no logging history size
- {% endif %}
-
- {% elif key == "trap" %}
- {% if value %}
-logging trap {{ value }}
- {% else %}
-no logging trap
- {% endif %}
-
- {% elif key == "extended" %}
- {% if value %}
-logging extended
- {% else %}
-no logging extended
- {% endif %}
-
- {% elif key == "on" %}
- {% if value %}
-logging on
- {% else %}
-no logging on
- {% endif %}
-
- {% elif key == "logging" %}
- {% if value %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.vrf is defined and item.vrf %}
- {% if item.vrf.name is defined and item.vrf.name %}
- {% if item.vrf.state is defined and item.vrf.state == "absent" %}
- {% if item.vrf.secure_port is defined and item.vrf.secure_port %}
-no logging {{ item.ip }} vrf {{ item.vrf.name }} secure {{ item.vrf.secure_port }}
- {% elif item.vrf.tcp_port is defined and item.vrf.tcp_port %}
-no logging {{ item.ip }} vrf {{ item.vrf.name }} tcp {{ item.vrf.tcp_port }}
- {% elif item.vrf.udp_port is defined and item.vrf.udp_port %}
-no logging {{ item.ip }} vrf {{ item.vrf.name }} udp {{ item.vrf.udp_port }}
- {% endif %}
- {% else %}
- {% if item.vrf.secure_port is defined and item.vrf.secure_port %}
-logging {{ item.ip }} vrf {{ item.vrf.name }} secure {{ item.vrf.secure_port }}
- {% elif item.vrf.tcp_port is defined and item.vrf.tcp_port %}
-logging {{ item.ip }} vrf {{ item.vrf.name }} tcp {{ item.vrf.tcp_port }}
- {% elif item.vrf.udp_port is defined and item.vrf.udp_port %}
-logging {{ item.ip }} vrf {{ item.vrf.name }} udp {{ item.vrf.udp_port }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.secure_port is defined and item.secure_port %}
-no logging {{ item.ip }} secure {{ item.secure_port }}
- {% elif item.tcp_port is defined and item.tcp_port %}
-no logging {{ item.ip }} tcp {{ item.tcp_port }}
- {% elif item.udp_port is defined and item.udp_port %}
-no logging {{ item.ip }} udp {{ item.udp_port }}
- {% else %}
-no logging {{ item.ip }}
- {% endif %}
- {% else %}
- {% if item.secure_port is defined and item.secure_port %}
-logging {{ item.ip }} secure {{ item.secure_port }}
- {% elif item.tcp_port is defined and item.tcp_port %}
-logging {{ item.ip }} tcp {{ item.tcp_port }}
- {% elif item.udp_port is defined and item.udp_port %}
-logging {{ item.ip }} udp {{ item.udp_port }}
- {% else %}
-logging {{ item.ip }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "coredump" %}
- {% if value %}
- {% if value.server is defined and value.server %}
- {% if value.server.server_ip is defined and value.server.server_ip %}
- {% if value.server.state is defined and value.server.state == "absent" %}
-no logging coredump server {{ value.server.server_ip }}
- {% else %}
- {% if value.server.username is defined and value.server.username and value.server.password is defined and value.server.password %}
-logging coredump server {{ value.server.server_ip }} username {{ value.server.username }} password {{ value.server.password }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if value.stackunit is defined and value.stackunit %}
- {% if value.stackunit.all is defined and value.stackunit.all %}
- {% set my_str = "all " %}
- {% else %}
- {% if value.stackunit.unit_num is defined and value.stackunit.unit_num %}
- {% set my_str = value.stackunit.unit_num|string %}
- {% endif %}
- {% endif %}
- {% if value.stackunit.state is defined and value.stackunit.state == "absent" %}
-no logging coredump stack-unit {{ my_str }}
- {% else %}
-logging coredump stack-unit {{ my_str }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml
deleted file mode 100644
index 0ff9482c8..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/tests/main.os9.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# vars file for dellemc.os9.os9_logging,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_logging:
- logging:
- - ip: 1.1.1.1
- state: present
- - ip: 2.2.2.2
- secure_port: 1025
- tcp_port: 1024
- udp_port: 2000
- state: present
- - ip: 3.3.3.3
- vrf:
- name: test
- secure_port: 1024
- tcp_port: 1025
- udp_port: 2000
- state: present
- secure_port: 1025
- tcp_port: 2000
- udp_port: 1025
- state: present
- buffer: 6
- console: 7
- trap: 5
- version: 5
- history: 4
- history_size: 3
- monitor: 5
- on: true
- extended: true
- coredump:
- server:
- server_ip: 2.2.2.2
- username: u1
- password: pwd
- state: present
- stackunit:
- all: true
- unit_num: 5
- state: present
- source_interface: "fortyGigE 1/9" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml
deleted file mode 100644
index 3f87d4c6c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_logging \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml
deleted file mode 100644
index bc9f7c335..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_logging/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_logging \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/README.md b/ansible_collections/dellemc/os9/roles/os9_ntp/README.md
deleted file mode 100644
index 81f5f39f4..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/README.md
+++ /dev/null
@@ -1,98 +0,0 @@
-NTP role
-========
-
-This role facilitates the configuration of Network Time Protocol (NTP) attributes, and it specifically enables configuration of NTP servers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The NTP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value.
-- If `os9_cfg_generate` is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_ntp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``server`` | list | Configures the NTP server (see ``server.*``) | os9 |
-| ``server.ip`` | string (required) | Configures an IPv4 address for the NTP server (A.B.C.D format) | os9 |
-| ``server.vrf`` | list | Configures the NTP server for a VRF instance; each list item is the name of a VRF instance | os9 |
-| ``server.state`` | string: absent,present\* | Deletes the NTP server if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
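-
-For illustration, a minimal sketch of the `server` list described above; the addresses and the VRF name are placeholders rather than values shipped with the role. Setting `state: absent` on an entry removes only that server.
-
-    os9_ntp:
-      server:
-        - ip: 10.0.0.5
-          vrf:
-            - management
-          state: present
-        - ip: 10.0.0.6
-          state: absent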
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_ntp* role to set the NTP server, source IP, authentication, and broadcast service. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When the `os9_cfg_generate` variable is set to true, it generates the configuration commands as a .part file in the *build_dir* path. By default, it is set to false. The example writes a simple playbook that only references the *os9_ntp* role. By including the role, you automatically get access to all of the tasks to configure NTP attributes.
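-
-For reference, generating the commands offline only requires adding the flag alongside *build_dir* in the host variables; the path below is the same placeholder used in the sample that follows:
-
-    os9_cfg_generate: true
-    build_dir: ../temp/os9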
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- host: leaf1
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_ntp:
- source: ethernet 1/1/2
- master: 5
- authenticate: true
- authentication_key:
- - key_num: 123
- key_string_type: 7
- key_string: test
- state: present
- trusted_key:
- - key_num: 1323
- state: present
- server:
- - ip: 2.2.2.2
- key: 345
- prefer: true
- state: present
- intf:
- ethernet 1/1/2:
- disable: true
- broadcast: true
-
-**Simple playbook to set up NTP — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_ntp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml
deleted file mode 100644
index 835ccd0a0..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_ntp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml
deleted file mode 100644
index f8519dd1b..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_ntp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml
deleted file mode 100644
index 1def65b10..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_ntp role facilitates the configuration of NTP attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml
deleted file mode 100644
index 9ca82a360..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating NTP configuration for os9"
- template:
- src: os9_ntp.j2
- dest: "{{ build_dir }}/ntp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning NTP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_ntp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2 b/ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2
deleted file mode 100644
index be4536c37..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/templates/os9_ntp.j2
+++ /dev/null
@@ -1,41 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure NTP commands for os9 Devices
-os9_ntp:
- server:
- - ip: 2.2.2.2
- vrf:
- - test
- - management
- state: present
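-Expected CLI rendered from this sample (derived from the template logic below):
-ntp server vrf test 2.2.2.2
-ntp server vrf management 2.2.2.2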
-###################################################}
-{% if os9_ntp is defined and os9_ntp %}
-
-{% for key,value in os9_ntp.items() %}
- {% if key == "server" and value %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.vrf is defined and item.vrf %}
- {% for vrf_name in item.vrf %}
-no ntp server vrf {{ vrf_name }} {{ item.ip }}
- {% endfor %}
- {% else %}
-no ntp server {{ item.ip }}
- {% endif %}
- {% else %}
- {% if item.vrf is defined and item.vrf %}
- {% for vrf_name in item.vrf %}
-ntp server vrf {{ vrf_name }} {{ item.ip }}
- {% endfor %}
- {% else %}
-ntp server {{ item.ip }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml
deleted file mode 100644
index f5f4680b9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/main.os9.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-# vars file for
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_ntp:
- server:
- - ip: 2.2.2.2
- vrf:
- - test
- - tes
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml
deleted file mode 100644
index 0e636d6cf..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: localhost
- connection: network_cli
- roles:
- - dellemc.os9.os9_ntp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml
deleted file mode 100644
index 7b69f09e3..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_ntp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_ntp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE b/ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md b/ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md
deleted file mode 100644
index a33434f3f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/README.md
+++ /dev/null
@@ -1,110 +0,0 @@
-Prefix-list role
-================
-
-This role facilitates the configuration of a prefix-list. It supports the configuration of an IP prefix-list, and assigns the prefix-list to line terminals. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The prefix-list role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the role generates its configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_prefix_list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``type`` | string (required): ipv4,ipv6 | Configures an L3 (IPv4/IPv6) prefix-list | os9 |
-| ``name`` | string (required) | Configures the prefix-list name | os9 |
-| ``description`` | string | Configures the prefix-list description | os9 |
-| ``entries`` | list | Configures rules in the prefix-list (see ``entries.*``) | os9 |
-| ``entries.number`` | int (required) | Specifies the sequence number of the prefix-list rule | os9 |
-| ``entries.permit`` | boolean (required): true,false | Specifies the rule to permit packets if set to true, and specifies to reject packets if set to false | os9 |
-| ``entries.net_num`` | string (required) | Specifies the network number | os9 |
-| ``entries.mask`` | string (required) | Specifies the mask | os9 |
-| ``entries.condition_list`` | list | Configures conditions to filter packets (see ``condition_list.*``)| os9 |
-| ``condition_list.condition`` | list | Specifies the condition to filter packets from the source address | os9 |
-| ``condition_list.prelen`` | string (required) | Specifies the allowed prefix length | os9 |
-| ``entries.state`` | string: absent,present\* | Deletes the rule from the prefix-list if set to absent | os9 |
-| ``state`` | string: absent,present\* | Deletes the prefix-list if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
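-
-For illustration, a minimal sketch showing how `entries.state` removes a single rule while `state: present` keeps the prefix-list itself; the values reuse the placeholders from the example playbook below:
-
-    os9_prefix_list:
-      - type: ipv4
-        name: spine-leaf
-        entries:
-          - number: 5
-            permit: true
-            net_num: 10.0.0.0
-            mask: 23
-            state: absent
-        state: present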
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_prefix_list* role to configure prefix-lists for both IPv4 and IPv6. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example also uses a simple playbook that only references the *os9_prefix_list* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_prefix_list:
- - type: ipv4
- name: spine-leaf
- description: Redistribute loopback and leaf networks
- entries:
- - number: 5
- permit: true
- net_num: 10.0.0.0
- mask: 23
- condition_list:
- - condition: ge
- prelen: 32
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_prefix_list
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
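-
-`os9_cfg_generate` can also be enabled at run time so that the role additionally writes the generated commands to the *build_dir* path (a sketch; the `-e` extra-vars override is standard Ansible usage rather than something specific to this role):
-
-    ansible-playbook -i hosts leaf.yaml -e "os9_cfg_generate=true"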
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml
deleted file mode 100644
index 3226617a9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_prefix_list \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml
deleted file mode 100644
index e1a2d9595..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_prefix_list \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml
deleted file mode 100644
index 27affba94..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/meta/main.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_prefix_list role facilitates the configuration of prefix list attributes in devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml
deleted file mode 100644
index 4ab6c2245..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating prefix list configuration for os9"
- template:
- src: os9_prefix_list.j2
- dest: "{{ build_dir }}/prefixlist9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning prefix list configuration for os9"
- dellemc.os9.os9_config:
- src: os9_prefix_list.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2 b/ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2
deleted file mode 100644
index 63c7086a2..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/templates/os9_prefix_list.j2
+++ /dev/null
@@ -1,81 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{####################################
-Purpose:
-Configure pl on OS9 devices
-os9_prefix_list:
- - name: testpl
- type: ipv4
- description: pl
- entries:
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present
- state: present
-#####################################}
-{% if (os9_prefix_list is defined and os9_prefix_list) %}
- {% for val in os9_prefix_list %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.type is defined and val.type == "ipv4" %}
-no ip prefix-list {{ val.name }}
- {% elif val.type is defined and val.type == "ipv6" %}
-no ipv6 prefix-list {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.type is defined and val.type == "ipv4" %}
-ip prefix-list {{ val.name }}
- {% elif val.type is defined and val.type == "ipv6" %}
-ipv6 prefix-list {{ val.name }}
- {% endif %}
- {% if val.description is defined %}
- {% if val.description %}
- description {{ val.description }}
- {% else %}
- no description
- {% endif %}
- {% endif %}
- {% if val.entries is defined and val.entries %}
- {% for rule in val.entries %}
- {% if rule.number is defined and rule.number %}
- {% if rule.state is defined and rule.state == "absent" %}
- no seq {{ rule.number }}
- {% else %}
- {% if rule.permit is defined %}
- {% if rule.permit %}
- {% set is_permit = "permit" %}
- {% else %}
- {% set is_permit = "deny" %}
- {% endif %}
- {% endif %}
- {% if rule.net_num is defined and rule.net_num %}
- {% if rule.net_num == "any" %}
- seq {{rule.number}} {{is_permit}} any
- {% elif rule.mask is defined and rule.mask %}
- {% if rule.condition_list is defined and rule.condition_list %}
- {% set condition_string = [' '] %}
- {% set item = "" %}
- {% for condition in rule.condition_list %}
- {% set item= condition_string[0] + condition.condition + ' ' + condition.prelen|string + ' ' %}
- {% if condition_string.insert(0,item) %} {% endif %}
- {% endfor %}
- seq {{rule.number}} {{is_permit}} {{rule.net_num}}/{{rule.mask}}{{ condition_string[0] }}
- {% else %}
- seq {{rule.number}} {{is_permit}} {{rule.net_num}}/{{rule.mask}}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {%endfor%}
-{%endif%} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml
deleted file mode 100644
index aceb1cd49..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/main.os9.yaml
+++ /dev/null
@@ -1,33 +0,0 @@
----
-# vars file for dellemc.os9.os9_prefix_list,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_prefix_list:
- - type: ipv4
- name: spine-leaf
- description: Redistribute loopback and leaf networks
- entries:
- - number: 5
- permit: true
- net_num: 10.0.0.0
- mask: 23
- condition_list:
- - condition: ge
- prelen: 32
- - number: 10
- permit: true
- net_num: 10.0.0.0
- mask: 8
- condition_list:
- - condition: ge
- prelen: 26
- - number: 19
- permit: true
- net_num: 20.0.0.0
- mask: 16
- condition_list:
- - condition: ge
- prelen: 17
- - condition: le
- prelen: 18
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml
deleted file mode 100644
index 09ef1a384..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_prefix_list \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml
deleted file mode 100644
index 9b3bccf52..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_prefix_list/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_prefix_list \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE b/ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/README.md b/ansible_collections/dellemc/os9/roles/os9_sflow/README.md
deleted file mode 100644
index 80b3ed90e..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/README.md
+++ /dev/null
@@ -1,120 +0,0 @@
-sFlow role
-==========
-
-This role facilitates the configuration of global and interface-level sFlow attributes. It supports the configuration of sFlow collectors at the global level; enabling or disabling sFlow, and specifying the sFlow polling-interval, sample-rate, max-datagram size, and so on, are supported at both the interface and global levels. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The sFlow role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os9_sflow` (dictionary) contains global keys along with *interface name* (dictionary) keys
-- Interface name can correspond to any of the valid os9 physical interfaces with the unique interface identifier name
-- Interface name must be in *<interfacename> <tuple>* format; physical interface name can be in *fortyGigE 1/1* format
-- Variables and values are case-sensitive
-
-**os9_sflow keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``sflow_enable`` | boolean: true,false\* | Enables sFlow at a global level | os9 |
-| ``collector`` | list | Configures collector information (see ``collector.*``); only two collectors can be configured on os9 devices | os9 |
-| ``collector.collector_ip`` | string (required) | Configures an IPv4/IPv6 address for the collector | os9 |
-| ``collector.agent_addr`` | string (required) | Configures an IPv4/IPv6 address for the sFlow agent to the collector | os9 |
-| ``collector.udp_port`` | integer | Configures UDP port range at the collector level (1 to 65535) | os9 |
-| ``collector.max_datagram_size`` | integer | Configures the maximum datagram size for the sFlow datagrams generated (400 to 1500) | os9 |
-| ``collector.vrf`` | boolean: true,false\* | Configures the management VRF to reach the collector if set to true; can be enabled only for IPv4 collector addresses | os9 |
-| ``polling_interval`` | integer | Configures the global default counter polling-interval (15 to 86400) | os9 |
-| ``sample_rate`` | integer | Configures the global default sample-rate (256 to 8388608) | os9 |
-| ``extended_switch`` | boolean: true,false\* | Enables packing extended information for the switch if set to true | os9 |
-| ``max_header_size`` | boolean: true,false\* | Enables extended header copy size of 256 bytes if set to true at the global level | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
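-
-As a rough illustration (derived from this role's Jinja2 template; the exact output may differ), a collector entry with all keys set, such as the one in the example playbook below, would produce a command similar to:
-
-    sflow collector 1.1.1.1 agent-addr 2.2.2.2 2 max-datagram-size 1000 vrf management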
-
-**interface name keys**
-
-| Key | Type | Notes |
-|------------|---------------------------|---------------------------------------------------------|
-| ``sflow_enable`` | boolean: true,false\* | Enables sFlow at the interface level |
-| ``ingress_enable`` | boolean: true,false\* | Enables ingress sFlow at the interface level |
-| ``polling_interval`` | integer | Configures the interface level default counter polling-interval (15 to 86400) |
-| ``max_header_size`` | boolean: true,false\* | Enables extended header copy size of 256 bytes if set to true at the interface level |
-| ``sample_rate`` | integer | Configures the interface level default sample-rate (256 to 8388608) |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_sflow* role to configure sFlow attributes at the interface and global levels. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example also uses a simple playbook that only references the *os9_sflow* role. By including the role, you automatically get access to all of the tasks that configure sFlow features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_sflow:
- sflow_enable: true
- collector:
- - collector_ip: 1.1.1.1
- agent_addr: 2.2.2.2
- udp_port: 2
- max_datagram_size: 1000
- vrf: true
- state: present
- polling_interval: 30
- sample_rate: 1024
- extended_switch : true
- max_header_size: true
- fortyGigE 1/1:
- sflow_enable : true
- ingress_enable: true
- polling_interval: 30
- sample_rate: 1024
- max_header_size: true
-
-**Simple playbook to set up sFlow — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_sflow
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml
deleted file mode 100644
index ecfc70664..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_sflow \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml
deleted file mode 100644
index 1441cc304..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_sflow \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml
deleted file mode 100644
index ca9409c6a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_sflow role facilitates the configuration of sflow attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml
deleted file mode 100644
index 63d0c2a88..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating sflow configuration for os9"
- template:
- src: os9_sflow.j2
- dest: "{{ build_dir }}/sflow9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning sflow configuration for os9"
- dellemc.os9.os9_config:
- src: os9_sflow.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2 b/ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2
deleted file mode 100644
index be9c47d14..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/templates/os9_sflow.j2
+++ /dev/null
@@ -1,143 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure sflow commands for os9 Devices
-os9_sflow:
- sflow_enable: true
- collector:
- - collector_ip: 1.1.1.1
- agent_addr: 2.2.2.2
- udp_port: 2
- max_datagram_size: 1000
- vrf: true
- state: present
- polling_interval: 30
- sample_rate: 1024
- extended_switch : true
- max_header_size: true
- fortyGigE 1/1:
- sflow_enable : true
- ingress_enable: true
- polling_interval: 30
- sample_rate: 1024
- max_header_size: true
-###################################################}
-{% if os9_sflow is defined and os9_sflow %}
-
-{% if os9_sflow %}
-{% for key,value in os9_sflow.items() %}
- {% if key == "sflow_enable" %}
- {% if value %}
-sflow enable
- {% else %}
-no sflow enable
- {% endif %}
-
- {% elif key == "collector" %}
- {% if value %}
- {% for item in value %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.collector_ip is defined and item.agent_addr is defined %}
- {% if item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size and item.vrf is defined and item.vrf %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }} vrf management
- {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }}
- {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.vrf is defined and item.vrf %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} vrf management
- {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf and item.max_datagram_size is defined and item.max_datagram_size %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }} vrf management
- {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }}
- {% elif item.collector_ip and item.agent_addr and item.max_datagram_size is defined and item.max_datagram_size %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }}
- {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} vrf management
- {% elif item.collector_ip and item.agent_addr %}
-no sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if item.collector_ip is defined and item.agent_addr is defined %}
- {% if item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size and item.vrf is defined and item.vrf %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }} vrf management
- {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.max_datagram_size is defined and item.max_datagram_size %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} max-datagram-size {{ item.max_datagram_size }}
- {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port and item.vrf is defined and item.vrf %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }} vrf management
- {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf and item.max_datagram_size is defined and item.max_datagram_size %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }} vrf management
- {% elif item.collector_ip and item.agent_addr and item.udp_port is defined and item.udp_port %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} {{ item.udp_port }}
- {% elif item.collector_ip and item.agent_addr and item.max_datagram_size is defined and item.max_datagram_size %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} max-datagram-size {{ item.max_datagram_size }}
- {% elif item.collector_ip and item.agent_addr and item.vrf is defined and item.vrf %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }} vrf management
- {% elif item.collector_ip and item.agent_addr %}
-sflow collector {{ item.collector_ip }} agent-addr {{ item.agent_addr }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key =="polling_interval" %}
- {% if value %}
-sflow polling-interval {{ value }}
- {% else %}
-no sflow polling-interval
- {% endif %}
-
- {% elif key =="sample_rate" %}
- {% if value %}
-sflow sample-rate {{ value }}
- {% else %}
-no sflow sample-rate
- {% endif %}
-
- {% elif key == "extended_switch" %}
- {% if value %}
-sflow extended-switch enable
- {% else %}
-no sflow extended-switch enable
- {% endif %}
-
- {% elif key == "max_header_size" %}
- {% if value %}
-sflow max-header-size extended
- {% else %}
-no sflow max-header-size extended
- {% endif %}
-
- {% elif '/' in key %}
- {% set intf_vars = os9_sflow[key] %}
-interface {{ key }}
- {% if intf_vars.sflow_enable is defined and intf_vars.sflow_enable %}
- sflow enable
- {% else %}
- no sflow enable
- {% endif %}
- {% if intf_vars.ingress_enable is defined and intf_vars.ingress_enable %}
- sflow ingress-enable
- {% else %}
- no sflow ingress-enable
- {% endif %}
- {% if intf_vars.max_header_size is defined and intf_vars.max_header_size %}
- sflow max-header-size extended
- {% else %}
- no sflow max-header-size extended
- {% endif %}
- {% if intf_vars.polling_interval is defined and intf_vars.polling_interval %}
- sflow polling-interval {{ intf_vars.polling_interval }}
- {% else %}
- no sflow polling-interval
- {% endif %}
- {% if intf_vars.sample_rate is defined and intf_vars.sample_rate %}
- sflow sample-rate {{ intf_vars.sample_rate }}
- {% else %}
- no sflow sample-rate
- {% endif %}
-
- {% endif %}
-{% endfor %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml
deleted file mode 100644
index 548611c05..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/main.os9.yaml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-# vars file for dellemc.os9.os9_sflow,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_sflow:
- sflow: true
- collector:
- - collector_ip: 1.1.1.1
- agent_addr: 2.2.2.2
- udp_port:
- max_datagram_size: 1000
- vrf: true
- state: present
- - collector_ip: 2.2.2.2
- agent_addr: 2.2.2.2
- udp_port: 3
- max_datagram_size: 1002
- vrf: test
- state: absent
- polling_interval: 24
- sample_rate: 256
- extended_switch: true
- max_header_size: true
- fortyGigE 1/1:
- sflow: true
- ingress_enable: true
- polling_interval: 30
- sample_rate: 1024
- max_header_size: true
- fortyGigE 1/2:
- sflow: true
- ingress_enable: true
- polling_interval: 20
- sample_rate: 256
- max_header_size: true \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml
deleted file mode 100644
index 8f931d3a1..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_sflow \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml
deleted file mode 100644
index e79c81ba2..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_sflow/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_sflow \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/README.md b/ansible_collections/dellemc/os9/roles/os9_snmp/README.md
deleted file mode 100644
index 0e458b962..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/README.md
+++ /dev/null
@@ -1,192 +0,0 @@
-SNMP role
-=========
-
-This role facilitates the configuration of global SNMP attributes. It supports the configuration of SNMP server attributes including users, group, community, location, and traps. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The SNMP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_snmp keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``snmp_contact`` | string | Configures SNMP contact information | os9 |
-| ``snmp_server_vrf`` | string | Specifies the VRF instance for SNMP requests; leaving the value blank removes the VRF instance | os9 |
-| ``snmp_location`` | string | Configures SNMP location information | os9 |
-| ``snmp_community`` | list | Configures SNMP community information (see ``snmp_community.*``) | os9 |
-| ``snmp_community.name`` | string (required) | Configures the SNMP community string | os9 |
-| ``snmp_community.access_mode`` | string: ro,rw | Configures access-mode for the community | os9 |
-| ``snmp_community.state`` | string: absent,present\* | Deletes the SNMP community information if set to absent | os9 |
-| ``snmp_host`` | list | Configures SNMP hosts to receive SNMP traps (see ``snmp_host.*``) | os9 |
-| ``snmp_host.ipv4`` | string | Configures the IPv4 address for the SNMP trap host | os9 |
-| ``snmp_host.ipv6`` | string | Configures the IPv6 address for the SNMP trap host | os9 |
-| ``snmp_host.communitystring`` | string | Configures the SNMP community string of the trap host | os9 |
-| ``snmp_host.udpport`` | string | Configures the UDP port number of the SNMP trap host (0 to 65535) | os9 |
-| ``snmp_host.version`` | string (required) | Specifies the SNMP version of the host (either 1 or 2c or 3) | os9 |
-| ``snmp_host.vrf`` | list | Configures the SNMP VRF trap for the SNMP host (list of VRF names) | os9 |
-| ``snmp_host.state`` | string: absent,present\* | Deletes the SNMP trap host if set to absent | os9 |
-| ``snmp_traps`` | list | Configures SNMP traps (see ``snmp_traps.*``) | os9 |
-| ``snmp_traps.name`` | string | Enables SNMP traps | os9 |
-| ``snmp_traps.state`` | string: absent,present\* | Deletes the SNMP trap if set to absent | os9 |
-| ``snmp_engine_id`` | string | Configures the SNMPv3 engineID for the local agent | os9 |
-| ``snmp_view`` | list | Configures SNMPv3 view information (see ``snmp_view.*``) | os9 |
-| ``snmp_view.name`` | string | Configures the SNMP view name (up to 20 characters) | os9 |
-| ``snmp_view.oid_subtree`` | integer | Configures the SNMP view for the OID subtree | os9 |
-| ``snmp_view.include`` | boolean: true,false | Specifies whether the MIB family should be included or excluded from the view | os9 |
-| ``snmp_user`` | list | Configures SNMP users for each group name (see ``snmp_user.*``) | os9 |
-| ``snmp_user.name`` | string (required) | Configures the SNMP user name | os9 |
-| ``snmp_user.group_name`` | string (required) | Configures the SNMP group name for the user | os9 |
-| ``snmp_user.version`` | string: 1,2c,3 (required) | Configures a user entry with the specified SNMP version (either 1 or 2c or 3) | os9 |
-| ``snmp_user.access_list`` | dictionary | Configures access-list details; required to configure or negate if defined | os9 |
-| ``snmp_user.access_list.access`` | string | Configures the access-list associated with the user | os9 |
-| ``snmp_user.access_list.ipv6`` | string | Configures the IPv6 access-list associated with the user | os9 |
-| ``snmp_user.encryption`` | boolean: true,false\* | Specifies the encryption for the SNMP user if set to true | os9 |
-| ``snmp_user.auth_algorithm`` | string: md5,sha | Configures the authorization algorithm for the SNMP user | os9 |
-| ``snmp_user.auth_pass`` | string | Configures the authentication password for the user | os9 |
-| ``snmp_user.state`` | string: absent,present\* | Deletes the SNMP user if set to absent | os9 |
-| ``snmp_group`` | list | Configures SNMP groups (see ``snmp_group.*``) | os9 |
-| ``snmp_group.name`` | string (required) | Configures the SNMP group name | os9 |
-| ``snmp_group.version`` | string (required) | Configures the group entry with the specified SNMP version (either 1 or 2c or 3) | os9 |
-| ``snmp_group.access_list`` | dict | Configures access-list entries for the group; required to configure or negate if defined | os9 |
-| ``snmp_group.access_list.access`` | string | Configures the access-list associated with the group | os9 |
-| ``snmp_group.access_list.ipv6`` | string | Configures the IPv6 access-list associated with the group | os9 |
-| ``snmp_group.view`` | dict | Configures view entries for the group; required to configure or negate if defined | os9 |
-| ``snmp_group.view.notify`` | string | Configures notify view associated with the group | os9 |
-| ``snmp_group.view.read`` | string | Configures read view associated with the group | os9 |
-| ``snmp_group.view.write`` | string | Configures write view associated with the group | os9 |
-| ``snmp_group.context`` | list | Configures context list entries (see ``snmp_group.context.*``) | os9 |
-| ``snmp_group.context.context_name`` | string | Configures SNMP-group entries with specified context name | os9 |
-| ``snmp_group.context.access_list`` | dictionary | Configures access-list entries for the group with context | os9 |
-| ``snmp_group.context.access_list.access`` | string | Configures the access-list associated with the group | os9 |
-| ``snmp_group.context.access_list.ipv6`` | string | Configures the IPv6 access-list associated with the group | os9 |
-| ``snmp_group.context.view`` | dictionary | Configures view entries for the group with context | os9 |
-| ``snmp_group.context.view.notify`` | string | Configures notify view associated with the group | os9 |
-| ``snmp_group.context.view.read`` | string | Configures read view associated with the group | os9 |
-| ``snmp_group.context.view.write`` | string | Configures write view associated with the group | os9 |
-| ``snmp_group.context.state`` | string: absent,present | Deletes the context entries with the group if set to absent | os9 |
-| ``snmp_group.state`` | string: absent,present\* | Deletes the associated SNMP group if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_snmp* role to completely set up the SNMP server attributes. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path; by default, the variable is set to false. Write a simple playbook that references only the *os9_snmp* role. By including the role, you automatically get access to all of the tasks to configure SNMP features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_snmp:
- snmp_contact: test
- snmp_location: chennai
- snmp_server_vrf: test
- snmp_community:
- - name: public
- access_mode: ro
- state: present
- - name: private
- access_mode: rw
- state: present
- snmp_host:
- - ipv6: 2001:4898:f0:f09b::2000
- version: "3"
- security_level: auth
- communitystring:
- udpport:
- state: absent
- snmp_traps:
- - name: config
- state: present
- snmp_engine_id: 1234567890
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: false
- state: absent
- snmp_user:
- - name: user_1
- group_name: grp1
- version: 3
- access_list:
- access: a1
- ipv6: ip1
- encryption: true
- auth_algorithm: md5
- auth_pass: 12345678
- state: present
- snmp_group:
- - name: group_1
- version: 2c
- access_list:
- access: a1
- ipv6: ip1
- state: absent
- - name: group_2
- version: 3
- security_level: priv
- access_list:
- access: a1
- ipv6: ip1
- context:
- - context_name: c1
- state: present
- - context_name: c2
- access_list:
- access: a1
- view:
- read: r1
- state: present
- state: present
-
-**Simple playbook to setup snmp — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_snmp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml
deleted file mode 100644
index 22c7b89bb..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_snmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml
deleted file mode 100644
index f04bb2b58..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_snmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml
deleted file mode 100644
index 9c7bc2e01..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_snmp role facilitates the configuration of snmp attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml
deleted file mode 100644
index 18e77e056..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating SNMP configuration for os9"
- template:
- src: os9_snmp.j2
- dest: "{{ build_dir }}/snmp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning SNMP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_snmp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2 b/ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2
deleted file mode 100644
index 6033604e0..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/templates/os9_snmp.j2
+++ /dev/null
@@ -1,524 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure snmp commands for os9 Devices
-os9_snmp:
- snmp_contact: test
- snmp_location: chennai
- snmp_server_vrf: test
- snmp_community:
- - name: public
- access_mode: ro
- state: present
- - name: private
- access_mode: rw
- state: present
- snmp_context:
- - name: SNMP1
- state: absent
- snmp_packet_size: 8
- snmp_host:
- - ipv4: 1.1.1.1
- version: 3
- security_level: auth
- vrf:
- - test
- - management
- communitystring: msft
- udpport: 162
- state: absent
- - ipv6: 2001:4898:f0:f09b::2000
- version: 1
- state: present
- snmp_traps:
- - name: config
- state: present
- snmp_engine_id: 1234567890
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: true
- state: absent
- snmp_user:
- - name: user_1
- group_name: grp1
- encryption : true
- auth_algorithm: md5
- auth_pass: 12345678
- version: 3
- access_list:
- access: a2
- ipv6: ip1
- state: present
- - name: user_2
- group_name: grp2
- version: "2c"
- access_list:
- ipv6: ip1
- state: absent
- snmp_group:
- - name: group_1
- version: 2c
- access_list:
- access: a1
- ipv6: ip1
- view:
- read: r1
- write: w1
- notify: n1
- context:
- - context_name: c1
- access_list:
- access: a1
- ipv6: ip1
- view:
- read: r1
- write: w1
- notify: n1
- state: present
- - context_name: c2
- state: present
- state: present
- - name: group_2
- version: 3
- security_level: auth
- access_list:
- access: a1
- ipv6: ip1
- state: present
-###################################################}
-{% if os9_snmp is defined and os9_snmp %}
-
-{% if os9_snmp %}
-{% for key,value in os9_snmp.items() %}
- {% if key == "snmp_contact" %}
- {% if value %}
-snmp-server contact {{ value }}
- {% else %}
-no snmp-server contact
- {% endif %}
-
- {% elif key == "snmp_location" %}
- {% if value %}
-snmp-server location {{ value }}
- {% else %}
-no snmp-server location
- {% endif %}
-
- {% elif key == "snmp_server_vrf" %}
- {% if value %}
-snmp-server vrf {{ value }}
- {% else %}
-no snmp-server vrf
- {% endif %}
-
- {% elif key == "snmp_community" %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server community {{ item.name }}
- {% else %}
- {% if item.access_mode is defined and item.access_mode %}
-snmp-server community {{ item.name }} {{ item.access_mode }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "snmp_packet_size" %}
- {% if value %}
-snmp-server packetsize {{ value }}
- {% else %}
-no snmp-server packetsize
- {% endif %}
-
- {% elif key == "snmp_context" %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server context {{ item.name }}
- {% else %}
-snmp-server context {{ item.name }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "snmp_host" and value %}
- {% for item in value %}
- {% if item.version is defined and item.version == "2c" or item.version == 1 %}
- {% set my_version = item.version|string %}
- {% elif item.version is defined and item.version == 3 %}
- {% if item.security_level is defined and item.security_level %}
- {% set my_version = "3"+" "+item.security_level %}
- {% endif %}
- {% endif %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.vrf is defined and item.vrf %}
- {% for vrf_name in item.vrf %}
- {% if item.ipv4 is defined and item.ipv4 %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-no snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }}
- {% else %}
-no snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }}
- {% endif %}
- {% else %}
-no snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} public udp-port 162
- {% endif %}
- {% endif %}
- {% endfor %}
- {% else %}
- {% if item.ipv4 is defined and item.ipv4 %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-no snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }}
- {% else %}
-no snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }}
- {% endif %}
- {% else %}
-no snmp-server host {{ item.ipv4 }} traps version {{ my_version }} public udp-port 162
- {% endif %}
- {% elif item.ipv6 is defined and item.ipv6 %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-no snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }}
- {% else %}
-no snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }}
- {% endif %}
- {% else %}
-no snmp-server host {{ item.ipv6 }} traps version {{ my_version }} public udp-port 162
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- {% if item.vrf is defined and item.vrf %}
- {% for vrf_name in item.vrf %}
- {% if item.ipv4 is defined and item.ipv4 %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }}
- {% else %}
-snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} {{ item.communitystring }} udp-port 162
- {% endif %}
- {% else %}
-snmp-server host {{ item.ipv4 }} vrf {{ vrf_name }} traps version {{ my_version }} public udp-port 162
- {% endif %}
- {% endif %}
- {% endfor %}
- {% else %}
- {% if item.ipv4 is defined and item.ipv4 %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }}
- {% else %}
-snmp-server host {{ item.ipv4 }} traps version {{ my_version }} {{ item.communitystring }} udp-port 162
- {% endif %}
- {% else %}
-snmp-server host {{ item.ipv4 }} traps version {{ my_version }} public udp-port 162
- {% endif %}
- {% elif item.ipv6 is defined and item.ipv6 %}
- {% if item.communitystring is defined and item.communitystring %}
- {% if item.udpport is defined and item.udpport %}
-snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} udp-port {{ item.udpport }}
- {% else %}
-snmp-server host {{ item.ipv6 }} traps version {{ my_version }} {{ item.communitystring }} udp-port 162
- {% endif %}
- {% else %}
-snmp-server host {{ item.ipv6 }} traps version {{ my_version }} public udp-port 162
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-
- {% elif key == "snmp_traps" %}
- {% if value %}
- {% for val in value %}
- {% if val.name is defined and val.name %}
- {% if val.state is defined and val.state == "absent" %}
- {% if val.name == "all" %}
-no snmp-server enable traps
- {% else %}
-no snmp-server enable traps {{ val.name }}
- {% endif %}
- {% else %}
- {% if val.name == "all" %}
- {% set trap_list = ['bgp','snmp authentication coldstart linkdown linkup syslog-reachable syslog-unreachable','vrrp','lacp','entity','stack','stp','ecfm','vlt','fips','ets','xstp','isis','config','pfc','envmon cam-utilization fan supply temperature','ecmp'] %}
- {% for name in trap_list %}
-snmp-server enable traps {{ name }}
- {% endfor %}
- {% else %}
-snmp-server enable traps {{ val.name }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "snmp_engine_id" %}
- {% if value %}
-snmp-server engineID local {{ value }}
- {% else %}
-no snmp-server engineID local
- {% endif %}
-
- {% elif key == "snmp_view" %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.oid_subtree is defined and item.oid_subtree %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server view {{ item.name }} {{ item.oid_subtree }}
- {% else %}
- {% if item.include is defined %}
- {% if item.include %}
-snmp-server view {{ item.name }} {{ item.oid_subtree }} included
- {% elif not item.include %}
-snmp-server view {{ item.name }} {{ item.oid_subtree }} excluded
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "snmp_user" %}
- {% if value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.group_name is defined and item.group_name %}
- {% if item.version is defined and item.version == "2c" or item.version == 1 %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }}
- {% else %}
- {% if item.access_list is defined and item.access_list %}
- {% if item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }}
- {% elif item.access_list.access is defined and item.access_list.access %}
-snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} access {{ item.access_list.access }}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }} ipv6 {{ item.access_list.ipv6 }}
- {% endif %}
- {% else %}
-snmp-server user {{ item.name }} {{ item.group_name }} {{ item.version|string }}
- {% endif %}
- {% endif %}
- {% elif item.version is defined and item.version == 3 %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server user {{ item.name }} {{ item.group_name }} 3
- {% else %}
- {% if item.access_list is defined and item.access_list %}
- {% if item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }}
- {% elif item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }}
- {% elif item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }} ipv6 {{ item.access_list.ipv6 }}
- {% elif item.encryption is defined and item.encryption and item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 encrypted auth {{ item.auth_algorithm }} {{ item.auth_pass }}
- {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }}
- {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.access is defined and item.access_list.access %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} access {{ item.access_list.access }}
- {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }} ipv6 {{ item.access_list.ipv6 }}
- {% elif item.auth_algorithm is defined and item.auth_algorithm and item.auth_pass is defined and item.auth_pass %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 auth {{ item.auth_algorithm }} {{ item.auth_pass }}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 access {{ item.access_list.access }} ipv6 {{ item.access_list.ipv6 }}
- {% elif item.access_list.access is defined and item.access_list.access %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 access {{ item.access_list.access }}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 %}
-snmp-server user {{ item.name }} {{ item.group_name }} 3 ipv6 {{ item.access_list.ipv6 }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "snmp_group" and value %}
- {% for item in value %}
- {% if item.name is defined and item.name %}
- {% if item.version is defined and item.version == "2c" or item.version == 1 %}
- {% set my_version = item.version|string %}
- {% elif item.version is defined and item.version == 3 %}
- {% if item.security_level is defined and item.security_level %}
- {% set my_version = "3"+" "+item.security_level %}
- {% endif %}
- {% endif %}
- {% if item.context is defined and item.context %}
- {% set my_entry = [] %}
- {% for it in item.context %}
- {% if it.context_name is defined and it.context_name %}
- {% if it.access_list is defined and it.access_list and it.view is defined and it.view %}
- {% if it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" write "+it.view.write+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" notify "+it.view.notify) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6 +" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write%}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.write is defined and it.view.write and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" write "+it.view.write+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.notify is defined and it.view.notify %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" notify "+it.view.notify) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write%}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify+" write "+it.view.write) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" write "+it.view.write+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.notify is defined and it.view.notify %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" notify "+it.view.notify) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" read "+it.view.read) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6+" write "+it.view.write) %}{% endif %}
- {% endif %}
- {% elif it.access_list is defined and it.access_list %}
- {% if it.access_list.access is defined and it.access_list.access and it.access_list.ipv6 is defined and it.access_list.ipv6 %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access+" ipv6 "+it.access_list.ipv6) %}{% endif %}
- {% elif it.access_list.access is defined and it.access_list.access %}
- {% if my_entry.append("context "+it.context_name+" access "+it.access_list.access) %}{% endif %}
- {% elif it.access_list.ipv6 is defined and it.access_list.ipv6 %}
- {% if my_entry.append("context "+it.context_name+" ipv6 "+it.access_list.ipv6) %}{% endif %}
- {% endif %}
- {% elif it.view is defined and it.view %}
- {% if it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify+" read "+it.view.read+" write "+it.view.write) %}{% endif %}
- {% elif it.view.notify is defined and it.view.notify and it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify+" read "+it.view.read) %}{% endif %}
- {% elif it.view.notify is defined and it.view.notify and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify+" write "+it.view.write) %}{% endif %}
- {% elif it.view.read is defined and it.view.read and it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" read "+it.view.read+" write "+it.view.write) %}{% endif %}
- {% elif it.view.read is defined and it.view.read %}
- {% if my_entry.append("context "+it.context_name+" read "+it.view.read) %}{% endif %}
- {% elif it.view.notify is defined and it.view.notify %}
- {% if my_entry.append("context "+it.context_name+" notify "+it.view.notify) %}{% endif %}
- {% elif it.view.write is defined and it.view.write %}
- {% if my_entry.append("context "+it.context_name+" write "+it.view.write) %}{% endif %}
- {% endif %}
- {% else %}
- {% if my_entry.append("context "+it.context_name) %}{% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if item.access_list is defined and item.access_list and item.view is defined and item.view %}
- {% if item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" read "+item.view.read %}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" read "+item.view.read+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify %}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 +" read "+item.view.read %}
- {% elif item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %}
- {% set my_en = "access "+item.access_list.access+" notify "+item.view.notify+" read "+item.view.read %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" read "+item.view.read+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access +" notify "+item.view.notify+" write "+item.view.write %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.notify is defined and item.view.notify %}
- {% set my_en = "access "+item.access_list.access +" notify "+item.view.notify %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.read is defined and item.view.read %}
- {% set my_en = "access "+item.access_list.access+" read "+item.view.read %}
- {% elif item.access_list.access is defined and item.access_list.access and item.view.write is defined and item.view.write %}
- {% set my_en = "access "+item.access_list.access+" write "+item.view.write %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "ipv6 "+item.access_list.ipv6+" notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %}
- {% set my_en = "ipv6 "+item.access_list.ipv6+" notify "+item.view.notify+" read "+item.view.read %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "ipv6 "+item.access_list.ipv6+" read "+item.view.read+" write "+item.view.write %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %}
- {% set my_en = "ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify+" write "+item.view.write %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.notify is defined and item.view.notify %}
- {% set my_en = "ipv6 "+item.access_list.ipv6 +" notify "+item.view.notify %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.read is defined and item.view.read %}
- {% set my_en = "ipv6 "+item.access_list.ipv6+" read "+item.view.read %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 and item.view.write is defined and item.view.write %}
- {% set my_en = "ipv6 "+item.access_list.ipv6+" write "+item.view.write %}
-
- {% endif %}
- {% elif item.access_list is defined and item.access_list %}
- {% if item.access_list.access is defined and item.access_list.access and item.access_list.ipv6 is defined and item.access_list.ipv6 %}
- {% set my_en = "access "+item.access_list.access+" ipv6 "+item.access_list.ipv6 %}
- {% elif item.access_list.access is defined and item.access_list.access %}
- {% set my_en = "access "+item.access_list.access %}
- {% elif item.access_list.ipv6 is defined and item.access_list.ipv6 %}
- {% set my_en = "ipv6 "+item.access_list.ipv6 %}
- {% endif %}
- {% elif item.view is defined and item.view %}
- {% if item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "notify "+item.view.notify+" read "+item.view.read+" write "+item.view.write %}
- {% elif item.view.notify is defined and item.view.notify and item.view.read is defined and item.view.read %}
- {% set my_en = "notify "+item.view.notify+" read "+item.view.read %}
- {% elif item.view.notify is defined and item.view.notify and item.view.write is defined and item.view.write %}
- {% set my_en ="notify "+item.view.notify+" write "+item.view.write %}
- {% elif item.view.read is defined and item.view.read and item.view.write is defined and item.view.write %}
- {% set my_en = "read "+item.view.read+" write "+item.view.write %}
- {% elif item.view.read is defined and item.view.read %}
- {% set my_en = "read "+item.view.read %}
- {% elif item.view.notify is defined and item.view.notify %}
- {% set my_en = "notify "+item.view.notify %}
- {% elif item.view.write is defined and item.view.write %}
- {% set my_en = "write "+item.view.write %}
- {% endif %}
- {% endif %}
- {% if item.state is defined and item.state == "absent" %}
-no snmp-server group {{ item.name }} {{ my_version }}
- {% else %}
- {% if my_en is defined and my_en %}
-snmp-server group {{ item.name }} {{ my_version }} {{ my_en }}
- {% else %}
-snmp-server group {{ item.name }} {{ my_version }}
- {% endif %}
- {% endif %}
- {% set my_en = "" %}
- {% if item.context is defined %}
- {% for it in item.context %}
- {% if it.state is defined and it.state == "absent" %}
-no snmp-server group {{ item.name }} {{ my_version }} context {{ it.context_name }}
- {% else %}
-snmp-server group {{ item.name }} {{ my_version }} {{ my_entry[loop.index0] }}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endfor %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml
deleted file mode 100644
index 98cba1e5a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/main.os9.yaml
+++ /dev/null
@@ -1,83 +0,0 @@
----
-# vars file for dellemc.os9.os9_snmp,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_snmp:
- snmp_contact: test
- snmp_location: chennai
- snmp_server_vrf: test
- snmp_context:
- - name: SNMP1
- state: present
- snmp_packet_size: 16
- snmp_community:
- - name: public
- access_mode: ro
- state: present
- - name: private
- access_mode: rw
- state: present
- snmp_host:
- - ipv4: 1.1.1.1
- version: 3
- vrf:
- - test
- - management
- security_level: noauth
- communitystring: ab
- udpport: 1
- state: absent
- - ipv6: 2001:4898:f0:f09b::2000
- version: 1
- state: present
- snmp_traps:
- - name: config
- state: present
- snmp_engine_id: 1234567890
- snmp_view:
- - name: view_1
- oid_subtree: 2
- include: false
- state: absent
- snmp_user:
- - name: user_1
- group_name: grp1
- version: 3
- access_list:
- access: a1
- encryption: true
- auth_algorithm: md5
- auth_pass: 12345678
- state: present
- - name: user_2
- group_name: grp1
- version: "2c"
- access_list:
- access: a2
- ipv6: ip1
- state: present
- snmp_group:
- - name: group_1
- version: 2c
- access_list:
- access: a1
- ipv6: ip1
- context:
- - context_name: c1
- state: present
- - context_name: c2
- access_list:
- access: a1
- ipv6: ip1
- view:
- notify: n1
- read: r1
- write: w1
- state: absent
- - name: group_2
- version: 3
- security_level: priv
- access_list:
- access: a1
- ipv6: ip1
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml
deleted file mode 100644
index cc1736a4a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_snmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml
deleted file mode 100644
index 9fa364343..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_snmp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_snmp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/LICENSE b/ansible_collections/dellemc/os9/roles/os9_system/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/README.md b/ansible_collections/dellemc/os9/roles/os9_system/README.md
deleted file mode 100644
index 36e5e1633..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/README.md
+++ /dev/null
@@ -1,223 +0,0 @@
-System role
-===========
-
-This role facilitates the configuration of global system attributes; it specifically enables configuration of the hostname and enable password. It supports the configuration of the management route, hash algorithm, clock, line terminal, banner, and reload type. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The System role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- The role is abstracted using the `ansible_network_os` variable, which can take `dellemc.os9.os9` as its value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the short sketch after this list)
-- Variables and values are case-sensitive
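As a brief illustration of the two negation rules above, here is a minimal sketch (the values are placeholders; the keys are described in the table that follows):

    os9_system:
      enable_password: ""            # empty value negates (removes) the enable password
      clock:
        timezone:
          name: IST
          offset_hours: -5
          state: absent              # state set to absent deletes the time zone configuration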
-
-**os9_system keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``hostname`` | string | Configures a hostname to the device (no negate command) | os9 |
-| ``unique_hostname`` | boolean: true,false\* | Configures a unique hostname in the switch, only applicable to the FN IOM and MXL platform modules | os9 |
-| ``enable_password`` | string | Configures the enable password | os9 |
-| ``management_rt`` | list | Configures the management route | os9 |
-| ``management_rt.ip`` | string (required) | Configures the IP destination prefix for the management route (A.B.C.D format for IPv4, A:B:C:D::E format for IPv6) | os9 |
-| ``management_rt.ipv4`` | boolean: true\*,false | Specifies if the management route is an IPv4 or IPv6 address; if false or undefined, the IP is set as IPv6 | os9 |
-| ``management_rt.state`` | string: absent,present\* | Deletes the management route if set to absent | os9 |
-| ``line_terminal`` | dictionary | Configures the terminal line (see ``line_terminal.*``) | os9 |
-| ``line_terminal.<terminal>`` | dictionary | Configures the primary or virtual terminal line (console <line_number> or vty <line_number> values) | os9 |
-| ``<terminal>.exec_timeout`` | string | Configures the EXEC timeout (<min> <sec> values) | os9 |
-| ``<terminal>.exec_banner`` | boolean: true,false\* | Configures the EXEC banner | os9 |
-| ``<terminal>.login_banner`` | boolean: true,false\* | Configures the login banner | os9 |
-| ``<terminal>.motd_banner`` | boolean: true,false\* | Configures the MOTD banner | os9 |
-| ``service_passwd_encryption`` | boolean: true,false | Configures system password encryption | os9 |
-| ``hash_algo`` | dictionary | Configures hash algorithm commands (see ``hash_algo.*``) | os9 |
-| ``hash_algo.algo`` | list | Configures hashing algorithm (see ``algo.*``) | os9 |
-| ``algo.name`` | string (required) | Configures the name of the hashing algorithm | os9 |
-| ``algo.mode`` | string (required) | Configures the hashing algorithm mode | os9 |
-| ``algo.stack_unit`` | integer | Configures the stack-unit for the hashing algorithm | os9 |
-| ``algo.port_set`` | integer | Configures the port-pipe set for the hashing algorithm | os9 |
-| ``algo.state`` | string: absent,present\* | Deletes the hashing algorithm if set to absent | os9 |
-| ``hash_algo.seed`` | list | Configures the hashing algorithm seed (see ``seed.*``) | os9 |
-| ``seed.value`` | integer (required) | Configures the hashing algorithm seed value | os9 |
-| ``seed.stack_unit`` | integer | Configures the stack-unit for the hashing algorithm seed | os9 |
-| ``seed.port_set`` | integer | Configures the port-pipe set for the hashing algorithm seed | os9 |
-| ``seed.state`` | string: absent,present\* | Deletes the hashing algorithm seed if set to absent | os9 |
-| ``banner`` | dictionary | Configures global banner commands (see ``banner.*``) | os9 |
-| ``banner.login`` | dictionary | Configures the login banner (see ``login.*``) | os9 |
-| ``login.ack_enable`` | boolean: true,false | Configures positive acknowledgement | os9 |
-| ``login.ack_prompt`` | string | Configures the positive acknowledgement prompt | os9 |
-| ``login.keyboard_interactive`` | boolean: true,false | Configures the keyboard interactive prompt | os9 |
-| ``login.banner_text`` | string | Configures the banner text for the login banner; 'c <banner-text> c' format where 'c' is a delimiting character | os9 |
-| ``banner.exec`` | string | Configures the banner text for EXEC process creation banner; 'c <banner-text> c' where 'c' is a delimiting character for os9 | os9 |
-| ``banner.motd`` | string | Configures the banner text for the message of the day banner; 'c <banner-text> c' where 'c' is a delimiting character for os9 | os9 |
-| ``load_balance`` | dictionary | Configures the global traffic load balance (see ``load_balance.*``) | os9 |
-| ``load_balance.ingress_port`` | boolean: true,false | Specifies whether to use the source port ID for the hashing algorithm | os9 |
-| ``load_balance.tcp_udp`` | boolean: true, false | Configures whether to use TCP/UDP ports in packets for hashing algorithm | os9 |
-| ``load_balance.ip_selection`` | list | Configures IPv4 key fields to use in hashing algorithm; mutually exclusive with *load_balance.tcp_udp* for os9 devices (see ``ip_selection.*``) | os9 |
-| ``ip_selection.field`` | string | Configures IPv4 key fields to use in hashing algorithm | os9 |
-| ``ip_selection.state`` | string: absent,present\* | Deletes the IPv4 key fields if set to absent | os9 |
-| ``load_balance.ipv6_selection`` | list | Configures IPv6 key fields to use in hashing algorithm; mutually exclusive with *load_balance.tcp_udp* for os9 devices (see ``ipv6_selection.*``) | os9 |
-| ``ipv6_selection.field`` | string | Configures IPv6 key fields to use in hashing algorithm | os9 |
-| ``ipv6_selection.state`` | string: absent,present\* | Deletes the IPv6 key fields if set to absent | os9 |
-| ``load_balance.tunnel`` | dictionary | Configures tunnel key fields to use in hashing algorithm (see ``tunnel.*``) | os9 |
-| ``tunnel.hash_field`` | list | Configures hash field selection (see ``hash_field.*``) | os9 |
-| ``hash_field.name`` | string (required) | Configures the hash field selection | os9 |
-| ``hash_field.header`` | string | Configures header for load balance | os9 |
-| ``hash_field.state`` | string: absent,present\* | Deletes the hash key selection field if set to absent | os9 |
-| ``clock`` | dictionary | Configures time-of-day clock (see ``clock.*``) | os9 |
-| ``clock.summer_time`` | dictionary | Configures summer (daylight savings) time (see ``summer_time.*``) | os9 |
-| ``summer_time.timezone_name`` | string (required) | Configures the time zone name | os9 |
-| ``summer_time.type`` | string (required) | Configures absolute or recurring summer time | os9 |
-| ``summer_time.start_datetime`` | string | Configures start datetime; <date> <month> <year> <hrs:mins> format | os9 |
-| ``summer_time.end_datetime`` | string | Configures end datetime; <date> <month> <year> <hrs:mins> format | os9 |
-| ``summer_time.offset_mins`` | integer | Configures offset minutes to add (1 to 1440) | os9 |
-| ``summer_time.state`` | string: absent,present\* | Deletes the summer time clock if set to absent | os9 |
-| ``clock.timezone`` | dictionary | Configures timezone (see ``timezone.*``) | os9 |
-| ``timezone.name`` | string (required) | Configures the timezone name | os9 |
-| ``timezone.offset_hours`` | integer | Configures offset hours to add (-23 to 23) | os9 |
-| ``timezone.offset_mins`` | integer | Configures offset minutes to add (0 to 59) | os9 |
-| ``timezone.state`` | string: absent,present\* | Deletes the time zone if set to absent | os9 |
-| ``reload_type`` | dictionary | Configures the reload type (see ``reload_type.*``) | os9 |
-| ``reload_type.auto_save`` | boolean: true,false\* | Configures the auto save option for downloaded configuration/script file | os9 |
-| ``reload_type.boot_type`` | string: bmp-reload,normal-reload | Configures the boot type | os9 |
-| ``reload_type.boot_type_state`` | string: absent,present\* | Deletes the boot type if set to absent | os9 |
-| ``reload_type.config_scr_download`` | boolean: true,false\* | Configures whether config/script file needs to be downloaded | os9 |
-| ``reload_type.dhcp_timeout`` | integer | Configures DHCP timeout in minutes (0 to 50) | os9 |
-| ``reload_type.retry_count`` | integer | Configures the number of retries for image and configuration download (0 to 6) | os9 |
-| ``reload_type.relay`` | boolean: true,false\* | Configures the addition of option82 in DHCP client packets | os9 |
-| ``reload_type.relay_remote_id`` | string | Configures a customized remote ID | os9 |
-| ``reload_type.vendor_class_identifier`` | boolean: true,false\* | Configures vendor-class-identifier for DHCP option60 | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
-********************
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_system* role to configure the hostname, enable password, management route, hash algorithm, clock, line terminal, banner, and reload type. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. This example writes a simple playbook that only references the *os9_system* role. By including the role, you automatically get access to all of the tasks to configure system features.
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_system:
- hostname: host1
- unique_hostname: True
- enable_password: dell
- service_passwd_encryption: true
- banner:
- exec: t hai t
- login:
- ack_enable: true
- ack_prompt: testbanner
- keyboard_interactive: true
- banner_text: cloginbannerc
- motd: t ansibletest t
- hash_algo:
- algo:
- - name: lag
- mode: xor1
- stack_unit: 0
- port_set: 0
- state: present
- - name: ecmp
- mode: xor1
- stack_unit: 0
- port_set: 0
- state: present
- seed:
- - value: 3
- stack_unit: 0
- port_set: 0
- state: present
- - value: 2
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: vlan dest-ip
- state: present
- ipv6_selection:
- - field: dest-ipv6 vlan
- state: present
- tunnel:
- hash_field:
- - name: mac-in-mac
- header: tunnel-header-mac
- state: present
- clock:
- summer_time:
- timezone_name: PST
- type: date
- start_datetime: 2 jan 1993 22:33
- end_datetime: 3 jan 2017 22:33
- offset_mins: 20
- timezone:
- name: IST
- offset_hours: -5
- offset_mins: 20
- reload_type:
- auto_save: true
- boot_type: normal-reload
- boot_type_state: absent
- config_scr_download: true
- dhcp_timeout: 5
- retry_count: 3
- relay: true
- relay_remote_id: ho
- vendor_class_identifier: aa
- management_rt:
- - ip: 10.16.148.254
- state: present
- ipv4: True
- line_terminal:
- vty 0:
- exec_timeout: 40
- exec_banner: true
- vty 1:
- exec_timeout: 40 200
- motd_banner: true
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_system
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml
deleted file mode 100644
index 2892046b6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml
deleted file mode 100644
index d19126d07..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml
deleted file mode 100644
index 9b7164400..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_system role facilitates the configuration of system attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml
deleted file mode 100644
index a52c5041d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating system configuration for os9"
- template:
- src: os9_system.j2
- dest: "{{ build_dir }}/system9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning system configuration for os9"
- dellemc.os9.os9_config:
- src: os9_system.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2 b/ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2
deleted file mode 100644
index 594179c94..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/templates/os9_system.j2
+++ /dev/null
@@ -1,422 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-
-Purpose:
-Configure system commands for os9 Devices
-
-os9_system:
- hostname: os9
- unique_hostname: true
- enable_password: xxxxx
- service_passwd_encryption: true
- banner:
- exec: t hai t
- login:
- ack_enable: true
- ack_prompt: testbanner
- keyboard_interactive: true
- banner_text: cloginbannerc
- motd: t ansibletest t
- hash_algo:
- algo:
- - name: lag
- mode: xor1
- stack_unit: 0
- port_set: 0
- state: present
- - name: ecmp
- mode: xor1
- stack_unit: 0
- port_set: 0
- state: present
- seed:
- - value: 3
- stack_unit: 0
- port_set: 0
- state: present
- - value: 2
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: vlan dest-ip
- state: present
- ipv6_selection:
- - field: dest-ipv6 vlan
- state: present
- tunnel:
- hash_field:
- - name: mac-in-mac
- header: tunnel-header-ipv4
- state: present
- clock:
- summer_time:
- timezone_name: PST
- type: date
- start_datetime: 2 jan 1991 22:33
- end_datetime: 3 jan 2017 22:33
- offset_mins: 20
- timezone:
- name: IST
- offset_hours: -5
- offset_mins: 20
- reload_type:
- auto_save: true
- boot_type: normal-reload
- config_scr_download: true
- dhcp_timeout: 5
- retry_count: 3
- relay: true
- relay_remote_id: host
- vendor_class_identifier: aa
- management_rt:
- - ip: 10.16.148.254
- state: present
- ipv4: True
- line_terminal:
- vty 0:
- exec_timeout: 40
- exec_banner: true
- vty 1:
- exec_timeout: 40 200
- motd_banner: true
-###################################################}
-{% if os9_system is defined and os9_system %}
-
-{% if os9_system.hostname is defined and os9_system.hostname %}
-hostname {{ os9_system.hostname }}
-{% endif %}
-{% if os9_system %}
-{% for key,value in os9_system.items() %}
- {% if key == "unique_hostname" %}
- {% if value %}
-feature unique-name
- {% else %}
-no feature unique-name
- {% endif %}
-
- {% elif key == "enable_password" %}
- {% if value %}
-enable password {{ value }}
- {% else %}
-no enable password
- {% endif %}
-
- {% elif key == "service_passwd_encryption" %}
- {% if value %}
-service password-encryption
- {% else %}
-no service password-encryption
- {% endif %}
-
- {% elif key == "clock" and value %}
- {% if value.summer_time is defined and value.summer_time %}
- {% set time_vars = value.summer_time %}
- {% if time_vars.state is defined and time_vars.state == "absent" %}
-no clock summer-time
- {% else %}
- {% if time_vars.timezone_name is defined and time_vars.timezone_name %}
- {% if time_vars.type is defined and time_vars.type %}
- {% if time_vars.start_datetime is defined and time_vars.start_datetime %}
- {% if time_vars.end_datetime is defined and time_vars.end_datetime %}
- {% if time_vars.offset_mins is defined and time_vars.offset_mins %}
-clock summer-time {{ time_vars.timezone_name }} {{ time_vars.type }} {{ time_vars.start_datetime }} {{ time_vars.end_datetime }} {{ time_vars.offset_mins }}
- {% else %}
-clock summer-time {{ time_vars.timezone_name }} {{ time_vars.type }} {{ time_vars.start_datetime }} {{ time_vars.end_datetime }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if value.timezone is defined and value.timezone %}
- {% set timezone_vars = value.timezone %}
- {% if timezone_vars.state is defined and timezone_vars.state == "absent" %}
-no clock timezone
- {% else %}
- {% if timezone_vars.name is defined and timezone_vars.name %}
- {% if timezone_vars.offset_hours is defined and timezone_vars.offset_hours %}
- {% if timezone_vars.offset_mins is defined and timezone_vars.offset_mins %}
-clock timezone {{ timezone_vars.name }} {{ timezone_vars.offset_hours }} {{ timezone_vars.offset_mins }}
- {% else %}
-clock timezone {{ timezone_vars.name }} {{ timezone_vars.offset_hours }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
-
-
- {% elif key == "hash_algo" and value %}
- {% if value.algo is defined and value.algo %}
- {% for item in value.algo %}
- {% if item.name is defined and item.name %}
- {% if item.mode is defined and item.mode %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.stack_unit is defined and item.stack_unit >= 0 %}
- {% if item.port_set is defined and item.port_set >= 0 %}
-no hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }}
- {% else %}
-no hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }}
- {% endif %}
- {% else %}
-no hash-algorithm {{ item.name }} {{ item.mode }}
- {% endif %}
- {% else %}
- {% if item.stack_unit is defined and item.stack_unit >= 0 %}
- {% if item.port_set is defined and item.port_set >= 0 %}
-hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }}
- {% else %}
-hash-algorithm {{ item.name }} {{ item.mode }} stack-unit {{ item.stack_unit }}
- {% endif %}
- {% else %}
-hash-algorithm {{ item.name }} {{ item.mode }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if value.seed is defined and value.seed %}
- {% for item in value.seed %}
- {% if item.value is defined and item.value >= 0 %}
- {% if item.state is defined and item.state == "absent" %}
- {% if item.stack_unit is defined and item.stack_unit >= 0 %}
- {% if item.port_set is defined and item.port_set >= 0 %}
-no hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }}
- {% else %}
-no hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }}
- {% endif %}
- {% else %}
-no hash-algorithm seed {{ item.value }}
- {% endif %}
- {% else %}
- {% if item.stack_unit is defined and item.stack_unit >= 0 %}
- {% if item.port_set is defined and item.port_set >= 0 %}
-hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }} port-set {{ item.port_set }}
- {% else %}
-hash-algorithm seed {{ item.value }} stack-unit {{ item.stack_unit }}
- {% endif %}
- {% else %}
-hash-algorithm seed {{ item.value }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% elif key == "banner" and value %}
- {% if value.exec is defined %}
- {% if value.exec %}
-banner exec {{ value.exec }}
- {% else %}
-no banner exec
- {% endif %}
- {% endif %}
- {% if value.motd is defined %}
- {% if value.motd %}
-banner motd {{ value.motd }}
- {% else %}
-no banner motd
- {% endif %}
- {% endif %}
- {% if value.login is defined and value.login %}
- {% set login_vars = value.login %}
- {% if login_vars.ack_enable is defined %}
- {% if login_vars.ack_enable %}
-banner login acknowledgment enable
- {% else %}
-no banner login acknowledgment enable
- {% endif %}
- {% endif %}
- {% if login_vars.ack_prompt is defined %}
- {% if login_vars.ack_prompt %}
-banner login acknowledgment prompt {{ login_vars.ack_prompt }}
- {% else %}
-no banner login acknowledgment prompt
- {% endif %}
- {% endif %}
- {% if login_vars.keyboard_interactive is defined %}
- {% if login_vars.keyboard_interactive %}
-banner login keyboard-interactive
- {% else %}
-no banner login keyboard-interactive
- {% endif %}
- {% endif %}
- {% if login_vars.banner_text is defined %}
- {% if login_vars.banner_text %}
-banner login {{ login_vars.banner_text }}
- {% else %}
-no banner login
- {% endif %}
- {% endif %}
- {% endif %}
- {% elif key == "load_balance" and value %}
- {% if value.ingress_port is defined %}
- {% if value.ingress_port %}
-load-balance ingress-port enable
- {% else %}
-no load-balance ingress-port enable
- {% endif %}
- {% endif %}
- {% if value.tcp_udp is defined %}
- {% if value.tcp_udp %}
-load-balance tcp-udp enable
- {% else %}
-no load-balance tcp-udp enable
- {% endif %}
- {% endif %}
- {% if value.ip_selection is defined and value.ip_selection %}
- {% for item in value.ip_selection %}
- {% if item.field is defined and item.field %}
- {% if item.state is defined and item.state == "absent" %}
-no load-balance ip-selection
- {% else %}
-load-balance ip-selection {{ item.field }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if value.ipv6_selection is defined and value.ipv6_selection %}
- {% for item in value.ipv6_selection %}
- {% if item.field is defined and item.field %}
- {% if item.state is defined and item.state == "absent" %}
-no load-balance ipv6-selection
- {% else %}
-load-balance ipv6-selection {{ item.field }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if value.tunnel is defined and value.tunnel %}
- {% set tunnel_vars = value.tunnel %}
- {% if tunnel_vars.hash_field is defined and tunnel_vars.hash_field %}
- {% for item in tunnel_vars.hash_field %}
- {% if item.name is defined and item.name %}
- {% if item.header is defined and item.header %}
- {% if item.state is defined and item.state == "absent" %}
-no load-balance tunnel {{ item.name }} {{ item.header }}
- {% else %}
-load-balance tunnel {{ item.name }} {{ item.header }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-
- {% elif key == "reload_type" and value %}
-reload-type
- {% if value.auto_save is defined %}
- {% if value.auto_save %}
- auto-save enable
- {% else %}
- auto-save disable
- {% endif %}
- {% endif %}
- {% if value.boot_type is defined and value.boot_type %}
- {% if boot_type_state is defined and boot_type_state =="absent" %}
- no boot-type {{ value.boot_type }}
- {% else %}
- boot-type {{ value.boot_type }}
- {% endif %}
- {% endif %}
- {% if value.config_scr_download is defined %}
- {% if value.config_scr_download %}
- config-scr-download enable
- {% else %}
- config-scr-download disable
- {% endif %}
- {% endif %}
- {% if value.dhcp_timeout is defined %}
- {% if value.dhcp_timeout >=0 %}
- dhcp-timeout {{ value.dhcp_timeout }}
- {% else %}
- no dhcp-timeout 0
- {% endif %}
- {% endif %}
- {% if value.retry_count is defined %}
- {% if value.retry_count >=0 %}
- retry-count {{ value.retry_count }}
- {% else %}
- no retry-count 0
- {% endif %}
- {% endif %}
- {% if value.relay is defined %}
- {% if value.relay %}
- relay enable
- {% else %}
- relay disable
- {% endif %}
- {% endif %}
- {% if value.relay_remote_id is defined %}
- {% if value.relay_remote_id %}
- relay remote-id {{ value.relay_remote_id }}
- {% else %}
- no relay remote-id a
- {% endif %}
- {% endif %}
- {% if value.vendor_class_identifier is defined %}
- {% if value.vendor_class_identifier %}
- vendor-class-identifier {{ value.vendor_class_identifier }}
- {% else %}
- no vendor-class-identifier a
- {% endif %}
- {% endif %}
-
- {% elif key == "management_rt" and value %}
- {% for item in value %}
- {% if item.ip is defined and item.ip %}
- {% if item.ipv4 is defined and item.ipv4 %}
- {% if item.state is defined and item.state == "absent" %}
-no management route 0.0.0.0/0 {{ item.ip }}
- {% else %}
-management route 0.0.0.0/0 {{ item.ip }}
- {% endif %}
- {% else %}
- {% if item.state is defined and item.state == "absent" %}
-no management route ::/0 {{ item.ip }}
- {% else %}
-management route ::/0 {{ item.ip }}
- {% endif %}
- {% endif%}
- {% endif %}
- {% endfor %}
-
- {% elif key == "line_terminal" and value %}
- {% for key in value.keys() %}
- {% set vty_vars = value[key] %}
-line {{ key }}
- {% if vty_vars.exec_timeout is defined %}
- {% if vty_vars.exec_timeout %}
- {% set timeout = (vty_vars.exec_timeout | string).split(" ") %}
- {% if timeout | length > 1 %}
- exec-timeout {{ vty_vars.exec_timeout }}
- {% else %}
- exec-timeout {{ vty_vars.exec_timeout }} 0
- {% endif %}
- {% else %}
- no exec-timeout
- {% endif %}
- {% endif %}
- {% if vty_vars.exec_banner is defined %}
- {% if vty_vars.exec_banner %}
- exec-banner
- {% else %}
- no exec-banner
- {% endif %}
- {% endif %}
- {% if vty_vars.motd_banner is defined %}
- {% if vty_vars.motd_banner %}
- motd-banner
- {% else %}
- no motd-banner
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
-{% endfor %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml
deleted file mode 100644
index 474f282d0..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/tests/main.os9.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-# vars file for dellemc.os9.os9_system,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_system:
- hostname: os9
- enable_password: calvin
- line_terminal:
- vty 0:
- exec_timeout: 40
- vty 1:
- exec_timeout: 40 200
- service_passwd_encryption: true
- banner:
- exec: t hai t
- login:
- ack_enable: true
- ack_prompt: testbanner
- keyboard_interactive: true
- banner_text: cloginbannerc
- motd: t ansibletest t
- hash_algo:
- algo:
- - name: lag
- mode: xor1
- stack_unit: 0
- port_set: 0
- state: present
- - name: ecmp
- mode: xor1
- stack_unit: 0
- port_set: 0
- state: present
- seed:
- - value: 3
- stack_unit: 0
- port_set: 0
- state: present
- - value: 2
- state: present
- load_balance:
- ingress_port: true
- ip_selection:
- - field: vlan dest-ip
- state: present
- ipv6_selection:
- - field: dest-ipv6 vlan
- state: present
- tunnel:
- hash_field:
- - name: mac-in-mac
- header: tunnel-header-mac
- state: present
- clock:
- summer_time:
- timezone_name: PST
- type: date
- start_datetime: 2 jan 1993 22:33
- end_datetime: 3 jan 2017 22:33
- offset_mins: 20
- timezone:
- name: IST
- offset_hours: -5
- offset_mins: 20
- reload_type:
- auto_save: true
- boot_type: normal-reload
- boot_type_state: absent
- config_scr_download: true
- dhcp_timeout: 5
- retry_count: 3
- relay: true
- relay_remote_id: ho
- vendor_class_identifier: aa \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml
deleted file mode 100644
index 4d1422200..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml
deleted file mode 100644
index f056f57be..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_system/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_system \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/LICENSE b/ansible_collections/dellemc/os9/roles/os9_users/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/README.md b/ansible_collections/dellemc/os9/roles/os9_users/README.md
deleted file mode 100644
index 2fadbe708..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/README.md
+++ /dev/null
@@ -1,109 +0,0 @@
-Users role
-==========
-
-This role facilitates the configuration of global system user attributes, and it supports the configuration of CLI users. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The users role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- The role is abstracted using the `ansible_network_os` variable, which can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_users list keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``userrole`` | string (required) | Configures the role name that can be assigned to users | os9 |
-| ``userrole_state`` | string: absent,present\* | Deletes the user role with specified name if set to absent | os9 |
-| ``userrole_inherit`` | string: netadmin,netoperator,secadmin,sysadmin\* | Specifies the existing role name to inherit the permissions | os9 |
-| ``username`` | string (required) | Configures the username which must adhere to specific format guidelines (valid usernames begin with A-Z, a-z, or 0-9 and can also contain `@#$%^&*-_= +;<>,.~` characters) | os9 |
-| ``password`` | string | Configures the password for the username | os9 |
-| ``role`` | string | Configures the role assigned to the user | os9 |
-| ``privilege`` | int | Configures the privilege level for the user (0 to 15); if this key is omitted, the default privilege is 1 | os9 |
-| ``access_class`` | string | Configures the access-class for the user | os9 |
-| ``pass_key`` | integer: 0\*,7 | Configures the password as encrypted if set to 7 in os9 devices | os9 |
-| ``secret`` | string | Configures the line password as a secret in os9 devices | os9 |
-| ``secret_key`` | integer: 0\*,5 | Configures the secret line password using the MD5 encryption algorithm | os9 |
-| ``state`` | string: absent,present\* | Deletes a user account if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the inventory or the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_users* role to configure global system user attributes. The example creates a hosts file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file. The example then writes a simple playbook that only references the *os9_users* role. By including the role, you automatically get access to all of the tasks to configure user features.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_users:
- - userrole: role1
- userrole_state: present
- userrole_inherit: sysadmin
- - username: u1
- password: test
- role: sysadmin
- privilege: 0
- state: absent
- - username: u1
- password: false
- privilege: 1
- access_class: a1
- role: netadmin
- state: present
- - username: u2
- secret: test1
- secret_key : 0
- access_class: a2
- privilege: 3
- role: sysadmin
- state: present
-
-**Simple playbook to setup users — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_users
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml
deleted file mode 100644
index b0770388f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_users \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml
deleted file mode 100644
index 0b4397912..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_users \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml
deleted file mode 100644
index 9dae624a2..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_users role facilitates the configuration of user attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml
deleted file mode 100644
index 6708c02ae..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating users configuration for os9"
- template:
- src: os9_users.j2
- dest: "{{ build_dir }}/users9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning users configuration for os9"
- dellemc.os9.os9_config:
- src: os9_users.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
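With the two tasks above, configuration-file generation is opt-in. A hypothetical invocation (host names, playbook name, and the extra variable come from the README sample above) that enables it might look like:

    ansible-playbook -i hosts leaf.yaml -e os9_cfg_generate=true

With `build_dir: ../temp/os9` and `hostname: leaf1` from the sample host_vars, the first task would write the rendered commands to `../temp/os9/users9_leaf1.conf.part`, and the second task would push the template to the device through `dellemc.os9.os9_config` whenever `ansible_network_os` is `dellemc.os9.os9`.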
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2 b/ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2
deleted file mode 100644
index 64f8256a4..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/templates/os9_users.j2
+++ /dev/null
@@ -1,141 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{#############################################
-Purpose:
-Configure users commands for os9 Devices
-os9_users:
- - userrole: role1
- userrole_state: present
- userrole_inherit: sysadmin
- - username: test
- password: test
- pass_key: 7
- access_class: a1
- role: sysadmin
- privilege: 0
- state: present
- - username: u1
- password: false
- privilege: 1
- access_class: a1
- role: netadmin
- state: present
- - username: u2
- secret: test1
- secret_key : 0
- access_class: a2
- privilege: 3
- role: sysadmin
- state: present
-###################################################}
-{% if os9_users is defined and os9_users %}
-{% for item in os9_users %}
- {% if item.userrole is defined and item.userrole %}
- {% if item.userrole_state is defined and item.userrole_state == "absent" %}
- {% for item in os9_users %}
- {% if item.username is defined and item.username %}
- {% if item.state is defined and item.state == "absent" %}
-no username {{ item.username }}
- {% endif %}
- {% endif %}
- {% endfor %}
-no userrole {{ item.userrole }}
- {% else %}
- {% if item.userrole_inherit is defined and item.userrole_inherit %}
-userrole {{ item.userrole }} inherit {{ item.userrole_inherit }}
- {% else %}
-userrole {{ item.userrole }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if item.role_permission is defined and item.role_permission %}
- {% if item.role_permission.mode is defined and (item.role_permission.mode == "configure" or item.role_permission.mode == "exec" or item.role_permission.mode == "interface" or item.role_permission.mode == "line" or item.role_permission.mode == "route-map" or item.role_permission.mode == "router") %}
- {% if item.role_permission.action is defined and (item.role_permission.action == "reset" or item.role_permission.action == "addrole" or item.role_permission.action == "deleterole") %}
- {% if item.role_permission.line is defined and item.role_permission.line %}
- {% if item.role_permission.action != "reset" and item.role_permission.role_name is defined and item.role_permission.role_name %}
- {% if item.role_permission.state is defined and item.role_permission.state == "absent" %}
-norole {{ item.role_permission.mode }} {{ item.role_permission.action }} {{ item.role_permission.role_name }} {{ item.role_permission.line }}
- {% else %}
-role {{ item.role_permission.mode }} {{ item.role_permission.action }} {{ item.role_permission.role_name }} {{ item.role_permission.line }}
- {% endif %}
- {% else %}
-role {{ item.role_permission.mode }} reset {{ item.role_permission.line }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if item.username is defined and item.username %}
- {% if item.state is defined and item.state == "absent" %}
-no username {{ item.username }}
- {% else %}
- {% if item.password is defined and item.password %}
- {% if item.pass_key is defined and item.pass_key %}
-{% set passwd = item.pass_key|string+" "+item.password %}
- {% else %}
-{% set passwd = item.password %}
- {% endif %}
- {% if item.privilege is defined and item.privilege and item.access_class is defined and item.access_class and item.role is defined and item.role %}
-username {{ item.username }} password {{ passwd }} privilege {{ item.privilege }} role {{ item.role }} access-class {{ item.access_class }}
- {% elif item.role is defined and item.role and item.privilege is defined and item.privilege %}
-username {{ item.username }} password {{ passwd }} privilege {{ item.privilege }} role {{ item.role }}
- {% elif item.role is defined and item.role and item.access_class is defined and item.access_class %}
-username {{ item.username }} password {{ passwd }} access-class {{ item.access_class }} role {{ item.role }}
- {% elif item.privilege is defined and item.privilege and item.access_class is defined and item.access_class %}
-username {{ item.username }} password {{ passwd }} access-class {{ item.access_class }} privilege {{ item.privilege }}
- {% elif item.role is defined and item.role %}
-username {{ item.username }} password {{ passwd }} role {{ item.role }}
- {% elif item.privilege is defined and item.privilege %}
-username {{ item.username }} password {{ passwd }} privilege {{ item.privilege }}
- {% elif item.access_class is defined and item.access_class %}
-username {{ item.username }} password {{ passwd }} access-class {{ item.access_class }}
- {% else %}
-username {{ item.username }} password {{ passwd }}
- {% endif %}
- {% elif item.secret is defined and item.secret %}
- {% if item.secret_key is defined and item.secret_key %}
-{% set passwd = item.secret_key|string+" " +item.secret %}
- {% else %}
-{% set passwd = item.secret %}
- {% endif %}
- {% if item.privilege is defined and item.privilege and item.access_class is defined and item.access_class and item.role is defined and item.role %}
-username {{ item.username }} secret {{ passwd }} role {{ item.role }} privilege {{ item.privilege }} access-class {{
- item.access_class }}
- {% elif item.role is defined and item.role and item.privilege is defined and item.privilege %}
-username {{ item.username }} secret {{ passwd }} role {{ item.role }} privilege {{ item.privilege }}
- {% elif item.role is defined and item.role and item.access_class is defined and item.access_class %}
-username {{ item.username }} secret {{ passwd }} role {{ item.role }} access-class {{ item.access_class }}
- {% elif item.privilege is defined and item.privilege and item.access_class is defined and item.access_class %}
-username {{ item.username }} secret {{ passwd }} privilege {{ item.privilege }} access-class {{ item.access_class }}
- {% elif item.role is defined and item.role %}
-username {{ item.username }} secret {{ passwd }} role {{ item.role }}
- {% elif item.privilege is defined and item.privilege %}
-username {{ item.username }} secret {{ passwd }} privilege {{ item.privilege }}
- {% elif item.access_class is defined and item.access_class %}
-username {{ item.username }} secret {{ passwd }} access-class {{ item.access_class }}
- {% else %}
-username {{ item.username }} secret {{ passwd }}
- {% endif %}
- {% else %}
- {% if item.privilege is defined and item.privilege and item.access_class is defined and item.access_class and item.role is defined and item.role %}
-username {{ item.username }} nopassword role {{ item.role }} privilege {{ item.privilege }} access-class {{ item.access_class }}
- {% elif item.role is defined and item.role and item.privilege is defined and item.privilege %}
-username {{ item.username }} nopassword role {{ item.role }} privilege {{ item.privilege }}
- {% elif item.role is defined and item.role and item.access_class is defined and item.access_class %}
-username {{ item.username }} nopassword role {{ item.role }} access-class {{ item.access_class }}
- {% elif item.privilege is defined and item.privilege and item.access_class is defined and item.access_class %}
-username {{ item.username }} nopassword privilege {{ item.privilege }} access-class {{ item.access_class }}
- {% elif item.role is defined and item.role %}
-username {{ item.username }} nopassword role {{ item.role }}
- {% elif item.privilege is defined and item.privilege %}
-username {{ item.username }} nopassword privilege {{ item.privilege }}
- {% elif item.access_class is defined and item.access_class %}
-username {{ item.username }} nopassword access-class {{ item.access_class }}
- {% else %}
-username {{ item.username }} nopassword
- {% endif %}
-
- {% endif %}
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
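For reference, here is a sketch of the CLI that this template would render for the `os9_users` sample variables in the README host_vars above (derived by tracing the template branches; actual device output may differ in ordering or whitespace):

    userrole role1 inherit sysadmin
    no username u1
    username u1 nopassword role netadmin privilege 1 access-class a1
    username u2 secret test1 role sysadmin privilege 3 access-class a2

Note that the `u2` entry sets `secret_key: 0`; because `0` fails the truthiness check in `item.secret_key is defined and item.secret_key`, the key prefix is dropped and only the raw secret is emitted.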
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml
deleted file mode 100644
index e2882d20a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/tests/main.os9.yaml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# vars file for dellemc.os9.os9_users,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_users:
- - userrole: role1
- userrole_state: present
- userrole_inherit: sysadmin
- - username: test
- password: test
- pass_key: 7
- access_class: a1
- role: role1
- privilege: 0
- state: present
- - username: u1
- password: false
- privilege: 1
- access_class: a1
- role: netadmin
- state: present
- - username: u2
- secret: test1
- secret_key: 0
- access_class: a2
- privilege: 3
- role: sysadmin
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml
deleted file mode 100644
index 1e2649112..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_users \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml
deleted file mode 100644
index 05fc40b84..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_users/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_users \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/README.md b/ansible_collections/dellemc/os9/roles/os9_vlan/README.md
deleted file mode 100644
index b2ea5ec1c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/README.md
+++ /dev/null
@@ -1,105 +0,0 @@
-VLAN role
-=========
-
-This role facilitates configuring virtual LAN (VLAN) attributes. It supports the creation and deletion of a VLAN and its member ports. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The VLAN role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- For variables with no state variable, setting an empty value for the variable negates the corresponding configuration
-- `os9_vlan` (dictionary) holds VLAN ID keys and the `default_vlan` key
-- VLAN ID keys should be in the format "vlan <ID>" (1 to 4094)
-- Variables and values are case-sensitive
-
-**os9_vlan**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``default_vlan`` | boolean | Configures the default VLAN feature as disabled if set to true | os9 |
-
-**VLAN ID keys**
-
-| Key | Type | Notes | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``name`` | string | Configures the name of the VLAN | os9 |
-| ``description`` | string | Configures a single line description for the VLAN | os9 |
-| ``tagged_members`` | list | Specifies the list of port members to be tagged to the corresponding VLAN (see ``tagged_members.*``) | os9 |
-| ``tagged_members.port`` | string | Specifies valid device interface names to be tagged for each VLAN | os9 |
-| ``tagged_members.state`` | string: absent,present | Deletes the tagged association for the VLAN if set to absent | os9 |
-| ``untagged_members`` | list | Specifies the list of port members to be untagged to the corresponding VLAN (see ``untagged_members.*``) | os9 |
-| ``untagged_members.port`` | string | Specifies valid device interface names to be untagged for each VLAN | os9 |
-| ``untagged_members.state`` | string: absent,present | Deletes the untagged association for the VLAN if set to absent | os9 |
-| ``state`` | string: absent,present\* | Deletes the VLAN corresponding to the ID if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
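-
-To illustrate the state and empty-value semantics described above, a minimal sketch (the VLAN ID and port name below are hypothetical) that keeps a VLAN but clears its name and removes a tagged member could look like:
-
-    os9_vlan:
-      vlan 20:
-        name:
-        tagged_members:
-          - port: fortyGigE 1/4
-            state: absent
-        state: present
-
-Here the empty `name` value and the member entry with `state: absent` negate the existing name and tagged association, while the VLAN itself is retained.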
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, or in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-## Example playbook
-
-This example uses the *os9_vlan* role to set up the VLAN ID and name, and it configures tagged and untagged port members for the VLAN. You can also delete the VLAN by its ID or delete the members associated with it. It creates a *hosts* file with the switch details and corresponding variables. The *hosts* file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example writes a simple playbook that references only the *os9_vlan* role.
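-
-To have the role also write the generated commands to a .part file, set the flag in *host_vars*; a minimal sketch (values are examples) is:
-
-    os9_cfg_generate: true
-    build_dir: ../temp/os9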
-
-**Sample hosts file**
-
-    leaf1 ansible_host=<ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_vlan:
- default_vlan: true
- vlan 100:
- name: "Mgmt Network"
- description: "Int-vlan"
- tagged_members:
- - port: fortyGigE 1/30
- state: absent
- untagged_members:
- - port: fortyGigE 1/14
- state: present
- state: present
-
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_vlan
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
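-
-If `os9_cfg_generate` is set to true, the run above also writes a .part file to *build_dir*. Based on the role template and the sample variables above, its contents would be along these lines (exact whitespace may differ):
-
-    default-vlan disable
-    interface Vlan 100
-     name Mgmt Network
-     description Int-vlan
-     untagged fortyGigE 1/14
-     no tagged fortyGigE 1/30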
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml
deleted file mode 100644
index 2e62ad6e5..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_vlan \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml
deleted file mode 100644
index 93bec0dca..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_vlan \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml
deleted file mode 100644
index 74ac54cd7..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_vlan role facilitates the configuration of VLAN attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml
deleted file mode 100644
index d460f275d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating VLAN configuration for os9"
- template:
- src: os9_vlan.j2
- dest: "{{ build_dir }}/vlan9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning VLAN configuration for os9"
- dellemc.os9.os9_config:
- src: os9_vlan.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2 b/ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2
deleted file mode 100644
index e9da9e5f6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/templates/os9_vlan.j2
+++ /dev/null
@@ -1,79 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{##########################################
-Purpose:
-Configure VLAN Interface commands for os9 Devices
-os9_vlan:
- default_vlan: true
- VLAN 1:
- name: "vlan2"
- description: "int-vlan"
- tagged_members:
- - port: fortyGigE 0/32
- state: present
- - port: fortyGigE 0/40
- state: absent
- untagged_members:
- - port: fortyGigE 0/92
- state: absent
- - port: fortyGigE 0/44
- state: present
- state: present
-#########################################}
-{% if os9_vlan is defined and os9_vlan %}
-{% for key,value in os9_vlan.items() %}
- {% if key == "default_vlan" %}
- {% if value %}
-default-vlan disable
- {% else %}
-no default-vlan disable
- {% endif %}
- {% else %}
-
- {% set vlan_id = key.split(" ") %}
- {% set vlan_vars = os9_vlan[key] %}
- {% if vlan_vars.state is defined and vlan_vars.state == "absent" %}
-no interface Vlan {{ vlan_id[1] }}
- {% else %}
-interface Vlan {{ vlan_id[1] }}
- {% if vlan_vars.name is defined%}
- {% if vlan_vars.name %}
- name {{ vlan_vars.name }}
- {% else %}
- no name
- {% endif %}
- {% endif %}
- {% if vlan_vars.description is defined %}
- {% if vlan_vars.description %}
- description {{ vlan_vars.description }}
- {% else %}
- no description
- {% endif %}
- {% endif %}
- {% if vlan_vars.untagged_members is defined %}
- {% for ports in vlan_vars.untagged_members %}
- {% if ports.port is defined and ports.port %}
- {% if ports.state is defined and ports.state == "absent" %}
- no untagged {{ ports.port }}
- {% else %}
- untagged {{ ports.port }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% if vlan_vars.tagged_members is defined %}
- {% for ports in vlan_vars.tagged_members %}
- {% if ports.port is defined and ports.port %}
- {% if ports.state is defined and ports.state == "absent" %}
- no tagged {{ ports.port }}
- {% else %}
- tagged {{ ports.port }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
- {% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml
deleted file mode 100644
index 7f74b3b4d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/main.os9.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
----
-# vars file for dellemc.os9.os9_vlan,
-# below gives an example configuration
-# Sample variables for OS9 device
-os9_vlan:
- default_vlan: true
- vlan 100:
- name: "Blue Network"
- description: "Interface-vlan"
- tagged_members:
- - port: fortyGigE 1/2
- state: present
- - port: fortyGigE 1/11
- state: present
- untagged_members:
- - port: fortyGigE 1/3
- state: present
- - port: fortyGigE 1/10
- state: present
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml
deleted file mode 100644
index 1dfd42bd7..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_vlan \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml
deleted file mode 100644
index cd2ceef64..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlan/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_vlan \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/README.md b/ansible_collections/dellemc/os9/roles/os9_vlt/README.md
deleted file mode 100644
index 2154fbd81..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/README.md
+++ /dev/null
@@ -1,132 +0,0 @@
-VLT role
-========
-
-This role facilitates the configuration of the basics of virtual link trunking (VLT) to provide a loop-free topology. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The VLT role requires an SSH connection for connectivity to your Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the role generates the configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the example after this list)
-- Variables and values are case-sensitive
-
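-As a minimal illustration of the two negation conventions above (a sketch only; the rendered CLI in the comments assumes the behavior of the role's `os9_vlt.j2` template), a *host_vars* entry might look like this:
-
-    # Negate a single attribute by leaving its value empty
-    os9_vlt:
-      domain: 1
-      peer_routing_timeout:     # renders "no peer-routing-timeout"
-
-    # Delete the entire VLT instance with the state key
-    os9_vlt:
-      domain: 1
-      state: absent             # renders "no vlt domain 1"
-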
-**os9_vlt keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``domain`` | integer (required) | Configures the VLT domain identification number (1 to 1000) | os9 |
-| ``backup_destination`` | string | Configures an IPv4 address for the VLT backup link (A.B.C.D format or X:X:X:X::X format) | os9 |
-| ``destination_type`` | string | Configures the backup destination based on this destination type (IPv4 or IPv6)| os9 |
-| ``backup_destination_vrf`` | string | Configures the virtual routing and forwarding (VRF) instance through which the backup destination IP is reachable (*vrfname* must be present) | os9|
-| ``VLTi`` | integer | Configures the peer link port-channel ID for the VLT domain (1 to 4096) | os9 |
-| ``peer_routing`` | boolean | Configures VLT peer routing | os9 |
-| ``peer_routing_timeout`` | integer | Configures the timeout for peer routing (1 to 65535)| os9 |
-| ``multicast_peer_routing_timeout`` | integer | Configures the timeout for multicast peer routing (1 to 1200) | os9 |
-| ``priority`` | integer | Configures the primary priority for the VLT domain | os9 |
-| ``unit_id`` | integer | Configures the system unit ID for VLT (either 0 or 1) | os9 |
-| ``vlt_peers`` | dictionary | Contains objects to configure the VLT peer port-channel (see ``vlt_peers.*``) | os9 |
-| ``vlt_peers.<portchannelid>`` | dictionary | Configures the VLT peer port-channel (`Po <portchannelid> value`) | os9 |
-| ``vlt_peers.<portchannelid>.peer_lag`` | integer | Configures the port-channel ID of the VLT peer lag | os9 |
-| ``system_mac`` | string | Configures the system MAC address for VLT | os9 |
-| ``delay_restore`` | integer | Configures the delay in bringing up VLT ports after reload or peer-link restoration (default 90)| os9 |
-| ``delay_restore_abort_threshold`` | integer | Configures the wait interval for VLT delay-restore timer to abort (default 60) | os9 |
-| ``proxy_gateway`` | dictionary | Contains objects to configure the VLT proxy gateway (see ``proxy_gateway.*``) | os9 |
-| ``proxy_gateway.static`` | dictionary | Contains objects to configure the static VLT proxy gateway (see ``static.*``) | os9 |
-| ``static.remote_mac`` | list | Configures the remote MAC for static VLT proxy gateway (see ``remote_mac.*``) | os9 |
-| ``remote_mac.address`` | string | Configures the remote MAC address for the static VLT proxy gateway | os9 |
-| ``remote_mac.exclude_vlan_range`` | string | Configures the exclude VLAN for the static VLT proxy gateway | os9 |
-| ``remote_mac.state`` | string: absent,present | Deletes the remote MAC address or exclude VLAN configured on the proxy gateway if set to absent | os9 |
-| ``static.proxy_static_state`` | string: absent,present | Deletes the static VLT proxy gateway if set to absent | os9 |
-| ``proxy_gateway.lldp`` | dictionary | Contains objects to configure LLDP VLT proxy gateway (see ``lldp.*`` for each item); mutually exclusive with *proxy_gateway.static* | os9 |
-| ``lldp.peer_domain_link`` | list | Configures the VLT proxy gateway interface (see ``peer_domain_link.*``) | os9 |
-| ``peer_domain_link.port_channel_id`` | integer | Configures the port-channel for the VLT proxy gateway | os9 |
-| ``peer_domain_link.exclude_vlan_range`` | string | Configures the exclude VLAN for the LLDP VLT proxy gateway | os9 |
-| ``peer_domain_link.state`` | string: absent,present | Deletes the port-channel or exclude VLAN configured on the proxy gateway if set to absent | os9 |
-| ``lldp.proxy_lldp_state`` | string: absent,present | Deletes the LLDP VLT proxy gateway if set to absent | os9 |
-| ``lldp.vlt_peer_mac`` | boolean | Configures the proxy gateway transmit for square VLT | os9 |
-| ``lldp.peer_timeout`` | integer | Configures the proxy gateway restore timer (1 to 65535) | os9 |
-| ``state`` | string: absent,present | Deletes the VLT instance if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Connection variables
---------------------
-
-Ansible Dell EMC network OS roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories or inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_vlt* role to set up a VLT domain. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. The example then writes a simple playbook that only references the *os9_vlt* role.
-
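-As a minimal sketch (the values are illustrative), offline generation is enabled by setting both variables in *host_vars* or *group_vars*; per the role's tasks file, the rendered commands are written to `{{ build_dir }}/vlt9_{{ hostname }}.conf.part` in addition to being provisioned on the device:
-
-    os9_cfg_generate: true
-    build_dir: ../temp/os9
-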
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
- os9_vlt:
- domain: 1
- backup_destination: 192.168.1.1
- destination_type: "ipv4"
- priority: 1
- VLTi: 101
- backup_destination_vrf: VLTi-KEEPALIVE
- peer_routing: true
- peer_routing_timeout: 200
- multicast_peer_routing_timeout: 250
- unit_id: 0
- system_mac: aa:aa:aa:aa:aa:aa
- delay_restore: 100
- delay_restore_abort_threshold: 110
- proxy_gateway:
- static:
- remote_mac:
- - address: aa:aa:aa:aa:aa:aa
- exclude_vlan_range: 2
- state: present
- proxy_static_state: present
- vlt_peers:
- Po 12:
- peer_lag: 13
- state: present
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_vlt
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml
deleted file mode 100644
index 7d2e3ec8b..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_vlt \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml
deleted file mode 100644
index 703bdba9b..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_vlt \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml
deleted file mode 100644
index bb33e1c1b..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_vlt role facilitates the configuration of VLT attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml
deleted file mode 100644
index 34a532452..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating VLT configuration for os9"
- template:
- src: os9_vlt.j2
- dest: "{{ build_dir }}/vlt9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning VLT configuration for os9"
- dellemc.os9.os9_config:
- src: os9_vlt.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2 b/ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2
deleted file mode 100644
index 6dd6303ee..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/templates/os9_vlt.j2
+++ /dev/null
@@ -1,217 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure VLT commands for os9 Devices.
-os9_vlt:
- domain: 1
- backup_destination: 192.168.1.1
- destination_type: ipv4
- priority: 1
- VLTi: 101
- backup_destination_vrf: VLTi-KEEPALIVE
- unit_id: 0
- peer_routing: True
- peer_routing_timeout: 200
- multicast_peer_routing_timeout: 300
- vlt_peers:
- Po 12:
- peer_lag: 13
- system_mac: aa:aa:aa:aa:aa:aa
- delay_restore: 100
- delay_restore_abort_threshold: 110
- proxy_gateway:
- static:
- remote_mac:
- - address: aa:aa:aa:aa:aa:aa
- exclude_vlan_range: 2
- state: present
- proxy_static_state: present
- lldp:
- vlt_peer_mac: true
- peer_timeout: 20
- peer_domain_link:
- - port_channel_id: 10
- exclude_vlan_range: 3
- state: present
- proxy_lldp_state: present
-
- state: present
-################################}
-{% if os9_vlt is defined and os9_vlt %}
- {% if os9_vlt.vlt_peers is defined and os9_vlt.vlt_peers %}
- {% for key in os9_vlt.vlt_peers.keys() %}
- {% set channel_id = key.split(" ") %}
- {% set peer_vars = os9_vlt.vlt_peers[key] %}
-interface Port-channel {{ channel_id[1] }}
- {% if peer_vars.peer_lag is defined %}
- {% if peer_vars.peer_lag %}
- vlt-peer-lag port-channel {{ peer_vars.peer_lag}}
- {% else %}
- no vlt-peer-lag
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if os9_vlt.domain is defined and os9_vlt.domain %}
- {% if os9_vlt.state is defined and os9_vlt.state == 'absent' %}
-no vlt domain {{ os9_vlt.domain }}
- {% else %}
-vlt domain {{ os9_vlt.domain }}
- {% if os9_vlt.backup_destination is defined %}
- {% if os9_vlt.backup_destination %}
- {% if os9_vlt.destination_type is defined %}
- {% if os9_vlt.destination_type == 'ipv6' %}
- back-up destination ipv6 {{ os9_vlt.backup_destination }}
- {% elif os9_vlt.destination_type == 'ipv4' %}
- {% if os9_vlt.backup_destination_vrf is defined and os9_vlt.backup_destination_vrf %}
- back-up destination {{ os9_vlt.backup_destination }} vrf {{ os9_vlt.backup_destination_vrf }}
- {% else %}
- back-up destination {{ os9_vlt.backup_destination }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% else %}
- no back-up destination
- {% endif %}
- {% endif %}
- {% if os9_vlt.VLTi is defined %}
- {% if os9_vlt.VLTi %}
- peer-link port-channel {{ os9_vlt.VLTi }}
- {% else %}
- no peer-link
- {% endif %}
- {% endif %}
- {% if os9_vlt.priority is defined %}
- {% if os9_vlt.priority %}
- primary-priority {{ os9_vlt.priority }}
- {% else %}
- no primary-priority
- {% endif %}
- {% endif %}
- {% if os9_vlt.unit_id is defined %}
- {% if os9_vlt.unit_id >= 0 %}
- unit-id {{ os9_vlt.unit_id }}
- {% else %}
- no unit-id
- {% endif %}
- {% endif %}
- {% if os9_vlt.peer_routing is defined %}
- {% if os9_vlt.peer_routing == True %}
- peer-routing
- {% else %}
- no peer-routing
- {% endif %}
- {% endif %}
- {% if os9_vlt.peer_routing_timeout is defined %}
- {% if os9_vlt.peer_routing_timeout %}
- peer-routing-timeout {{ os9_vlt.peer_routing_timeout }}
- {% else %}
- no peer-routing-timeout
- {% endif %}
- {% endif %}
- {% if os9_vlt.multicast_peer_routing_timeout is defined %}
- {% if os9_vlt.multicast_peer_routing_timeout %}
- multicast peer-routing timeout {{ os9_vlt.multicast_peer_routing_timeout }}
- {% else %}
- no multicast peer-routing timeout
- {% endif %}
- {% endif %}
- {% if os9_vlt.system_mac is defined and os9_vlt.system_mac %}
- system-mac mac-address {{ os9_vlt.system_mac }}
- {% else %}
- no system-mac
- {% endif %}
- {% if os9_vlt.delay_restore is defined %}
- {% if os9_vlt.delay_restore %}
- delay-restore {{ os9_vlt.delay_restore }}
- {% else %}
- no delay-restore
- {% endif %}
- {% endif %}
- {% if os9_vlt.delay_restore_abort_threshold is defined %}
- {% if os9_vlt.delay_restore_abort_threshold %}
- delay-restore abort-threshold {{ os9_vlt.delay_restore_abort_threshold }}
- {% else %}
- no delay-restore abort-threshold
- {% endif %}
- {% endif %}
-
- {% if os9_vlt.proxy_gateway is defined and os9_vlt.proxy_gateway %}
- {% for key in os9_vlt.proxy_gateway.keys() %}
- {% if key == "static" %}
- {% set static_vars = os9_vlt.proxy_gateway[key] %}
- {% if static_vars.proxy_static_state is defined and static_vars.proxy_static_state =="absent" %}
- no proxy-gateway static
- {% else %}
- proxy-gateway static
- {% if static_vars.remote_mac is defined and static_vars.remote_mac %}
- {% for mac in static_vars.remote_mac %}
- {% if mac.state is defined and mac.state =="absent" %}
- {% if mac.address is defined and mac.address %}
- {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %}
- no remote-mac-address {{ mac.address }} exclude-vlan {{ mac.exclude_vlan_range }}
- {% else %}
- no remote-mac-address {{ mac.address }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if mac.address is defined and mac.address %}
- {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %}
- remote-mac-address {{ mac.address }} exclude-vlan {{ mac.exclude_vlan_range }}
- {% else %}
- remote-mac-address {{ mac.address }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% elif key == "lldp" %}
- {% set lldp_vars = os9_vlt.proxy_gateway[key] %}
- {% if lldp_vars.proxy_lldp_state is defined and lldp_vars.proxy_lldp_state =="absent" %}
- no proxy-gateway lldp
- {% else %}
- proxy-gateway lldp
- {% if lldp_vars.peer_domain_link is defined and lldp_vars.peer_domain_link %}
- {% for mac in lldp_vars.peer_domain_link %}
- {% if mac.state is defined and mac.state =="absent" %}
- {% if mac.port_channel_id is defined and mac.port_channel_id %}
- {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %}
- no peer-domain-link port-channel {{ mac.port_channel_id }} exclude-vlan {{ mac.exclude_vlan_range }}
- {% else %}
- no peer-domain-link port-channel {{ mac.port_channel_id }}
- {% endif %}
- {% endif %}
- {% else %}
- {% if mac.port_channel_id is defined and mac.port_channel_id %}
- {% if mac.exclude_vlan_range is defined and mac.exclude_vlan_range %}
- peer-domain-link port-channel {{ mac.port_channel_id }} exclude-vlan {{ mac.exclude_vlan_range }}
- {% else %}
- peer-domain-link port-channel {{ mac.port_channel_id }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% if lldp_vars.vlt_peer_mac is defined %}
- {% if lldp_vars.vlt_peer_mac %}
- vlt-peer-mac transmit
- {% else %}
- no vlt-peer-mac transmit
- {% endif %}
- {% endif %}
- {% if lldp_vars.peer_timeout is defined %}
- {% if lldp_vars.peer_timeout %}
- peer-timeout {{ lldp_vars.peer_timeout }}
- {% else %}
- no peer-timeout 2
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-
- {% endif %}
- {% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml
deleted file mode 100644
index 999d3b5c0..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/main.os9.yaml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-# vars file for dellemc.os9.os9_vlt,
-# below gives an example configuration
-# Sample variables for OS9 device
-os9_vlt:
- domain: 3
- backup_destination: 1.1.1.1
- destination_type: ipv4
- backup_destination_vrf: test
- priority: 1
- VLTi: 100
- peer_routing: True
- peer_routing_timeout: 200
- multicast_peer_routing_timeout: 250
- system_mac: aa:aa:aa:aa:aa:aa
- delay_restore: 100
- delay_restore_abort_threshold: 110
- proxy_gateway:
- static:
- remote_mac:
- - address: aa:aa:aa:aa:aa:aa
- exclude_vlan_range: 2
- state: present
- proxy_static_state: present
- lldp:
- vlt_peer_mac: true
- peer_timeout: 20
- peer_domain_link:
- - port_channel_id: 10
- exclude_vlan_range: 3
- state: present
- proxy_lldp_state: present
- vlt_peers:
- Po 12:
- peer_lag: 13
- Po 10:
- peer_lag: 14
- unit_id: 1
- state: present \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml
deleted file mode 100644
index c5a1dcf08..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_vlt \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml
deleted file mode 100644
index 115316740..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vlt/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_vlt \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/README.md b/ansible_collections/dellemc/os9/roles/os9_vrf/README.md
deleted file mode 100644
index 22792b6fc..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/README.md
+++ /dev/null
@@ -1,125 +0,0 @@
-VRF role
-========
-
-This role facilitates the configuration of basic virtual routing and forwarding (VRF), which partitions a physical router into multiple virtual routers. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The vrf role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the variable `ansible_network_os` that can take the `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable (see the example after the key table)
-- Setting an empty value for any variable negates the corresponding configuration
-- Variables and values are case-sensitive
-
-**os9_vrf keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrfdetails`` | list | Configures the list of VRF instances (see ``vrfdetails.*``) | os9 |
-| ``vrfdetails.vrf_name`` | string | Specifies the VRF instance name (default is management) | os9 |
-| ``vrfdetails.vrf_id`` | integer (required) | Configures the VRF ID for the corresponding VRF | os9 |
-| ``vrfdetails.description`` | string | Configures a one line description for the VRF | os9 |
-| ``vrfdetails.state`` | string | Deletes the VRF instance name if set to absent | os9 |
-| ``vrfdetails.tagged_portname`` | list | Specifies the list of valid interface names | os9 |
-| ``tagged_portname.port`` | string | Specifies a valid interface name | os9 |
-| ``tagged_portname.state`` | string | Deletes VRF association in the interface if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
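-
-For example, deleting a VRF instance and removing its interface association can be expressed by setting the corresponding ``state`` keys to absent; a minimal sketch (the VRF and interface names are hypothetical):
-
-    os9_vrf:
-      vrfdetails:
-        - vrf_name: "blue"
-          vrf_id: 10
-          state: "absent"
-          tagged_portname:
-            - port: fortyGigE 1/5
-              state: "absent"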
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Dependencies
-------------
-
-The *os9_vrf* role is built on modules included in the core Ansible code. These modules were added in Ansible version 2.2.0.
-
-Example playbook
-----------------
-
-This example uses the *os9_vrf* role to set up a VRF and associate it with an interface. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the variable generates the configuration commands as a .part file in the *build_dir* path. By default, the variable is set to false. Write a simple playbook that references the *os9_vrf* role.
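-
-For reference, a minimal sketch of the host variables that enable offline configuration generation (the *build_dir* value here mirrors the sample host_vars below):
-
-    os9_cfg_generate: true
-    build_dir: ../temp/os9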
-
-*upd_src_ip_loopback_id* depends on the interface being associated with the VRF, so the *os9_vrf* role needs to be invoked twice with different input dictionaries: one to create the VRF and one for *upd_src_ip_loopback_id*.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1 for os9 device**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_vrf:
- vrfdetails:
- - vrf_name: "os9vrf"
- state: "present"
- ip_route_import:
- community_value: "10:20"
- state: "present"
- ip_route_export:
- community_value: "30:40"
- state: "present"
- ipv6_route_import:
- community_value: "40:50"
- state: "absent"
- ipv6_route_export:
- community_value: "60:70"
- state: "absent"
- map_ip_interface:
- - intf_id : "loopback11"
- state : "present"
-
- os9_vrf_upd_src_loopback:
- vrfdetails:
- - vrf_name: "os9vrf"
- state: "present"
- upd_src_ip_loopback_id: 11
-
-**Simple playbook to set up the system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_vrf
-
-**Simple playbook to set up os9 with upd_src_ip_loopback_id — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_vrf
- - hosts: leaf1
- vars:
- os9_vrf: "{{ os9_vrf_upd_src_loopback }}"
- roles:
- - dellemc.os9.os9_vrf
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml
deleted file mode 100644
index 5f46d6469..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_vrf \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml
deleted file mode 100644
index accc50fbf..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_vrf \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml
deleted file mode 100644
index f70f98b6a..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2017-2020 Dell Inc. or its subsidiaries. All Rights Reserved.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_vrf role facilitates the configuration of VRF attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - emc
- - dellemc
- - os9 \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml
deleted file mode 100644
index 8f4592123..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating VRF configuration for os9"
- template:
- src: os9_vrf.j2
- dest: "{{ build_dir }}/vrf9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False'))| bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning VRF configuration for os9"
- dellemc.os9.os9_config:
- src: os9_vrf.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2 b/ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2
deleted file mode 100644
index 20690130e..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/templates/os9_vrf.j2
+++ /dev/null
@@ -1,68 +0,0 @@
-#jinja2: trim_blocks: True,lstrip_blocks: True
-{################################
-Purpose:
-Configure VRF on os9 Devices.
-os9_vrf:
- vrfdetails:
- - vrf_id: 1
- vrf_name: VLTi-KEEPALIVE
- description: VRF-to-support-Peer-Keepalive-Link
- state: present
- tagged_portname:
- - port: fortyGige 1/2
- state: present
- - port: fortyGige 1/3
- state: absent
-################################}
-{% if (os9_vrf is defined and os9_vrf) %}
-{% if os9_vrf.vrfdetails is defined %}
- {% for vrf in os9_vrf.vrfdetails %}
- {% if vrf.vrf_name is defined %}
- {% if vrf.vrf_name %}
- {% if vrf.state is defined and vrf.state == 'absent' %}
- {% if vrf.tagged_portname is defined and vrf.tagged_portname %}
- {% for tag in vrf.tagged_portname %}
- {% if tag.state is defined and tag.state == 'absent' %}
- {% if tag.port is defined and tag.port %}
-interface {{ tag.port }}
- no ip vrf forwarding
- exit
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-no ip vrf {{ vrf.vrf_name }}
- {% else %}
- {% if vrf.vrf_id is defined %}
- {% if vrf.vrf_id %}
-feature vrf
-ip vrf {{ vrf.vrf_name }} {{ vrf.vrf_id }}
- {% if vrf.description is defined %}
- {% if vrf.description %}
- description {{ vrf.description }}
- {% else %}
- no description sample
- {% endif %}
- {% endif %}
- {% if vrf.tagged_portname is defined %}
- {% if vrf.tagged_portname %}
- {% for tag in vrf.tagged_portname %}
- {% if tag.port is defined and tag.port %}
-interface {{ tag.port }}
- {% if tag.state is defined and tag.state == 'absent' %}
- no ip vrf forwarding
- {% else %}
- ip vrf forwarding {{ vrf.vrf_name }}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml
deleted file mode 100644
index 865dcc3c1..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/main.os9.yaml
+++ /dev/null
@@ -1,15 +0,0 @@
----
-# vars file for dellemc.os9.os9_vrf,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_vrf:
- vrfdetails:
- - vrf_id: 23
- vrf_name: VRFi-KEEPALIVE
- description: test
- state: absent
- tagged_portname:
- - port: fortyGigE 1/7
- state: absent
- - port: fortyGigE 1/8
- state: absent \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml
deleted file mode 100644
index 286efc507..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_vrf \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml
deleted file mode 100644
index 0d4921a9f..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrf/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_vrf \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/README.md b/ansible_collections/dellemc/os9/roles/os9_vrrp/README.md
deleted file mode 100644
index 794471581..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/README.md
+++ /dev/null
@@ -1,148 +0,0 @@
-VRRP role
-=========
-
-This role facilitates configuring virtual router redundancy protocol (VRRP) attributes. It supports the creation of VRRP groups for interfaces and setting the VRRP group attributes. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The VRRP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the variable generates the role configuration commands in a file
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration
-- `os9_vrrp` (dictionary) holds a dictionary keyed by the interface name
-- Interface names can correspond to any valid os9 interface with a unique interface identifier name
-- Physical interface names must be in *<interfacename> <tuple>* format (for example, *fortyGigE 1/1*)
-- Logical interface names must be in *<logical_interfacename> <id>* format (for example, *vlan 1* for os9)
-- Variables and values are case-sensitive
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|-----------------------|
-| ``vrrp`` | dictionary | Configures VRRP commands (see ``vrrp.*``) | os9 |
-| ``delay_min`` | integer | Configures the minimum delay timer applied after an interface up event (0 to 900) | os9 |
-| ``delay_reload`` | integer | Configures the minimum delay timer applied after boot (0 to 900) | os9 |
-| ``vrrp_group`` | list | Configures VRRP group commands (see ``vrrp_group.*``) | os9 |
-| ``vrrp_group.type`` | string: ipv6,ipv4 | Specifies the type of the VRRP group | os9 |
-| ``vrrp_group.group_id`` | integer (required) | Configures the ID for the VRRP group (1 to 255) | os9 |
-| ``vrrp_group.description`` | string | Configures a single line description for the VRRP group | os9 |
-| ``vrrp_group.virtual_address`` | string | Configures a virtual-address to the VRRP group (A.B.C.D format) | os9 |
-| ``vrrp_group.enable`` | boolean: true,false | Enables/disables the VRRP group at the interface | os9 |
-| ``vrrp_group.preempt`` | boolean: true\*,false | Configures preempt mode on the VRRP group | os9 |
-| ``vrrp_group.priority`` | integer | Configures priority for the VRRP group (1 to 255; default 100) | os9 |
-| ``vrrp_group.version`` | string: 2\*,3,both | Configures the VRRP version of the VRRP group; not supported when *vrrp_group.type* is "ipv6" | os9 |
-| ``vrrp_group.hold_time_centisecs`` | integer | Configures the hold-time for the VRRP group in centiseconds (0 to 65525, in multiples of 25; default 100); centisecs are converted into seconds in version 2 | os9 |
-| ``vrrp_group.adv_interval_centisecs`` | integer | Configures the advertisement interval for the VRRP group in centiseconds (25 to 4075, in multiples of 25; default 100); centisecs are converted into seconds in version 2 | os9 |
-| ``vrrp_group.track_interface`` | list | Configures the track interface of the VRRP group (see ``track_interface.*``) | os9 |
-| ``track_interface.resource_id`` | integer | Configures the object tracking resource ID of the VRRP group; mutually exclusive with *track_interface.interface* | os9 |
-| ``track_interface.interface`` | string | Configures the track interface of the VRRP group (<interface name> <interface number> format) | os9 |
-| ``track_interface.priority_cost`` | integer | Configures the priority cost for track interface of the VRRP group (1 to 254; default 10) | os9 |
-| ``track_interface.state`` | string: present\*,absent | Deletes the specific track interface from the VRRP group if set to absent | os9 |
-| ``vrrp_group.track_interface_state`` | string: present\*,absent | Deletes all track interfaces from the VRRP group if set to absent | os9 |
-| ``vrrp_group.authentication`` | dictionary | Configures the authentication type for the VRRP group (see ``authentication.*``); not supported when ``vrrp_group.type`` is "ipv6" | os9 |
-| ``authentication.key`` | string (required): 0,7,LINE | Configures the authentication key for the VRRP group | os9 |
-| ``authentication.key_string`` | string | Configures the user key string; if key is 7, this variable takes the hidden user key string; if key is 0, this variable takes the unencrypted user key (clear-text); supported only if the value of *authentication.key* is 7 or 0 | os9 |
-| ``authentication.state`` | string: present\*,absent | Deletes authentication from the interface VRRP group if set to absent | os9 |
-| ``vrrp_group.state`` | string: present\*,absent | Deletes the VRRP group from the interface if set to absent | os9 |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
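-
-The keys above populate a per-interface dictionary. The following is a minimal sketch of the expected structure (the interface name and all values are illustrative only); a complete example appears in the playbook section below.
-
-    os9_vrrp:
-      fortyGigE 1/1:
-        vrrp:
-          delay_min: 2
-        vrrp_group:
-          - group_id: 1
-            virtual_address: 10.1.1.254
-            priority: 150
-            state: present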
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself.
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-----------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
-
-Example playbook
-----------------
-
-This example uses the *os9_vrrp* role to configure VRRP commands on interfaces. It creates a *hosts* file with the switch details and corresponding variables. The hosts file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, this variable is set to false. The example then uses a simple playbook that only references the *os9_vrrp* role.
-
-**Sample hosts file**
-
- leaf1 ansible_host= <ip_address>
-
-**Sample host_vars/leaf1**
-
- hostname: leaf1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
- os9_vrrp:
- fortyGigE 1/5:
- vrrp:
- delay_min: 2
- delay_reload: 3
- vrrp_group:
- - group_id: 2
- type: ipv6
- description: "Interface-vrrp-ipv6"
- virtual_address: 2001:4898:5808:ffa3::9
- enable: true
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- - interface: port-channel 120
- priority_cost: 20
- - interface: fortyGigE 1/11
- state: present
- track_interface_state: present
- adv_interval_centisecs: 200
- hold_time_centisecs: 20
- - group_id: 4
- state: present
- description: "Interface-vrrp4"
- virtual_address: 10.28.0.2
- enable: true
- priority: 120
- preempt: false
- version: both
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- - interface: port-channel 120
- priority_cost: 20
- - interface: fortyGigE 1/10
- state: present
- track_interface_state: present
- adv_interval_centisecs: 225
- hold_time_centisecs: 25
- authentication:
- key: 0
- key_string: vrrpkey
- state: present
-
-**Simple playbook to setup system — leaf.yaml**
-
- - hosts: leaf1
- roles:
- - dellemc.os9.os9_vrrp
-
-**Run**
-
- ansible-playbook -i hosts leaf.yaml
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml
deleted file mode 100644
index 2d4f53173..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_vrrp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml
deleted file mode 100644
index 78760008d..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_vrrp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml
deleted file mode 100644
index db226e2c9..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/meta/main.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: >
- The os9_vrrp role facilitates the configuration of Virtual Router Redundancy Protocol (VRRP) attributes in
- devices running Dell EMC Networking Operating Systems.
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - dellemc
- - emc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml
deleted file mode 100644
index b8d3bed1c..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating VRRP configuration for os9"
- template:
- src: os9_vrrp.j2
- dest: "{{ build_dir }}/vrrp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning VRRP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_vrrp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2 b/ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2
deleted file mode 100644
index f3e4a1dfc..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/templates/os9_vrrp.j2
+++ /dev/null
@@ -1,218 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{##########################################
-Purpose:
-Configure VRRP commands for os9 Devices
-os9_vrrp:
- fortyGigE 1/4:
- vrrp:
- delay_min: 2
- delay_reload: 2
- vrrp_group:
- - group_id: 2
- type: ipv6
- description: "Interface-vrrp-ipv6"
- virtual_address: 2001:4898:5808:ffa3::9
- enable: true
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- - interface: port-channel 120
- priority_cost: 20
- - interface: fortyGigE 1/10
- state: present
- track_interface_state: present
- adv_interval_centisecs: 200
- hold_time_centisecs: 20
- state: present
-
- - group_id: 4
- state: present
- description: "Interface-vrrp4"
- virtual_address: 10.2.0.1
- enable: true
- priority: 120
- preempt: false
- version: 2
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- - interface: port-channel 120
- priority_cost: 20
- - interface: fortyGigE 1/12
- state: present
- track_interface_state: present
- adv_interval_centisecs: 200
- hold_time_centisecs: 20
- authentication:
- key: 0
- key_string: vrrpkey
- state: present
-#########################################}
-{% if os9_vrrp is defined and os9_vrrp %}
-{% for key,value in os9_vrrp.items() %}
-interface {{ key }}
- {% if value %}
- {% if value.vrrp is defined and value.vrrp %}
- {% if value.vrrp.delay_min is defined %}
- {% if value.vrrp.delay_min >=0 %}
- vrrp delay minimum {{ value.vrrp.delay_min }}
- {% else %}
- no vrrp delay minimum
- {% endif %}
- {% endif %}
- {% if value.vrrp.delay_reload is defined %}
- {% if value.vrrp.delay_reload >=0 %}
- vrrp delay reload {{ value.vrrp.delay_reload }}
- {% else %}
- no vrrp delay reload
- {% endif %}
- {% endif %}
- {% endif %}
- {% for group in value.vrrp_group %}
- {% if group.group_id is defined and group.group_id %}
- {% if group.state is defined and group.state == "absent" %}
- {% if group.type is defined and group.type == "ipv6" %}
- no vrrp-ipv6-group {{ group.group_id }}
- {% else %}
- no vrrp-group {{ group.group_id }}
- {% endif %}
- {% else %}
- {% if group.type is defined and group.type == "ipv6" %}
- vrrp-ipv6-group {{ group.group_id }}
- {% else %}
- vrrp-group {{ group.group_id }}
- {% endif %}
- {% if group.type is not defined or not group.type == "ipv6" %}
- {% if group.version is defined %}
- {% if group.version %}
- version {{ group.version }}
- {% else %}
- no version
- {% endif %}
- {% endif %}
- {% endif %}
- {% if group.adv_interval_centisecs is defined %}
- {% if group.adv_interval_centisecs %}
- {% if group.version is not defined or (group.version is defined and group.version == 2) %}
- {% set adv_int = group.adv_interval_centisecs/100 %}
- {% if group.type is defined and group.type == "ipv6" %}
- advertise-interval centisecs {{ group.adv_interval_centisecs }}
- {% else %}
- advertise-interval {{ adv_int|int }}
- {% endif %}
- {% else %}
- advertise-interval centisecs {{ group.adv_interval_centisecs }}
- {% endif %}
- {% else %}
- no advertise-interval
- {% endif %}
- {% endif %}
- {% if group.hold_time_centisecs is defined %}
- {% if group.hold_time_centisecs >= 0 %}
- {% if group.version is not defined or (group.version is defined and group.version == 2) %}
- {% set hold_time = group.hold_time_centisecs/100 %}
- {% if group.type is defined and group.type == "ipv6" %}
- hold-time centisecs {{ group.hold_time_centisecs }}
- {% else %}
- hold-time {{ hold_time|int }}
- {% endif %}
- {% else %}
- hold-time centisecs {{ group.hold_time_centisecs }}
- {% endif %}
- {% else %}
- no hold-time
- {% endif %}
- {% endif %}
- {% if group.track_interface_state is defined and group.track_interface_state == "absent" %}
- no track
- {% else %}
- {% if group.track_interface is defined and group.track_interface %}
- {% for track_item in group.track_interface %}
- {% if track_item.state is defined and track_item.state == "absent" %}
- {% if track_item.resource_id is defined and track_item.resource_id %}
- no track {{ track_item.resource_id }}
- {% elif track_item.interface is defined and track_item.interface %}
- no track {{ track_item.interface }}
- {% endif %}
- {% else %}
- {% if track_item.resource_id is defined and track_item.resource_id %}
- {% if track_item.priority_cost is defined and track_item.priority_cost %}
- track {{ track_item.resource_id }} priority-cost {{ track_item.priority_cost }}
- {% else %}
- track {{ track_item.resource_id }}
- {% endif %}
- {% elif track_item.interface is defined and track_item.interface %}
- {% if track_item.priority_cost is defined and track_item.priority_cost %}
- track {{ track_item.interface }} priority-cost {{ track_item.priority_cost }}
- {% else %}
- track {{ track_item.interface }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
- {% if group.type is not defined or not group.type == "ipv6" %}
- {% if group.authentication is defined and group.authentication %}
- {% if group.authentication.state is defined and group.authentication.state == "absent" %}
- no authentication-type
- {% else %}
- {% if group.authentication.key is defined %}
- {% if group.version is not defined or (group.version is defined and group.version == 2) %}
- {% if group.authentication.key == 0 or group.authentication.key == 7 %}
- {% if group.authentication.key_string is defined and group.authentication.key_string %}
- authentication-type simple {{ group.authentication.key }} {{ group.authentication.key_string }}
- {% endif %}
- {% elif group.authentication.key %}
- authentication-type simple {{ group.authentication.key }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% if group.virtual_address is defined %}
- {% if group.virtual_address %}
- virtual-address {{ group.virtual_address }}
- {% else %}
- no virtual-address
- {% endif %}
- {% endif %}
- {% if group.description is defined %}
- {% if group.description %}
- description {{ group.description }}
- {% else %}
- no description
- {% endif %}
- {% endif %}
- {% if group.preempt is defined %}
- {% if group.preempt %}
- preempt
- {% else %}
- no preempt
- {% endif %}
- {% endif %}
- {% if group.enable is defined %}
- {% if group.enable %}
- no disable
- {% else %}
- disable
- {% endif %}
- {% endif %}
- {% if group.priority is defined %}
- {% if group.priority %}
- priority {{ group.priority }}
- {% else %}
- no priority
- {% endif %}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
-{% endfor %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml
deleted file mode 100644
index 856d381cc..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/main.os9.yaml
+++ /dev/null
@@ -1,59 +0,0 @@
----
-# vars file for dellemc.os9.os9_vrrp,
-# below gives an example configuration
-# Sample variables for OS9 device
-os9_vrrp:
- fortyGigE 0/28:
- vrrp:
- delay_min: 4
- delay_reload: 5
- vrrp_group:
- - group_id: 2
- type: ipv6
- description: "Interface-vrrp-ipv6"
- virtual_address: 2001:4898:5808:ffa3::9
- enable: true
- priority: 120
- preempt: false
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- - interface: port-channel 120
- priority_cost: 20
- - interface: fortyGigE 0/40
- state: present
- track_interface_state: present
- adv_interval_centisecs: 200
- hold_time_centisecs: 20
- state: present
- - group_id: 4
- state: present
- description: "Interface-vrrp4"
- virtual_address: 10.28.0.2
- enable: true
- priority: 120
- preempt: false
- version: 3
- track_interface:
- - resource_id: 3
- priority_cost: 25
- state: present
- - interface: port-channel 120
- priority_cost: 20
- - interface: fortyGigE 0/20
- state: absent
- track_interface_state: present
- adv_interval_centisecs: 200
- hold_time_centisecs: 200
- authentication:
- key: 0
- key_string: vrrpkey
- state: present
- - group_id: 3
- state: present
- description: "Interface-vrrp3"
- virtual_address: 10.28.0.3
- enable: true
- priority: 120
- preempt: false \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml
deleted file mode 100644
index a12c274f1..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_vrrp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml
deleted file mode 100644
index c241486b6..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_vrrp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_vrrp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE b/ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE
deleted file mode 100644
index 2c9b8e1fd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/LICENSE
+++ /dev/null
@@ -1,674 +0,0 @@
- GNU GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
- Preamble
-
- The GNU General Public License is a free, copyleft license for
-software and other kinds of works.
-
- The licenses for most software and other practical works are designed
-to take away your freedom to share and change the works. By contrast,
-the GNU General Public License is intended to guarantee your freedom to
-share and change all versions of a program--to make sure it remains free
-software for all its users. We, the Free Software Foundation, use the
-GNU General Public License for most of our software; it applies also to
-any other work released this way by its authors. You can apply it to
-your programs, too.
-
- When we speak of free software, we are referring to freedom, not
-price. Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-them if you wish), that you receive source code or can get it if you
-want it, that you can change the software or use pieces of it in new
-free programs, and that you know you can do these things.
-
- To protect your rights, we need to prevent others from denying you
-these rights or asking you to surrender the rights. Therefore, you have
-certain responsibilities if you distribute copies of the software, or if
-you modify it: responsibilities to respect the freedom of others.
-
- For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must pass on to the recipients the same
-freedoms that you received. You must make sure that they, too, receive
-or can get the source code. And you must show them these terms so they
-know their rights.
-
- Developers that use the GNU GPL protect your rights with two steps:
-(1) assert copyright on the software, and (2) offer you this License
-giving you legal permission to copy, distribute and/or modify it.
-
- For the developers' and authors' protection, the GPL clearly explains
-that there is no warranty for this free software. For both users' and
-authors' sake, the GPL requires that modified versions be marked as
-changed, so that their problems will not be attributed erroneously to
-authors of previous versions.
-
- Some devices are designed to deny users access to install or run
-modified versions of the software inside them, although the manufacturer
-can do so. This is fundamentally incompatible with the aim of
-protecting users' freedom to change the software. The systematic
-pattern of such abuse occurs in the area of products for individuals to
-use, which is precisely where it is most unacceptable. Therefore, we
-have designed this version of the GPL to prohibit the practice for those
-products. If such problems arise substantially in other domains, we
-stand ready to extend this provision to those domains in future versions
-of the GPL, as needed to protect the freedom of users.
-
- Finally, every program is threatened constantly by software patents.
-States should not allow patents to restrict development and use of
-software on general-purpose computers, but in those that do, we wish to
-avoid the special danger that patents applied to a free program could
-make it effectively proprietary. To prevent this, the GPL assures that
-patents cannot be used to render the program non-free.
-
- The precise terms and conditions for copying, distribution and
-modification follow.
-
- TERMS AND CONDITIONS
-
- 0. Definitions.
-
- "This License" refers to version 3 of the GNU General Public License.
-
- "Copyright" also means copyright-like laws that apply to other kinds of
-works, such as semiconductor masks.
-
- "The Program" refers to any copyrightable work licensed under this
-License. Each licensee is addressed as "you". "Licensees" and
-"recipients" may be individuals or organizations.
-
- To "modify" a work means to copy from or adapt all or part of the work
-in a fashion requiring copyright permission, other than the making of an
-exact copy. The resulting work is called a "modified version" of the
-earlier work or a work "based on" the earlier work.
-
- A "covered work" means either the unmodified Program or a work based
-on the Program.
-
- To "propagate" a work means to do anything with it that, without
-permission, would make you directly or secondarily liable for
-infringement under applicable copyright law, except executing it on a
-computer or modifying a private copy. Propagation includes copying,
-distribution (with or without modification), making available to the
-public, and in some countries other activities as well.
-
- To "convey" a work means any kind of propagation that enables other
-parties to make or receive copies. Mere interaction with a user through
-a computer network, with no transfer of a copy, is not conveying.
-
- An interactive user interface displays "Appropriate Legal Notices"
-to the extent that it includes a convenient and prominently visible
-feature that (1) displays an appropriate copyright notice, and (2)
-tells the user that there is no warranty for the work (except to the
-extent that warranties are provided), that licensees may convey the
-work under this License, and how to view a copy of this License. If
-the interface presents a list of user commands or options, such as a
-menu, a prominent item in the list meets this criterion.
-
- 1. Source Code.
-
- The "source code" for a work means the preferred form of the work
-for making modifications to it. "Object code" means any non-source
-form of a work.
-
- A "Standard Interface" means an interface that either is an official
-standard defined by a recognized standards body, or, in the case of
-interfaces specified for a particular programming language, one that
-is widely used among developers working in that language.
-
- The "System Libraries" of an executable work include anything, other
-than the work as a whole, that (a) is included in the normal form of
-packaging a Major Component, but which is not part of that Major
-Component, and (b) serves only to enable use of the work with that
-Major Component, or to implement a Standard Interface for which an
-implementation is available to the public in source code form. A
-"Major Component", in this context, means a major essential component
-(kernel, window system, and so on) of the specific operating system
-(if any) on which the executable work runs, or a compiler used to
-produce the work, or an object code interpreter used to run it.
-
- The "Corresponding Source" for a work in object code form means all
-the source code needed to generate, install, and (for an executable
-work) run the object code and to modify the work, including scripts to
-control those activities. However, it does not include the work's
-System Libraries, or general-purpose tools or generally available free
-programs which are used unmodified in performing those activities but
-which are not part of the work. For example, Corresponding Source
-includes interface definition files associated with source files for
-the work, and the source code for shared libraries and dynamically
-linked subprograms that the work is specifically designed to require,
-such as by intimate data communication or control flow between those
-subprograms and other parts of the work.
-
- The Corresponding Source need not include anything that users
-can regenerate automatically from other parts of the Corresponding
-Source.
-
- The Corresponding Source for a work in source code form is that
-same work.
-
- 2. Basic Permissions.
-
- All rights granted under this License are granted for the term of
-copyright on the Program, and are irrevocable provided the stated
-conditions are met. This License explicitly affirms your unlimited
-permission to run the unmodified Program. The output from running a
-covered work is covered by this License only if the output, given its
-content, constitutes a covered work. This License acknowledges your
-rights of fair use or other equivalent, as provided by copyright law.
-
- You may make, run and propagate covered works that you do not
-convey, without conditions so long as your license otherwise remains
-in force. You may convey covered works to others for the sole purpose
-of having them make modifications exclusively for you, or provide you
-with facilities for running those works, provided that you comply with
-the terms of this License in conveying all material for which you do
-not control copyright. Those thus making or running the covered works
-for you must do so exclusively on your behalf, under your direction
-and control, on terms that prohibit them from making any copies of
-your copyrighted material outside their relationship with you.
-
- Conveying under any other circumstances is permitted solely under
-the conditions stated below. Sublicensing is not allowed; section 10
-makes it unnecessary.
-
- 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
-
- No covered work shall be deemed part of an effective technological
-measure under any applicable law fulfilling obligations under article
-11 of the WIPO copyright treaty adopted on 20 December 1996, or
-similar laws prohibiting or restricting circumvention of such
-measures.
-
- When you convey a covered work, you waive any legal power to forbid
-circumvention of technological measures to the extent such circumvention
-is effected by exercising rights under this License with respect to
-the covered work, and you disclaim any intention to limit operation or
-modification of the work as a means of enforcing, against the work's
-users, your or third parties' legal rights to forbid circumvention of
-technological measures.
-
- 4. Conveying Verbatim Copies.
-
- You may convey verbatim copies of the Program's source code as you
-receive it, in any medium, provided that you conspicuously and
-appropriately publish on each copy an appropriate copyright notice;
-keep intact all notices stating that this License and any
-non-permissive terms added in accord with section 7 apply to the code;
-keep intact all notices of the absence of any warranty; and give all
-recipients a copy of this License along with the Program.
-
- You may charge any price or no price for each copy that you convey,
-and you may offer support or warranty protection for a fee.
-
- 5. Conveying Modified Source Versions.
-
- You may convey a work based on the Program, or the modifications to
-produce it from the Program, in the form of source code under the
-terms of section 4, provided that you also meet all of these conditions:
-
- a) The work must carry prominent notices stating that you modified
- it, and giving a relevant date.
-
- b) The work must carry prominent notices stating that it is
- released under this License and any conditions added under section
- 7. This requirement modifies the requirement in section 4 to
- "keep intact all notices".
-
- c) You must license the entire work, as a whole, under this
- License to anyone who comes into possession of a copy. This
- License will therefore apply, along with any applicable section 7
- additional terms, to the whole of the work, and all its parts,
- regardless of how they are packaged. This License gives no
- permission to license the work in any other way, but it does not
- invalidate such permission if you have separately received it.
-
- d) If the work has interactive user interfaces, each must display
- Appropriate Legal Notices; however, if the Program has interactive
- interfaces that do not display Appropriate Legal Notices, your
- work need not make them do so.
-
- A compilation of a covered work with other separate and independent
-works, which are not by their nature extensions of the covered work,
-and which are not combined with it such as to form a larger program,
-in or on a volume of a storage or distribution medium, is called an
-"aggregate" if the compilation and its resulting copyright are not
-used to limit the access or legal rights of the compilation's users
-beyond what the individual works permit. Inclusion of a covered work
-in an aggregate does not cause this License to apply to the other
-parts of the aggregate.
-
- 6. Conveying Non-Source Forms.
-
- You may convey a covered work in object code form under the terms
-of sections 4 and 5, provided that you also convey the
-machine-readable Corresponding Source under the terms of this License,
-in one of these ways:
-
- a) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by the
- Corresponding Source fixed on a durable physical medium
- customarily used for software interchange.
-
- b) Convey the object code in, or embodied in, a physical product
- (including a physical distribution medium), accompanied by a
- written offer, valid for at least three years and valid for as
- long as you offer spare parts or customer support for that product
- model, to give anyone who possesses the object code either (1) a
- copy of the Corresponding Source for all the software in the
- product that is covered by this License, on a durable physical
- medium customarily used for software interchange, for a price no
- more than your reasonable cost of physically performing this
- conveying of source, or (2) access to copy the
- Corresponding Source from a network server at no charge.
-
- c) Convey individual copies of the object code with a copy of the
- written offer to provide the Corresponding Source. This
- alternative is allowed only occasionally and noncommercially, and
- only if you received the object code with such an offer, in accord
- with subsection 6b.
-
- d) Convey the object code by offering access from a designated
- place (gratis or for a charge), and offer equivalent access to the
- Corresponding Source in the same way through the same place at no
- further charge. You need not require recipients to copy the
- Corresponding Source along with the object code. If the place to
- copy the object code is a network server, the Corresponding Source
- may be on a different server (operated by you or a third party)
- that supports equivalent copying facilities, provided you maintain
- clear directions next to the object code saying where to find the
- Corresponding Source. Regardless of what server hosts the
- Corresponding Source, you remain obligated to ensure that it is
- available for as long as needed to satisfy these requirements.
-
- e) Convey the object code using peer-to-peer transmission, provided
- you inform other peers where the object code and Corresponding
- Source of the work are being offered to the general public at no
- charge under subsection 6d.
-
- A separable portion of the object code, whose source code is excluded
-from the Corresponding Source as a System Library, need not be
-included in conveying the object code work.
-
- A "User Product" is either (1) a "consumer product", which means any
-tangible personal property which is normally used for personal, family,
-or household purposes, or (2) anything designed or sold for incorporation
-into a dwelling. In determining whether a product is a consumer product,
-doubtful cases shall be resolved in favor of coverage. For a particular
-product received by a particular user, "normally used" refers to a
-typical or common use of that class of product, regardless of the status
-of the particular user or of the way in which the particular user
-actually uses, or expects or is expected to use, the product. A product
-is a consumer product regardless of whether the product has substantial
-commercial, industrial or non-consumer uses, unless such uses represent
-the only significant mode of use of the product.
-
- "Installation Information" for a User Product means any methods,
-procedures, authorization keys, or other information required to install
-and execute modified versions of a covered work in that User Product from
-a modified version of its Corresponding Source. The information must
-suffice to ensure that the continued functioning of the modified object
-code is in no case prevented or interfered with solely because
-modification has been made.
-
- If you convey an object code work under this section in, or with, or
-specifically for use in, a User Product, and the conveying occurs as
-part of a transaction in which the right of possession and use of the
-User Product is transferred to the recipient in perpetuity or for a
-fixed term (regardless of how the transaction is characterized), the
-Corresponding Source conveyed under this section must be accompanied
-by the Installation Information. But this requirement does not apply
-if neither you nor any third party retains the ability to install
-modified object code on the User Product (for example, the work has
-been installed in ROM).
-
- The requirement to provide Installation Information does not include a
-requirement to continue to provide support service, warranty, or updates
-for a work that has been modified or installed by the recipient, or for
-the User Product in which it has been modified or installed. Access to a
-network may be denied when the modification itself materially and
-adversely affects the operation of the network or violates the rules and
-protocols for communication across the network.
-
- Corresponding Source conveyed, and Installation Information provided,
-in accord with this section must be in a format that is publicly
-documented (and with an implementation available to the public in
-source code form), and must require no special password or key for
-unpacking, reading or copying.
-
- 7. Additional Terms.
-
- "Additional permissions" are terms that supplement the terms of this
-License by making exceptions from one or more of its conditions.
-Additional permissions that are applicable to the entire Program shall
-be treated as though they were included in this License, to the extent
-that they are valid under applicable law. If additional permissions
-apply only to part of the Program, that part may be used separately
-under those permissions, but the entire Program remains governed by
-this License without regard to the additional permissions.
-
- When you convey a copy of a covered work, you may at your option
-remove any additional permissions from that copy, or from any part of
-it. (Additional permissions may be written to require their own
-removal in certain cases when you modify the work.) You may place
-additional permissions on material, added by you to a covered work,
-for which you have or can give appropriate copyright permission.
-
- Notwithstanding any other provision of this License, for material you
-add to a covered work, you may (if authorized by the copyright holders of
-that material) supplement the terms of this License with terms:
-
- a) Disclaiming warranty or limiting liability differently from the
- terms of sections 15 and 16 of this License; or
-
- b) Requiring preservation of specified reasonable legal notices or
- author attributions in that material or in the Appropriate Legal
- Notices displayed by works containing it; or
-
- c) Prohibiting misrepresentation of the origin of that material, or
- requiring that modified versions of such material be marked in
- reasonable ways as different from the original version; or
-
- d) Limiting the use for publicity purposes of names of licensors or
- authors of the material; or
-
- e) Declining to grant rights under trademark law for use of some
- trade names, trademarks, or service marks; or
-
- f) Requiring indemnification of licensors and authors of that
- material by anyone who conveys the material (or modified versions of
- it) with contractual assumptions of liability to the recipient, for
- any liability that these contractual assumptions directly impose on
- those licensors and authors.
-
- All other non-permissive additional terms are considered "further
-restrictions" within the meaning of section 10. If the Program as you
-received it, or any part of it, contains a notice stating that it is
-governed by this License along with a term that is a further
-restriction, you may remove that term. If a license document contains
-a further restriction but permits relicensing or conveying under this
-License, you may add to a covered work material governed by the terms
-of that license document, provided that the further restriction does
-not survive such relicensing or conveying.
-
- If you add terms to a covered work in accord with this section, you
-must place, in the relevant source files, a statement of the
-additional terms that apply to those files, or a notice indicating
-where to find the applicable terms.
-
- Additional terms, permissive or non-permissive, may be stated in the
-form of a separately written license, or stated as exceptions;
-the above requirements apply either way.
-
- 8. Termination.
-
- You may not propagate or modify a covered work except as expressly
-provided under this License. Any attempt otherwise to propagate or
-modify it is void, and will automatically terminate your rights under
-this License (including any patent licenses granted under the third
-paragraph of section 11).
-
- However, if you cease all violation of this License, then your
-license from a particular copyright holder is reinstated (a)
-provisionally, unless and until the copyright holder explicitly and
-finally terminates your license, and (b) permanently, if the copyright
-holder fails to notify you of the violation by some reasonable means
-prior to 60 days after the cessation.
-
- Moreover, your license from a particular copyright holder is
-reinstated permanently if the copyright holder notifies you of the
-violation by some reasonable means, this is the first time you have
-received notice of violation of this License (for any work) from that
-copyright holder, and you cure the violation prior to 30 days after
-your receipt of the notice.
-
- Termination of your rights under this section does not terminate the
-licenses of parties who have received copies or rights from you under
-this License. If your rights have been terminated and not permanently
-reinstated, you do not qualify to receive new licenses for the same
-material under section 10.
-
- 9. Acceptance Not Required for Having Copies.
-
- You are not required to accept this License in order to receive or
-run a copy of the Program. Ancillary propagation of a covered work
-occurring solely as a consequence of using peer-to-peer transmission
-to receive a copy likewise does not require acceptance. However,
-nothing other than this License grants you permission to propagate or
-modify any covered work. These actions infringe copyright if you do
-not accept this License. Therefore, by modifying or propagating a
-covered work, you indicate your acceptance of this License to do so.
-
- 10. Automatic Licensing of Downstream Recipients.
-
- Each time you convey a covered work, the recipient automatically
-receives a license from the original licensors, to run, modify and
-propagate that work, subject to this License. You are not responsible
-for enforcing compliance by third parties with this License.
-
- An "entity transaction" is a transaction transferring control of an
-organization, or substantially all assets of one, or subdividing an
-organization, or merging organizations. If propagation of a covered
-work results from an entity transaction, each party to that
-transaction who receives a copy of the work also receives whatever
-licenses to the work the party's predecessor in interest had or could
-give under the previous paragraph, plus a right to possession of the
-Corresponding Source of the work from the predecessor in interest, if
-the predecessor has it or can get it with reasonable efforts.
-
- You may not impose any further restrictions on the exercise of the
-rights granted or affirmed under this License. For example, you may
-not impose a license fee, royalty, or other charge for exercise of
-rights granted under this License, and you may not initiate litigation
-(including a cross-claim or counterclaim in a lawsuit) alleging that
-any patent claim is infringed by making, using, selling, offering for
-sale, or importing the Program or any portion of it.
-
- 11. Patents.
-
- A "contributor" is a copyright holder who authorizes use under this
-License of the Program or a work on which the Program is based. The
-work thus licensed is called the contributor's "contributor version".
-
- A contributor's "essential patent claims" are all patent claims
-owned or controlled by the contributor, whether already acquired or
-hereafter acquired, that would be infringed by some manner, permitted
-by this License, of making, using, or selling its contributor version,
-but do not include claims that would be infringed only as a
-consequence of further modification of the contributor version. For
-purposes of this definition, "control" includes the right to grant
-patent sublicenses in a manner consistent with the requirements of
-this License.
-
- Each contributor grants you a non-exclusive, worldwide, royalty-free
-patent license under the contributor's essential patent claims, to
-make, use, sell, offer for sale, import and otherwise run, modify and
-propagate the contents of its contributor version.
-
- In the following three paragraphs, a "patent license" is any express
-agreement or commitment, however denominated, not to enforce a patent
-(such as an express permission to practice a patent or covenant not to
-sue for patent infringement). To "grant" such a patent license to a
-party means to make such an agreement or commitment not to enforce a
-patent against the party.
-
- If you convey a covered work, knowingly relying on a patent license,
-and the Corresponding Source of the work is not available for anyone
-to copy, free of charge and under the terms of this License, through a
-publicly available network server or other readily accessible means,
-then you must either (1) cause the Corresponding Source to be so
-available, or (2) arrange to deprive yourself of the benefit of the
-patent license for this particular work, or (3) arrange, in a manner
-consistent with the requirements of this License, to extend the patent
-license to downstream recipients. "Knowingly relying" means you have
-actual knowledge that, but for the patent license, your conveying the
-covered work in a country, or your recipient's use of the covered work
-in a country, would infringe one or more identifiable patents in that
-country that you have reason to believe are valid.
-
- If, pursuant to or in connection with a single transaction or
-arrangement, you convey, or propagate by procuring conveyance of, a
-covered work, and grant a patent license to some of the parties
-receiving the covered work authorizing them to use, propagate, modify
-or convey a specific copy of the covered work, then the patent license
-you grant is automatically extended to all recipients of the covered
-work and works based on it.
-
- A patent license is "discriminatory" if it does not include within
-the scope of its coverage, prohibits the exercise of, or is
-conditioned on the non-exercise of one or more of the rights that are
-specifically granted under this License. You may not convey a covered
-work if you are a party to an arrangement with a third party that is
-in the business of distributing software, under which you make payment
-to the third party based on the extent of your activity of conveying
-the work, and under which the third party grants, to any of the
-parties who would receive the covered work from you, a discriminatory
-patent license (a) in connection with copies of the covered work
-conveyed by you (or copies made from those copies), or (b) primarily
-for and in connection with specific products or compilations that
-contain the covered work, unless you entered into that arrangement,
-or that patent license was granted, prior to 28 March 2007.
-
- Nothing in this License shall be construed as excluding or limiting
-any implied license or other defenses to infringement that may
-otherwise be available to you under applicable patent law.
-
- 12. No Surrender of Others' Freedom.
-
- If conditions are imposed on you (whether by court order, agreement or
-otherwise) that contradict the conditions of this License, they do not
-excuse you from the conditions of this License. If you cannot convey a
-covered work so as to satisfy simultaneously your obligations under this
-License and any other pertinent obligations, then as a consequence you may
-not convey it at all. For example, if you agree to terms that obligate you
-to collect a royalty for further conveying from those to whom you convey
-the Program, the only way you could satisfy both those terms and this
-License would be to refrain entirely from conveying the Program.
-
- 13. Use with the GNU Affero General Public License.
-
- Notwithstanding any other provision of this License, you have
-permission to link or combine any covered work with a work licensed
-under version 3 of the GNU Affero General Public License into a single
-combined work, and to convey the resulting work. The terms of this
-License will continue to apply to the part which is the covered work,
-but the special requirements of the GNU Affero General Public License,
-section 13, concerning interaction through a network will apply to the
-combination as such.
-
- 14. Revised Versions of this License.
-
- The Free Software Foundation may publish revised and/or new versions of
-the GNU General Public License from time to time. Such new versions will
-be similar in spirit to the present version, but may differ in detail to
-address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Program specifies that a certain numbered version of the GNU General
-Public License "or any later version" applies to it, you have the
-option of following the terms and conditions either of that numbered
-version or of any later version published by the Free Software
-Foundation. If the Program does not specify a version number of the
-GNU General Public License, you may choose any version ever published
-by the Free Software Foundation.
-
- If the Program specifies that a proxy can decide which future
-versions of the GNU General Public License can be used, that proxy's
-public statement of acceptance of a version permanently authorizes you
-to choose that version for the Program.
-
- Later license versions may give you additional or different
-permissions. However, no additional obligations are imposed on any
-author or copyright holder as a result of your choosing to follow a
-later version.
-
- 15. Disclaimer of Warranty.
-
- THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
-APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
-HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
-OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
-THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
-IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
-ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
-
- 16. Limitation of Liability.
-
- IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
-WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
-THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
-GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
-USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
-DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
-PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
-EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGES.
-
- 17. Interpretation of Sections 15 and 16.
-
- If the disclaimer of warranty and limitation of liability provided
-above cannot be given local legal effect according to their terms,
-reviewing courts shall apply local law that most closely approximates
-an absolute waiver of all civil liability in connection with the
-Program, unless a warranty or assumption of liability accompanies a
-copy of the Program in return for a fee.
-
- END OF TERMS AND CONDITIONS
-
- How to Apply These Terms to Your New Programs
-
- If you develop a new program, and you want it to be of the greatest
-possible use to the public, the best way to achieve this is to make it
-free software which everyone can redistribute and change under these terms.
-
- To do so, attach the following notices to the program. It is safest
-to attach them to the start of each source file to most effectively
-state the exclusion of warranty; and each file should have at least
-the "copyright" line and a pointer to where the full notice is found.
-
- <one line to give the program's name and a brief idea of what it does.>
- Copyright (c) 2020, Dell Inc. All rights reserved.
-
- This program is free software: you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation, either version 3 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-
- You should have received a copy of the GNU General Public License
- along with this program. If not, see <https://www.gnu.org/licenses/>.
-
-Also add information on how to contact you by electronic and paper mail.
-
- If the program does terminal interaction, make it output a short
-notice like this when it starts in an interactive mode:
-
- <program> Copyright (c) 2020, Dell Inc. All rights reserved.
- This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
- This is free software, and you are welcome to redistribute it
- under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License. Of course, your program's commands
-might be different; for a GUI interface, you would use an "about box".
-
- You should also get your employer (if you work as a programmer) or school,
-if any, to sign a "copyright disclaimer" for the program, if necessary.
-For more information on this, and how to apply and follow the GNU GPL, see
-<https://www.gnu.org/licenses/>.
-
- The GNU General Public License does not permit incorporating your program
-into proprietary programs. If your program is a subroutine library, you
-may consider it more useful to permit linking proprietary applications with
-the library. If this is what you want to do, use the GNU Lesser General
-Public License instead of this License. But first, please read
-<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/README.md b/ansible_collections/dellemc/os9/roles/os9_xstp/README.md
deleted file mode 100644
index 09223b8dd..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/README.md
+++ /dev/null
@@ -1,127 +0,0 @@
-# xSTP role
-
-This role facilitates the configuration of xSTP attributes. It supports multiple spanning-tree protocol (STP) variants: STP, rapid spanning-tree (RSTP), rapid per-VLAN spanning-tree (Rapid PVST+), multiple spanning-tree (MST), and per-VLAN spanning-tree (PVST). It supports the configuration of bridge priority, enabling and disabling spanning-tree, creating and deleting instances, and mapping virtual LANs (VLANs) to instances. This role is abstracted for Dell EMC PowerSwitch platforms running Dell EMC OS9.
-
-The xSTP role requires an SSH connection for connectivity to a Dell EMC OS9 device. You can use any of the built-in OS connection variables.
-
-Role variables
---------------
-
-- Role is abstracted using the `ansible_network_os` variable that can take `dellemc.os9.os9` as the value
-- If `os9_cfg_generate` is set to true, the role generates its configuration commands in a file
-- `os9_xstp` (dictionary) contains the hostname (dictionary)
-- Hostname is the value of the *hostname* variable that corresponds to the name of the OS device
-- Any role variable with a corresponding state variable set to absent negates the configuration of that variable
-- Setting an empty value for any variable negates the corresponding configuration (see the negation sketch after the *hostname keys* table)
-- Variables and values are case-sensitive
-
-**hostname keys**
-
-| Key | Type | Description | Support |
-|------------|---------------------------|---------------------------------------------------------|----------------------|
-| ``type`` | string (required) | Configures the spanning-tree mode type to use: STP, RSTP, PVST, or MSTP | os9 |
-| ``enable`` | boolean: true,false | Enables/disables the spanning-tree protocol specified in the type variable | os9 |
-| ``stp`` | dictionary | Configures simple spanning-tree protocol (see ``stp.* keys``) | os9 |
-| ``stp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os9 |
-| ``stp.state`` | string: absent,present\* | Deletes the configured STP if set to absent | os9 |
-| ``rstp`` | dictionary | Configures rapid spanning-tree (see ``rstp.*``) | os9 |
-| ``rstp.bridge_priority`` | integer | Configures bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096) | os9 |
-| ``rstp.state`` | string: absent,present\* | Deletes the configured RSTP in os9 devices if set to absent | os9 |
-| ``pvst`` | dictionary | Configures per-VLAN spanning-tree protocol (see ``pvst.*``) | os9 |
-| ``pvst.vlan`` | list | Configures the VLAN for PVST (see ``vlan.*``) | os9 |
-| ``vlan.range_or_id`` | string | Configures a VLAN/range of VLANs for the per-VLAN spanning-tree protocol | os9 |
-| ``vlan.bridge_priority`` | integer | Configures bridge-priority for the per-VLAN spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *vlan.root* | os9 |
-| ``pvst.state`` | string: absent,present\* | Deletes the configured PVST if set to absent | os9 |
-| ``mstp`` | dictionary | Configures multiple spanning-tree protocol (see ``mstp.*``) | os9 |
-| ``mstp.mstp_instances`` | list | Configures an MSTP instance (see ``mstp_instances.*``) | os9 |
-| ``mstp_instances.number`` | integer | Configures the multiple spanning-tree instance number | os9 |
-| ``mstp_instances.vlans`` | string | Configures a VLAN/range of VLANs by mapping it to the instance number in os9 devices | os9 |
-| ``mstp_instances.bridge_priority`` | integer | Configures the bridge-priority for the spanning-tree (0 to 61440 in multiples of 4096); mutually exclusive with *mstp_instances.root* | os9 |
-| ``mstp_instances.vlans_state`` | string: absent,present\* | Deletes a set of VLANs mapped to the spanning-tree instance if set to absent | os9 |
-| ``mstp.state`` | string: absent,present\* | Deletes the configured MSTP if set to absent | os9 |
-| ``intf`` | list | Configures multiple spanning-tree in an interface (see ``intf.*``) | os9 |
-| ``intf <interface name>``| dictionary | Configures the interface name (see ``intf.<interface name>.*``) | os9 |
-| ``intf.<interface name>.stp_type`` | list: stp,mstp,pvst,rstp | Configures the list of spanning-tree in an interface | os9 |
-| ``intf.<interface name>.edge_port`` | boolean: true,false | Configures the interface as an edge port according to the *stp_type* in os9 devices | os9 |
-
-> **NOTE**: Asterisk (_*_) denotes the default value if none is specified.
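-
-The negation behavior noted in the role variable list above can be sketched as follows (hypothetical values for illustration only, not part of the role defaults): setting a ``state`` key to absent deletes that protocol's configuration, and leaving a value empty removes the corresponding setting.
-
-    os9_xstp:
-      rstp:
-        bridge_priority:     # empty value removes the configured bridge-priority
-      mstp:
-        state: absent        # deletes the configured MSTP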
-
-Connection variables
---------------------
-
-Ansible Dell EMC network roles require connection information to establish communication with the nodes in your inventory. This information can exist in the Ansible *group_vars* or *host_vars* directories, in the inventory, or in the playbook itself (a minimal inline-inventory sketch follows the table below).
-
-| Key | Required | Choices | Description |
-|-------------|----------|------------|-------------------------------------------------------|
-| ``ansible_host`` | yes | | Specifies the hostname or address for connecting to the remote device over the specified transport |
-| ``ansible_port`` | no | | Specifies the port used to build the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_PORT` option is used; it defaults to 22 |
-| ``ansible_ssh_user`` | no | | Specifies the username that authenticates the CLI login for the connection to the remote device; if value is unspecified, the `ANSIBLE_REMOTE_USER` environment variable value is used |
-| ``ansible_ssh_pass`` | no | | Specifies the password that authenticates the connection to the remote device |
-| ``ansible_become`` | no | yes, no\* | Instructs the module to enter privileged mode on the remote device before sending any commands; if value is unspecified, the `ANSIBLE_BECOME` environment variable value is used, and the device attempts to execute all commands in non-privileged mode |
-| ``ansible_become_method`` | no | enable, sudo\* | Instructs the module to allow the become method to be specified for handling privilege escalation; if value is unspecified, the `ANSIBLE_BECOME_METHOD` environment variable value is used |
-| ``ansible_become_pass`` | no | | Specifies the password to use if required to enter privileged mode on the remote device; if ``ansible_become`` is set to no this key is not applicable |
-| ``ansible_network_os`` | yes | os9, null\* | Loads the correct terminal and cliconf plugins to communicate with the remote device |
-
-> **NOTE**: Asterisk (\*) denotes the default value if none is specified.
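-
-As a minimal sketch (hypothetical values for illustration only), the same connection variables can also be supplied inline in the inventory rather than in *host_vars*:
-
-    spine1 ansible_host=10.0.0.1 ansible_network_os=dellemc.os9.os9 ansible_ssh_user=admin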
-
-Example playbook
-----------------
-
-This example uses the *os9_xstp* role to configure different variants of spanning-tree. Based on the type of STP and defined objects, VLANs are associated and bridge priorities are assigned. It creates a *hosts* file with the switch details, and a *host_vars* file with connection variables. The corresponding role variables are defined in the *vars/main.yml* file at the role path. The *hosts* file should define the `ansible_network_os` variable with the corresponding Dell EMC OS9 name.
-
-The example writes a simple playbook that only references the *os9_xstp* role. By including the role, you automatically get access to all of the tasks that configure xSTP. When `os9_cfg_generate` is set to true, the role generates the configuration commands as a .part file in the *build_dir* path; by default, this variable is set to false.
-
-**Sample hosts file**
-
- spine1 ansible_host= <ip_address>
-
-**Sample host_vars/spine1**
-
- hostname: spine1
- ansible_become: yes
- ansible_become_method: xxxxx
- ansible_become_pass: xxxxx
- ansible_ssh_user: xxxxx
- ansible_ssh_pass: xxxxx
- ansible_network_os: dellemc.os9.os9
- build_dir: ../temp/os9
-
-
-**Sample vars/main.yml**
-
- os9_xstp:
- type: rstp
- enable: true
- stp:
- bridge_priority: 4096
- state: present
- rstp:
- bridge_priority: 4096
- pvst:
- vlan:
- - range_or_id: 10
- bridge_priority: 4096
- mstp:
- mstp_instances:
- - number: 1
- vlans: 10,12
- bridge_priority: 4096
- vlans_state: present
- intf:
- fortyGigE 1/25:
- stp_type:
- - stp
- - mstp
- edge_port: true
-
-**Simple playbook to setup system — spine.yml**
-
- - hosts: spine
- roles:
- - dellemc.os9.os9_xstp
-
-**Run**
-
- ansible-playbook -i hosts spine.yml
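-
-When `os9_cfg_generate` is set to true, the same run also writes the rendered commands to a .part file in the *build_dir* path. A sketch of one way to enable it from the command line, assuming the hosts file and playbook above:
-
-    ansible-playbook -i hosts spine.yml -e "os9_cfg_generate=true"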
-
-(c) 2017-2020 Dell Inc. or its subsidiaries. All rights reserved.
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml
deleted file mode 100644
index d49cf4a3e..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/defaults/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# defaults file for dellemc.os9.os9_xstp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml
deleted file mode 100644
index 818e833de..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/handlers/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# handlers file for dellemc.os9.os9_xstp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml
deleted file mode 100644
index 009fcceae..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/meta/main.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright (c) 2020 Dell Inc.
----
-galaxy_info:
- author: Dell EMC Networking Engineering
- description: The os9_xstp role facilitates the configuration of STP attributes in devices running Dell EMC Networking Operating Systems.
- company: Dell Inc
- license: GPLv3
- min_ansible_version: 2.9.6
-
- platforms:
- - name: os9
-
- galaxy_tags:
- - networking
- - dell
- - dellemc
- - emc
- - os9
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml
deleted file mode 100644
index c98c538a8..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/tasks/main.yml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-# tasks file for os9
- - name: "Generating xSTP configuration for os9"
- template:
- src: os9_xstp.j2
- dest: "{{ build_dir }}/xstp9_{{ hostname }}.conf.part"
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9") and ((os9_cfg_generate | default('False')) | bool)
-# notify: save config os9
- register: generate_output
-
- - name: "Provisioning xSTP configuration for os9"
- dellemc.os9.os9_config:
- src: os9_xstp.j2
- when: (ansible_network_os is defined and ansible_network_os == "dellemc.os9.os9")
-# notify: save config os9
- register: output
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2 b/ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2
deleted file mode 100644
index b21ee592b..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/templates/os9_xstp.j2
+++ /dev/null
@@ -1,160 +0,0 @@
-#jinja2: trim_blocks: True, lstrip_blocks: True
-{#############################################
-PURPOSE: Configure xSTP commands for os9 Devices
-os9_xstp:
- type: stp
- enable: true
- stp:
- bridge_priority: 4096
- state: present
-
- rstp:
- bridge_priority: 4096
- state: present
-
- pvst:
- vlan:
- - range_or_id: 10
- bridge_priority: 4096
- state: present
-
- mstp:
- mstp_instances:
- - number: 1
- vlans: 10,12
- vlans_state: present
- bridge_priority: 4096
- state: present
- intf:
- fortyGigE 1/1:
- stp_type:
- - rstp
- - mstp
- edge_port: true
-############################################}
-{% if os9_xstp is defined and os9_xstp %}
-{% set xstp_vars = os9_xstp %}
-{% if xstp_vars.type is defined and xstp_vars.type %}
- {% if xstp_vars.type == "stp" %}
-protocol spanning-tree 0
- {% else %}
-protocol spanning-tree {{ xstp_vars.type }}
- {% endif %}
- {% if xstp_vars.enable is defined %}
- {% if xstp_vars.enable %}
- no disable
- {% else %}
- disable
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.stp is defined and xstp_vars.stp %}
- {% set val = xstp_vars.stp %}
- {% if val.state is defined and val.state == "absent" %}
-no protocol spanning-tree 0
- {% else %}
- {% if val.bridge_priority is defined %}
-protocol spanning-tree 0
- {% if val.bridge_priority == 0 or val.bridge_priority %}
- bridge-priority {{ val.bridge_priority }}
- {% else %}
- no bridge-priority
- {% endif %}
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.rstp is defined and xstp_vars.rstp %}
- {% set val = xstp_vars.rstp %}
- {% if val.state is defined and val.state == "absent" %}
-no protocol spanning-tree rstp
- {% else %}
- {% if val.bridge_priority is defined %}
-protocol spanning-tree rstp
- {% if val.bridge_priority == 0 or val.bridge_priority %}
- bridge-priority {{ val.bridge_priority }}
- {% else %}
- no bridge-priority
- {% endif %}
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.pvst is defined and xstp_vars.pvst %}
- {% set val = xstp_vars.pvst %}
- {% if val.state is defined and val.state == "absent" %}
-no protocol spanning-tree pvst
- {% else %}
- {% if val.vlan is defined and val.vlan %}
-protocol spanning-tree pvst
- {% for vlan in val.vlan %}
- {% if vlan.range_or_id is defined and vlan.range_or_id %}
- {% if vlan.bridge_priority is defined %}
- {% if vlan.bridge_priority == 0 or vlan.bridge_priority %}
- vlan {{ vlan.range_or_id }} bridge-priority {{ vlan.bridge_priority }}
- {% else %}
- no vlan {{ vlan.range_or_id }} bridge-priority
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.mstp is defined and xstp_vars.mstp %}
- {% set val = xstp_vars.mstp %}
- {% if val.state is defined and val.state == "absent" %}
-no protocol spanning-tree mstp
- {% else %}
- {% if val.mstp_instances is defined and val.mstp_instances %}
-protocol spanning-tree mstp
- {% for instance in val.mstp_instances %}
- {% if instance.number is defined and instance.number %}
- {% if instance.bridge_priority is defined %}
- {% if instance.bridge_priority == 0 or instance.bridge_priority %}
- MSTI {{ instance.number }} bridge-priority {{ instance.bridge_priority }}
- {% else %}
- no MSTI {{ instance.number }} bridge-priority
- {% endif %}
- {% endif %}
- {% if instance.vlans is defined and instance.vlans %}
- {% if instance.vlans_state is defined and instance.vlans_state == "absent" %}
- no MSTI {{ instance.number }} VLAN {{ instance.vlans }}
- {% else %}
- MSTI {{ instance.number }} VLAN {{ instance.vlans }}
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endif %}
- {% endif %}
-{% endif %}
-
-{% if xstp_vars.intf is defined and xstp_vars.intf %}
- {% for intr in xstp_vars.intf.keys() %}
- {% set intf_vars = xstp_vars.intf[intr] %}
-interface {{ intr }}
- {% for type in intf_vars.stp_type %}
- {% if type == "stp" %}
- {% if intf_vars.edge_port is defined %}
- {% if not intf_vars.edge_port %}
- no spanning-tree 0 portfast
- {% else %}
- spanning-tree 0 portfast bpduguard
- {% endif %}
- {% endif %}
- {% else %}
- {% if intf_vars.edge_port is defined %}
- {% if intf_vars.edge_port %}
- spanning-tree {{ type }} edge-port
- {% else %}
- no spanning-tree {{ type }} edge-port
- {% endif %}
- {% endif %}
- {% endif %}
- {% endfor %}
- {% endfor %}
-{% endif %}
-{% endif %} \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml
deleted file mode 100644
index 5fd33c945..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/inventory.yaml
+++ /dev/null
@@ -1,20 +0,0 @@
-spine1 ansible_host=100.94.210.44
-spine2 ansible_host=10.11.182.26
-leaf1 ansible_host=10.11.182.27
-leaf2 ansible_host=10.11.182.28
-leaf3 ansible_host=10.11.182.29
-leaf4 ansible_host=10.11.182.30
-
-[spine]
-spine1
-spine2
-
-[leaf]
-leaf1
-leaf2
-leaf3
-leaf4
-
-[datacenter:children]
-spine
-leaf
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml
deleted file mode 100644
index 7f30b0832..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/main.os9.yaml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-# vars file for dellemc.os9.os9_xstp,
-# below gives a sample configuration
-# Sample variables for OS9 device
-os9_xstp:
- type: rstp
- enable: true
- stp:
- bridge_priority: 4096
- state: present
-
- rstp:
- bridge_priority: 4096
- state: present
-
- pvst:
- vlan:
- - range_or_id: 10
- bridge_priority: 4096
- state: present
-
- mstp:
- mstp_instances:
- - number: 1
- vlans: 10,12
- bridge_priority: 4096
- vlans_state: present
- state: present
- intf:
- fortyGigE 1/25:
- stp_type:
- - stp
- - mstp
- edge_port: true \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml b/ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml
deleted file mode 100644
index 77da96716..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/tests/test.yaml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-- hosts: datacenter
- connection: network_cli
- roles:
- - dellemc.os9.os9_xstp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml b/ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml
deleted file mode 100644
index d2fefb056..000000000
--- a/ansible_collections/dellemc/os9/roles/os9_xstp/vars/main.yml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-# vars file for dellemc.os9.os9_xstp \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/.gitignore b/ansible_collections/dellemc/os9/tests/.gitignore
deleted file mode 100644
index ea1472ec1..000000000
--- a/ansible_collections/dellemc/os9/tests/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-output/
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml
deleted file mode 100644
index 55a93fc23..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml
deleted file mode 100644
index 7152815d7..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/cli.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact:
- test_items: "{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ item }}"
- with_items: "{{ test_items }}" \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml
deleted file mode 100644
index d4898c29b..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] } \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator
deleted file mode 100644
index 42a164c85..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/bad_operator
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- debug: msg="START cli/bad_operator.yaml"
-
-- name: test bad operator
- os9_command:
- commands:
- - show version
- - show interfaces TenGigabitEthernet 0/0
- wait_for:
- - "result[0] contains 'Description : blah'"
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- debug: msg="END cli/bad_operator.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains
deleted file mode 100644
index 2f56a11f9..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/contains
+++ /dev/null
@@ -1,20 +0,0 @@
----
-- debug: msg="START cli/contains.yaml"
-
-- name: test contains operator
- os9_command:
- commands:
- - show version
- - show interface TenGigabitEthernet 0/0
- wait_for:
- - "result[0] contains 2.0"
- - "result[1] contains TenGigabitEthernet "
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- debug: msg="END cli/contains.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid
deleted file mode 100644
index cffc24f81..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/invalid
+++ /dev/null
@@ -1,28 +0,0 @@
----
-- debug: msg="START cli/invalid.yaml"
-
-- name: run invalid command
- os9_command:
- commands: ['show foo']
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed"
-
-- name: run commands that include invalid command
- os9_command:
- commands:
- - show version
- - show foo
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed"
-
-- debug: msg="END cli/invalid.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output
deleted file mode 100644
index 1fd537880..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/output
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- debug: msg="START cli/output.yaml"
-
-- name: get output for single command
- os9_command:
- commands: ['show version']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- name: get output for multiple commands
- os9_command:
- commands:
- - show version
- - show interfaces
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
- - "result.stdout | length == 2"
-
-- debug: msg="END cli/output.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml
deleted file mode 100644
index 80d19518e..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/show_commands.yaml
+++ /dev/null
@@ -1,74 +0,0 @@
----
-- debug: msg="START cli/show_commands.yaml"
-
-- name: test bad operator
- os9_command:
- commands:
- - show version
- - show interfaces TenGigabitEthernet 0/0
- wait_for:
- - "result[0] contains 'Description : blah'"
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- name: get output for single command
- os9_command:
- commands: ['show version']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
-
-- name: get output for multiple commands
- os9_command:
- commands:
- - show version
- - show interfaces
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.stdout is defined"
- - "result.stdout | length == 2"
-
-- name: show run command with grep Option
- os9_command:
- commands:
- - show run | grep username
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.stdout | length == 1"
-
-- name: Execute multiple show commands continuously
- os9_command:
- commands:
- - show system
- - show file-systems
- - show startup-config
- - show tech-support
- - show logging
- - show system brief | grep Management
- provider: "{{ cli }}"
- retries: 8
- interval: 5
- register: result
-
-- assert:
- that:
- - "result.stdout | length == 6"
-
-- debug: msg="END cli/show_commands.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout b/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout
deleted file mode 100644
index 60dbb761f..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_command/os9_command/tests/cli/timeout
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- debug: msg="START cli/timeout.yaml"
-
-- name: test bad condition
- os9_command:
- commands:
- - show version
- wait_for:
- - "result[0] contains bad_value_string"
- provider: "{{ cli }}"
- register: result
- ignore_errors: yes
-
-- assert:
- that:
- - "result.failed == true"
- - "result.msg is defined"
-
-- debug: msg="END cli/timeout.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml
deleted file mode 100644
index 346bdf2d1..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/cli.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ item }}"
- with_items: "{{ test_items }}"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml
deleted file mode 100644
index 415c99d8b..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] }
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml
deleted file mode 100644
index d737a4909..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/configcommands.yaml
+++ /dev/null
@@ -1,134 +0,0 @@
----
-- debug: msg="START cli/config command execution"
-
-- name: Configure management protocol telnet
- os9_config:
- lines: ['hostname {{ inventory_hostname }}', 'ip telnet server enable']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
-
-- name: Create new username and set password
- os9_config:
- lines: ['username test password test123']
- provider: "{{ cli }}"
- register: result
-
-- name: Update the new user test privilege using the replace line option
- os9_config:
- lines:
- - username test password test123 privilege 15
- provider: "{{ cli }}"
- replace: line
- save: no
- register: result
-
-- name: Validate the newly created username using the show run command and the wait_for keyword
- os9_command:
- commands:
- - show running-config | grep username
- - show running-config | grep username | grep test
- wait_for:
- - "result[0] contains test"
- provider: "{{ cli }}"
-
-- name: Configure SNMP v2 credentials on device and enable traps
- os9_config:
- lines:
- - snmp-server community ansibleread ro
- - snmp-server community ansiblewrite rw
- - snmp-server enable traps
- - snmp-server host 10.16.148.142 traps version 2c public udp-port 162
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
-
-- name: Validate that the newly created SNMP v2 communities are available in the running config
- os9_command:
- commands:
- - show running-config | grep snmp-server
- wait_for:
- - "result[0] contains ansibleread"
- - "result[0] contains ansiblewrite"
- provider: "{{ cli }}"
- register: result
-
-- name: Configure Clock timezone
- os9_config:
- lines: "clock timezone UTC 0 0"
- provider: "{{ cli }}"
- register: result
-
-- name: Configure Logging to NMS Server
- os9_config:
- lines:
- - logging 10.16.148.142
- #before:
- # - no logging 10.16.148.142
- #ignore: yes
- provider: "{{ cli }}"
- register: result
-
-- name: Configure Default Gateway
- os9_config:
- lines:
- - management route 0.0.0.0/0 10.16.148.254
- provider: "{{ cli }}"
- register: result
-
- #- assert:
- # that:
- # - "result.changed == true"
- #- "'management route 0.0.0.0/0 10.16.148.254' in result.updates"
-
-- name: Enable spanning-tree protocol using the parents, before, and after keywords in the config module
- os9_config:
- lines:
- - no disable
- - hello-time 1
- - max-age 6
- - forward-delay 4
- - bridge-priority 0
- parents: ['protocol spanning-tree rstp']
- before: ['no protocol spanning-tree rstp']
- after: ['exit']
- provider: "{{ cli }}"
- register: result
-
-- name: Save the running config to the startup config using the save keyword in the os9_config module
- os9_config:
- save: yes
- provider: "{{ cli }}"
- register: result
-
-- name: Validate that the newly added commands are available in the startup-config
- os9_command:
- commands:
- - show startup-config
- provider: "{{ cli }}"
- register: result
-
-- name: Configure a new VLAN using the src file given as input and back up the configuration
- os9_config:
- src: vlan_config.txt
- provider: "{{ cli }}"
- update: merge
- backup: yes
- register: result
-
-
-- name: Validate the check option for update in Dell os9 config using the config file provided with the config option
- os9_config:
- src: vlan_config.txt
- provider: "{{ cli }}"
- update: check
- config: Aggregation1_config.2016-09-06@15:26:02
- register: result
-
-- debug: msg="END cli/configcommands"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml
deleted file mode 100644
index 65df0afa1..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/toplevel.yaml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-- debug: msg="START cli/toplevel.yaml"
-
-- name: setup
- os9_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- match: none
-
-- name: configure top level command
- os9_config:
- lines: ['hostname foo']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == true"
- - "'hostname foo' in result.updates"
-
-- name: configure top level command idempotent check
- os9_config:
- lines: ['hostname foo']
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
-
-- name: teardown
- os9_config:
- lines: ['hostname {{ inventory_hostname }}']
- provider: "{{ cli }}"
- match: none
-
-- debug: msg="END cli/toplevel.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt b/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt
deleted file mode 100644
index 894052832..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_config/os9_config/tests/cli/vlan_config.txt
+++ /dev/null
@@ -1,9 +0,0 @@
-interface Vlan 1000
- description "vlan added from ansible"
- name Testansible-1000
- ip unreachables
- ip helper-address 100.1.1.1
- ip udp-helper udp-port 1000
- no shutdown
-~
-
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml
deleted file mode 100644
index 5f709c5aa..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/defaults/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-testcase: "*"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml
deleted file mode 100644
index 346bdf2d1..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/cli.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: collect all cli test cases
- find:
- paths: "{{ role_path }}/tests/cli"
- patterns: "{{ testcase }}.yaml"
- register: test_cases
-
-- name: set test_items
- set_fact: test_items="{{ test_cases.files | map(attribute='path') | list }}"
-
-- name: run test case
- include: "{{ item }}"
- with_items: "{{ test_items }}"
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml
deleted file mode 100644
index 415c99d8b..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tasks/main.yaml
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- { include: cli.yaml, tags: ['cli'] }
diff --git a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml b/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml
deleted file mode 100644
index 9315f3445..000000000
--- a/ansible_collections/dellemc/os9/tests/integration/targets/os9_facts/os9_facts/tests/cli/testcases_facts.yaml
+++ /dev/null
@@ -1,55 +0,0 @@
----
-- debug: msg="START cli/testcases_facts.yaml"
-
-- name: Get all the interfaces facts
- os9_facts:
- gather_subset:
- - interfaces
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts is defined"
- - "result.ansible_facts.ansible_net_all_ipv4_addresses is defined"
- - "result.ansible_facts.ansible_net_interfaces is defined"
- - "result.ansible_facts.ansible_net_neighbors is defined"
-
-- name: Get all the facts except interfaces using the ! operator and validate
- os9_facts:
- gather_subset:
- - "!interfaces"
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts is defined"
- - "result.ansible_facts.ansible_net_all_ipv4_addresses is not defined"
- - "result.ansible_facts.ansible_net_interfaces is not defined"
- - "result.ansible_facts.ansible_net_neighbors is not defined"
- - "result.ansible_facts.ansible_net_config is defined"
- - "result.ansible_facts.ansible_net_filesystems is defined"
-
-- name: Test with multiple subsets provided
- os9_facts:
- gather_subset:
- - config
- - hardware
- provider: "{{ cli }}"
- register: result
-
-- assert:
- that:
- - "result.changed == false"
- - "result.ansible_facts is defined"
- - "result.ansible_facts.ansible_net_filesystems is defined"
- - "result.ansible_facts.ansible_net_memtotal_mb is defined"
- - "result.ansible_facts.ansible_net_memfree_mb is defined"
- - "result.ansible_facts.ansible_net_config is defined"
-
-
-
-- debug: msg="START cli/testcases_facts.yaml"
diff --git a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt
deleted file mode 100644
index 57ab8ae6d..000000000
--- a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.10.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-plugins/action/os9.py action-plugin-docs
-plugins/modules/os9_config.py validate-modules:parameter-list-no-elements
-plugins/modules/os9_facts.py validate-modules:parameter-list-no-elements
-plugins/modules/os9_command.py validate-modules:parameter-list-no-elements \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt
deleted file mode 100644
index 57ab8ae6d..000000000
--- a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.11.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-plugins/action/os9.py action-plugin-docs
-plugins/modules/os9_config.py validate-modules:parameter-list-no-elements
-plugins/modules/os9_facts.py validate-modules:parameter-list-no-elements
-plugins/modules/os9_command.py validate-modules:parameter-list-no-elements \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt b/ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt
deleted file mode 100644
index 711efc62e..000000000
--- a/ansible_collections/dellemc/os9/tests/sanity/ignore-2.9.txt
+++ /dev/null
@@ -1 +0,0 @@
-plugins/action/os9.py action-plugin-docs \ No newline at end of file
diff --git a/ansible_collections/dellemc/os9/tests/sanity/requirements.txt b/ansible_collections/dellemc/os9/tests/sanity/requirements.txt
deleted file mode 100644
index 3e3a96692..000000000
--- a/ansible_collections/dellemc/os9/tests/sanity/requirements.txt
+++ /dev/null
@@ -1,4 +0,0 @@
-packaging # needed for update-bundled and changelog
-sphinx ; python_version >= '3.5' # docs build requires python 3+
-sphinx-notfound-page ; python_version >= '3.5' # docs build requires python 3+
-straight.plugin ; python_version >= '3.5' # needed for hacking/build-ansible.py which will host changelog generation and requires python 3+
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg
deleted file mode 100644
index b8f62da5e..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_config.cfg
+++ /dev/null
@@ -1,13 +0,0 @@
-!
-hostname router
-!
-interface fortyGigE 1/6
- ip address 1.2.3.4/24
- description test string
-!
-interface fortyGigE 1/7
- ip address 6.7.8.9/24
- description test string
- shutdown
-!
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg
deleted file mode 100644
index 7ab333875..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/os9_config_src.cfg
+++ /dev/null
@@ -1,12 +0,0 @@
-!
-hostname foo
-!
-interface fortyGigE 1/6
- no ip address
-!
-interface fortyGigE 1/7
- ip address 6.7.8.9/24
- description test string
- shutdown
-!
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems
deleted file mode 100644
index 1c02bb6a9..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_file-systems
+++ /dev/null
@@ -1,10 +0,0 @@
- Size(b) Free(b) Feature Type Flags Prefixes
- 6429872128 5582319616 FAT32 USERFLASH rw flash:
- - - unformatted USERFLASH rw fcmfs:
- 241172480 91893760 Unknown NFSMOUNT rw nfsmount:
- - - - network rw ftp:
- - - - network rw tftp:
- - - - network rw scp:
- - - - network rw http:
- - - - network rw https:
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces
deleted file mode 100644
index 5f19f38bd..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_interfaces
+++ /dev/null
@@ -1,1259 +0,0 @@
-TenGigabitEthernet 0/0 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1048580
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 10000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:13:21
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:16:47
-
-
-TenGigabitEthernet 0/1 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1048708
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 10000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:17:48
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:16:49
-
-
-TenGigabitEthernet 0/2 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1048836
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 10000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:18:30
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:17:31
-
-
-TenGigabitEthernet 0/3 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1048964
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 10000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:18:33
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:17:35
-
-
-fortyGigE 0/4 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1049093
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:38:08
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:37:09
-
-
-fortyGigE 0/8 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1049605
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:38:08
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:40:18
-
-
-fortyGigE 0/12 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1050117
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:41:18
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:40:20
-
-
-fortyGigE 0/16 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1050629
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:42:41
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:41:43
-
-
-fortyGigE 0/20 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1051141
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:43:10
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:42:12
-
-
-fortyGigE 0/24 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1051653
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:43:45
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:42:47
-
-
-fortyGigE 0/28 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1052165
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:44:35
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:43:37
-
-
-fortyGigE 0/32 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1052677
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:44:53
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:43:54
-
-
-fortyGigE 0/36 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1053189
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:46:20
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:45:21
-
-
-fortyGigE 0/40 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1053701
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:46:32
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:45:33
-
-
-fortyGigE 0/44 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1054213
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:46:56
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:45:58
-
-
-fortyGigE 0/48 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1054725
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:47:10
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:46:11
-
-
-fortyGigE 0/52 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1055237
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:47:22
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:46:24
-
-
-fortyGigE 0/56 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1055749
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:47:47
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:46:48
-
-
-fortyGigE 0/60 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1056261
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:47:58
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:47:00
-
-
-fortyGigE 0/64 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1056773
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:48:26
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:47:28
-
-
-fortyGigE 0/68 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1057285
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:48:38
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:47:40
-
-
-fortyGigE 0/72 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1057797
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:49:05
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:48:07
-
-
-fortyGigE 0/76 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1058309
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:49:17
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:48:18
-
-
-fortyGigE 0/80 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1058821
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:49:36
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:48:37
-
-
-fortyGigE 0/84 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1059333
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:49:58
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:49:00
-
-
-fortyGigE 0/88 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1059845
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:50:12
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:49:14
-
-
-fortyGigE 0/92 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1060357
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:50:36
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:49:37
-
-
-fortyGigE 0/96 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1060869
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:50:50
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:49:52
-
-
-fortyGigE 0/100 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1061381
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:51:16
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:50:17
-
-
-fortyGigE 0/104 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1061893
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:51:26
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:50:28
-
-
-fortyGigE 0/108 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1062405
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:51:50
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:50:52
-
-
-fortyGigE 0/112 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1062917
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:52:02
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:51:04
-
-
-fortyGigE 0/116 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1063429
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:52:14
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:51:15
-
-
-fortyGigE 0/120 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1063941
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:52:44
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:51:45
-
-
-fortyGigE 0/124 is down, line protocol is down
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 1064453
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 40000 Mbit
-Flowcontrol rx off tx off
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:52:55
-Queueing strategy: fifo
-Input Statistics:
- 0 packets, 0 bytes
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 runts, 0 giants, 0 throttles
- 0 CRC, 0 overrun, 0 discarded
-Output Statistics:
- 0 packets, 0 bytes, 0 underruns
- 0 64-byte pkts, 0 over 64-byte pkts, 0 over 127-byte pkts
- 0 over 255-byte pkts, 0 over 511-byte pkts, 0 over 1023-byte pkts
- 0 Multicasts, 0 Broadcasts, 0 Unicasts
- 0 throttles, 0 discarded, 0 collisions, 0 wreddrops
-Rate info (interval 299 seconds):
- Input 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
- Output 00.00 Mbits/sec, 0 packets/sec, 0.00% of line-rate
-Time since last interface status change: 13:51:56
-
-
-ManagementEthernet 0/0 is up, line protocol is up
-Hardware is DellEth, address is 90:b1:1c:f4:a2:8f
- Current address is 90:b1:1c:f4:a2:8f
-Pluggable media not present
-Interface index is 7340033
-Internet address is 10.16.148.71/16
-Mode of IPv4 Address Assignment : MANUAL
-DHCP Client-ID(61): 90b11cf4a28f
-Virtual-IP is not set
-Virtual-IP IPv6 address is not set
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed 1000 Mbit, Mode full duplex
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:52:17
-Queueing strategy: fifo
- Input 111338 packets, 7239813 bytes, 96163 multicast
- Received 0 errors, 0 discarded
- Output 8316 packets, 1491845 bytes, 0 multicast
- Output 0 errors, 0 invalid protocol
-Time since last interface status change: 13:52:13
-
-
-ManagementEthernet 1/0 is up, line protocol is not present
-Hardware is DellEth, address is not set
-Interface index is 8388609
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed auto, Mode full duplex
-ARP type: ARPA, ARP Timeout 04:00:00
-Queueing strategy: fifo
-Time since last interface status change: 13:52:33
-
-
-ManagementEthernet 2/0 is up, line protocol is not present
-Hardware is DellEth, address is not set
-Interface index is 9437185
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed auto, Mode full duplex
-ARP type: ARPA, ARP Timeout 04:00:00
-Queueing strategy: fifo
-Time since last interface status change: 13:52:33
-
-
-ManagementEthernet 3/0 is up, line protocol is not present
-Hardware is DellEth, address is not set
-Interface index is 10485761
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed auto, Mode full duplex
-ARP type: ARPA, ARP Timeout 04:00:00
-Queueing strategy: fifo
-Time since last interface status change: 13:52:43
-
-
-ManagementEthernet 4/0 is up, line protocol is not present
-Hardware is DellEth, address is not set
-Interface index is 11534337
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed auto, Mode full duplex
-ARP type: ARPA, ARP Timeout 04:00:00
-Queueing strategy: fifo
-Time since last interface status change: 13:52:43
-
-
-ManagementEthernet 5/0 is up, line protocol is not present
-Hardware is DellEth, address is not set
-Interface index is 12582913
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed auto, Mode full duplex
-ARP type: ARPA, ARP Timeout 04:00:00
-Queueing strategy: fifo
-Time since last interface status change: 13:52:53
-
-
-Vlan 1 is down, line protocol is down
-Address is 90:b1:1c:f4:a2:8f, Current address is 90:b1:1c:f4:a2:8f
-Interface index is 1275068928
-Internet address is not set
-Mode of IPv4 Address Assignment : NONE
-DHCP Client-ID :90b11cf4a28f
-MTU 1554 bytes, IP MTU 1500 bytes
-LineSpeed auto
-ARP type: ARPA, ARP Timeout 04:00:00
-Last clearing of "show interface" counters 13:53:06
-Queueing strategy: fifo
-Time since last interface status change: 13:53:06
-Input Statistics:
- 0 packets, 0 bytes
-Output Statistics:
- 0 packets, 0 bytes
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory
deleted file mode 100644
index 90c0295e5..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_inventory
+++ /dev/null
@@ -1,19 +0,0 @@
-System Type : S6000
-System Mode : 1.0
-Software Version : 9.12(0.0)
-
-Unit Type Serial Number Part Number Rev Piece Part ID Rev Svc Tag Exprs Svc Code
---------------------------------------------------------------------------------------------------------------
-* 0 S6000-01-FE-32T NA 08YWFG A00 CN-08YWFG-28298-3AG-0031 A00 6BJ8VS1 137 581 490 89
- 0 S6000-PWR-AC NA 0T9FNW A00 CN-0T9FNW-28298-3AG-0119 A00 NA NA
- 0 S6000-FAN NA 0MGDH8 A00 CN-0MGDH8-28298-3AG-0094 A00 NA NA
- 0 S6000-FAN NA 0MGDH8 A00 CN-0MGDH8-28298-3AG-0096 A00 NA NA
- 0 S6000-FAN NA 0MGDH8 A00 CN-0MGDH8-28298-3AG-0095 A00 NA NA
-
- * - Management Unit
-
-
-Software Protocol Configured
---------------------------------------------------------------
- LLDP
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface
deleted file mode 100644
index 0cc43da94..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_ipv6_interface
+++ /dev/null
@@ -1,26 +0,0 @@
-fortyGigE 0/16 is down, line protocol is down
- IPV6 is enabled
- Link Local address: fe80::92b1:1cff:fef4:a28f
- Global Unicast address(es):
- 2001:4898:5808:ffa2::5, subnet is 2001:4898:5808:ffa2::4/126 (MANUAL)
- Remaining lifetime: infinite
- Global Anycast address(es):
- Joined Group address(es):
- ff02::1
- ff02::2
- ff02::1:ff00:5
- ff02::1:fff4:a28f
- IP MTU is 1500 bytes
- ND MTU is 0
- ICMP redirects are not sent
- DAD is enabled, number of DAD attempts: 3
- ND reachable time is 35780 milliseconds
- ND base reachable time is 30000 milliseconds
- ND advertised reachable time is 0 milliseconds
- ND advertised retransmit interval is 0 milliseconds
- ND router advertisements are sent every 198 to 600 seconds
- ND router advertisements live for 1800 seconds
- ND advertised hop limit is 64
- IPv6 hop limit for originated packets is 64
- IPv6 unicast RPF check is not supported
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail
deleted file mode 100644
index a868571ce..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_lldp_neighbors_detail
+++ /dev/null
@@ -1,35 +0,0 @@
-========================================================================
- Local Interface Ma 0/0 has 1 neighbor
- Total Frames Out: 1747
- Total Frames In: 10333
- Total Neighbor information Age outs: 0
- Total Multiple Neighbors Detected: 0
- Total Frames Discarded: 0
- Total In Error Frames: 0
- Total Unrecognized TLVs: 0
- Total TLVs Discarded: 0
- Next packet will be sent after 17 seconds
- The neighbors are given below:
- -----------------------------------------------------------------------
-
- Remote Chassis ID Subtype: Mac address (4)
- Remote Chassis ID: 90:b1:1c:f4:2f:6d
- Remote Port Subtype: Interface name (5)
- Remote Port ID: TenGigabitEthernet 0/33
- Remote Port Description: TenGigabitEthernet 0/33
- Local Port ID: ManagementEthernet 0/0
- Locally assigned remote Neighbor Index: 1
- Remote TTL: 20
- Information valid for next 17 seconds
- Time since last information change of this neighbor: 14:54:48
- Remote System Name: swlab1-maa-tor-A2
- Remote System Desc: Dell Real Time Operating System Software. Dell
- Operating System Version: 2.0. Dell Application Software Version:
- 9.11(2.0) Copyright (c) 1999-2017Dell Inc. All Rights Reserved.Build
- Time: Tue Apr 25 21:22:59 2017
- Existing System Capabilities: Repeater Bridge Router
- Enabled System Capabilities: Repeater Bridge Router
- Remote Port Vlan ID: 148
- Port and Protocol Vlan ID: 148, Capability: Supported, Status: Enabled
- ---------------------------------------------------------------------------
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor
deleted file mode 100644
index c2f65415e..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_memory__except_Processor
+++ /dev/null
@@ -1,4 +0,0 @@
- ===========================
- Total(b) Used(b) Free(b) Lowest(b) Largest(b)
- 3203911680 3172120 3200739560 3200673304 3200739560
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config
deleted file mode 100644
index 4804ebba6..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config
+++ /dev/null
@@ -1,238 +0,0 @@
-Current Configuration ...
-! Version 9.12(0.0)
-! Last configuration change at Thu Jan 11 06:53:29 2018 by admin
-!
-!
-logging coredump stack-unit 0
-logging coredump stack-unit 1
-logging coredump stack-unit 2
-logging coredump stack-unit 3
-logging coredump stack-unit 4
-logging coredump stack-unit 5
-!
-hostname Dell
-!
-protocol lldp
-!
-redundancy auto-synchronize full
-!
-enable password 7 b125455cf679b208e79b910e85789edf
-!
-username admin password 7 1d28e9f33f99cf5c
-!
-stack-unit 0 quad-port-profile 0,8,16,24,32,36,40,44,48,52,56,60,64,68,72,76,80,84,88,92,100,108,116,124
-!
-stack-unit 0 provision S6000
-!
-stack-unit 0 port 0 portmode quad
-!
-interface TenGigabitEthernet 0/0
- no ip address
- shutdown
-!
-interface TenGigabitEthernet 0/1
- no ip address
- shutdown
-!
-interface TenGigabitEthernet 0/2
- no ip address
- shutdown
-!
-interface TenGigabitEthernet 0/3
- no ip address
- shutdown
-!
-interface fortyGigE 0/4
- no ip address
- shutdown
-!
-interface fortyGigE 0/8
- no ip address
- shutdown
-!
-interface fortyGigE 0/12
- no ip address
- shutdown
-!
-interface fortyGigE 0/16
- no ip address
- ipv6 address 2001:4898:5808:ffa2::5/126
- shutdown
-!
-interface fortyGigE 0/20
- no ip address
- switchport
- ip access-group ipv6-ssh-only in
- shutdown
-!
-interface fortyGigE 0/24
- no ip address
- switchport
- mac access-group ssh-only-mac in
- mac access-group ssh-only-mac out
- shutdown
-!
-interface fortyGigE 0/28
- no ip address
- switchport
- mac access-group ssh-only-mac in
- mac access-group ssh-only-mac out
- shutdown
-!
-interface fortyGigE 0/32
- no ip address
- switchport
- ip access-group ipv6-ssh-only out
- shutdown
-!
-interface fortyGigE 0/36
- no ip address
- shutdown
-!
-interface fortyGigE 0/40
- no ip address
- shutdown
-!
-interface fortyGigE 0/44
- no ip address
- shutdown
-!
-interface fortyGigE 0/48
- no ip address
- shutdown
-!
-interface fortyGigE 0/52
- no ip address
- shutdown
-!
-interface fortyGigE 0/56
- no ip address
- shutdown
-!
-interface fortyGigE 0/60
- no ip address
- shutdown
-!
-interface fortyGigE 0/64
- no ip address
- shutdown
-!
-interface fortyGigE 0/68
- no ip address
- shutdown
-!
-interface fortyGigE 0/72
- no ip address
- shutdown
-!
-interface fortyGigE 0/76
- no ip address
- shutdown
-!
-interface fortyGigE 0/80
- no ip address
- shutdown
-!
-interface fortyGigE 0/84
- no ip address
- shutdown
-!
-interface fortyGigE 0/88
- no ip address
- shutdown
-!
-interface fortyGigE 0/92
- no ip address
- shutdown
-!
-interface fortyGigE 0/96
- no ip address
- shutdown
-!
-interface fortyGigE 0/100
- no ip address
- shutdown
-!
-interface fortyGigE 0/104
- no ip address
- shutdown
-!
-interface fortyGigE 0/108
- no ip address
- shutdown
-!
-interface fortyGigE 0/112
- no ip address
- shutdown
-!
-interface fortyGigE 0/116
- no ip address
- shutdown
-!
-interface fortyGigE 0/120
- no ip address
- shutdown
-!
-interface fortyGigE 0/124
- no ip address
- shutdown
-!
-interface ManagementEthernet 0/0
- ip address 10.16.148.71/16
- no shutdown
-!
-interface ManagementEthernet 1/0
- no shutdown
-!
-interface ManagementEthernet 2/0
- no shutdown
-!
-interface ManagementEthernet 3/0
- no shutdown
-!
-interface ManagementEthernet 4/0
- no shutdown
-!
-interface ManagementEthernet 5/0
- no shutdown
-!
-interface Vlan 1
-!untagged fortyGigE 0/20-32
-!
-ipv6 access-list ipv6-ssh-only
- description ipv6acl
- remark 1 ipv6
- seq 10 permit ipv6 2001:4898::/32 any
- seq 20 permit tcp any eq 2 2404:f801::/32
- seq 30 permit tcp any 2a01:110::/31 ack
- seq 40 permit tcp any any
-!
-mac access-list extended ssh-only-mac
- description macacl
- remark 1 mac
- seq 5 permit any any count
- seq 6 deny any any
-!
-ip ssh server enable
-!
-line console 0
-line vty 0
-line vty 1
- access-class ipv6-ssh-only ipv6
-line vty 2
- access-class ipv6-ssh-only ipv6
-line vty 3
- access-class ipv6-ssh-only ipv6
-line vty 4
-line vty 5
-line vty 6
-line vty 7
-line vty 8
-line vty 9
-!
-reload-type
- boot-type normal-reload
- config-scr-download enable
-!
-end
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname
deleted file mode 100644
index 9a2c181a9..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_running-config__grep_hostname
+++ /dev/null
@@ -1 +0,0 @@
-hostname os9_sw1
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version
deleted file mode 100644
index e385cf3ef..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/fixtures/show_version
+++ /dev/null
@@ -1,18 +0,0 @@
-Dell Real Time Operating System Software
-Dell Operating System Version: 2.0
-Dell Application Software Version: 9.10(0.1P13)
-Copyright (c) 1999-2016 by Dell Inc. All Rights Reserved.
-Build Time: Wed Sep 7 23:48:35 2016
-Build Path: /sites/eqx/work/swbuild01_1/build01/E9-10-0/SW/SRC
-Dell Networking OS uptime is 12 week(s), 6 day(s), 9 hour(s), 20 minute(s)
-
-System image file is "system://A"
-
-System Type: S6000-ON
-Control Processor: Intel Centerton with 3 Gbytes (3203911680 bytes) of memory, core(s) 2.
-
-16G bytes of boot flash memory.
-
- 1 32-port TE/FG (SI-ON)
- 32 Forty GigabitEthernet/IEEE 802.3 interface(s)
-
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py
deleted file mode 100644
index 57ea4e688..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/os9_module.py
+++ /dev/null
@@ -1,88 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import os
-import json
-
-from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
-
-
-fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
-fixture_data = {}
-
-
-def load_fixture(name):
- path = os.path.join(fixture_path, name)
-
- if path in fixture_data:
- return fixture_data[path]
-
- with open(path) as f:
- data = f.read()
-
- try:
- data = json.loads(data)
- except Exception:
- pass
-
- fixture_data[path] = data
- return data
-
-
-class TestDellos9Module(ModuleTestCase):
-
- def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
-
- self.load_fixtures(commands)
-
- if failed:
- result = self.failed()
- self.assertTrue(result['failed'], result)
- else:
- result = self.changed(changed)
- self.assertEqual(result['changed'], changed, result)
-
- if commands is not None:
- if sort:
- self.assertEqual(sorted(commands), sorted(result['updates']), result['updates'])
- else:
- self.assertEqual(commands, result['updates'], result['updates'])
-
- return result
-
- def failed(self):
- with self.assertRaises(AnsibleFailJson) as exc:
- self.module.main()
-
- result = exc.exception.args[0]
- self.assertTrue(result['failed'], result)
- return result
-
- def changed(self, changed=False):
- with self.assertRaises(AnsibleExitJson) as exc:
- self.module.main()
-
- result = exc.exception.args[0]
- self.assertEqual(result['changed'], changed, result)
- return result
-
- def load_fixtures(self, commands=None):
- pass
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py
deleted file mode 100644
index 6353d8f5e..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_command.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.compat.tests.mock import patch
-from ansible_collections.dellemc.os9.plugins.modules import os9_command
-from units.modules.utils import set_module_args
-from .os9_module import TestDellos9Module, load_fixture
-
-
-class TestDellos9CommandModule(TestDellos9Module):
-
- module = os9_command
-
- def setUp(self):
- super(TestDellos9CommandModule, self).setUp()
-
- self.mock_run_commands = patch('ansible.modules.network.os9.os9_command.run_commands')
- self.run_commands = self.mock_run_commands.start()
-
- def tearDown(self):
- super(TestDellos9CommandModule, self).tearDown()
- self.mock_run_commands.stop()
-
- def load_fixtures(self, commands=None):
-
- def load_from_file(*args, **kwargs):
- module, commands = args
- output = list()
-
- for item in commands:
- try:
- obj = json.loads(item['command'])
- command = obj['command']
- except ValueError:
- command = item['command']
- filename = str(command).replace(' ', '_')
- output.append(load_fixture(filename))
- return output
-
- self.run_commands.side_effect = load_from_file
-
- def test_os9_command_simple(self):
- set_module_args(dict(commands=['show version']))
- result = self.execute_module()
- self.assertEqual(len(result['stdout']), 1)
- self.assertTrue(result['stdout'][0].startswith('Dell Real Time'))
-
- def test_os9_command_multiple(self):
- set_module_args(dict(commands=['show version', 'show version']))
- result = self.execute_module()
- self.assertEqual(len(result['stdout']), 2)
- self.assertTrue(result['stdout'][0].startswith('Dell Real Time'))
-
- def test_os9_command_wait_for(self):
- wait_for = 'result[0] contains "Dell Real"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for))
- self.execute_module()
-
- def test_os9_command_wait_for_fails(self):
- wait_for = 'result[0] contains "test string"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for))
- self.execute_module(failed=True)
- self.assertEqual(self.run_commands.call_count, 10)
-
- def test_os9_command_retries(self):
- wait_for = 'result[0] contains "test string"'
- set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
- self.execute_module(failed=True)
- self.assertEqual(self.run_commands.call_count, 2)
-
- def test_os9_command_match_any(self):
- wait_for = ['result[0] contains "Dell Real"',
- 'result[0] contains "test string"']
- set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
- self.execute_module()
-
- def test_os9_command_match_all(self):
- wait_for = ['result[0] contains "Dell Real"',
- 'result[0] contains "Operating System"']
- set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
- self.execute_module()
-
- def test_os9_command_match_all_failure(self):
- wait_for = ['result[0] contains "Dell Real"',
- 'result[0] contains "test string"']
- commands = ['show version', 'show version']
- set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
- self.execute_module(failed=True)
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py
deleted file mode 100644
index 8c159eb57..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_config.py
+++ /dev/null
@@ -1,148 +0,0 @@
-#
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-from ansible.compat.tests.mock import patch
-from ansible_collections.dellemc.os9.plugins.modules import os9_config
-from units.modules.utils import set_module_args
-from .os9_module import TestDellos9Module, load_fixture
-
-
-class TestDellos9ConfigModule(TestDellos9Module):
-
- module = os9_config
-
- def setUp(self):
- super(TestDellos9ConfigModule, self).setUp()
-
- self.mock_get_config = patch('ansible.modules.network.os9.os9_config.get_config')
- self.get_config = self.mock_get_config.start()
-
- self.mock_load_config = patch('ansible.modules.network.os9.os9_config.load_config')
- self.load_config = self.mock_load_config.start()
-
- self.mock_run_commands = patch('ansible.modules.network.os9.os9_config.run_commands')
- self.run_commands = self.mock_run_commands.start()
-
- def tearDown(self):
- super(TestDellos9ConfigModule, self).tearDown()
- self.mock_get_config.stop()
- self.mock_load_config.stop()
- self.mock_run_commands.stop()
-
- def load_fixtures(self, commands=None):
- config_file = 'os9_config_config.cfg'
- self.get_config.return_value = load_fixture(config_file)
- self.load_config.return_value = None
-
- def test_os9_config_unchanged(self):
- src = load_fixture('os9_config_config.cfg')
- set_module_args(dict(src=src))
- self.execute_module()
-
- def test_os9_config_src(self):
- src = load_fixture('os9_config_src.cfg')
- set_module_args(dict(src=src))
- commands = ['hostname foo', 'interface fortyGigE 1/6',
- 'no ip address']
- self.execute_module(changed=True, commands=commands)
-
- def test_os9_config_backup(self):
- set_module_args(dict(backup=True))
- result = self.execute_module()
- self.assertIn('__backup__', result)
-
- def test_os9_config_save(self):
- set_module_args(dict(save=True))
- self.execute_module(changed=True)
- self.assertEqual(self.run_commands.call_count, 1)
- self.assertEqual(self.get_config.call_count, 0)
- self.assertEqual(self.load_config.call_count, 0)
- args = self.run_commands.call_args[0][1]
- self.assertDictContainsSubset({'command': 'copy running-config startup-config'}, args[0])
-# self.assertIn('copy running-config startup-config\r', args)
-
- def test_os9_config_lines_wo_parents(self):
- set_module_args(dict(lines=['hostname foo']))
- commands = ['hostname foo']
- self.execute_module(changed=True, commands=commands)
-
- def test_os9_config_lines_w_parents(self):
- set_module_args(dict(lines=['shutdown'], parents=['interface fortyGigE 1/6']))
- commands = ['interface fortyGigE 1/6', 'shutdown']
- self.execute_module(changed=True, commands=commands)
-
- def test_os9_config_before(self):
- set_module_args(dict(lines=['hostname foo'], before=['snmp-server contact bar']))
- commands = ['snmp-server contact bar', 'hostname foo']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os9_config_after(self):
- set_module_args(dict(lines=['hostname foo'], after=['snmp-server contact bar']))
- commands = ['hostname foo', 'snmp-server contact bar']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os9_config_before_after_no_change(self):
- set_module_args(dict(lines=['hostname router'],
- before=['snmp-server contact bar'],
- after=['snmp-server location chennai']))
- self.execute_module()
-
- def test_os9_config_config(self):
- config = 'hostname localhost'
- set_module_args(dict(lines=['hostname router'], config=config))
- commands = ['hostname router']
- self.execute_module(changed=True, commands=commands)
-
- def test_os9_config_replace_block(self):
- lines = ['description test string', 'test string']
- parents = ['interface fortyGigE 1/6']
- set_module_args(dict(lines=lines, replace='block', parents=parents))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands)
-
- def test_os9_config_match_none(self):
- lines = ['hostname router']
- set_module_args(dict(lines=lines, match='none'))
- self.execute_module(changed=True, commands=lines)
-
- def test_os9_config_match_none(self):
- lines = ['ip address 1.2.3.4/24', 'description test string']
- parents = ['interface fortyGigE 1/6']
- set_module_args(dict(lines=lines, parents=parents, match='none'))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os9_config_match_strict(self):
- lines = ['ip address 1.2.3.4/24', 'description test string',
- 'shutdown']
- parents = ['interface fortyGigE 1/6']
- set_module_args(dict(lines=lines, parents=parents, match='strict'))
- commands = parents + ['shutdown']
- self.execute_module(changed=True, commands=commands, sort=False)
-
- def test_os9_config_match_exact(self):
- lines = ['ip address 1.2.3.4/24', 'description test string',
- 'shutdown']
- parents = ['interface fortyGigE 1/6']
- set_module_args(dict(lines=lines, parents=parents, match='exact'))
- commands = parents + lines
- self.execute_module(changed=True, commands=commands, sort=False)
diff --git a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py b/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py
deleted file mode 100644
index 2a563ef12..000000000
--- a/ansible_collections/dellemc/os9/tests/unit/modules/network/os9/test_os9_facts.py
+++ /dev/null
@@ -1,106 +0,0 @@
-# (c) 2020 Red Hat Inc.
-#
-# This file is part of Ansible
-#
-# Ansible is free software: you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation, either version 3 of the License, or
-# (at your option) any later version.
-#
-# Ansible is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
-
-# Make coding more python3-ish
-from __future__ import (absolute_import, division, print_function)
-__metaclass__ = type
-
-import json
-
-from ansible.compat.tests.mock import patch
-from units.modules.utils import set_module_args
-from .os9_module import TestDellos9Module, load_fixture
-from ansible_collections.dellemc.os9.plugins.modules import os9_facts
-
-
-class TestDellos9Facts(TestDellos9Module):
-
- module = os9_facts
-
- def setUp(self):
- super(TestDellos9Facts, self).setUp()
-
- self.mock_run_command = patch(
- 'ansible.modules.network.os9.os9_facts.run_commands')
- self.run_command = self.mock_run_command.start()
-
- def tearDown(self):
- super(TestDellos9Facts, self).tearDown()
-
- self.mock_run_command.stop()
-
- def load_fixtures(self, commands=None):
-
- def load_from_file(*args, **kwargs):
- module, commands = args
- output = list()
-
- for item in commands:
- try:
- obj = json.loads(item)
- command = obj['command']
- except ValueError:
- command = item
- if '|' in command:
- command = str(command).replace('|', '')
- filename = str(command).replace(' ', '_')
- filename = filename.replace('/', '7')
- output.append(load_fixture(filename))
- return output
-
- self.run_command.side_effect = load_from_file
-
- def test_os9_facts_gather_subset_default(self):
- set_module_args(dict())
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('os9_sw1', ansible_facts['ansible_net_hostname'])
- self.assertIn('fortyGigE 0/24', ansible_facts['ansible_net_interfaces'].keys())
- self.assertEquals(3128820, ansible_facts['ansible_net_memtotal_mb'])
- self.assertEquals(3125722, ansible_facts['ansible_net_memfree_mb'])
-
- def test_os9_facts_gather_subset_config(self):
- set_module_args({'gather_subset': 'config'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('config', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals('os9_sw1', ansible_facts['ansible_net_hostname'])
- self.assertIn('ansible_net_config', ansible_facts)
-
- def test_os9_facts_gather_subset_hardware(self):
- set_module_args({'gather_subset': 'hardware'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('hardware', ansible_facts['ansible_net_gather_subset'])
- self.assertEquals(['flash', 'fcmfs', 'nfsmount', 'ftp', 'tftp', 'scp', 'http', 'https'], ansible_facts['ansible_net_filesystems'])
- self.assertEquals(3128820, ansible_facts['ansible_net_memtotal_mb'])
- self.assertEquals(3125722, ansible_facts['ansible_net_memfree_mb'])
-
- def test_os9_facts_gather_subset_interfaces(self):
- set_module_args({'gather_subset': 'interfaces'})
- result = self.execute_module()
- ansible_facts = result['ansible_facts']
- self.assertIn('default', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('interfaces', ansible_facts['ansible_net_gather_subset'])
- self.assertIn('fortyGigE 0/24', ansible_facts['ansible_net_interfaces'].keys())
- self.assertEquals(['Ma 0/0'], ansible_facts['ansible_net_neighbors'].keys())
- self.assertIn('ansible_net_interfaces', ansible_facts)
diff --git a/ansible_collections/dellemc/powerflex/.ansible-lint b/ansible_collections/dellemc/powerflex/.ansible-lint
new file mode 100644
index 000000000..f615bf255
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/.ansible-lint
@@ -0,0 +1,2 @@
+exclude_paths:
+ - .github/
diff --git a/ansible_collections/dellemc/powerflex/.ansible-lint-ignore b/ansible_collections/dellemc/powerflex/.ansible-lint-ignore
new file mode 100644
index 000000000..1b2e7405d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/.ansible-lint-ignore
@@ -0,0 +1,27 @@
+roles/powerflex_activemq/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_config/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/vars/CentOS.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/vars/Ubuntu.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/vars/RedHat.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/vars/SLES.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_lia/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_lia/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_mdm/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_mdm/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_sdc/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_sdc/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_sdr/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_sds/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_sds/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_tb/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_tb/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_webui/defaults/main.yml var-naming[no-role-prefix]
+roles/powerflex_webui/vars/main.yml var-naming[no-role-prefix]
+roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/converge.yml var-naming[no-role-prefix]
+roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/converge.yml var-naming[no-role-prefix]
+roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/converge.yml var-naming[no-role-prefix]
+roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/converge.yml var-naming[no-role-prefix]
+roles/powerflex_sds/molecule/sds_installation/converge.yml var-naming[no-role-prefix]
+roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/converge.yml var-naming[no-role-prefix]
diff --git a/ansible_collections/dellemc/powerflex/.github/CODEOWNERS b/ansible_collections/dellemc/powerflex/.github/CODEOWNERS
index f59815dc4..f783d12bf 100644
--- a/ansible_collections/dellemc/powerflex/.github/CODEOWNERS
+++ b/ansible_collections/dellemc/powerflex/.github/CODEOWNERS
@@ -12,8 +12,7 @@
# Jennifer John (Jennifer-John)
# Meenakshi Dembi (meenakshidembi691)
# Pavan Mudunuri (Pavan-Mudunuri)
-# Previnkumar G (Previnkumar-G)
# Trisha Datta (trisha-dell)
# for all files:
-* @kuttattz @Bhavneet-Sharma @Jennifer-John @meenakshidembi691 @Pavan-Mudunuri @Previnkumar-G @trisha-dell
+* @kuttattz @Bhavneet-Sharma @Jennifer-John @meenakshidembi691 @Pavan-Mudunuri @trisha-dell
diff --git a/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml
index 6a2f0fe7f..988cba19e 100644
--- a/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml
+++ b/ansible_collections/dellemc/powerflex/.github/workflows/ansible-test.yml
@@ -2,9 +2,8 @@ name: CI
on:
push:
- branches: [ main ]
+ branches: [main]
pull_request:
- branches: [ main ]
schedule:
- cron: '0 3 * * *'
@@ -15,15 +14,15 @@ jobs:
strategy:
fail-fast: false
matrix:
- ansible-version: [stable-2.12]
+ ansible-version: [stable-2.14, stable-2.15, stable-2.16, devel]
steps:
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
- - name: Set up Python 3.9
- uses: actions/setup-python@v1
+ - name: Set up Python 3.11
+ uses: actions/setup-python@v4
with:
- python-version: 3.9
+ python-version: 3.11
- name: Install ansible (${{ matrix.ansible-version }})
run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
@@ -39,49 +38,36 @@ jobs:
###
# Unit tests (OPTIONAL)
- #
+ #
# https://docs.ansible.com/ansible/latest/dev_guide/testing_units.html
unit:
- name: Unit Tests
+ name: Unit Tests (Ⓐ${{ matrix.ansible }} with ${{ matrix.python }} python)
needs: [build]
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
- python-version: ["3.9", "3.10", "3.11"]
- ansible-version: [stable-2.12, stable-2.13, stable-2.14]
+ python: ['3.9', '3.10', '3.11']
+ ansible:
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ - devel
exclude:
- # Python 3.11 is supported only from ansible-core 2.14 onwards
- - python-version: "3.11"
- ansible-version: stable-2.12
- - python-version: "3.11"
- ansible-version: stable-2.13
-
+ - ansible: stable-2.16
+ python: '3.9'
+ - ansible: devel
+ python: '3.9'
steps:
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
+ - name: Perform unit testing with ansible-test
+ uses: ansible-community/ansible-test-gh-action@release/v1
with:
- python-version: ${{ matrix.python-version }}
-
- - name: Install ansible (${{ matrix.ansible-version }}) version
- run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
-
- - name: Download migrated collection artifacts
- uses: actions/download-artifact@v1
- with:
- name: collection
- path: .cache/collection-tarballs
-
- - name: Setup Unit test Pre-requisites
- run: |
- ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
- if [ -f /home/runner/.ansible/collections/ansible_collections/dellemc/powerflex/tests/requirements.txt ]; then pip install -r /home/runner/.ansible/collections/ansible_collections/dellemc/powerflex/tests/requirements.txt; fi
-
- - name: Run Unit tests using ansible-test
- run: ansible-test units -v --color --python ${{ matrix.python-version }} --coverage
- working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/powerflex
+ testing-type: units
+ coverage: always
+ ansible-core-version: ${{ matrix.ansible }}
+ target-python-version: ${{ matrix.python }}
###
# Sanity tests (REQUIRED)
@@ -89,37 +75,75 @@ jobs:
# https://docs.ansible.com/ansible/latest/dev_guide/testing_sanity.html
sanity:
- name: Sanity Tests
+ name: Sanity (Ⓐ${{ matrix.ansible }} with ${{ matrix.python }} python)
+ needs: [build]
+ strategy:
+ matrix:
+ python: ['3.9', '3.10', '3.11']
+ ansible:
+ - stable-2.14
+ - stable-2.15
+ - stable-2.16
+ - devel
+ exclude:
+ - ansible: stable-2.16
+ python: '3.9'
+ - ansible: devel
+ python: '3.9'
+ runs-on: ubuntu-latest
+ steps:
+ - name: Perform sanity testing
+ uses: ansible-community/ansible-test-gh-action@release/v1
+ with:
+ ansible-core-version: ${{ matrix.ansible }}
+ target-python-version: ${{ matrix.python }}
+ testing-type: sanity
+ pull-request-change-detection: true
+ coverage: never
+
+ lint:
+ name: Ansible lint
runs-on: ubuntu-latest
needs: [build]
strategy:
fail-fast: false
matrix:
- ansible-version: [stable-2.12, stable-2.13, stable-2.14]
+ python-version: ["3.9", "3.10", "3.11"]
+ ansible-version: [stable-2.14, stable-2.15, stable-2.16, devel]
+ exclude:
+ # Ansible-core 2.16 is supported only from Python 3.10 onwards
+ - python-version: "3.9"
+ ansible-version: stable-2.16
+ - python-version: '3.9'
+ ansible-version: devel
steps:
- - name: Set up Python 3.9
- uses: actions/setup-python@v1
+ # Important: This sets up your GITHUB_WORKSPACE environment variable
+ - name: Checkout the source code
+ uses: actions/checkout@v3
with:
- # it is just required to run that once as "ansible-test sanity" in the docker image
- # will run on all python versions it supports.
- python-version: 3.9
+ fetch-depth: 0 # needed for progressive mode to work
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python-version }}
- name: Install ansible (${{ matrix.ansible-version }}) version
run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+ - name: Install ansible lint
+ run: pip install ansible-lint --disable-pip-version-check
+
- name: Download migrated collection artifacts
uses: actions/download-artifact@v1
with:
name: collection
path: .cache/collection-tarballs
- - name: Setup Sanity test Pre-requisites
+ - name: Install collection build
run: ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
- # run ansible-test sanity inside of Docker.
- # The docker container has all the pinned dependencies that are required
- # and all python versions ansible supports.
- - name: Run sanity tests
- run: ansible-test sanity --docker -v --color
- working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/powerflex
+ - name: Run Ansible lint
+ run: ansible-lint --show-relpath
+ working-directory: /home/runner/work/ansible-powerflex/ansible-powerflex
diff --git a/ansible_collections/dellemc/powerflex/.gitignore b/ansible_collections/dellemc/powerflex/.gitignore
new file mode 100644
index 000000000..29185f6d8
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/.gitignore
@@ -0,0 +1,3 @@
+output
+__pycache__/
+*.log
diff --git a/ansible_collections/dellemc/powerflex/CHANGELOG.rst b/ansible_collections/dellemc/powerflex/CHANGELOG.rst
index 85d9eedbe..8e67089a3 100644
--- a/ansible_collections/dellemc/powerflex/CHANGELOG.rst
+++ b/ansible_collections/dellemc/powerflex/CHANGELOG.rst
@@ -5,6 +5,79 @@ Dellemc.PowerFlex Change Logs
.. contents:: Topics
+v2.2.0
+======
+
+Minor Changes
+-------------
+
+- The Info module is enhanced to retrieve lists related to fault sets, service templates, deployments, and managed devices.
+- The SDS module has been enhanced to facilitate SDS creation within a fault set.
+
+New Modules
+-----------
+
+- dellemc.powerflex.fault_set - Manage Fault Sets on Dell PowerFlex
+
+v2.1.0
+======
+
+Minor Changes
+-------------
+
+- Added support for PowerFlex Denver version (4.5.x) to TB and Config role.
+
+v2.0.1
+======
+
+Minor Changes
+-------------
+
+- Added Ansible role to support creation and deletion of protection domain, storage pool and fault set.
+- Added Ansible role to support installation and uninstallation of Active MQ.
+- Added support for PowerFlex Denver version (4.5.x).
+- Added support for SDC installation on ESXi, Rocky Linux and Windows OS.
+
+v1.9.0
+======
+
+Minor Changes
+-------------
+
+- Added Ansible role to support installation and uninstallation of Gateway.
+- Added Ansible role to support installation and uninstallation of SDR.
+- Added Ansible role to support installation and uninstallation of Web UI.
+
+v1.8.0
+======
+
+Minor Changes
+-------------
+
+- Added Ansible role to support installation and uninstallation of LIA.
+- Added Ansible role to support installation and uninstallation of MDM.
+- Added Ansible role to support installation and uninstallation of SDS.
+- Added Ansible role to support installation and uninstallation of TB.
+
+v1.7.0
+======
+
+Minor Changes
+-------------
+
+- Added Ansible role to support installation and uninstallation of SDC.
+- Added sample playbooks for the modules.
+- Device module is enhanced to support force addition of device to the SDS.
+- Info module is enhanced to list statistics in snapshot policies.
+- Replication consistency group module is enhanced to support failover, restore, reverse, switchover, and sync operations.
+- SDC module is enhanced to configure performance profile and to remove SDC.
+- Updated modules to adhere with ansible community guidelines.
+
+New Modules
+-----------
+
+- dellemc.powerflex.snapshot_policy - Manage snapshot policies on Dell PowerFlex
+
v1.6.0
======
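The change log entries above only summarize the new dellemc.powerflex.fault_set module and the related Info and SDS enhancements. As a rough orientation, a task using that module might look like the sketch below; the parameter names (fault_set_name, protection_domain_name) and the connection options are assumptions based on the collection's usual conventions, not taken from this diff.

- name: Fault set example (illustrative sketch only)
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Create fault set "fs1" in protection domain "pd1"
      dellemc.powerflex.fault_set:
        hostname: "{{ powerflex_gateway_host }}"   # assumed connection options
        username: "{{ powerflex_user }}"
        password: "{{ powerflex_password }}"
        validate_certs: false
        fault_set_name: "fs1"                      # assumed parameter name
        protection_domain_name: "pd1"              # assumed parameter name
        state: present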
diff --git a/ansible_collections/dellemc/powerflex/FILES.json b/ansible_collections/dellemc/powerflex/FILES.json
index 4ede25f55..63f1250cd 100644
--- a/ansible_collections/dellemc/powerflex/FILES.json
+++ b/ansible_collections/dellemc/powerflex/FILES.json
@@ -8,6 +8,20 @@
"format": 1
},
{
+ "name": ".ansible-lint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1faa2fe6cb8f8029a0aae03332dc309e5144309c0a7ae2df24c25727e6f70cf",
+ "format": 1
+ },
+ {
+ "name": ".ansible-lint-ignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4b4fe6df81a08da8afb8ed93daaa9b64b5611e25105de856cc868c84730dda31",
+ "format": 1
+ },
+ {
"name": ".github",
"ftype": "dir",
"chksum_type": null,
@@ -18,7 +32,7 @@
"name": ".github/CODEOWNERS",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4412819a133de95ababdc5c0c84eec9f0a5aef9aeeb4480949a234b36eef5ab0",
+ "chksum_sha256": "02e53b61090f135ec71115de45f5be3ec18cf5ebe90c17eeb41e4a15a0fa5df5",
"format": 1
},
{
@@ -81,14 +95,21 @@
"name": ".github/workflows/ansible-test.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2251f32ec892a841be8731bb3aa6eda6576295a0f36070aa695c747163a2685f",
+ "chksum_sha256": "8d53fd6c5db3875651823edfc1873621987d946e7dcc591ff4c17eb92963df52",
+ "format": 1
+ },
+ {
+ "name": ".gitignore",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "694bb98c227881d903739c8f06c133d492a4b328a0c748372f3a25e03bde4e3d",
"format": 1
},
{
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a1337d01a02ea059a85bcbd9432d6c705f3f00fb6a1446592bab6f2b25cf5923",
+ "chksum_sha256": "22481877ddc56823870ff6e2bdd9eec6c683d39bd79f991076e3dcb75488eedd",
"format": 1
},
{
@@ -109,7 +130,7 @@
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9baeed19af496eed600058a06ae630e36264a702714a542832631d1fab314b21",
+ "chksum_sha256": "b9979afcbcc34a8b63191ce20efa873eefdcbe51a2fa53860477eb79f3563aa2",
"format": 1
},
{
@@ -123,14 +144,14 @@
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2aac2827117fcb7c4cea8a7cb983444cf76bc2e9e9e06efaa0fe53636e67cd70",
+ "chksum_sha256": "8d9577349e7feb8bdd19d062101868da3cca572de5cb9dfda913c25256dbd15f",
"format": 1
},
{
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "277bd367c5b619493b34186e20ef45bcfc4d637ee720989fa9e86888336dba3a",
+ "chksum_sha256": "51a6d9b03a9ac47e7bd547d5d375b6e1021ceff6dc26ac69f06c0fbbc9f618d5",
"format": 1
},
{
@@ -179,21 +200,21 @@
"name": "docs/CONTRIBUTING.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9562b4e8244177783c2c8f2957504debc712365301b74cf89bb9b30ae828bdc4",
+ "chksum_sha256": "6826d5757fe5a1bece4b674a3c304b6834602d4cd6aeab7bc86bfe85a36913fe",
"format": 1
},
{
"name": "docs/INSTALLATION.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "882f1c42c4e696b6261a1835474e0a12e1b8bf2d1b267668d6758bd9bac2c979",
+ "chksum_sha256": "baa58ede82a6b28b7a2d83beead5ac1a100975dcdfca4cb3e0e44253dae316e5",
"format": 1
},
{
"name": "docs/ISSUE_TRIAGE.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "40924c971f83b32f4f76fa47f03efef00cce6c71342f3c1f806e07e80123fdd3",
+ "chksum_sha256": "bf51c339af375a21f6448ff4c504fca9c94362332f73d28bfc8ce2d3b1bb6983",
"format": 1
},
{
@@ -207,21 +228,21 @@
"name": "docs/MAINTAINER_GUIDE.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9ac1eabe5d52c3d5e8551cdf66fe082681210ebc42c2a76354d3d8424c007445",
+ "chksum_sha256": "50e95d3b07b93b9633f1c77b6eaa5a19e4f4f7e9825498d958cc071ef014ac42",
"format": 1
},
{
"name": "docs/Release Notes.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "07bb492fdf0c2f31e9dc06951b1687954470c2986f8d67615abd3e7b3c20c058",
+ "chksum_sha256": "75ef358ffba38ac7cf03ff24d68626d9e5112a046a2b1eba2a10a6d345a97c0d",
"format": 1
},
{
"name": "docs/SECURITY.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f42eb3ec9c98a1525a25e029842ec8d1b8c74f47fda04ba71ef33618005b80f9",
+ "chksum_sha256": "27159d1a795f9d6328a1f85bd95b7b1f55b698de821c979b2feba6e6a70599e9",
"format": 1
},
{
@@ -242,77 +263,91 @@
"name": "docs/modules/device.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "46ca8976a32f4063c01fcc9eaf438a33f9e2daa0cb37f7a3f9e6282ca33cb42c",
+ "chksum_sha256": "3b2a96dab77d81e653c02750a23f0417e1c2ff24b31026dcd0f22134e8c2666e",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/fault_set.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ca14b067fd1764db25799feb20a0b15c2999ae9aa9f015ef6b06c8759c34f7f",
"format": 1
},
{
"name": "docs/modules/info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c5ab92b8dbd1a21ef2aa6d80bae38f2ab9e2138ca83a4906d81cfa5f1f28f72c",
+ "chksum_sha256": "ff01e0057c388959a6460ac1b1b6f026ecf7d57e190067beed9e28876cad4a67",
"format": 1
},
{
"name": "docs/modules/mdm_cluster.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "29668a2e0b10c61fc7c5446359b5ddfe8cb3b00245028d440c30b1d7dca2051b",
+ "chksum_sha256": "653df53c3af3b726a58b24fc0a78d882655a35ea40283530044ec78b5e3b7023",
"format": 1
},
{
"name": "docs/modules/protection_domain.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f8ef67d0e6f1a8bfb56e670db89dc59c7e8597a257f20a0acb0cc062fcafa14b",
+ "chksum_sha256": "789a3c50b0037017815b0f0b911ee462f50389da17ab1d55c649d572f137c822",
"format": 1
},
{
"name": "docs/modules/replication_consistency_group.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e2261304db5cbee90c7ad6350f97b952c12cf9b2fab2420c545357c6901aa9e4",
+ "chksum_sha256": "d371f5f275878b1e4995902946845a7b1e947705bd432593714d50069d20611e",
"format": 1
},
{
"name": "docs/modules/replication_pair.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e953f529b134d196e956d4a41dd23d0c6e399e8e38ecf1a626ee3616551a031f",
+ "chksum_sha256": "0a1c6ac731f5fd9343b52a740474045f2600ab0867a73692facc619b68cba6ce",
"format": 1
},
{
"name": "docs/modules/sdc.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "625ee7f9eadbc534e907cff27572c7bd0b20f1344907e7a69ae219bcef07f425",
+ "chksum_sha256": "0fbbcfcfc18c7c8ce473e614d4858887150aa689b4ba251b6f61997011c7c049",
"format": 1
},
{
"name": "docs/modules/sds.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "32d801b928f96c4ff6cf0453a6ef7e682886ff9c138a674e7251c70bb6404363",
+ "chksum_sha256": "0646d49b08b50b182c8730a5dd2ee2033e11afbeeac93f3bd45cd3e62cd702ff",
"format": 1
},
{
"name": "docs/modules/snapshot.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9b22fcc5dbb030ccf891fdfea5af1a701de60dfab3c45c1e48a748e93f375979",
+ "chksum_sha256": "cf0f97870e12989f585e770efd5571b5de22a6154b73df241c3c1a700e7cb6fe",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/snapshot_policy.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e4e54cfcca15c4e906727771c4055b8dce46dd9089daf39d694d82a688599156",
"format": 1
},
{
"name": "docs/modules/storagepool.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d69cf12ab9aacbf631022bb261a51f55cf4431a93be6a181b1baf174765fadc",
+ "chksum_sha256": "6eda9bc51092e6737beb53c7d16b22989888802171c28786ec1459733df62b5f",
"format": 1
},
{
"name": "docs/modules/volume.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f9396dd953a3b076107e9df14ea6cb4234bf6e4a5d8f258162bc6319caf2dcfe",
+ "chksum_sha256": "67dead1b0e93a922e10e44969da93e2aa17604fc13078d76477f5f39d4391114",
"format": 1
},
{
@@ -333,7 +368,196 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3593e8970a00d95a557377bd7af2f50e5212620def3ed4134c989a33dfd8ec4f",
+ "chksum_sha256": "67490e6204f2b0ef55e2cad348fa79da6d137455dd1e9a25c51cac5cc22dd464",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/device.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5e4383ecec8db6a9688a08165d4b69e0785f16e2899e8fe494a43f723a67781",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/fault_set.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68bca9de5442d9babc69af7f0e6b0d5211371e87887fcfaaf4ee16683ff5f2ac",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36b79dc7559347030ccf349b815be9e6431a807b3ab9064e009163c10970b397",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/mdm_cluster.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e4985543886231427949e4e953d67879618c90f9fa4a8de7f0f68b6aec3b3f3",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/protection_domain.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "724461ac790970ef8eeafe747cd307bef239bb777ebeca34462088785bbff289",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/replication_consistency_group.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "748579a1e3a3c23dc4bd8eabfa4010dc05c14df990fa0b9f6c362e03424f8975",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/replication_pair.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "84bb70c79296a52f29afb0ba9e2dd50a78984bb27813557907d23413a256dc38",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d45e67ade07a162a48566777e8580693e213a2712f7766e3a64b0d1dd0f4be1",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/sds.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "807c5576f3cbf5fa23a608d2323289459675ac4f05b285c1ef8dcfe9986febc1",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/snapshot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad72095356b2a0c26be85a2a1bbdf88b0a7d6765975f158191506c374ea64ec5",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/snapshot_policy.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a03fd882ceab91443e588e0f67f7a485750d3009dadaac077a284e4f19626a4",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/storagepool.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7059aed35251235a82b26324e6fab795fc260c72851d72affc54295b8733e94a",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/volume.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "30700d36779e822b4f3d41282238e60a6fa12589d3601038402d17d3463a9b30",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/group_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/group_vars/all",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05ae8d3b9bba106581f346b384915000397c6a481054917b4a398fa6646fa93b",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/host_vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/host_vars/node2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1dc167e7f271cff2a5604331d50db9ec8347d653d5e1af2aacaeb3650b5534a1",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ffb23d7831198734c5309e8318b299a7a9acd2eb0496faab883be891ed0ab3e",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/site.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c9299ecdfebf82aecf23576dafdd5c8fcf22ee3cc09b530eb589b59584844d23",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/site_powerflex45.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ca3a21c0f998486d5c01a0e60da7d40556f1c1f7e761d1e67d5e43a88c53a743",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/uninstall_powerflex.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "740113596a3141bb24ff16434c4118ff7ab37400efebd1e5fea4cd6ed0ac7d91",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/uninstall_powerflex45.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0c976ffd9d4100867876c06c7adb0c76a62e7688b23a3d617b5785d2f0d3127",
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/roles/vars_files/connection.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2f056bba9e21e4c3e3c1448d98f5f156e59265d2e777a160999d29781eadb68",
"format": 1
},
{
@@ -354,7 +578,7 @@
"name": "plugins/doc_fragments/powerflex.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "042fd3430b1ecc1ce01ce3efafa2f2d0fca1d814b891a756f686b5b543eb3bef",
+ "chksum_sha256": "801968c2f21f016d0f0861831e74ed4fc804e68a39b021c05f39b38437978bc0",
"format": 1
},
{
@@ -386,6 +610,34 @@
"format": 1
},
{
+ "name": "plugins/module_utils/storage/dell/libraries",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell/libraries/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell/libraries/configuration.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52238590dd2a4c4ad071b030b815e59a6e44b47cd27d513a9ee29c46574498a1",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/storage/dell/libraries/powerflex_base.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4dc786237539c84bae0371a2df3b4b4daaaaf9fc3672b5eb38228ed838e820fe",
+ "format": 1
+ },
+ {
"name": "plugins/module_utils/storage/dell/logging_handler.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -396,7 +648,7 @@
"name": "plugins/module_utils/storage/dell/utils.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "87e9c4d3570ace6a236080e285e3b3c12b4e5c763064334b861ddb38ea37b264",
+ "chksum_sha256": "402f820473dc2725d9d3fa1787854861651003a86873c1613951f5faefe9e68c",
"format": 1
},
{
@@ -410,84 +662,98 @@
"name": "plugins/modules/device.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9531ecfeaa468b126d90e0932723d12d434dd604a089de4770b0b2dfcd5b9253",
+ "chksum_sha256": "b105384eabdfe9a37857beea28f6407ede241572378c345f24c5576b18c92c47",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/fault_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abcd956cd704a085cc14ea5ffb860b5386285828e3b4885c89868c6f4d8bf376",
"format": 1
},
{
"name": "plugins/modules/info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "119a221a17294de856e4840235897100116fe32e51f11aae69fc735ba0979a78",
+ "chksum_sha256": "af50f59fca9f9bc84a8ecd622173279f436a7ac96654b86a5afa547aa317123f",
"format": 1
},
{
"name": "plugins/modules/mdm_cluster.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f431240cc0c646a9da72c2537c3cb0d549525082e28380e5f50076ca565c87a4",
+ "chksum_sha256": "f86ed2d81a47fd46d4a5caf8558fa7c471a5f0923eb5e9b5b04b5b22ddc8ac3c",
"format": 1
},
{
"name": "plugins/modules/protection_domain.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "53142b5ab071e1b841af1857e0408df1d2d29b9e5a261a04b0acf1e84a3eb851",
+ "chksum_sha256": "e361fb3eb15155e96e6c6a96e6499864631728497ebe515032d4821a6ad6cf0d",
"format": 1
},
{
"name": "plugins/modules/replication_consistency_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c803e9eae58c8eb1da6af18374ef3d30249b7da818cf11e3d8daabb1a3cc421",
+ "chksum_sha256": "c89740feeebf6b438580a445e7cb1910ce7ee65985f4d35f0539523b75748bc8",
"format": 1
},
{
"name": "plugins/modules/replication_pair.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5bab68d67ea04ba0edbb316edf95bad9a71a3400b250edcb5c19e114629f8543",
+ "chksum_sha256": "d4894497203afa31721e55dc1010b7e760d77dcbc1d1b870932f0423dcc7c018",
"format": 1
},
{
"name": "plugins/modules/sdc.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b8104d04864a2270a5ceb5cf7e66f681125dec8510faf60d0054f4ae0e8739c2",
+ "chksum_sha256": "c00725c74b13a4f7c14915c005fa39379c1163e88ac30af8f792191b4e385259",
"format": 1
},
{
"name": "plugins/modules/sds.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "dc02c40e9b6d49c9df942e275de073a4854cfb6e0b27f2a876583e3d094b7803",
+ "chksum_sha256": "e82ea900c116a86b5ecd3b014ea18aa2a5562bb4d4fd02f30cd994573ab2840f",
"format": 1
},
{
"name": "plugins/modules/snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3f653d2f3c1f0dc8a2d496cab89e240fe7a77e7d3d5e7f88a47f718ae0dbc07c",
+ "chksum_sha256": "6db23d9479a439ba60ccae6e840e2c6f1780fd86dbf662c97d226c2cf44589a4",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/snapshot_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19b773c17db1c93a834068233ed02d38d243665e392b9dc0ced445050a1ee208",
"format": 1
},
{
"name": "plugins/modules/storagepool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7f9c9179a1cfe50510946135ee8ff0eb7b9b027e4f7d7afa53cc20e35f6a1b5d",
+ "chksum_sha256": "efd6c30ca7e5d8d61c13e3e1c154d28d80c8962e256c7dc4a0114f34e41c678d",
"format": 1
},
{
"name": "plugins/modules/volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4c8e0677e4071d2288a6b13969279e10873e41410eaabf80c344f56206dcedb9",
+ "chksum_sha256": "05e5c8cb7d221301b4ecf801a216b1d914b165ffce7f13a6195704807868414a",
"format": 1
},
{
"name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b02f408522b5f335ac6c1ef0d7ee5dd72a0a24492de516545ac4071d315882db",
+ "chksum_sha256": "f4719535bc9626a724bf0cf63b26d3a7400b6e3651707c96b97b57feec0d7966",
"format": 1
},
{
@@ -498,45 +764,2096 @@
"format": 1
},
{
- "name": "tests",
+ "name": "roles",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/requirements.txt",
+ "name": "roles/README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "65e6091d1c8d88a703555bd13590bb95248fb0b7376d3ed1d660e2b9d65581c8",
+ "chksum_sha256": "af67416ffb08aba153e5a175c6bf11b3143377b5157c6a38bf1ec60c134a7ab7",
"format": 1
},
{
- "name": "tests/sanity",
+ "name": "roles/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f79dd5ffbd02ca2fab32d6a0f598f6ae0b3b74510531434059dbf2f7817f8099",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6755bbede6d1e524cdd77d56db388826625b72d3c34505c9ea035c7729836cc",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b82c7ccfa9146c89e0c96cde77331352f7dce52b8b941f435e0840ad8353b42c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe170de55352213d09fa2fec1345ee50866e5189fc542d80e534423d28f9d1f8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47b2715cc84c7e0d2b09d87d89fbe40e296b75ee3b486ebff26df70948e66e83",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule/activemq_install",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule/activemq_install/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a107a478ed271356da7529757ab82d4a86a75ece8fbaa4676a6bb52e6a752943",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule/activemq_install/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule/activemq_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule/activemq_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1afaa3cef1fdd48d1337dd0a240df5882652af2e0540bbc2ec630943b9b2120",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/molecule/activemq_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/tasks/install_activemq.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77bbb231e144677d15f51387b6fd097141881f7ea3fe195ee288b5cc1e265386",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f030cf68a9440e55f64453550db811a81520f22639e0c5137611c88c90bd4631",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/tasks/uninstall_activemq.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "305f78c22d168356444da38b6458315727b09dc64883405a1b2f6df81886ece9",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_activemq/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cae3210b759727b69f931f52984f70ef5daacccf7d9c9ace228b2fdfefa98ae5",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e225b63e0df93aa50aadd844eb2387ad0a85d12fca71e71b6f553508d1c4f46c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58ae5fc45575b66485d65079e57a6900228d9e899606083f9cf07e32b6c6915c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_java_CentOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f3c2c23c0040011d8e51fdf258b301fb3b6fd7c83dbedea9de1866ede4aaeed",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_java_RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f3c2c23c0040011d8e51fdf258b301fb3b6fd7c83dbedea9de1866ede4aaeed",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_java_Rocky.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f3c2c23c0040011d8e51fdf258b301fb3b6fd7c83dbedea9de1866ede4aaeed",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_java_SLES.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b51bd9a722a0fc78c239b55d613ce99ee84acf3ed78a32527260880919432b8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_java_Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6aaeed0116c9bee61cefd1567a7da5b47e1e3cd16ce4a8aa946c0df779683ded",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_CentOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b8e04b6364ab1c48faad08fb920bc773c93fc683a6810bd1f818565b964f5d6",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad23ff6e1d29ef0a873310263d7263a260cf00b4dce9829de2648431d432cbd8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_Rocky.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b8e04b6364ab1c48faad08fb920bc773c93fc683a6810bd1f818565b964f5d6",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_SLES.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ad23ff6e1d29ef0a873310263d7263a260cf00b4dce9829de2648431d432cbd8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5bd502a4e483634be08c3714d9de47d015d240014de4f80a2642db50f17e341a",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_VMkernel.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f79f89df904d70f64bde83e6ab21d158016c988b894ff8f89d5f4f20ad54900",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_packages_WindowsOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c465a08183097556c47104e59e1af4e72728946f3b313e6db817b1d2dab5e36f",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/install_powerflex.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78b2d64d46817cf14b236a3abbdbfe89e7944939fa6c64048c914cc6591470d8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ef454a91cb89c05213f2533d1ffd0587159395d84719c68d126165b6e93e0ffe",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/CentOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c3b690f7ffdaabd86cb539b4372e9695c211b9dce2614ce0db26d919c18943a",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c800cb014ab63853a900a4b9f8d74a189d2ec287d49cfb5014be7b6bbb27ebb2",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/Rocky.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c800cb014ab63853a900a4b9f8d74a189d2ec287d49cfb5014be7b6bbb27ebb2",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/SLES.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b1f0ded0f52745cbf6f34e5219c18429328fbdff918b545e601d8925f2c3ba84",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f15bca60317d404b90485e5a37088bdba2de34a685cdcf916713e175c5350e61",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/VMkernel.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26252cf7865a30e28c65dafd3b489a29ddf34bb7ced073e6ccd769f5c28d51fd",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_common/vars/WindowsOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "26252cf7865a30e28c65dafd3b489a29ddf34bb7ced073e6ccd769f5c28d51fd",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50f0bc8167947aa34a1b0cf9cea69990d9a40a293f79783ec8da7fd812371bfa",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d4b0772eafd5452c699dec19d5da4ac1fa054a3194790e62365a0a65f860498",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0a1a6a397916a8e44d902f9d7928880b18d9162de9451d0c4c26498fce1f74dc",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "67ce4af79de10ac4e36f88569c04f3c6ab48f7c547cb7a255b210367b72127e0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/molecule/configure_protection_domain",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/molecule/configure_protection_domain/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6fdfc4a605e3c48451c2d209c9db4c13e5c80049a1ca99abf00eac1c2eae95f",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/molecule/configure_protection_domain/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3118853f9e23c714f8c950127932f93c233be8b0c23cfffe7adc089e84db1128",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_config/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6c5b40a10152e19face2a4c0d04c0535edda12e1cd75a44e468cb0c3afc50e79",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78da0171fbf8710887bbd39b5acf826680369452fd2da5891d3782b2011e9670",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b5f08d57ce4cdfb0ea87265fcccccf9cd17db90e0e4dedd310c7509db00be7bd",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9efe72f76dd2dd4bd46b702eac3a8ae2a6672217ed883912592faa615ef5009",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "058a0ad19f4acacb127afed55109379e9ece33b4daa3af988c96073dc4a83d0a",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0bcd2a1c60ed6c530cb1658d4f190ca9ed27fe874af7bde2cd5ed61e1ee54a48",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0d268a507bae0c798781e45a056c42fa922728fddce8fbc4b7947a4577710223",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "103ad6ab73d206689f78f486207a61bce8b81ed00c4af630db08fdd499d22a48",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbab1f810b6ab487e71d471233bf64841998669a1864ec0ed804ed105752380d",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/molecule/gateway_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/tasks/install_gateway.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19e292b4d470e21255049aa4fbea7b29d3de1815187954f9e979e5af5821b1ba",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/tasks/install_keepalived.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c5012b69295e36ac759ab5fa4aa16cf0b2ac7a336964aa75b9e3f8473c4f0e64",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c49d3ebb5baa84f39f8c7d7e051e352aad04f8146dbb3ac67f9e30369870c2ed",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/tasks/uninstall_gateway.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f2f246e702551abd867899d2425d11d03a9fd6a18209b7c8061fe97a06e775b",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/templates/keepalived.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "863d4319f4275a7f426e88636e6da3bdf48e47fa941e0ffff5be5c91ee82c5d7",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/vars/CentOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ad2fc40b6f7a22e610d192fd8db399e6f80727d46036899160c8a87695cbcf",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ad2fc40b6f7a22e610d192fd8db399e6f80727d46036899160c8a87695cbcf",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/vars/SLES.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ad2fc40b6f7a22e610d192fd8db399e6f80727d46036899160c8a87695cbcf",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "51ad2fc40b6f7a22e610d192fd8db399e6f80727d46036899160c8a87695cbcf",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_gateway/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e15fb4eaca6121330eaf6c8bb0806486a6181e6dc01c33280bcb8d9f2725937",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21c25b94f7bb413f8fccc84d2d07930b2e229fabbd102ea07e18a9816d0a111f",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cda68414c37f37fa79b0b614cede6cf518f2590ce191d9b3ef3320706cbd35d8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d3a506ea087962f1516d76f9e061ecd071c9ca5837a6769bb3a49ba67e0e56b5",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_install",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_install/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7ca1b5a0d00a83e4c6fa99cdbb9434b372d542bc618aab4afee362656df64b2d",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_install/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f5c5ec7322ef081a8f5cb2a01a0fb089cd54f8d5d61c2b207c4fd013b074272",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "93ab98e341639d90f13e11aad6896f9984f33c3e2a70fd966f8800e8726c2fa7",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/molecule/lia_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/tasks/install_lia.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ace8d8c2789df5acea0bc156f0db8730471c2996ee7ca0194211a63708c72eb",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a9c13a1d411868f588fb056390e73b987564b46252090ceaf8bd39a5d079e90",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/tasks/uninstall_lia.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d01b0079ccdd0f0c8d9f0b750cb06d293d2a947c1d9495270c42f5d0522396e8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_lia/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8a48687c0206129cf2c25a2f27c0e348a2a77ba706ceaa06bef4f413e2e8618",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "24207df570457b47d51e23a304ea6dbfd69226d630efd6b3059a1b7d1017bd63",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f0157c2ccd54a764f80279a2d5b28daa31b459dc6afafe7004b6ae32c154fd0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/meta/argument_spec.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6f7a63718623aadf88e7ad90fcb807bca342e54123f3afc06db58d526aea5afb",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "488a2623195e46d379ca093f85696993276b836a4383fabcf322230f844f0b98",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule/mdm_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule/mdm_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e53f77f0eeba3073115f13986a4929920e8b4854e445e27e4b8d22b89e8877c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule/mdm_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule/mdm_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule/mdm_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e6fd302767d35dc7efe0cfce2ec5a8ec30eb4a8973d169876381ebe04d6e93a",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/molecule/mdm_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/add_certs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44f64893ed75464ab59e0396ce0ed30730c1cecc4023c8dbbb6a52e9e2789f2a",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/install_mdm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0ed3f5966ab8e9febfd505ea92a5c6e39eecd709ed5ab8ff3c7f32f22551e2c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/install_powerflex3x_mdm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf85c85f40c93bace8684a17a0326921e190bcecdc028d38fea65bb10d82d824",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9c56bea05dbe554fec279bb36958e567bd784461f451e0d1b1e2342f833c8b15",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "002b7d40098887264323d57dd8f7d3a0c5e3648c52618056a3d1b014065c4cc3",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/mdm_set_facts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "964e2e7af5c8e65f8c394349480de1a138a0d5f75cae6bd068d90a36b206dfd3",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/remove_mdm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "715f7a6393ead6b40bedeca50e77f0b5acb2b2b084ab96974cd63eef274521ab",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/tasks/uninstall_mdm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fbc39000ccaf46c6b11752b14170bfdf8f358ecec4abfc4156be148fcb89f2dd",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/vars/CentOS.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0835a62a7ce35708916899ff96820f3ef59738f9d4eea873c24159aa22c25912",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0835a62a7ce35708916899ff96820f3ef59738f9d4eea873c24159aa22c25912",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/vars/SLES.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4537b87aacbcef7e0b9688ec63cadf406a05482a2a6d67f363898124b0246bf9",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1605f49f1d1907ca15ff20bc675bd5f1698cab3eb75f316f3395ada48ad009a2",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_mdm/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1001e39b69436d27e5bf7d6189e67c35f3b085bd1b27d92ed08496b39ada5991",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fae5f2f2eb80349652cf02f87caf76ad149c41998c6ef2e291289f25ff5d849c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9cee69ae196e0f0abecc93111ffeda653e64c7ee46e3cd7d413ae96bb96879e0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a57d62c97bd821f681daff01237cae53aa374797d72b5648f552b4fefb4659ce",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eea1ded78ed8ffcf2e2d95d74f9a93c51a6fe0bd9583457ebd4eb440a2fd4b0b",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1bcff33e6d9440c3222812394d2f0e040f5243f7480c5d1712f1e45014d994df",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ac1e705337d1ca2e9726935da337dd4bd7f38dcf92a9564f164fad1388d479c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a5ae0cc210b48e046f8d047e483c5512022792d51b020aefd85ce455f3ff096",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8fd5671bbffbbd61bb65f32e7922cf0a81eb9cd1f740e2ebffd279719fdef04c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/sdc_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/var_values.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19a9cf102ab96db1e9767003d7e3633d4941faafde44a6881884d4c34c6020f5",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_credentials",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_credentials/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ded6dbcd70dff56f2631e6a244255f95be1cf02cc0aa69aff21259ae7fd83a1c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_credentials/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1ba46d85f8ab818324fd855aab079cda7d64620fa10790e8244164c5bdb652d5",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_credentials/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "848afca44a73743eaa85dbb917a74063e8c32fa006effa466e7bc118c8047462",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_ip",
"ftype": "dir",
"chksum_type": null,
"chksum_sha256": null,
"format": 1
},
{
- "name": "tests/sanity/ignore-2.12.txt",
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_ip/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7f97b722802c66b7fa85d54a109df152f9f14eea245c70e274fcd1431148642",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_ip/inventory",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f7aeba3eeadbdef5786e878836b153893618d27d584833642125387a860f65db",
+ "chksum_sha256": "083126dbceb96fa9eda3133b66dae550267420475dd047dea661d9a7a41d076e",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.13.txt",
+ "name": "roles/powerflex_sdc/molecule/wrong_sdc_ip/molecule.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f7aeba3eeadbdef5786e878836b153893618d27d584833642125387a860f65db",
+ "chksum_sha256": "848afca44a73743eaa85dbb917a74063e8c32fa006effa466e7bc118c8047462",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/configure_sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "835f953daacd8245f0f42663642da91d3217814ad3b6eb86464ccfd1ac5b094e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/install_sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "edcacc16abd2e2ddca0262e215130db8851d691f1c52ec54b641a1390180b386",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e8fc7838167ed1117469801021bfc4280c1dad474fc2966b5d08a1536354f48",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/register_esxi_sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ed36076c66cd63ccefb2b3deee053948a973e06525e02122da1000536533ea63",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/remove_sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "04d013b6a62d8de30992fa523069b5b8b62a265aa8d4be5fc26074049680fed5",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/uninstall_esxi_sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "65156b0e16111eaaedb8541d63e044d2ff75e007bbfd57b75fad4fb2c633ca64",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/tasks/uninstall_sdc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d90f77f4e0282050ebaa950b6a5408699afc88d437624a2d4f22e729c006c3b2",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/templates/driver_sync.conf.j2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5141f8e9855c43c072c5ea9d183b04abd77bce087e0f28e5fc1c4ec03406d6ad",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdc/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cac1229947ca43a32a09b18d5d8a612cbe34a27c91651a9fde2e17ede3ac14d",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a691f0f80b76f468b3778063faf2e9212499ab7299178b009b8b860c37824d80",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "754ebb4bd2dbdc114ed56c6f4ae3a6178d0255ef886b841b164b22d215ef4e5d",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8de2736e7e478c6305a0b65938ed54196ffcd0488150653220ed1f0f8d36ab74",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "50b31345b42827e025fd5afec900b3b23647c3dfc2d5cef152dcf407544f7f0b",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9696a14f08f84ec5053b13c2763227ac8f639a868a305a6c16f6ba76ffeecb01",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e5dd006b187975e92aa28bdc72d3096fb5943d8f5f218784f0f96cbd38127ed",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation_invalid_pd",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7960142d685e35f7c1f7f9be5e31c9dd0c8d94571265598e271f75059da1e982",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb27f1902fa084c8ef53597d01549d0449b0d341678f2471d16d9072bc010ef",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/sdr_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/molecule/var_values.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "382637add6d6a2db94e224c132f86b0d9380b501f36c9ab4268325df9c430504",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/tasks/add_sdr.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fc927ff472f1c3ad858e340c56d7e82c9bdfcbb44d48e5a9d03338285689f129",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c76dfc83066a2d92a77b25fa60fdfb15aef7e3dec5502dca515e088bed69afee",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/tasks/remove_sdr.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2852a831ab356a6d98ffc7c4168ab07555371253333dcf020fe651a06d1aa56c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/tasks/sdr_set_facts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0002943fb0a2f5b53a94bb3eed51c925aae0082afaa410e7311f2186e7e5a503",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sdr/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c315c7375a4cd99eb3ae1ddc4787aad21fb5a12612f8e443a4db508939afe66",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "89d4828a0e5898780fbe6fe9280824d3de1f26d12b7e06e2e9da8253f0fb8af2",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "289a6838353344a9d0d2b4eb5fe8fe492cb288921c81a056134dd75643b31f0e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/meta/argument_spec.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97b60036a372edaf905d2cdef4308350b54d33c97abc28a3b20ece909cba19a0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "764bcf3640368151dd82536115c56321c99f7800f00ed836e0f8525112f86711",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/sds_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/sds_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52385b21a6278abf2eb63119413f9ee69f28d486e024941845e08df89a57dcb2",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/sds_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/sds_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/sds_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d71ebf5f0128d362492c7cc0295e445741ac74b9b08393fcaae1c59954206860",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/sds_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/molecule/var_values.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8b7c4419e76fff49ac90e65639d49cd0cbfdb761ef9f0d57c990a11a89d73618",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/tasks/install_sds.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23d5238154edc4827205019c6b649941414f6f80155d6b0273a74c6f435f3c46",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d095071bd11063c88ad32c000f9195035b18974a09689d73a25af4675627dd0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/tasks/uninstall_sds.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e532c9c65cc6ba5ac908094bd36e86981665ca2957af2b1049a820ed6c46544",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_sds/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d8445cfb0caca38e7d795dd6377f7f82c28e5a19362a987ac2a4b7f8c67dd30",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a4c2a6465154e52d18d00744cf92fb9323f791a521a95855036ae95a57ef4f33",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4245290a68a27bf29cb14c1ec2f7819b966d793bf206f9f96546f75943d856ea",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/meta/argument_spec.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8a0837c5ad44db6b6992cf7872cee9e14b61fc329f42622d0d51c2683454a39",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b47f1125de3a8508e82f8f24565340ff4be18fe9ac627bf353714feb79c41243",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "968447c9752cfdc4ba23f931124685b222438b2d777e95b123f94271c51540b5",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/tb_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/tb_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6565516f258d1145478dc9f9875692a4e360efb4747e8716e7c2a4dc4fca1de8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/tb_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/tb_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/tb_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3201f8a7b21a280ae338081acae67ccaaaacf2933f0cfc6d8431123c340a754",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/tb_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/var_values.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e11323f688c1f2cd2a14b673f3b693e276a8fa7f74c40451c191c0d4e1fc7dc6",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/wrong_tb_credentials",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/wrong_tb_credentials/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "801f340e649aa9c47d6a276ba04b0fd935343827681b4a44f9ef4e147bc4da73",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/wrong_tb_credentials/inventory",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d4d765dfe4bad1c992c32818407ee9bf4371a8ea27effc8f68cd8895bcb03586",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/molecule/wrong_tb_credentials/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "848afca44a73743eaa85dbb917a74063e8c32fa006effa466e7bc118c8047462",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks/install_tb.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "caa6a56b3fa75ddbb6faf6e1f1d66f41365b876126c16739de31d331a1c97d73",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks/install_tb3x.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "97b692e2c115a1e7b4b11f4e8de9dd5260720f683dae1259c2840eff2701fa2d",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks/install_tb4x.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40b6cf736e196e90e599dc4808970ebd64b9a1f848ad7ea0c29184408ecb6ea8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08a3777cb59f0f1717c06fa2684a2e302604864aa583375248bb44bb6d1c970a",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks/set_tb_ips.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f6c75067f5575f48ada843abd9456ace5582fdc9f8e0d5483ea46724a0f35f0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/tasks/uninstall_tb.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8cecdc3db2cde3ad690e85e5f61e60532228c0448f8d9211a7caa502c783fa03",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_tb/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55503bf2cc541fd1a1ab486a6a373b4a7137db3dfb447fe9204b6a632a616658",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "672eec983b5b452217a7e3171ab25d01f49b129eef9385879bcbfcde2ce13643",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d069029f1fc52b0e96c74117aedc12b00eefd65a80dd0328cb6bc73971d5aed1",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/meta/argument_specs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "faacb710d3c980aa1cd61fe93e796995d6a81fd909f9c3602016228009c280ca",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b61f169bc3623baedec1ca9fc731fdde72b54b5dd9a0cc91a6768dc93bbe5d1",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_installation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_installation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb770492b3ab6c09306cc3c5b73929f9d00a26aaf66e8f026d1a1e057dfa8c3c",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_installation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb0b220f53dfd25374445f277fc8d624b645baa8b17724f963423e54cfd610ad",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb07e75205f81f1ef5ce32019fd265ff69cd41ca7f9f8ea4350d850007a11f1e",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_uninstallation",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_uninstallation/converge.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c14da947fb44bed580dd1158cc0d6ce7632f27aa35ff7bbdab860d42cea58d69",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/molecule/webui_uninstallation/molecule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f52d711103d50a437830c6fbcd04fb4bab49a0f82f6d26d1c791c6e8488dd090",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/tasks/install_webui.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "744e4d2c5c8c207cb66f0182a8693398337b7b01187bff23bfc93187db151af8",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb47d17fbae2d1e555c21a33c91df81945b093be1e700fcca57041e72178d337",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/tasks/uninstall_webui.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f5b341c049bea1530507bc9d254405e46c52935c95f8e58133dbc263d0be8e0",
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "roles/powerflex_webui/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "245d6c7575348c20112262cadaab84a9b121061ca65e5a35b9d5bf6e77e1bb33",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
"format": 1
},
{
"name": "tests/sanity/ignore-2.14.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f7aeba3eeadbdef5786e878836b153893618d27d584833642125387a860f65db",
+ "chksum_sha256": "2bbf73bd4314d2ddf3be259531d6053876cf3eedfb2064d3f499230d35a5e29d",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bbf73bd4314d2ddf3be259531d6053876cf3eedfb2064d3f499230d35a5e29d",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.16.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "284e11dfcae8cd7417880d605cff0c5fca335d506122707fc0dff6485480c1eb",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.17.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e67ee7fbc5af526a8d088ddfdb7ce9a576c4fadac1d8146a5615ddc6654d3269",
"format": 1
},
{
@@ -575,6 +2892,41 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/module_utils/libraries",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/libraries/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/libraries/fail_json.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4651e84e9043d17705aa947c73b9e7ec49b71b11f7c5cbb5612f7e91dc0ff92",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/libraries/initial_mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "751778383a2d8f1f5b31d6c6d91753ae34573734123732fdc86693005a90926a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/libraries/powerflex_unit_base.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de609408a22a2862a0acde77a4d5824e4447a543f94b78f392c909f63a6b01b3",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/module_utils/mock_api_exception.py",
"ftype": "file",
"chksum_type": "sha256",
@@ -582,10 +2934,31 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/module_utils/mock_device_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d7ee89ad10ea3b96aaebeaa9f28613bc42eba0bf044e4c894361623d28f1fad1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_fail_json.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eb805b800a7d22fd4df5e1c1af400fb97039287432df90aa7cb2e14a4f8f465",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_fault_set_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "85d0af59ac2484455b0437ee20f4281042bffded623ad997e0578aa5dffbc8b7",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/module_utils/mock_info_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5af286883c568b21c90cf5444cde5742123db9819355ffaac2904bb68f31977c",
+ "chksum_sha256": "cab119466db38050d440bc7201e8b8a5124c5ec0696f4a695089a970c02d7602",
"format": 1
},
{
@@ -599,21 +2972,28 @@
"name": "tests/unit/plugins/module_utils/mock_protection_domain_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1005c8842b81ff4c5613992e5f80fb25cfa6ac36d1a9274e574caf04d7510584",
+ "chksum_sha256": "28de95a3b777048b61d1ef5ef4813fb5051bcbd200342750336cc988baa53f39",
"format": 1
},
{
"name": "tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5bf628a051d160856652bda22e45d35303db612874edc75ed4e2e8b4a270fba3",
+ "chksum_sha256": "7bc14ef8f7a52a7a2b3a9046129d4fae30472f023b25b4676310244abff77e24",
"format": 1
},
{
"name": "tests/unit/plugins/module_utils/mock_replication_pair_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "46cd89d10f82a9d5e6992ff1b7470d79c37e6da91daed75d6381e8d52d45dca4",
+ "chksum_sha256": "520bd7a73c6eba60ce38a116236b35c61a168562c11f7ca20681110f8699540a",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_sdc_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b60f8ff13f723a48ef0fffe747d73c3ffac1296f72e08a3444eeb70959fd0f22",
"format": 1
},
{
@@ -624,17 +3004,31 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/module_utils/mock_sds_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d582368c1264a56168e5a09adeeeed2fb90a51482abf8fef89c8dc369ccc7508",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_snapshot_policy_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f4007b601d0de25a813c234ba5f5248b4d0dc406d7b6e36cf6331db5bc17bd6",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/module_utils/mock_storagepool_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f2552190a68b46919f44632fe65972825624ab201e330771e1992e57de253d27",
+ "chksum_sha256": "5e7e3dfc7f6ac68a53092f5ba3292ec4c7c861f6972ca9c290f223ef10c8afad",
"format": 1
},
{
"name": "tests/unit/plugins/module_utils/mock_volume_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5e61698f03d1a7ec229c5ffb6a4def656e806e5dd234a0e15b2136fba839a2d7",
+ "chksum_sha256": "e13dde31e52c7f6b1644b3e139ffa6cbb3a36fa7a3f51f273f85c778a3ea0cc5",
"format": 1
},
{
@@ -652,10 +3046,24 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/modules/test_device.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "594ee3ea44a69c55a9ab207b180f4ab0f704a30ccbf5d9cee33744e1d7867148",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_fault_set.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "883cd39ea6dc8150116ad11ca9545edf893b8b511b7d2abe20879926d0f7c029",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/modules/test_info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ed576811997bf0be077369720b8cd5e0cfeacaabc1a51146236ba6fbfdff0492",
+ "chksum_sha256": "e50a6d20e1e279a687e1b6b699530a3abe8d83768e0d215189e54b0f9d8a12f0",
"format": 1
},
{
@@ -669,42 +3077,63 @@
"name": "tests/unit/plugins/modules/test_protection_domain.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d2d1e320a857f994db4e48ce5406ca5bbfe21cd7c37d9f8d3bb1e07db8d333e",
+ "chksum_sha256": "fd400c16d0e64a7197daaed295af4d76953136911171258051688aec388ae0f9",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_replication_consistency_group.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "3e96fa4bddbbf108235fe087e8c3d2d0a0f737d38048150c4936296ca6a5288f",
+ "chksum_sha256": "f0093d18a711eecb2287755008d89d3a16dcc81f342e0104614f08f3c47cbd48",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_replication_pair.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ede467557d94a229f775030f699ebd93e5f33be6061fcf76f78797fdf2039b6f",
+ "chksum_sha256": "971564355415601cfe4cd54aa55ce6a6b1ca56e9ba064e56cfd1761de6683c87",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_sdc.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f990cc3ab9a042f124f93a6b2c772f8433ab606f9325e1999dd93a3593cea5d",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_sds.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "798578aceabaf9a3b5323117cbbd02b1b9dde2091ce5c210c63273d372cb7f90",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_snapshot_policy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27f26fc2d95cb5d4d22f74a42b36795c33cc7a4739f5ac0027d608b735f15fe7",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_storagepool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4f78779e6b346b612915d92323ca0d1850e68da65b95f32205cc585b622b48be",
+ "chksum_sha256": "8c6bad9def6e6b32b7358bca2c4494be3c077fe49b47b08fc2e0c7305fcdb685",
"format": 1
},
{
"name": "tests/unit/plugins/modules/test_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4742f5675d613a39bd5929b24e6f515d034bebf8defc1c98bb8fe69444785015",
+ "chksum_sha256": "baca53f593d97d7c3ab2f76767c1d00870cb7cee265b704f216d85c73cb268ac",
"format": 1
},
{
- "name": "ansible.cfg",
+ "name": "tests/unit/requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5360ab997ea2c7ed8a6efc7e8324e7b6ec7479af057fe15ff23fe885f05b58b2",
+ "chksum_sha256": "65e6091d1c8d88a703555bd13590bb95248fb0b7376d3ed1d660e2b9d65581c8",
"format": 1
}
],
diff --git a/ansible_collections/dellemc/powerflex/MANIFEST.json b/ansible_collections/dellemc/powerflex/MANIFEST.json
index c4490da54..50f25fa4c 100644
--- a/ansible_collections/dellemc/powerflex/MANIFEST.json
+++ b/ansible_collections/dellemc/powerflex/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "dellemc",
"name": "powerflex",
- "version": "1.6.0",
+ "version": "2.2.0",
"authors": [
"Akash Shendge <ansible.team@dell.com>",
"Arindam Datta <ansible.team@dell.com>",
@@ -10,7 +10,8 @@
"Rajshree Khare <ansible.team@dell.com>",
"Bhavneet Sharma <ansible.team@dell.com>",
"Ananthu S Kuttattu <ansible.team@dell.com>",
- "Trisha Datta <ansible.team@dell.com>"
+ "Trisha Datta <ansible.team@dell.com>",
+ "Pavan Mudunuri <ansible.team@dell.com>"
],
"readme": "README.md",
"tags": [
@@ -23,16 +24,16 @@
],
"license_file": null,
"dependencies": {},
- "repository": "https://github.com/dell/ansible-powerflex/tree/1.6.0",
- "documentation": "https://github.com/dell/ansible-powerflex/tree/1.6.0/docs",
- "homepage": "https://github.com/dell/ansible-powerflex/tree/1.6.0",
+ "repository": "https://github.com/dell/ansible-powerflex/tree/2.2.0",
+ "documentation": "https://github.com/dell/ansible-powerflex/tree/2.2.0/docs",
+ "homepage": "https://github.com/dell/ansible-powerflex/tree/2.2.0",
"issues": "https://www.dell.com/community/Automation/bd-p/Automation"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "9401ba4e254d7dab2e4f6dda4e4fdeeba8908b05c66552d423da74ef44e53ec3",
+ "chksum_sha256": "7b6cac9b3795806daa37e451654c294ca112eab9b180e45ccc02b4ae0c2f12ff",
"format": 1
},
"format": 1
diff --git a/ansible_collections/dellemc/powerflex/README.md b/ansible_collections/dellemc/powerflex/README.md
index b40e14f64..a12d86b97 100644
--- a/ansible_collections/dellemc/powerflex/README.md
+++ b/ansible_collections/dellemc/powerflex/README.md
@@ -2,33 +2,33 @@
The Ansible Modules for Dell Technologies (Dell) PowerFlex allow Data Center and IT administrators to use RedHat Ansible to automate and orchestrate the provisioning and management of Dell PowerFlex storage systems.
-The capabilities of the Ansible modules are managing SDCs, volumes, snapshots, storage pools, replication consistency groups, replication pairs, SDSs, devices, protection domains, MDM cluster, and to gather high level facts from the storage system. The options available are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request.
+The capabilities of the Ansible modules are managing SDCs, volumes, snapshots, snapshot policy, storage pools, replication consistency groups, replication pairs, SDSs, devices, protection domains, MDM cluster, Fault Set and to gather high level facts from the storage system. The options available are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request.
## Table of contents
-* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/CODE_OF_CONDUCT.md)
-* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/MAINTAINER_GUIDE.md)
-* [Committer guide](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/COMMITTER_GUIDE.md)
-* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/CONTRIBUTING.md)
-* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/BRANCHING.md)
-* [List of adopters](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/ADOPTERS.md)
-* [Maintainers](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/MAINTAINERS.md)
-* [Support](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/SUPPORT.md)
+* [Code of conduct](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/CODE_OF_CONDUCT.md)
+* [Maintainer guide](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/MAINTAINER_GUIDE.md)
+* [Committer guide](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/COMMITTER_GUIDE.md)
+* [Contributing guide](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/CONTRIBUTING.md)
+* [Branching strategy](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/BRANCHING.md)
+* [List of adopters](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/ADOPTERS.md)
+* [Maintainers](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/MAINTAINERS.md)
+* [Support](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/SUPPORT.md)
* [License](#license)
-* [Security](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/SECURITY.md)
+* [Security](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/SECURITY.md)
* [Prerequisites](#prerequisites)
* [List of Ansible modules for Dell PowerFlex](#list-of-ansible-modules-for-dell-powerflex)
* [Installation and execution of Ansible modules for Dell PowerFlex](#installation-and-execution-of-ansible-modules-for-dell-powerflex)
* [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation)
## License
-The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/1.6.0/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/1.6.0/MODULE-LICENSE) for the full terms.
+The Ansible collection for PowerFlex is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-powerflex/blob/2.2.0/LICENSE) for the full terms. Ansible modules and modules utilities that are part of the Ansible collection for PowerFlex are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-powerflex/blob/2.2.0/MODULE-LICENSE) for the full terms.
## Prerequisites
| **Ansible Modules** | **PowerFlex/VxFlex OS Version** | **SDK version** | **Python version** | **Ansible** |
|---------------------|-----------------------|-------|--------------------|--------------------------|
-| v1.6.0 |3.5 <br> 3.6 <br> 4.0 | 1.7.0 | 3.9.x <br> 3.10.x <br> 3.11.x | 2.12 <br> 2.13 <br> 2.14 |
+| v2.2.0 |3.6 <br> 4.0 <br> 4.5 | 1.9.0 | 3.9.x <br> 3.10.x <br> 3.11.x | 2.14 <br> 2.15 <br> 2.16 |
* Please follow PyPowerFlex installation instructions on [PyPowerFlex Documentation](https://github.com/dell/python-powerflex)
@@ -36,27 +36,29 @@ The Ansible collection for PowerFlex is released and licensed under the GPL-3.0
The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed.
## List of Ansible modules for Dell PowerFlex
- * [Info module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/info.rst)
- * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/snapshot.rst)
- * [SDC module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/sdc.rst)
- * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/storagepool.rst)
- * [Volume module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/volume.rst)
- * [SDS module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/sds.rst)
- * [Device Module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/device.rst)
- * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/protection_domain.rst)
- * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/mdm_cluster.rst)
- * [Replication Consistency Grop Module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/replication_consistency_group.rst)
- * [Replication Pair Module](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/modules/replication_pair.rst)
+ * [Info module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/info.rst)
+ * [Snapshot module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/snapshot.rst)
+ * [SDC module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/sdc.rst)
+ * [Storage pool module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/storagepool.rst)
+ * [Volume module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/volume.rst)
+ * [SDS module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/sds.rst)
+ * [Device Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/device.rst)
+ * [Protection Domain Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/protection_domain.rst)
+ * [MDM Cluster Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/mdm_cluster.rst)
+ * [Replication Consistency Group Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/replication_consistency_group.rst)
+ * [Replication Pair Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/replication_pair.rst)
+ * [Snapshot Policy Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/snapshot_policy.rst)
+ * [Fault Sets Module](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/modules/fault_set.rst)
## Installation and execution of Ansible modules for Dell PowerFlex
-The installation and execution steps of Ansible modules for Dell PowerFlex can be found [here](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/INSTALLATION.md).
+The installation and execution steps of Ansible modules for Dell PowerFlex can be found [here](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/INSTALLATION.md).
## Releasing, Maintenance and Deprecation
-Ansible Modules for Dell Technnologies PowerFlex follows [Semantic Versioning](https://semver.org/).
+Ansible Modules for Dell Technologies PowerFlex follows [Semantic Versioning](https://semver.org/).
New version will be release regularly if significant changes (bug fix or new feature) are made in the collection.
-Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/BRANCHING.md).
+Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/BRANCHING.md).
Ansible Modules for Dell Technologies PowerFlex deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html). \ No newline at end of file
diff --git a/ansible_collections/dellemc/powerflex/ansible.cfg b/ansible_collections/dellemc/powerflex/ansible.cfg
deleted file mode 100644
index c10d1da22..000000000
--- a/ansible_collections/dellemc/powerflex/ansible.cfg
+++ /dev/null
@@ -1,484 +0,0 @@
-# config file for ansible -- https://ansible.com/
-# ===============================================
-
-# nearly all parameters can be overridden in ansible-playbook
-# or with command line flags. ansible will read ANSIBLE_CONFIG,
-# ansible.cfg in the current working directory, .ansible.cfg in
-# the home directory or /etc/ansible/ansible.cfg, whichever it
-# finds first
-
-[defaults]
-
-# some basic default values...
-
-#inventory = /etc/ansible/hosts
-#library = /usr/share/my_modules/
-#module_utils = /usr/share/my_module_utils/
-#remote_tmp = ~/.ansible/tmp
-#local_tmp = ~/.ansible/tmp
-#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
-#forks = 5
-#poll_interval = 15
-#sudo_user = root
-#ask_sudo_pass = True
-#ask_pass = True
-#transport = smart
-#remote_port = 22
-#module_lang = C
-#module_set_locale = False
-
-# plays will gather facts by default, which contain information about
-# the remote system.
-#
-# smart - gather by default, but don't regather if already gathered
-# implicit - gather by default, turn off with gather_facts: False
-# explicit - do not gather by default, must say gather_facts: True
-#gathering = implicit
-
-# This only affects the gathering done by a play's gather_facts directive,
-# by default gathering retrieves all facts subsets
-# all - gather all subsets
-# network - gather min and network facts
-# hardware - gather hardware facts (longest facts to retrieve)
-# virtual - gather min and virtual facts
-# facter - import facts from facter
-# ohai - import facts from ohai
-# You can combine them using comma (ex: network,virtual)
-# You can negate them using ! (ex: !hardware,!facter,!ohai)
-# A minimal set of facts is always gathered.
-#gather_subset = all
-
-# some hardware related facts are collected
-# with a maximum timeout of 10 seconds. This
-# option lets you increase or decrease that
-# timeout to something more suitable for the
-# environment.
-# gather_timeout = 10
-
-# additional paths to search for roles in, colon separated
-#roles_path = /etc/ansible/roles
-
-# uncomment this to disable SSH key host checking
-#host_key_checking = False
-
-# change the default callback, you can only have one 'stdout' type enabled at a time.
-#stdout_callback = skippy
-
-
-## Ansible ships with some plugins that require whitelisting,
-## this is done to avoid running all of a type by default.
-## These setting lists those that you want enabled for your system.
-## Custom plugins should not need this unless plugin author specifies it.
-
-# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
-#callback_whitelist = timer, mail
-
-# Determine whether includes in tasks and handlers are "static" by
-# default. As of 2.0, includes are dynamic by default. Setting these
-# values to True will make includes behave more like they did in the
-# 1.x versions.
-#task_includes_static = False
-#handler_includes_static = False
-
-# Controls if a missing handler for a notification event is an error or a warning
-#error_on_missing_handler = True
-
-# change this for alternative sudo implementations
-#sudo_exe = sudo
-
-# What flags to pass to sudo
-# WARNING: leaving out the defaults might create unexpected behaviours
-#sudo_flags = -H -S -n
-
-# SSH timeout
-#timeout = 10
-
-# default user to use for playbooks if user is not specified
-# (/usr/bin/ansible will use current user as default)
-#remote_user = root
-
-# logging is off by default unless this path is defined
-# if so defined, consider logrotate
-#log_path = /var/log/ansible.log
-
-# default module name for /usr/bin/ansible
-#module_name = command
-
-# use this shell for commands executed under sudo
-# you may need to change this to bin/bash in rare instances
-# if sudo is constrained
-#executable = /bin/sh
-
-# if inventory variables overlap, does the higher precedence one win
-# or are hash values merged together? The default is 'replace' but
-# this can also be set to 'merge'.
-#hash_behaviour = replace
-
-# by default, variables from roles will be visible in the global variable
-# scope. To prevent this, the following option can be enabled, and only
-# tasks and handlers within the role will see the variables there
-#private_role_vars = yes
-
-# list any Jinja2 extensions to enable here:
-#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
-
-# if set, always use this private key file for authentication, same as
-# if passing --private-key to ansible or ansible-playbook
-#private_key_file = /path/to/file
-
-# If set, configures the path to the Vault password file as an alternative to
-# specifying --vault-password-file on the command line.
-#vault_password_file = /path/to/vault_password_file
-
-# format of string {{ ansible_managed }} available within Jinja2
-# templates indicates to users editing templates files will be replaced.
-# replacing {file}, {host} and {uid} and strftime codes with proper values.
-#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
-# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
-# in some situations so the default is a static string:
-#ansible_managed = Ansible managed
-
-# by default, ansible-playbook will display "Skipping [host]" if it determines a task
-# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
-# messages. NOTE: the task header will still be shown regardless of whether or not the
-# task is skipped.
-#display_skipped_hosts = True
-
-# by default, if a task in a playbook does not include a name: field then
-# ansible-playbook will construct a header that includes the task's action but
-# not the task's args. This is a security feature because ansible cannot know
-# if the *module* considers an argument to be no_log at the time that the
-# header is printed. If your environment doesn't have a problem securing
-# stdout from ansible-playbook (or you have manually specified no_log in your
-# playbook on all of the tasks where you have secret information) then you can
-# safely set this to True to get more informative messages.
-#display_args_to_stdout = False
-
-# by default (as of 1.3), Ansible will raise errors when attempting to dereference
-# Jinja2 variables that are not set in templates or action lines. Uncomment this line
-# to revert the behavior to pre-1.3.
-#error_on_undefined_vars = False
-
-# by default (as of 1.6), Ansible may display warnings based on the configuration of the
-# system running ansible itself. This may include warnings about 3rd party packages or
-# other conditions that should be resolved if possible.
-# to disable these warnings, set the following value to False:
-#system_warnings = True
-
-# by default (as of 1.4), Ansible may display deprecation warnings for language
-# features that should no longer be used and will be removed in future versions.
-# to disable these warnings, set the following value to False:
-#deprecation_warnings = True
-
-# (as of 1.8), Ansible can optionally warn when usage of the shell and
-# command module appear to be simplified by using a default Ansible module
-# instead. These warnings can be silenced by adjusting the following
-# setting or adding warn=yes or warn=no to the end of the command line
-# parameter string. This will for example suggest using the git module
-# instead of shelling out to the git command.
-# command_warnings = False
-
-
-# set plugin path directories here, separate with colons
-#action_plugins = /usr/share/ansible/plugins/action
-#cache_plugins = /usr/share/ansible/plugins/cache
-#callback_plugins = /usr/share/ansible/plugins/callback
-#connection_plugins = /usr/share/ansible/plugins/connection
-#lookup_plugins = /usr/share/ansible/plugins/lookup
-#inventory_plugins = /usr/share/ansible/plugins/inventory
-#vars_plugins = /usr/share/ansible/plugins/vars
-#filter_plugins = /usr/share/ansible/plugins/filter
-#test_plugins = /usr/share/ansible/plugins/test
-#terminal_plugins = /usr/share/ansible/plugins/terminal
-#strategy_plugins = /usr/share/ansible/plugins/strategy
-
-
-# by default, ansible will use the 'linear' strategy but you may want to try
-# another one
-#strategy = free
-
-# by default callbacks are not loaded for /bin/ansible, enable this if you
-# want, for example, a notification or logging callback to also apply to
-# /bin/ansible runs
-#bin_ansible_callbacks = False
-
-
-# don't like cows? that's unfortunate.
-# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
-#nocows = 1
-
-# set which cowsay stencil you'd like to use by default. When set to 'random',
-# a random stencil will be selected for each task. The selection will be filtered
-# against the `cow_whitelist` option below.
-#cow_selection = default
-#cow_selection = random
-
-# when using the 'random' option for cowsay, stencils will be restricted to this list.
-# it should be formatted as a comma-separated list with no spaces between names.
-# NOTE: line continuations here are for formatting purposes only, as the INI parser
-# in python does not support them.
-#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
-# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
-# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
-
-# don't like colors either?
-# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
-#nocolor = 1
-
-# if set to a persistent type (not 'memory', for example 'redis') fact values
-# from previous runs in Ansible will be stored. This may be useful when
-# wanting to use, for example, IP information from one group of servers
-# without having to talk to them in the same playbook run to get their
-# current IP information.
-#fact_caching = memory
-
-
-# retry files
-# When a playbook fails by default a .retry file will be created in ~/
-# You can disable this feature by setting retry_files_enabled to False
-# and you can change the location of the files by setting retry_files_save_path
-
-#retry_files_enabled = False
-#retry_files_save_path = ~/.ansible-retry
-
-# squash actions
-# Ansible can optimise actions that call modules with list parameters
-# when looping. Instead of calling the module once per with_ item, the
-# module is called once with all items at once. Currently this only works
-# under limited circumstances, and only with parameters named 'name'.
-#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper
-
-# prevents logging of task data, off by default
-#no_log = False
-
-# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
-#no_target_syslog = False
-
-# controls whether Ansible will raise an error or warning if a task has no
-# choice but to create world readable temporary files to execute a module on
-# the remote machine. This option is False by default for security. Users may
-# turn this on to have behaviour more like Ansible prior to 2.1.x. See
-# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
-# for more secure ways to fix this than enabling this option.
-#allow_world_readable_tmpfiles = False
-
-# controls the compression level of variables sent to
-# worker processes. At the default of 0, no compression
-# is used. This value must be an integer from 0 to 9.
-#var_compression_level = 9
-
-# controls what compression method is used for new-style ansible modules when
-# they are sent to the remote system. The compression types depend on having
-# support compiled into both the controller's python and the client's python.
-# The names should match with the python Zipfile compression types:
-# * ZIP_STORED (no compression. available everywhere)
-# * ZIP_DEFLATED (uses zlib, the default)
-# These values may be set per host via the ansible_module_compression inventory
-# variable
-#module_compression = 'ZIP_DEFLATED'
-
-# This controls the cutoff point (in bytes) on --diff for files
-# set to 0 for unlimited (RAM may suffer!).
-#max_diff_size = 1048576
-
-# This controls how ansible handles multiple --tags and --skip-tags arguments
-# on the CLI. If this is True then multiple arguments are merged together. If
-# it is False, then the last specified argument is used and the others are ignored.
-# This option will be removed in 2.8.
-#merge_multiple_cli_flags = True
-
-# Controls showing custom stats at the end, off by default
-#show_custom_stats = True
-
-# Controls which files to ignore when using a directory as inventory with
-# possibly multiple sources (both static and dynamic)
-#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
-
-# This family of modules use an alternative execution path optimized for network appliances
-# only update this setting if you know how this works, otherwise it can break module execution
-#network_group_modules=eos, nxos, ios, iosxr, junos, vyos
-
-# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
-# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
-# jinja2 templating language which will be run through the templating engine.
-# ENABLING THIS COULD BE A SECURITY RISK
-#allow_unsafe_lookups = False
-
-# set default errors for all plays
-#any_errors_fatal = False
-
-[inventory]
-# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini'
-#enable_plugins = host_list, virtualbox, yaml, constructed
-
-# ignore these extensions when parsing a directory as inventory source
-#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
-
-# ignore files matching these patterns when parsing a directory as inventory source
-#ignore_patterns=
-
-# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
-#unparsed_is_failed=False
-
-[privilege_escalation]
-#become=True
-#become_method=sudo
-#become_user=root
-#become_ask_pass=False
-
-[paramiko_connection]
-
-# uncomment this line to cause the paramiko connection plugin to not record new host
-# keys encountered. Increases performance on new host additions. Setting works independently of the
-# host key checking setting above.
-#record_host_keys=False
-
-# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
-# line to disable this behaviour.
-#pty=False
-
-# paramiko will default to looking for SSH keys initially when trying to
-# authenticate to remote devices. This is a problem for some network devices
-# that close the connection after a key failure. Uncomment this line to
-# disable the Paramiko look for keys function
-#look_for_keys = False
-
-# When using persistent connections with Paramiko, the connection runs in a
-# background process. If the host doesn't already have a valid SSH key, by
-# default Ansible will prompt to add the host key. This will cause connections
-# running in background processes to fail. Uncomment this line to have
-# Paramiko automatically add host keys.
-#host_key_auto_add = True
-
-[ssh_connection]
-
-# ssh arguments to use
-# Leaving off ControlPersist will result in poor performance, so use
-# paramiko on older platforms rather than removing it, -C controls compression use
-#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
-
-# The base directory for the ControlPath sockets.
-# This is the "%(directory)s" in the control_path option
-#
-# Example:
-# control_path_dir = /tmp/.ansible/cp
-#control_path_dir = ~/.ansible/cp
-
-# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
-# port and username (empty string in the config). The hash mitigates a common problem users
-# found with long hostames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
-# In those cases, a "too long for Unix domain socket" ssh error would occur.
-#
-# Example:
-# control_path = %(directory)s/%%h-%%r
-#control_path =
-
-# Enabling pipelining reduces the number of SSH operations required to
-# execute a module on the remote server. This can result in a significant
-# performance improvement when enabled, however when using "sudo:" you must
-# first disable 'requiretty' in /etc/sudoers
-#
-# By default, this option is disabled to preserve compatibility with
-# sudoers configurations that have requiretty (the default on many distros).
-#
-#pipelining = False
-
-# Control the mechanism for transferring files (old)
-# * smart = try sftp and then try scp [default]
-# * True = use scp only
-# * False = use sftp only
-#scp_if_ssh = smart
-
-# Control the mechanism for transferring files (new)
-# If set, this will override the scp_if_ssh option
-# * sftp = use sftp to transfer files
-# * scp = use scp to transfer files
-# * piped = use 'dd' over SSH to transfer files
-# * smart = try sftp, scp, and piped, in that order [default]
-#transfer_method = smart
-
-# if False, sftp will not use batch mode to transfer files. This may cause some
-# types of file transfer failures impossible to catch however, and should
-# only be disabled if your sftp version has problems with batch mode
-#sftp_batch_mode = False
-
-# The -tt argument is passed to ssh when pipelining is not enabled because sudo
-# requires a tty by default.
-#use_tty = True
-
-[persistent_connection]
-
-# Configures the persistent connection timeout value in seconds. This value is
-# how long the persistent connection will remain idle before it is destroyed.
-# If the connection doesn't receive a request before the timeout value
-# expires, the connection is shutdown. The default value is 30 seconds.
-#connect_timeout = 30
-
-# Configures the persistent connection retry timeout. This value configures the
-# the retry timeout that ansible-connection will wait to connect
-# to the local domain socket. This value must be larger than the
-# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
-# The default value is 15 seconds.
-#connect_retry_timeout = 15
-
-# The command timeout value defines the amount of time to wait for a command
-# or RPC call before timing out. The value for the command timeout must
-# be less than the value of the persistent connection idle timeout (connect_timeout)
-# The default value is 10 second.
-#command_timeout = 10
-
-[accelerate]
-#accelerate_port = 5099
-#accelerate_timeout = 30
-#accelerate_connect_timeout = 5.0
-
-# The daemon timeout is measured in minutes. This time is measured
-# from the last activity to the accelerate daemon.
-#accelerate_daemon_timeout = 30
-
-# If set to yes, accelerate_multi_key will allow multiple
-# private keys to be uploaded to it, though each user must
-# have access to the system via SSH to add a new key. The default
-# is "no".
-#accelerate_multi_key = yes
-
-[selinux]
-# file systems that require special treatment when dealing with security context
-# the default behaviour that copies the existing context or uses the user default
-# needs to be changed to use the file system dependent context.
-#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p
-
-# Set this to yes to allow libvirt_lxc connections to work without SELinux.
-#libvirt_lxc_noseclabel = yes
-
-[colors]
-#highlight = white
-#verbose = blue
-#warn = bright purple
-#error = red
-#debug = dark gray
-#deprecate = purple
-#skip = cyan
-#unreachable = red
-#ok = green
-#changed = yellow
-#diff_add = green
-#diff_remove = red
-#diff_lines = cyan
-
-
-[diff]
-# Always print diff when running ( same as always running with -D/--diff )
-# always = no
-
-# Set how many context lines to show in diff
-# context = 3
-
-[galaxy]
-server_list = automation_hub
-
-[galaxy_server.automation_hub]
-url=https://cloud.redhat.com/api/automation-hub/
-auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token/
-
-token=eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJhZDUyMjdhMy1iY2ZkLTRjZjAtYTdiNi0zOTk4MzVhMDg1NjYifQ.eyJpYXQiOjE2NzkzMDkyMTcsImp0aSI6IjJmZTdjZjA1LTAxZDQtNDMwMi1iMWNlLTgzNjlhNWJmNjViMyIsImlzcyI6Imh0dHBzOi8vc3NvLnJlZGhhdC5jb20vYXV0aC9yZWFsbXMvcmVkaGF0LWV4dGVybmFsIiwiYXVkIjoiaHR0cHM6Ly9zc28ucmVkaGF0LmNvbS9hdXRoL3JlYWxtcy9yZWRoYXQtZXh0ZXJuYWwiLCJzdWIiOiJmOjUyOGQ3NmZmLWY3MDgtNDNlZC04Y2Q1LWZlMTZmNGZlMGNlNjpqZW5uaWZlcl9qb2huIiwidHlwIjoiT2ZmbGluZSIsImF6cCI6ImNsb3VkLXNlcnZpY2VzIiwibm9uY2UiOiJmZTY2MGYxMS1kODFjLTQ2YWItYTkzNS1hZTAxZmY2MjA2OTciLCJzZXNzaW9uX3N0YXRlIjoiMzI3ZDlhNjgtZTkxMi00N2NiLWI3NDctNWE5YmQzZTJlZjlmIiwic2NvcGUiOiJvcGVuaWQgYXBpLmlhbS5zZXJ2aWNlX2FjY291bnRzIGFwaS5pYW0ub3JnYW5pemF0aW9uIG9mZmxpbmVfYWNjZXNzIiwic2lkIjoiMzI3ZDlhNjgtZTkxMi00N2NiLWI3NDctNWE5YmQzZTJlZjlmIn0.iGbseoF6AXetWNa0sFsfzbmzvizwaBcY0rd14YFJqcU \ No newline at end of file
diff --git a/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml
index 51f28a3fa..b2098aee6 100644
--- a/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/dellemc/powerflex/changelogs/.plugin-cache.yaml
@@ -6,6 +6,7 @@ plugins:
callback: {}
cliconf: {}
connection: {}
+ filter: {}
httpapi: {}
inventory: {}
lookup: {}
@@ -55,6 +56,11 @@ plugins:
name: snapshot
namespace: ''
version_added: 1.0.0
+ snapshot_policy:
+ description: Manage snapshot policies on Dell PowerFlex
+ name: snapshot_policy
+ namespace: ''
+ version_added: 1.7.0
storagepool:
description: Managing Dell PowerFlex storage pool
name: storagepool
@@ -68,5 +74,6 @@ plugins:
netconf: {}
shell: {}
strategy: {}
+ test: {}
vars: {}
-version: 1.6.0
+version: 2.1.0
diff --git a/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml
index 37e6bbb43..8211b2b91 100644
--- a/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml
+++ b/ansible_collections/dellemc/powerflex/changelogs/changelog.yaml
@@ -2,90 +2,147 @@ ancestor: null
releases:
1.0.0:
modules:
- - description: Gathering information about Dell PowerFlex
- name: info
- namespace: ''
- - description: Manage SDCs on Dell PowerFlex
- name: sdc
- namespace: ''
- - description: Manage Snapshots on Dell PowerFlex
- name: snapshot
- namespace: ''
- - description: Managing Dell PowerFlex storage pool
- name: storagepool
- namespace: ''
- - description: Manage volumes on Dell PowerFlex
- name: volume
- namespace: ''
+ - description: Gathering information about Dell PowerFlex
+ name: info
+ namespace: ''
+ - description: Manage SDCs on Dell PowerFlex
+ name: sdc
+ namespace: ''
+ - description: Manage Snapshots on Dell PowerFlex
+ name: snapshot
+ namespace: ''
+ - description: Managing Dell PowerFlex storage pool
+ name: storagepool
+ namespace: ''
+ - description: Manage volumes on Dell PowerFlex
+ name: volume
+ namespace: ''
release_date: '2021-03-24'
1.1.0:
changes:
minor_changes:
- - Added dual licensing.
- - Gatherfacts module is enhanced to list devices.
+ - Added dual licensing.
+ - Gatherfacts module is enhanced to list devices.
modules:
- - description: Manage device on Dell PowerFlex
- name: device
- namespace: ''
- - description: Manage SDS on Dell PowerFlex
- name: sds
- namespace: ''
+ - description: Manage device on Dell PowerFlex
+ name: device
+ namespace: ''
+ - description: Manage SDS on Dell PowerFlex
+ name: sds
+ namespace: ''
release_date: '2021-09-28'
1.1.1:
changes:
deprecated_features:
- - The dellemc_powerflex_gatherfacts module is deprecated and replaced with dellemc_powerflex_info
+ - The dellemc_powerflex_gatherfacts module is deprecated and replaced with dellemc_powerflex_info
trivial:
- - Product Guide, Release Notes and ReadMe updated as per community guidelines.
+ - Product Guide, Release Notes and ReadMe updated as per community guidelines.
release_date: '2021-12-16'
1.2.0:
changes:
minor_changes:
- - Names of previously released modules have been changed from dellemc_powerflex_\<module name>
- to \<module name>.
+ - Names of previously released modules have been changed from dellemc_powerflex_\<module
+ name> to \<module name>.
modules:
- - description: Manage Protection Domain on Dell PowerFlex
- name: protection_domain
- namespace: ''
+ - description: Manage Protection Domain on Dell PowerFlex
+ name: protection_domain
+ namespace: ''
release_date: '2022-03-25'
1.3.0:
changes:
minor_changes:
- - Added execution environment manifest file to support building an execution
- environment with ansible-builder.
- - Enabled the check_mode support for info module
+ - Added execution environment manifest file to support building an execution
+ environment with ansible-builder.
+ - Enabled the check_mode support for info module
modules:
- - description: Manage MDM cluster on Dell PowerFlex
- name: mdm_cluster
- namespace: ''
+ - description: Manage MDM cluster on Dell PowerFlex
+ name: mdm_cluster
+ namespace: ''
release_date: '2022-06-28'
1.4.0:
changes:
minor_changes:
- - Added support for 4.0.x release of PowerFlex OS.
- - Info module is enhanced to support the listing volumes and storage pools with
- statistics data.
- - Storage pool module is enhanced to get the details with statistics data.
- - Volume module is enhanced to get the details with statistics data.
+ - Added support for 4.0.x release of PowerFlex OS.
+ - Info module is enhanced to support the listing volumes and storage pools with
+ statistics data.
+ - Storage pool module is enhanced to get the details with statistics data.
+ - Volume module is enhanced to get the details with statistics data.
release_date: '2022-09-27'
1.5.0:
changes:
minor_changes:
- - Info module is enhanced to support the listing replication consistency groups.
- - Renamed gateway_host to hostname
- - Renamed verifycert to validate_certs.
- - Updated modules to adhere with ansible community guidelines.
+ - Info module is enhanced to support the listing replication consistency groups.
+ - Renamed gateway_host to hostname
+ - Renamed verifycert to validate_certs.
+ - Updated modules to adhere with ansible community guidelines.
modules:
- - description: Manage replication consistency groups on Dell PowerFlex
- name: replication_consistency_group
- namespace: ''
+ - description: Manage replication consistency groups on Dell PowerFlex
+ name: replication_consistency_group
+ namespace: ''
release_date: '2022-12-22'
1.6.0:
changes:
minor_changes:
- - Info module is enhanced to support the listing of replication pairs.
+ - Info module is enhanced to support the listing of replication pairs.
modules:
- - description: Manage replication pairs on Dell PowerFlex
- name: replication_pair
- namespace: ''
+ - description: Manage replication pairs on Dell PowerFlex
+ name: replication_pair
+ namespace: ''
release_date: '2023-03-31'
+ 1.7.0:
+ changes:
+ minor_changes:
+ - Added Ansible role to support installation and uninstallation of SDC.
+ - Added sample playbooks for the modules.
+ - Device module is enhanced to support force addition of device to the SDS.
+ - Info module is enhanced to list statistics in snapshot policies.
+ - Replication consistency group module is enhanced to support failover, restore,
+ reverse, switchover, and sync operations.
+ - SDC module is enhanced to configure performance profile and to remove SDC.
+ - Updated modules to adhere with ansible community guidelines.
+ modules:
+ - description: Manage snapshot policies on Dell PowerFlex
+ name: snapshot_policy
+ namespace: ''
+ release_date: '2023-06-30'
+ 1.8.0:
+ changes:
+ minor_changes:
+ - Added Ansible role to support installation and uninstallation of LIA.
+ - Added Ansible role to support installation and uninstallation of MDM.
+ - Added Ansible role to support installation and uninstallation of SDS.
+ - Added Ansible role to support installation and uninstallation of TB.
+ release_date: '2023-08-31'
+ 1.9.0:
+ changes:
+ minor_changes:
+ - Added Ansible role to support installation and uninstallation of Gateway.
+ - Added Ansible role to support installation and uninstallation of SDR.
+ - Added Ansible role to support installation and uninstallation of Web UI.
+ release_date: '2023-09-29'
+ 2.0.1:
+ changes:
+ minor_changes:
+ - Added Ansible role to support creation and deletion of protection domain,
+ storage pool and fault set.
+ - Added Ansible role to support installation and uninstallation of Active MQ.
+ - Added support for PowerFlex Denver version(4.5.x)
+ - Added support for SDC installation on ESXi, Rocky Linux and Windows OS.
+ release_date: '2023-10-31'
+ 2.1.0:
+ changes:
+ minor_changes:
+ - Added support for PowerFlex Denver version(4.5.x) to TB and Config role.
+ release_date: '2023-11-30'
+ 2.2.0:
+ changes:
+ minor_changes:
+ - The SDS module has been enhanced to facilitate SDS creation within
+ a fault set.
+ - The Info module is enhanced to retrieve lists related to fault sets,
+ service templates, deployments, and managed devices.
+ modules:
+ - description: Manage Fault Sets on Dell PowerFlex
+ name: fault_set
+ namespace: ''
+ release_date: '2024-02-29'
diff --git a/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md
index 642f94d23..726c931d0 100644
--- a/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md
+++ b/ansible_collections/dellemc/powerflex/docs/CONTRIBUTING.md
@@ -1,5 +1,5 @@
<!--
-Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -10,7 +10,7 @@ You may obtain a copy of the License at
# How to contribute
-Become one of the contributors to this project! We thrive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.6.0/CODE_OF_CONDUCT.md).
+Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/2.2.0/CODE_OF_CONDUCT.md).
## Table of contents
@@ -76,7 +76,7 @@ Triage helps ensure that issues resolve quickly by:
If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell PowerFlex community will thank you for saving them time by spending some of yours.
-Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/1.6.0/ISSUE_TRIAGE.md).
+Read more about the ways you can [Triage issues](https://github.com/dell/ansible-powerflex/blob/2.2.0/ISSUE_TRIAGE.md).
## Your first contribution
@@ -89,7 +89,7 @@ When you're ready to contribute, it's time to create a pull request.
## Branching
-* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/1.6.0/BRANCHING.md)
+* [Branching Strategy for Ansible modules for Dell PowerFlex](https://github.com/dell/ansible-powerflex/blob/2.2.0/BRANCHING.md)
## Signing your commits
@@ -144,7 +144,7 @@ Make sure that the title for your pull request uses the same format as the subje
### Quality gates for pull requests
-GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that get checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/1.6.0/SUPPORT.md).
+GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/dell/ansible-powerflex/blob/2.2.0/SUPPORT.md).
#### Code sanitization
diff --git a/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md
index 1dff703ec..86861574b 100644
--- a/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md
+++ b/ansible_collections/dellemc/powerflex/docs/INSTALLATION.md
@@ -1,5 +1,5 @@
<!--
-Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@ You may obtain a copy of the License at
* Download the latest tar build from any of the available distribution channel [Ansible Galaxy](https://galaxy.ansible.com/dellemc/powerflex) /[Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/powerflex) and use this command to install the collection anywhere in your system:
- ansible-galaxy collection install dellemc-powerflex-1.6.0.tar.gz -p <install_path>
+ ansible-galaxy collection install dellemc-powerflex-2.2.0.tar.gz -p <install_path>
* Set the environment variable:
@@ -68,7 +68,7 @@ You may obtain a copy of the License at
## Ansible modules execution
-The Ansible server must be configured with Python library for PowerFlex to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules.
+The Ansible server must be configured with the Python library for PowerFlex to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/) provide information on the different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on the various parameters which need to be configured before running the modules.
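As a minimal sketch of preparing the control node (the version pin mirrors the PyPowerFlex requirement stated in the module documentation and is illustrative), the SDK can be installed with the ansible.builtin.pip module:

```yaml
# Minimal sketch: install the PowerFlex Python SDK on the Ansible control node.
- name: Prepare control node for PowerFlex modules
  hosts: localhost
  connection: local
  gather_facts: false
  tasks:
    - name: Install PyPowerFlex (version pin is illustrative)
      ansible.builtin.pip:
        name: "PyPowerFlex>=1.9.0"
```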
## SSL certificate validation
diff --git a/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md
index 18423aa1e..8871da8e3 100644
--- a/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md
+++ b/ansible_collections/dellemc/powerflex/docs/ISSUE_TRIAGE.md
@@ -1,5 +1,5 @@
<!--
-Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -43,8 +43,8 @@ Should explain what happened, what was expected and how to reproduce it together
- Ansible Version: [e.g. 2.14]
- Python Version [e.g. 3.11]
- - Ansible modules for Dell PowerFlex Version: [e.g. 1.6.0]
- - PowerFlex SDK version: [e.g. PyPowerFlex 1.7.0]
+ - Ansible modules for Dell PowerFlex Version: [e.g. 2.2.0]
+ - PowerFlex SDK version: [e.g. PyPowerFlex 1.9.0]
- Any other additional information...
#### Feature requests
diff --git a/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md
index b92cebabe..e2d1d90e0 100644
--- a/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md
+++ b/ansible_collections/dellemc/powerflex/docs/MAINTAINER_GUIDE.md
@@ -1,5 +1,5 @@
<!--
-Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -27,7 +27,7 @@ If a candidate is approved, a Maintainer contacts the candidate to invite them t
## Maintainer policies
* Lead by example
-* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/1.6.0/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/1.6.0/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/1.6.0/COMMITTER_GUIDE.md) guides
+* Follow the [Code of Conduct](https://github.com/dell/ansible-powerflex/blob/2.2.0/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-powerflex/blob/2.2.0/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-powerflex/blob/2.2.0/COMMITTER_GUIDE.md) guides
* Promote a friendly and collaborative environment within our community
* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests
* Criticize code, not people. Ideally, tell the contributor a better way to do what they need.
diff --git a/ansible_collections/dellemc/powerflex/docs/Release Notes.md b/ansible_collections/dellemc/powerflex/docs/Release Notes.md
index a45d82eb7..9ce24b6d0 100644
--- a/ansible_collections/dellemc/powerflex/docs/Release Notes.md
+++ b/ansible_collections/dellemc/powerflex/docs/Release Notes.md
@@ -1,8 +1,8 @@
**Ansible Modules for Dell Technologies PowerFlex**
=========================================
-### Release notes 1.6.0
+### Release notes 2.2.0
-> © 2023 Dell Inc. or its subsidiaries. All rights reserved. Dell
+> © 2024 Dell Inc. or its subsidiaries. All rights reserved. Dell
> and other trademarks are trademarks of Dell Inc. or its
> subsidiaries. Other trademarks may be trademarks of their respective
> owners.
@@ -26,9 +26,9 @@ The table in this section lists the revision history of this document.
Table 1. Revision history
-| Revision | Date | Description |
-|----------|----------------|-------------------------------------------------------------|
-| 01 | March 2023 | Current release of Ansible Modules for Dell PowerFlex 1.6.0 |
+| Revision | Date | Description |
+|----------|-----------------|-------------------------------------------------------------|
+| 01 | February 2024 | Current release of Ansible Modules for Dell PowerFlex 2.2.0 |
Product description
-------------------
@@ -36,7 +36,7 @@ Product description
The Ansible modules for Dell PowerFlex are used to automate and orchestrate
the deployment, configuration, and management of Dell PowerFlex storage
systems. The capabilities of Ansible modules are managing volumes,
-storage pools, SDCs, snapshots, SDSs, replication consistency groups, replication pairs, devices, protection domain and MDM
+storage pools, SDCs, snapshots, snapshot policies, SDSs, replication consistency groups, replication pairs, devices, protection domains, fault sets and MDM
cluster, and obtaining high-level information about a PowerFlex system.
The modules use playbooks to list, show, create, delete, and modify
each of the entities.
@@ -44,28 +44,30 @@ each of the entities.
New features and enhancements
-----------------------------
Along with the previous release deliverables, this release supports the following features -
-- Info module is enhanced to support the listing of replication pairs.
-- Added new module for replication pairs.
+- Fault set module is introduced to create, get details, rename and delete fault sets.
+- The SDS module has been enhanced to facilitate SDS creation within a fault set.
+- The Info module is enhanced to retrieve lists related to fault sets, service templates, deployments, and managed devices.
Known issues
------------
-- Setting the RF cache and performance profile of the SDS during its creation fails intermittently on PowerFlex version 3.5
-- The creation of replication pair fails when copy_type is specified as OfflineCopy on PowerFlex version 4.0
+- Setting the RF cache and performance profile of the SDS during its creation fails intermittently on PowerFlex version 3.5.
+- The creation of replication pair fails when copy_type is specified as OfflineCopy on PowerFlex version 4.0.
+- Pagination in info module with offset and limit fetches more than expected records when listing service templates or deployments.
+- Templates are fetched using the info module in spite of setting include_templates to false when listing deployments.
Limitations
-----------
-- The API is accepting a negative integer value for overall_limit in the network_limits for a specific protection domain.
+
Distribution
------------
The software package is available for download from the [Ansible Modules
-for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.6.0) page.
+for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/2.2.0) page.
Documentation
-------------
-The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/1.6.0/docs)
+The documentation is available on [Ansible Modules for PowerFlex GitHub](https://github.com/dell/ansible-powerflex/tree/2.2.0/docs)
page. It includes the following:
- README
- Release Notes (this document)
- - Product Guide
diff --git a/ansible_collections/dellemc/powerflex/docs/SECURITY.md b/ansible_collections/dellemc/powerflex/docs/SECURITY.md
index e34152322..77323e456 100644
--- a/ansible_collections/dellemc/powerflex/docs/SECURITY.md
+++ b/ansible_collections/dellemc/powerflex/docs/SECURITY.md
@@ -1,5 +1,5 @@
<!--
-Copyright (c) 2022 Dell Inc., or its subsidiaries. All Rights Reserved.
+Copyright (c) 2024 Dell Inc., or its subsidiaries. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -12,7 +12,7 @@ You may obtain a copy of the License at
The Ansible modules for Dell PowerFlex repository are inspected for security vulnerabilities via blackduck scans and static code analysis.
-In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/1.6.0/docs/CONTRIBUTING.md#Pull-requests) for more information.
+In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-powerflex/blob/2.2.0/docs/CONTRIBUTING.md#Pull-requests) for more information.
## Reporting a vulnerability
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/device.rst b/ansible_collections/dellemc/powerflex/docs/modules/device.rst
index 35ae246aa..4fcd82854 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/device.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/device.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -134,6 +134,14 @@ Parameters
State of the device.
+ force (optional, bool, False)
+ Using the Force flag to add a device.
+
+ Use this flag to overwrite existing data on the device.
+
+ Use this flag with caution, because all data on the device will be destroyed.
+
+
hostname (True, str, None)
IP or FQDN of the PowerFlex host.
@@ -201,6 +209,22 @@ Examples
protection_domain_name: "domain1"
external_acceleration_type: "ReadAndWrite"
state: "present"
+ - name: Add a device with force flag
+ dellemc.powerflex.device:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ media_type: "HDD"
+ device_name: "device2"
+ storage_pool_name: "pool1"
+ protection_domain_name: "domain1"
+ external_acceleration_type: "ReadAndWrite"
+ force: true
+ state: "present"
- name: Get device details using device_id
dellemc.powerflex.device:
hostname: "{{hostname}}"
@@ -232,23 +256,23 @@ Examples
state: "present"
- name: Remove a device using device_id
dellemc.powerflex.device:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
- device_id: "76eb7e2f00010000"
- state: "absent"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ device_id: "76eb7e2f00010000"
+ state: "absent"
- name: Remove a device using (current_pathname, sds_id)
dellemc.powerflex.device:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
- current_pathname: "/dev/sdb"
- sds_name: "node1"
- state: "absent"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst b/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst
new file mode 100644
index 000000000..55b9972bc
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/docs/modules/fault_set.rst
@@ -0,0 +1,215 @@
+.. _fault_set_module:
+
+
+fault_set -- Manage Fault Sets on Dell PowerFlex
+================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing fault sets on PowerFlex storage system includes creating, getting details, renaming and deleting a fault set.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
+- Python 3.9, 3.10 or 3.11.
+
+
+
+Parameters
+----------
+
+ fault_set_name (optional, str, None)
+ Name of the Fault Set.
+
+ Mutually exclusive with *fault_set_id*.
+
+
+ fault_set_id (optional, str, None)
+ ID of the Fault Set.
+
+ Mutually exclusive with *fault_set_name*.
+
+
+ protection_domain_name (optional, str, None)
+ Name of protection domain.
+
+ Mutually exclusive with *protection_domain_id*.
+
+
+ protection_domain_id (optional, str, None)
+ ID of the protection domain.
+
+ Mutually exclusive with *protection_domain_name*.
+
+
+ fault_set_new_name (optional, str, None)
+ New name of the fault set.
+
+
+ state (optional, str, present)
+ State of the Fault Set.
+
+
+ hostname (True, str, None)
+ IP or FQDN of the PowerFlex host.
+
+
+ username (True, str, None)
+ The username of the PowerFlex host.
+
+
+ password (True, str, None)
+ The password of the PowerFlex host.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with PowerFlex host.
+
+
+ timeout (False, int, 120)
+ Time after which connection will get terminated.
+
+ It is to be mentioned in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+ - When *fault_set_name* is provided, *protection_domain_name* or *protection_domain_id* must be provided.
+ - The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+
+ - name: Create Fault Set on Protection Domain
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_name: "{{ fault_set_name }}"
+ protection_domain_name: "{{ pd_name }}"
+ state: present
+
+ - name: Rename Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_name: "{{ fault_set_name }}"
+ fault_set_new_name: "{{ fault_set_new_name }}"
+ state: present
+
+ - name: Get details of a Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_id: "{{ fault_set_id }}"
+ state: present
+
+ - name: Delete Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_id: "{{ fault_set_id }}"
+ state: absent
+
+
+
+Return Values
+-------------
+
+changed (always, bool, false)
+ Whether or not the resource has changed.
+
+
+fault_set_details (always, dict, {'protectionDomainId': 'da721a8300000000', 'protectionDomainName': 'sample-pd', 'name': 'fs_001', 'id': 'eb44b70500000000', 'links': []})
+ Details of fault set.
+
+
+ protectionDomainId (, str, )
+ Unique identifier of the protection domain.
+
+
+ protectionDomainName (, str, )
+ Name of the protection domain.
+
+
+ name (, str, )
+ Name of the fault set.
+
+
+ id (, str, )
+ Unique identifier of the fault set.
+
+
+ SDS (, list, )
+ List of SDS associated to the fault set.
+
+
+ links (, list, )
+ Fault set links.
+
+
+ href (, str, )
+ Fault Set instance URL.
+
+
+ rel (, str, )
+ Relationship of fault set with different entities.
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Carlos Tronco (@ctronco) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/info.rst b/ansible_collections/dellemc/powerflex/docs/modules/info.rst
index 735fb04f4..ced8e0aaa 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/info.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/info.rst
@@ -14,6 +14,8 @@ Synopsis
Gathering information about Dell PowerFlex storage system includes getting the api details, list of volumes, SDSs, SDCs, storage pools, protection domains, snapshot policies, and devices.
+Gathering information about Dell PowerFlex Manager includes getting the list of managed devices, deployments and service templates.
+
Requirements
@@ -21,8 +23,8 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.8.0.
- Python 3.9, 3.10 or 3.11.
@@ -51,6 +53,14 @@ Parameters
Replication pairs - ``replication_pair``.
+ Fault Sets - ``fault_set``.
+
+ Service templates - ``service_template``.
+
+ Managed devices - ``managed_device``.
+
+ Deployments - ``deployment``.
+
filters (optional, list, None)
List of filters to support filtered output for storage entities.
@@ -67,12 +77,60 @@ Parameters
filter_operator (True, str, None)
Operation to be performed on filter key.
+ Choice *'contains'* is supported for gather_subset keys *service_template*, *managed_device*, *deployment*.
+
filter_value (True, str, None)
Value of the filter key.
+ limit (optional, int, 50)
+ Page limit.
+
+ Supported for gather_subset keys *service_template*, *managed_device*, *deployment*.
+
+
+ offset (optional, int, 0)
+ Pagination offset.
+
+ Supported for gather_subset keys *service_template*, *managed_device*, *deployment*.
+
+
+ sort (optional, str, None)
+ Sort the returned components based on specified field.
+
+ Supported for gather_subset keys *service_template*, *managed_device*, *deployment*.
+
+ The supported sort keys for the gather_subset can be found in the PowerFlex Manager API documentation at developer.dell.com.
+
+
+ include_devices (optional, bool, True)
+ Include devices in response.
+
+ Applicable when gather_subset is *deployment*.
+
+
+ include_template (optional, bool, True)
+ Include service templates in response.
+
+ Applicable when gather_subset is *deployment*.
+
+
+ full (optional, bool, False)
+ Specify if response is full or brief.
+
+ Applicable when gather_subset is *deployment*, *service_template*.
+
+ For *deployment* specify to use full templates including resources in response.
+
+
+ include_attachments (optional, bool, True)
+ Include attachments.
+
+ Applicable when gather_subset is *service_template*.
+
+
hostname (True, str, None)
IP or FQDN of the PowerFlex host.
@@ -111,6 +169,8 @@ Notes
.. note::
- The *check_mode* is supported.
+ - The supported filter keys for the gather_subset can be found in the PowerFlex Manager API documentation at developer.dell.com.
+ - The *filter*, *sort*, *limit* and *offset* options will be ignored when more than one *gather_subset* is specified along with *service_template*, *managed_device* or *deployment*.
- The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform.
@@ -138,6 +198,7 @@ Examples
- device
- rcg
- replication_pair
+ - fault_set
- name: Get a subset list of PowerFlex volumes
dellemc.powerflex.info:
@@ -152,6 +213,35 @@ Examples
filter_operator: "equal"
filter_value: "ansible_test"
+ - name: Get deployment and resource provisioning info
+ dellemc.powerflex.info:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - managed_device
+ - deployment
+ - service_template
+
+ - name: Get deployment with filter, sort, pagination
+ dellemc.powerflex.info:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - deployment
+ filters:
+ - filter_key: "name"
+ filter_operator: "contains"
+ filter_value: "partial"
+ sort: name
+ limit: 10
+ offset: 10
+ include_devices: true
+ include_template: true
+
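In the same spirit, a sketch built from the parameters documented above (credential values are placeholders) can fetch service templates in full together with their attachments:

```yaml
- name: Get service templates with attachments
  dellemc.powerflex.info:
    hostname: "{{hostname}}"
    username: "{{username}}"
    password: "{{password}}"
    validate_certs: "{{validate_certs}}"
    gather_subset:
      - service_template
    full: true
    include_attachments: true
```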
Return Values
@@ -638,6 +728,119 @@ Replication_pairs (Always, list, {'copyType': 'OnlineCopy', 'id': '23aa0bc900000
+Fault_Sets (always, list, [{'protectionDomainId': 'da721a8300000000', 'protectionDomainName': 'fault_set_1', 'name': 'at1zbs1t6cp2sds1d1fs1', 'SDS': [], 'id': 'eb44b70500000000', 'links': [{'rel': 'self', 'href': '/api/instances/FaultSet::eb44b70500000000'}, {'rel': '/api/FaultSet/relationship/Statistics', 'href': '/api/instances/FaultSet::eb44b70500000000/relationships/Statistics'}, {'rel': '/api/FaultSet/relationship/Sds', 'href': '/api/instances/FaultSet::eb44b70500000000/relationships/Sds'}, {'rel': '/api/parent/relationship/protectionDomainId', 'href': '/api/instances/ProtectionDomain::da721a8300000000'}]}, {'protectionDomainId': 'da721a8300000000', 'protectionDomainName': 'fault_set_2', 'name': 'at1zbs1t6cp2sds1d1fs3', 'SDS': [], 'id': 'eb44b70700000002', 'links': [{'rel': 'self', 'href': '/api/instances/FaultSet::eb44b70700000002'}, {'rel': '/api/FaultSet/relationship/Statistics', 'href': '/api/instances/FaultSet::eb44b70700000002/relationships/Statistics'}, {'rel': '/api/FaultSet/relationship/Sds', 'href': '/api/instances/FaultSet::eb44b70700000002/relationships/Sds'}, {'rel': '/api/parent/relationship/protectionDomainId', 'href': '/api/instances/ProtectionDomain::da721a8300000000'}]}])
+ Details of fault sets.
+
+
+ protectionDomainId (, str, )
+ The ID of the protection domain.
+
+
+ name (, str, )
+ Fault set name.
+
+
+ id (, str, )
+ Fault set ID.
+
+
+
+ManagedDevices (when I(gather_subset) is I(managed_device), list, [{'refId': 'softwareOnlyServer-10.1.1.1', 'refType': None, 'ipAddress': '10.1.1.1', 'currentIpAddress': '10.1.1.1', 'serviceTag': 'VMware-42 15 a5 f9 65 e6 63 0e-36 79 59 73 7b 3a 68 cd-SW', 'model': 'VMware Virtual Platform', 'deviceType': 'SoftwareOnlyServer', 'discoverDeviceType': 'SOFTWAREONLYSERVER_CENTOS', 'displayName': 'vpi1011-c1n1', 'managedState': 'UNMANAGED', 'state': 'READY', 'inUse': False, 'serviceReferences': [], 'statusMessage': None, 'firmwareName': 'Default Catalog - PowerFlex 4.5.0.0', 'customFirmware': False, 'needsAttention': False, 'manufacturer': 'VMware, Inc.', 'systemId': None, 'health': 'RED', 'healthMessage': 'Inventory run failed.', 'operatingSystem': 'N/A', 'numberOfCPUs': 0, 'cpuType': None, 'nics': 0, 'memoryInGB': 0, 'infraTemplateDate': None, 'infraTemplateId': None, 'serverTemplateDate': None, 'serverTemplateId': None, 'inventoryDate': None, 'complianceCheckDate': '2024-02-05T18:31:31.213+00:00', 'discoveredDate': '2024-02-05T18:31:30.992+00:00', 'deviceGroupList': {'paging': None, 'deviceGroup': [{'link': None, 'groupSeqId': -1, 'groupName': 'Global', 'groupDescription': None, 'createdDate': None, 'createdBy': 'admin', 'updatedDate': None, 'updatedBy': None, 'managedDeviceList': None, 'groupUserList': None}]}, 'detailLink': {'title': 'softwareOnlyServer-10.1.1.1', 'href': '/AsmManager/ManagedDevice/softwareOnlyServer-10.1.1.1', 'rel': 'describedby', 'type': None}, 'credId': 'bc97cefb-5eb4-4c20-8e39-d1a2b809c9f5', 'compliance': 'NONCOMPLIANT', 'failuresCount': 0, 'chassisId': None, 'parsedFacts': None, 'config': None, 'hostname': 'vpi1011-c1n1', 'osIpAddress': None, 'osAdminCredential': None, 'osImageType': None, 'lastJobs': None, 'puppetCertName': 'red_hat-10.1.1.1', 'svmAdminCredential': None, 'svmName': None, 'svmIpAddress': None, 'svmImageType': None, 'flexosMaintMode': 0, 'esxiMaintMode': 0, 'vmList': []}])
+ Details of all devices from inventory.
+
+
+ deviceType (, str, )
+ Device Type.
+
+
+ serviceTag (, str, )
+ Service Tag.
+
+
+ serverTemplateId (, str, )
+ The ID of the server template.
+
+
+ state (, str, )
+ The state of the device.
+
+
+ managedState (, str, )
+ The managed state of the device.
+
+
+ compliance (, str, )
+ The compliance state of the device.
+
+
+ systemId (, str, )
+ The system ID.
+
+
+
+Deployments (when I(gather_subset) is I(deployment), list, [{'id': '8aaa80658cd602e0018cda8b257f78ce', 'deploymentName': 'Test-Update - K', 'deploymentDescription': 'Test-Update - K', 'deploymentValid': None, 'retry': False, 'teardown': False, 'teardownAfterCancel': False, 'removeService': False, 'createdDate': '2024-01-05T16:53:21.407+00:00', 'createdBy': 'admin', 'updatedDate': '2024-02-11T17:00:05.657+00:00', 'updatedBy': 'system', 'deploymentScheduledDate': None, 'deploymentStartedDate': '2024-01-05T16:53:22.886+00:00', 'deploymentFinishedDate': None, 'serviceTemplate': {'id': '8aaa80658cd602e0018cda8b257f78ce', 'templateName': 'block-only (8aaa80658cd602e0018cda8b257f78ce)', 'templateDescription': 'Storage - Software Only deployment', 'templateType': 'VxRack FLEX', 'templateVersion': '4.5.0.0', 'templateValid': {'valid': True, 'messages': []}, 'originalTemplateId': 'c44cb500-020f-4562-9456-42ec1eb5f9b2', 'templateLocked': False, 'draft': False, 'inConfiguration': False, 'createdDate': '2024-01-05T16:53:22.083+00:00', 'createdBy': None, 'updatedDate': '2024-02-09T06:00:09.602+00:00', 'lastDeployedDate': None, 'updatedBy': None, 'components': [{'id': '6def7edd-bae2-4420-93bf-9ceb051bbb65', 'componentID': 'component-scaleio-gateway-1', 'identifier': None, 'componentValid': {'valid': True, 'messages': []}, 'puppetCertName': 'scaleio-block-legacy-gateway', 'osPuppetCertName': None, 'name': 'block-legacy-gateway', 'type': 'SCALEIO', 'subType': 'STORAGEONLY', 'teardown': False, 'helpText': None, 'managementIpAddress': None, 'configFile': None, 'serialNumber': None, 'asmGUID': 'scaleio-block-legacy-gateway', 'relatedComponents': {'625b0e17-9b91-4bc0-864c-d0111d42d8d0': 'Node (Software Only)', '961a59eb-80c3-4a3a-84b7-2101e9831527': 'Node (Software Only)-2', 'bca710a5-7cdf-481e-b729-0b53e02873ee': 'Node (Software Only)-3'}, 'resources': [], 'refId': None, 'cloned': False, 'clonedFromId': None, 'manageFirmware': False, 'brownfield': False, 'instances': 1, 'clonedFromAsmGuid': None, 'ip': None}], 'category': 'block-only', 'allUsersAllowed': True, 'assignedUsers': [], 'manageFirmware': True, 'useDefaultCatalog': False, 'firmwareRepository': None, 'licenseRepository': None, 'configuration': None, 'serverCount': 3, 'storageCount': 1, 'clusterCount': 1, 'serviceCount': 0, 'switchCount': 0, 'vmCount': 0, 'sdnasCount': 0, 'brownfieldTemplateType': 'NONE', 'networks': [{'id': '8aaa80648cd5fb9b018cda46e4e50000', 'name': 'mgmt', 'description': '', 'type': 'SCALEIO_MANAGEMENT', 'vlanId': 850, 'static': True, 'staticNetworkConfiguration': {'gateway': '10.1.1.1', 'subnet': '1.1.1.0', 'primaryDns': '10.1.1.1', 'secondaryDns': '10.1.1.1', 'dnsSuffix': None, 'ipRange': [{'id': '8aaa80648cd5fb9b018cda46e5080001', 'startingIp': '10.1.1.1', 'endingIp': '10.1.1.1', 'role': None}], 'ipAddress': None, 'staticRoute': None}, 'destinationIpAddress': '10.1.1.1'}], 'blockServiceOperationsMap': {'scaleio-block-legacy-gateway': {'blockServiceOperationsMap': {}}}}, 'scheduleDate': None, 'status': 'complete', 'compliant': True, 'deploymentDevice': [{'refId': 'scaleio-block-legacy-gateway', 'refType': None, 'logDump': None, 'status': None, 'statusEndTime': None, 'statusStartTime': None, 'deviceHealth': 'GREEN', 'healthMessage': 'OK', 'compliantState': 'COMPLIANT', 'brownfieldStatus': 'NOT_APPLICABLE', 'deviceType': 'scaleio', 'deviceGroupName': None, 'ipAddress': 'block-legacy-gateway', 'currentIpAddress': '10.1.1.1', 'serviceTag': 'block-legacy-gateway', 'componentId': None, 'statusMessage': None, 'model': 'PowerFlex 
Gateway', 'cloudLink': False, 'dasCache': False, 'deviceState': 'READY', 'puppetCertName': 'scaleio-block-legacy-gateway', 'brownfield': False}], 'vms': None, 'updateServerFirmware': True, 'useDefaultCatalog': False, 'firmwareRepository': {'id': '8aaa80658cd602e0018cd996a1c91bdc', 'name': 'Intelligent Catalog 45.373.00', 'sourceLocation': None, 'sourceType': None, 'diskLocation': None, 'filename': None, 'md5Hash': None, 'username': None, 'password': None, 'downloadStatus': None, 'createdDate': None, 'createdBy': None, 'updatedDate': None, 'updatedBy': None, 'defaultCatalog': False, 'embedded': False, 'state': None, 'softwareComponents': [], 'softwareBundles': [], 'deployments': [], 'bundleCount': 0, 'componentCount': 0, 'userBundleCount': 0, 'minimal': False, 'downloadProgress': 0, 'extractProgress': 0, 'fileSizeInGigabytes': None, 'signedKeySourceLocation': None, 'signature': None, 'custom': False, 'needsAttention': False, 'jobId': None, 'rcmapproved': False}, 'firmwareRepositoryId': '8aaa80658cd602e0018cd996a1c91bdc', 'licenseRepository': None, 'licenseRepositoryId': None, 'individualTeardown': False, 'deploymentHealthStatusType': 'green', 'assignedUsers': [], 'allUsersAllowed': True, 'owner': 'admin', 'noOp': False, 'firmwareInit': False, 'disruptiveFirmware': False, 'preconfigureSVM': False, 'preconfigureSVMAndUpdate': False, 'servicesDeployed': 'NONE', 'precalculatedDeviceHealth': None, 'lifecycleModeReasons': [], 'jobDetails': None, 'numberOfDeployments': 0, 'operationType': 'NONE', 'operationStatus': None, 'operationData': None, 'deploymentValidationResponse': None, 'currentStepCount': None, 'totalNumOfSteps': None, 'currentStepMessage': None, 'customImage': 'os_sles', 'originalDeploymentId': None, 'currentBatchCount': None, 'totalBatchCount': None, 'templateValid': True, 'lifecycleMode': False, 'vds': False, 'scaleUp': False, 'brownfield': False, 'configurationChange': False}])
+ Details of all deployments.
+
+
+ id (, str, )
+ Deployment ID.
+
+
+ deploymentName (, str, )
+ Deployment name.
+
+
+ status (, str, )
+ The status of deployment.
+
+
+ firmwareRepository (, dict, )
+ The firmware repository.
+
+
+ signature (, str, )
+ The signature details.
+
+
+ downloadStatus (, str, )
+ The download status.
+
+
+ rcmapproved (, bool, )
+ If RCM approved.
+
+
+
+
+ServiceTemplates (when I(gather_subset) is I(service_template), list, [{'id': '2434144f-7795-4245-a04b-6fcb771697d7', 'templateName': 'Storage- 100Gb', 'templateDescription': 'Storage Only 4 Node deployment with 100Gb networking', 'templateType': 'VxRack FLEX', 'templateVersion': '4.5-213', 'templateValid': {'valid': True, 'messages': []}, 'originalTemplateId': 'ff80808177f880fc0177f883bf1e0027', 'templateLocked': True, 'draft': False, 'inConfiguration': False, 'createdDate': '2024-01-04T19:47:23.534+00:00', 'createdBy': 'system', 'updatedDate': None, 'lastDeployedDate': None, 'updatedBy': None, 'components': [{'id': '43dec024-85a9-4901-9e8e-fa0d3c417f7b', 'componentID': 'component-scaleio-gateway-1', 'identifier': None, 'componentValid': {'valid': True, 'messages': []}, 'puppetCertName': None, 'osPuppetCertName': None, 'name': 'PowerFlex Cluster', 'type': 'SCALEIO', 'subType': 'STORAGEONLY', 'teardown': False, 'helpText': None, 'managementIpAddress': None, 'configFile': None, 'serialNumber': None, 'asmGUID': None, 'relatedComponents': {'c5c46733-012c-4dca-af9b-af46d73d045a': 'Storage Only Node'}, 'resources': [], 'refId': None, 'cloned': False, 'clonedFromId': None, 'manageFirmware': False, 'brownfield': False, 'instances': 1, 'clonedFromAsmGuid': None, 'ip': None}], 'category': 'Sample Templates', 'allUsersAllowed': False, 'assignedUsers': [], 'manageFirmware': True, 'useDefaultCatalog': True, 'firmwareRepository': None, 'licenseRepository': None, 'configuration': None, 'serverCount': 4, 'storageCount': 0, 'clusterCount': 1, 'serviceCount': 0, 'switchCount': 0, 'vmCount': 0, 'sdnasCount': 0, 'brownfieldTemplateType': 'NONE', 'networks': [{'id': 'ff80808177f8823b0177f8bb82d80005', 'name': 'flex-data2', 'description': '', 'type': 'SCALEIO_DATA', 'vlanId': 105, 'static': True, 'staticNetworkConfiguration': {'gateway': None, 'subnet': '1.1.1.0', 'primaryDns': None, 'secondaryDns': None, 'dnsSuffix': None, 'ipRange': None, 'ipAddress': None, 'staticRoute': None}, 'destinationIpAddress': '1.1.1.0'}], 'blockServiceOperationsMap': {}}])
+ Details of all service templates.
+
+
+ templateName (, str, )
+ Template name.
+
+
+ templateDescription (, str, )
+ Template description.
+
+
+ templateType (, str, )
+ Template type.
+
+
+ templateVersion (, str, )
+ Template version.
+
+
+ category (, str, )
+ The template category.
+
+
+ serverCount (, int, )
+ Server count.
+
+
+
@@ -652,4 +855,6 @@ Authors
~~~~~~~
- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst b/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst
index 5131860e1..babb39b6c 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/mdm_cluster.rst
@@ -22,9 +22,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -177,7 +177,7 @@ Notes
.. note::
- Parameters *mdm_name* or *mdm_id* are mandatory for rename and modify virtual IP interfaces.
- Parameters *mdm_name* or *mdm_id* are not required while modifying performance profile.
- - For change MDM cluster ownership operation, only changed as True will be returned and for idempotency case MDM cluster details will be returned.
+ - For change MDM cluster ownership operation, only changed as true will be returned and for idempotency case MDM cluster details will be returned.
- Reinstall all SDC after changing ownership to some newly added MDM.
- To add manager standby MDM, MDM package must be installed with manager role.
- The *check_mode* is supported.
@@ -267,7 +267,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
mdm_name: "mdm_2"
- is_primary: True
+ is_primary: true
state: "present"
- name: Modify performance profile
@@ -300,7 +300,7 @@ Examples
port: "{{port}}"
mdm_name: "mdm_1"
virtual_ip_interface:
- - "ens224"
+ - "ens224"
state: "present"
- name: Clear virtual IP interface of the MDM
@@ -311,7 +311,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
mdm_name: "mdm_1"
- clear_interfaces: True
+ clear_interfaces: true
state: "present"
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst b/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst
index ca47cb518..84c640ef4 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/protection_domain.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst b/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst
index ceaca0d39..76d959026 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/replication_consistency_group.rst
@@ -12,7 +12,7 @@ replication_consistency_group -- Manage replication consistency groups on Dell P
Synopsis
--------
-Managing replication consistency groups on PowerFlex storage system includes getting details, creating, modifying, creating snapshots, pause, resume, freeze, unfreeze, activate, inactivate and deleting a replication consistency group.
+Managing replication consistency groups on PowerFlex storage system includes getting details, creating, modifying, creating snapshots, pausing, resuming, freezing, unfreezing, activating, failing over, reversing, restoring, synchronizing, switching over, inactivating and deleting a replication consistency group.
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -73,15 +73,41 @@ Parameters
pause (optional, bool, None)
Pause or resume the RCG.
+ This parameter is deprecated. Use rcg_state instead.
+
+
+ rcg_state (optional, str, None)
+ Specify an action for RCG.
+
+ Failover the RCG.
+
+ Reverse the RCG.
+
+ Restore the RCG.
+
+ Switchover the RCG.
+
+ Pause or resume the RCG.
+
+ Freeze or unfreeze the RCG.
+
+ Synchronize the RCG.
+
+
+ force (optional, bool, None)
+ Force switchover the RCG.
+
freeze (optional, bool, None)
Freeze or unfreeze the RCG.
+ This parameter is deprecated. Use rcg_state instead.
+
pause_mode (optional, str, None)
Pause mode.
- It is required if pause is set as True.
+ It is required if pause is set as true.
target_volume_access_mode (optional, str, None)
@@ -187,7 +213,7 @@ Notes
- The *check_mode* is supported.
- Idempotency is not supported for create snapshot operation.
- There is a delay in reflection of final state of RCG after few update operations on RCG.
- - In 3.6 and above, the replication consistency group will return back to consistent mode on changing to inconsistent mode if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True.
+ - In 3.6 and above, the replication consistency group will return to consistent mode after being set to inconsistent mode once a consistency barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as true.
- The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform.
@@ -217,7 +243,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_id: "{{rcg_id}}"
- create_snapshot: True
+ create_snapshot: true
state: "present"
- name: Create a replication consistency group
@@ -250,7 +276,7 @@ Examples
rpo: 60
target_volume_access_mode: "ReadOnly"
activity_mode: "Inactive"
- is_consistent: True
+ is_consistent: true
- name: Rename replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -270,7 +296,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- pause: True
+ rcg_state: "pause"
pause_mode: "StopDataTransfer"
- name: Resume replication consistency group
@@ -281,7 +307,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- pause: False
+ rcg_state: "resume"
- name: Freeze replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -291,7 +317,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- freeze: True
+ rcg_state: "freeze"
- name: UnFreeze replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -301,7 +327,57 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- freeze: False
+ rcg_state: "unfreeze"
+
+ - name: Failover replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "failover"
+
+ - name: Reverse replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "reverse"
+
+ - name: Restore replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "restore"
+
+ - name: Switchover replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "switchover"
+
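+  # Illustrative sketch (not part of the module's bundled examples): the force
+  # option documented above can be combined with a switchover; values are
+  # placeholders.
+  - name: Force switchover replication consistency group
+    dellemc.powerflex.replication_consistency_group:
+      hostname: "{{hostname}}"
+      username: "{{username}}"
+      password: "{{password}}"
+      validate_certs: "{{validate_certs}}"
+      port: "{{port}}"
+      rcg_name: "rcg_test"
+      rcg_state: "switchover"
+      force: true
+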
+ - name: Synchronize replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "sync"
- name: Delete replication consistency group
dellemc.powerflex.replication_consistency_group:
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst b/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst
index c83241cd3..254a2eb1d 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/replication_pair.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -174,8 +174,8 @@ Notes
.. note::
- The *check_mode* is supported.
+ - In 4.0 the creation of replication pair fails when *copy_type* is specified as ``OfflineCopy``.
- The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform.
- - In 4.0 the creation of replication pair fails when I(copy_type) is specified as C(OfflineCopy).
@@ -186,7 +186,6 @@ Examples
.. code-block:: yaml+jinja
-
- name: Get replication pair details
dellemc.powerflex.replication_pair:
hostname: "{{hostname}}"
@@ -224,11 +223,11 @@ Examples
copy_type: "OnlineCopy"
name: "pair1"
remote_peer:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
- name: Pause replication pair
dellemc.powerflex.replication_pair:
@@ -238,7 +237,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
pair_name: "pair1"
- pause: True
+ pause: true
- name: Resume replication pair
dellemc.powerflex.replication_pair:
@@ -248,7 +247,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
pair_name: "pair1"
- pause: False
+ pause: false
- name: Delete replication pair
dellemc.powerflex.replication_pair:
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst b/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst
index 585267891..ad375ebc7 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/sdc.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -58,6 +58,12 @@ Parameters
New name of the SDC. Used to rename the SDC.
+ performance_profile (optional, str, None)
+ Define the performance profile as *Compact* or *HighPerformance*.
+
+ The high performance profile configures a predefined set of parameters for very high performance use cases.
+
+
state (True, str, None)
State of the SDC.
@@ -130,6 +136,25 @@ Examples
sdc_new_name: "centos_sdc_renamed"
state: "present"
+ - name: Modify performance profile of SDC using SDC name
+ dellemc.powerflex.sdc:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ sdc_name: "centos_sdc"
+ performance_profile: "Compact"
+ state: "present"
+
+ - name: Remove SDC using SDC name
+ dellemc.powerflex.sdc:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ sdc_name: "centos_sdc"
+ state: "absent"
+
Return Values
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/sds.rst b/ansible_collections/dellemc/powerflex/docs/modules/sds.rst
index 81a1b3908..f5c29516f 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/sds.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/sds.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -109,6 +109,18 @@ Parameters
Default value by API is ``HighPerformance``.
+ fault_set_name (optional, str, None)
+ Name of the fault set.
+
+ Mutually exclusive with *fault_set_id*.
+
+
+ fault_set_id (optional, str, None)
+ Unique identifier of the fault set.
+
+ Mutually exclusive with *fault_set_name*.
+
+
state (True, str, None)
State of the SDS.
@@ -156,7 +168,7 @@ Notes
- There can be 1 or more IPs with role 'sdcOnly'.
- There must be only 1 IP with SDS role (either with role 'all' or 'sdsOnly').
- SDS can be created with RF cache disabled, but, be aware that the RF cache is not always updated. In this case, the user should re-try the operation.
- - The *check_mode* is not supported.
+ - The *check_mode* is supported.
- The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform.
@@ -192,12 +204,14 @@ Examples
port: "{{port}}"
sds_name: "node1"
protection_domain_name: "domain1"
+ fault_set_name: "faultset1"
sds_ip_list:
- ip: "198.10.xxx.xxx"
role: "sdcOnly"
sds_ip_state: "present-in-sds"
rmcache_enabled: true
rmcache_size: 128
performance_profile: "HighPerformance"
state: "present"
@@ -332,6 +346,9 @@ sds_details (When SDS exists, dict, {'authenticationError': 'None', 'certificate
Fault set ID.
+ faultSetName (, str, )
+ Name of the Fault set.
+
fglMetadataCacheSize (, int, )
FGL metadata cache size.
@@ -513,4 +530,5 @@ Authors
~~~~~~~
- Rajshree Khare (@khareRajshree) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst b/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst
index dafed946d..052453ad2 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/snapshot.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -206,7 +206,7 @@ Examples
validate_certs: "{{validate_certs}}"
snapshot_name: "ansible_snapshot"
vol_name: "ansible_volume"
- read_only: False
+ read_only: false
desired_retention: 2
state: "present"
@@ -227,9 +227,9 @@ Examples
validate_certs: "{{validate_certs}}"
snapshot_id: "fe6cb28200000007"
sdc:
- - sdc_ip: "198.10.xxx.xxx"
- - sdc_id: "663ac0d200000001"
- allow_multiple_mappings: True
+ - sdc_ip: "198.10.xxx.xxx"
+ - sdc_id: "663ac0d200000001"
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
@@ -241,13 +241,13 @@ Examples
validate_certs: "{{validate_certs}}"
snapshot_id: "fe6cb28200000007"
sdc:
- - sdc_ip: "198.10.xxx.xxx"
- iops_limit: 11
- bandwidth_limit: 4096
- - sdc_id: "663ac0d200000001"
- iops_limit: 20
- bandwidth_limit: 2048
- allow_multiple_mappings: True
+ - sdc_ip: "198.10.xxx.xxx"
+ iops_limit: 11
+ bandwidth_limit: 4096
+ - sdc_id: "663ac0d200000001"
+ iops_limit: 20
+ bandwidth_limit: 2048
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst b/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst
new file mode 100644
index 000000000..deab7f050
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/docs/modules/snapshot_policy.rst
@@ -0,0 +1,414 @@
+.. _snapshot_policy_module:
+
+
+snapshot_policy -- Manage snapshot policies on Dell PowerFlex
+=============================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing snapshot policies on PowerFlex storage system includes creating, getting details, modifying attributes, adding a source volume, removing a source volume and deleting a snapshot policy.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.8.0.
+- Python 3.9, 3.10 or 3.11.
+
+
+
+Parameters
+----------
+
+ snapshot_policy_name (optional, str, None)
+ The name of the snapshot policy.
+
+ It is unique across the PowerFlex array.
+
+ Mutually exclusive with *snapshot_policy_id*.
+
+
+ snapshot_policy_id (optional, str, None)
+ The unique identifier of the snapshot policy.
+
+ Except for the create operation, all other operations can be performed using *snapshot_policy_id*.
+
+ Mutually exclusive with *snapshot_policy_name*.
+
+
+ auto_snapshot_creation_cadence (optional, dict, None)
+ The auto snapshot creation cadence of the snapshot policy.
+
+
+ time (optional, int, None)
+ The time between creation of two snapshots.
+
+
+ unit (optional, str, Minute)
+ The unit of the auto snapshot creation cadence.
+
+
+
+ num_of_retained_snapshots_per_level (optional, list, None)
+ Number of retained snapshots per level.
+
+
+ new_name (optional, str, None)
+ New name of the snapshot policy.
+
+
+ access_mode (optional, str, None)
+ Access mode of the snapshot policy.
+
+
+ secure_snapshots (optional, bool, None)
+ Whether to secure snapshots or not.
+
+ Used only in the create operation.
+
+
+ source_volume (optional, list, None)
+ The source volume details to be added or removed.
+
+
+ id (optional, str, None)
+ The unique identifier of the source volume to be added or removed.
+
+ Mutually exclusive with *name*.
+
+
+ name (optional, str, None)
+ The name of the source volume to be added or removed.
+
+ Mutually exclusive with *id*.
+
+
+ auto_snap_removal_action (optional, str, None)
+ Ways to handle the snapshots created by the policy (auto snapshots).
+
+ Must be provided when *state* is set to ``absent``.
+
+
+ detach_locked_auto_snapshots (optional, bool, None)
+ Whether to detach the locked auto snapshots during removal of source volume.
+
+
+ state (optional, str, present)
+ The state of the source volume.
+
+ When ``present``, source volume will be added to the snapshot policy.
+
+ When ``absent``, source volume will be removed from the snapshot policy.
+
+
+
+ pause (optional, bool, None)
+ Whether to pause or resume the snapshot policy.
+
+
+ state (optional, str, present)
+ State of the snapshot policy.
+
+
+ hostname (True, str, None)
+ IP or FQDN of the PowerFlex host.
+
+
+ username (True, str, None)
+ The username of the PowerFlex host.
+
+
+ password (True, str, None)
+ The password of the PowerFlex host.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with PowerFlex host.
+
+
+ timeout (False, int, 120)
+ Time after which connection will get terminated.
+
+ It is to be mentioned in seconds.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+ - The modules present in the collection named as 'dellemc.powerflex' are built to support the Dell PowerFlex storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Create a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ access_mode: "READ_WRITE"
+ secure_snapshots: false
+ auto_snapshot_creation_cadence:
+ time: 1
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 20
+ state: "present"
+
+ - name: Get snapshot policy details using name
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+
+ - name: Get snapshot policy details using id
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_id: "snapshot_policy_id_1"
+
+ - name: Modify a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ auto_snapshot_creation_cadence:
+ time: 2
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 40
+
+ - name: Rename a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ new_name: "snapshot_policy_name_1_new"
+
+ - name: Add source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ source_volume:
+ - name: "source_volume_name_1"
+ - id: "source_volume_id_2"
+ state: "present"
+
+ - name: Remove source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ source_volume:
+ - name: "source_volume_name_1"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+ - id: "source_volume_id_2"
+ auto_snap_removal_action: 'Remove'
+ detach_locked_auto_snapshots: true
+ state: "absent"
+
+ - name: Pause a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ pause: true
+
+ - name: Resume a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ pause: false
+
+ - name: Delete a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name"
+ state: "absent"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, false)
+ Whether or not the resource has changed.
+
+
+snapshot_policy_details (When snapshot policy exists, dict, {'autoSnapshotCreationCadenceInMin': 120, 'id': '15ae842800000004', 'lastAutoSnapshotCreationFailureReason': 'NR', 'lastAutoSnapshotFailureInFirstLevel': False, 'links': [{'href': '/api/instances/SnapshotPolicy::15ae842800000004', 'rel': 'self'}, {'href': '/api/instances/SnapshotPolicy::15ae842800000004/relationships/Statistics', 'rel': '/api/SnapshotPolicy/relationship/Statistics'}, {'href': '/api/instances/SnapshotPolicy::15ae842800000004/relationships/SourceVolume', 'rel': '/api/SnapshotPolicy/relationship/SourceVolume'}, {'href': '/api/instances/SnapshotPolicy::15ae842800000004/relationships/AutoSnapshotVolume', 'rel': '/api/SnapshotPolicy/relationship/AutoSnapshotVolume'}, {'href': '/api/instances/System::0e7a082862fedf0f', 'rel': '/api/parent/relationship/systemId'}], 'maxVTreeAutoSnapshots': 40, 'name': 'Sample_snapshot_policy_1', 'nextAutoSnapshotCreationTime': 1683709201, 'numOfAutoSnapshots': 0, 'numOfCreationFailures': 0, 'numOfExpiredButLockedSnapshots': 0, 'numOfLockedSnapshots': 0, 'numOfRetainedSnapshotsPerLevel': [40], 'numOfSourceVolumes': 0, 'secureSnapshots': False, 'snapshotAccessMode': 'ReadWrite', 'snapshotPolicyState': 'Active', 'statistics': {'autoSnapshotVolIds': [], 'expiredButLockedSnapshotsIds': [], 'numOfAutoSnapshots': 0, 'numOfExpiredButLockedSnapshots': 0, 'numOfSrcVols': 0, 'srcVolIds': []}, 'systemId': '0e7a082862fedf0f', 'timeOfLastAutoSnapshot': 0, 'timeOfLastAutoSnapshotCreationFailure': 0})
+ Details of the snapshot policy.
+
+
+ autoSnapshotCreationCadenceInMin (, int, )
+ The snapshot rule of the snapshot policy.
+
+
+ id (, str, )
+ The ID of the snapshot policy.
+
+
+ lastAutoSnapshotCreationFailureReason (, str, )
+ The reason for the failure of the last auto snapshot creation.
+
+
+ name (, str, )
+ Name of the snapshot policy.
+
+
+ lastAutoSnapshotFailureInFirstLevel (, bool, )
+ Whether the last auto snapshot in first level failed.
+
+
+ maxVTreeAutoSnapshots (, int, )
+ Maximum number of VTree auto snapshots.
+
+
+ nextAutoSnapshotCreationTime (, int, )
+ The time of creation of the next auto snapshot.
+
+
+ numOfAutoSnapshots (, int, )
+ Number of auto snapshots.
+
+
+ numOfCreationFailures (, int, )
+ Number of creation failures.
+
+
+ numOfExpiredButLockedSnapshots (, int, )
+ Number of expired but locked snapshots.
+
+
+ numOfLockedSnapshots (, int, )
+ Number of locked snapshots.
+
+
+ numOfRetainedSnapshotsPerLevel (, list, )
+ Number of snapshots retained per level.
+
+
+ numOfSourceVolumes (, int, )
+ Number of source volumes.
+
+
+ secureSnapshots (, bool, )
+ Whether the snapshots are secured.
+
+
+ snapshotAccessMode (, str, )
+ Access mode of the snapshots.
+
+
+ snapshotPolicyState (, str, )
+ State of the snapshot policy.
+
+
+ systemId (, str, )
+ Unique identifier of the PowerFlex system.
+
+
+ timeOfLastAutoSnapshot (, str, )
+ Time of the last auto snapshot creation.
+
+
+ timeOfLastAutoSnapshotCreationFailure (, str, )
+ Time of the failure of the last auto snapshot creation.
+
+
+ statistics (, dict, )
+ Statistics details of the snapshot policy.
+
+
+ autoSnapshotVolIds (, list, )
+ Volume IDs of all the auto snapshots.
+
+
+ expiredButLockedSnapshotsIds (, list, )
+ IDs of expired but locked snapshots.
+
+
+ numOfAutoSnapshots (, int, )
+ Number of auto snapshots.
+
+
+ numOfExpiredButLockedSnapshots (, int, )
+ Number of expired but locked snapshots.
+
+
+ numOfSrcVols (, int, )
+ Number of source volumes.
+
+
+ srcVolIds (, list, )
+ IDs of the source volumes.
+
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst b/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst
index 9037ef73c..76d94966d 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/storagepool.rst
@@ -20,9 +20,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -134,7 +134,6 @@ Examples
.. code-block:: yaml+jinja
-
- name: Get the details of storage pool by name
dellemc.powerflex.storagepool:
hostname: "{{hostname}}"
@@ -173,8 +172,8 @@ Examples
validate_certs: "{{validate_certs}}"
storage_pool_name: "ansible_test_pool"
protection_domain_id: "1c957da800000000"
- use_rmcache: True
- use_rfcache: True
+ use_rmcache: true
+ use_rfcache: true
state: "present"
- name: Rename storage pool by id
diff --git a/ansible_collections/dellemc/powerflex/docs/modules/volume.rst b/ansible_collections/dellemc/powerflex/docs/modules/volume.rst
index 10cf8de84..f3345a6d1 100644
--- a/ansible_collections/dellemc/powerflex/docs/modules/volume.rst
+++ b/ansible_collections/dellemc/powerflex/docs/modules/volume.rst
@@ -22,9 +22,9 @@ Requirements
------------
The below requirements are needed on the host that executes this module.
-- A Dell PowerFlex storage system version 3.5 or later.
-- Ansible-core 2.12 or later.
-- PyPowerFlex 1.6.0.
+- A Dell PowerFlex storage system version 3.6 or later.
+- Ansible-core 2.14 or later.
+- PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
@@ -133,7 +133,7 @@ Parameters
allow_multiple_mappings (optional, bool, None)
Specifies whether to allow or not allow multiple mappings.
- If the volume is mapped to one SDC then for every new mapping *allow_multiple_mappings* has to be passed as True.
+ If the volume is mapped to one SDC then for every new mapping *allow_multiple_mappings* has to be passed as true.
sdc (optional, list, None)
@@ -188,13 +188,13 @@ Parameters
delete_snapshots (optional, bool, None)
- If ``True``, the volume and all its dependent snapshots will be deleted.
+ If ``true``, the volume and all its dependent snapshots will be deleted.
- If ``False``, only the volume will be deleted.
+ If ``false``, only the volume will be deleted.
It can be specified only when the *state* is ``absent``.
- It defaults to ``False``, if not specified.
+ It defaults to ``false``, if not specified.
state (True, str, None)
@@ -262,7 +262,7 @@ Examples
protection_domain_name: "pd_1"
vol_type: "THICK_PROVISIONED"
compression_type: "NORMAL"
- use_rmcache: True
+ use_rmcache: true
size: 16
state: "present"
@@ -274,7 +274,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- allow_multiple_mappings: True
+ allow_multiple_mappings: true
sdc:
- sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764"
access_mode: "READ_WRITE"
@@ -310,7 +310,7 @@ Examples
iops_limit: 20
- sdc_ip: "198.10.xxx.xxx"
access_mode: "READ_ONLY"
- allow_multiple_mappings: True
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
@@ -345,7 +345,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- delete_snapshots: False
+ delete_snapshots: false
state: "absent"
- name: Delete the Volume and all its dependent snapshots
@@ -356,7 +356,7 @@ Examples
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- delete_snapshots: True
+ delete_snapshots: true
state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/meta/runtime.yml b/ansible_collections/dellemc/powerflex/meta/runtime.yml
index 0e8263eda..6fa701d3a 100644
--- a/ansible_collections/dellemc/powerflex/meta/runtime.yml
+++ b/ansible_collections/dellemc/powerflex/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: ">=2.12"
+requires_ansible: ">=2.14.0"
plugin_routing:
modules:
dellemc_powerflex_gatherfacts:
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/device.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/device.yml
new file mode 100644
index 000000000..34bbb12b5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/device.yml
@@ -0,0 +1,90 @@
+---
+- name: Device Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+
+ tasks:
+ - name: Add a device
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ media_type: "HDD"
+ device_name: "device2"
+ storage_pool_name: "pool1"
+ protection_domain_name: "domain1"
+ external_acceleration_type: "ReadAndWrite"
+ state: "present"
+
+ - name: Add a device with force flag
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ media_type: "HDD"
+ device_name: "device2"
+ storage_pool_name: "pool1"
+ protection_domain_name: "domain1"
+ external_acceleration_type: "ReadAndWrite"
+ force: true
+ state: "present"
+
+ - name: Get device details using device_id
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ device_id: "d7fe088900000000"
+ state: "present"
+
+ - name: Get device details using (current_pathname, sds_name)
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node0"
+ state: "present"
+
+ - name: Get device details using (current_pathname, sds_id)
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ current_pathname: "/dev/sdb"
+ sds_id: "5717d71800000000"
+ state: "present"
+
+ - name: Remove a device using device_id
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ device_id: "76eb7e2f00010000"
+ state: "absent"
+
+ - name: Remove a device using (current_pathname, sds_name)
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/fault_set.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/fault_set.yml
new file mode 100644
index 000000000..ee61b3733
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/fault_set.yml
@@ -0,0 +1,67 @@
+---
+- name: Fault set Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ host_port: 443
+ fault_set_name: "sample_fault_set_name_1"
+ protection_domain_name: "Ansible-PD1"
+
+ tasks:
+ - name: Create fault set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ fault_set_name: "{{ fault_set_name }}"
+ protection_domain_name: "{{ protection_domain_name }}"
+
+ - name: Get fault set details using name and protection domain
+ register: result
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ fault_set_name: "{{ fault_set_name }}"
+ protection_domain_name: "{{ protection_domain_name }}"
+
+ - name: Get fault set details using ID
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ fault_set_id: "{{ result.fault_set_details.id }}"
+ state: "present"
+
+ - name: Rename fault set using ID
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ fault_set_id: "{{ result.fault_set_details.id }}"
+ fault_set_new_name: "fault_set_new_name"
+ state: "present"
+
+ - name: Delete fault set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ fault_set_name: "fault_set_new_name"
+ protection_domain_name: "{{ protection_domain_name }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/info.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/info.yml
new file mode 100644
index 000000000..b1a16785a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/info.yml
@@ -0,0 +1,80 @@
+---
+- name: Gatherfacts Module Operations on PowerFlex
+ hosts: localhost
+ connection: local
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ host_port: 443
+ tasks:
+ - name: Get detailed list of PowerFlex Entities
+ dellemc.powerflex.info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - vol
+ - storage_pool
+ - protection_domain
+ - sdc
+ - sds
+ - snapshot_policy
+ - device
+ - rcg
+ - replication_pair
+ - fault_set
+
+ - name: Get specific volume details
+ dellemc.powerflex.info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - vol
+ filters:
+ - filter_key: "name"
+ filter_operator: "equal"
+ filter_value: "ansible_test"
+
+ - name: Get specific fault set list
+ dellemc.powerflex.info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - fault_set
+ filters:
+ - filter_key: "name"
+ filter_operator: "equal"
+ filter_value: "node4"
+
+ - name: Get managed device, deployments, service templates
+ dellemc.powerflex.info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - managed_device
+ - deployment
+ - service_template
+
+ - name: Get deployment with filter, sort, pagination
+ dellemc.powerflex.info:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - deployment
+ filters:
+ - filter_key: "name"
+ filter_operator: "contains"
+ filter_value: "partial"
+ sort: name
+ limit: 10
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/mdm_cluster.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/mdm_cluster.yml
new file mode 100644
index 000000000..bdc44bdfa
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/mdm_cluster.yml
@@ -0,0 +1,148 @@
+---
+- name: MDM cluster Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ host_port: 443
+
+ tasks:
+ - name: Get MDM cluster
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ state: "present"
+
+ - name: Rename MDM cluster with check_mode
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_id: "1c13c3847c971201"
+ mdm_new_name: "node_renamed"
+ state: "present"
+ check_mode: true
+
+ - name: Rename MDM cluster
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_id: "1c13c3847c971201"
+ mdm_new_name: "node_renamed"
+ state: "present"
+
+ - name: Remove standby MDM
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_name: "mdm_node1"
+ state: "absent"
+
+ - name: Add standby MDM
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_name: "mdm_standby_node"
+ standby_mdm:
+ mdm_ips:
+ - "10.x.x.x"
+ role: "TieBreaker"
+ management_ips:
+ - "10.x.x.x"
+ port: 9011
+ state: "present"
+
+ - name: Change MDM cluster owner
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_name: "node_renamed"
+ is_primary: true
+ state: "present"
+
+ - name: Change virtual IP interface
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_name: "mdm_manager_node"
+ virtual_ip_interfaces:
+ - "ens224"
+ - "ens256"
+ state: "present"
+
+ - name: Clear virtual IP interface
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ mdm_name: "mdm_manager_node"
+ clear_interfaces: true
+ state: "present"
+
+ - name: Change Performance profile
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ performance_profile: "Compact"
+ state: "present"
+
+ - name: Switch cluster mode to FiveNodes
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ cluster_mode: "FiveNodes"
+ mdm:
+ - mdm_name: "mdm_manger_node"
+ mdm_type: "Secondary"
+ - mdm_name: "mdm_tiebreaker_node"
+ mdm_type: "TieBreaker"
+ mdm_state: "present-in-cluster"
+ state: "present"
+
+ - name: Switch cluster mode to ThreeNodes
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ cluster_mode: "ThreeNodes"
+ mdm:
+ - mdm_name: "mdm_manger_node"
+ mdm_type: "Secondary"
+ - mdm_name: "mdm_tiebreaker_node"
+ mdm_type: "TieBreaker"
+ mdm_state: "absent-in-cluster"
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/protection_domain.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/protection_domain.yml
new file mode 100644
index 000000000..5911a90aa
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/protection_domain.yml
@@ -0,0 +1,124 @@
+---
+- name: Protection domain Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ host_port: 443
+
+ tasks:
+ - name: Create Protection Domain
+ register: result
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ protection_domain_name: "domain_1"
+ network_limits:
+ rebuild_limit: 10240
+ rebalance_limit: 10240
+ vtree_migration_limit: 10240
+ overall_limit: 20480
+ bandwidth_unit: "KBps"
+ rf_cache_limits:
+ page_size: 32
+ pass_through_mode: "Write"
+ state: "present"
+
+ - name: Get Protection Domain
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ protection_domain_name: "domain_1"
+ state: "present"
+
+ - name: Set Protection Domain id
+ ansible.builtin.set_fact:
+ pd_id: "{{ result.protection_domain_details.id }}"
+
+ - name: Get Protection Domain using ID
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ protection_domain_id: "{{ pd_id }}"
+ state: "present"
+
+ - name: Modify Protection Domain using ID
+ register: result
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ is_active: false
+ protection_domain_id: "{{ pd_id }}"
+ protection_domain_new_name: "domain_1_renamed"
+ network_limits:
+ rebuild_limit: 12
+ rebalance_limit: 12
+ overall_limit: 22
+ bandwidth_unit: "GBps"
+ rf_cache_limits:
+ is_enabled: false
+ page_size: 16
+ max_io_limit: 128
+ pass_through_mode: "Write"
+ state: "present"
+
+ - name: Modify Protection Domain using ID - Idempotency
+ register: result
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ is_active: false
+ protection_domain_id: "{{ pd_id }}"
+ protection_domain_new_name: "domain_1_renamed"
+ network_limits:
+ rebuild_limit: 12
+ rebalance_limit: 12
+ overall_limit: 22
+ bandwidth_unit: "GBps"
+ rf_cache_limits:
+ is_enabled: false
+ page_size: 16
+ max_io_limit: 128
+ pass_through_mode: "Write"
+ state: "present"
+
+ - name: Delete Protection Domain using ID
+ register: result
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ protection_domain_id: "{{ pd_id }}"
+ state: "absent"
+
+ - name: Delete Protection Domain - Idempotency
+ register: result
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ protection_domain_name: "domain_1_renamed"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/replication_consistency_group.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/replication_consistency_group.yml
new file mode 100644
index 000000000..043808f53
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/replication_consistency_group.yml
@@ -0,0 +1,234 @@
+---
+- name: Replication consistency group operations on PowerFlex array.
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ remote_hostname: '**.**.**.**'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ rcg_name: "rcg_test"
+ rcg_id: "aadc17d900000002"
+
+ tasks:
+ - name: Get RCG details by name
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+
+ - name: Get RCG details by id
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_id: "{{ rcg_id }}"
+ state: "present"
+
+ - name: Create an RCG snapshot - check mode
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ create_snapshot: true
+ check_mode: true
+
+ - name: Create an RCG snapshot
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ create_snapshot: true
+
+ - name: Create an RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rpo: 120
+ protection_domain_name: "domain1"
+ activity_mode: "Active"
+ remote_peer:
+ hostname: "{{ remote_hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ protection_domain_name: "domain1"
+
+ - name: Modify RCG rpo
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rpo: 50
+
+ - name: Modify RCG target volume access mode
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ target_volume_access_mode: "ReadOnly"
+
+ - name: Pause RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: 'pause'
+ pause_mode: "StopDataTransfer"
+
+ - name: Resume RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: 'resume'
+
+ - name: Freeze RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: 'freeze'
+
+ - name: UnFreeze RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: 'unfreeze'
+
+ - name: Set RCG as consistent
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ is_consistent: true
+
+ - name: Inactivate RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ activity_mode: "InActive"
+
+ - name: Rename RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ new_rcg_rename: "rename_rcg"
+
+ - name: Delete RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ state: "absent"
+
+ - name: Failover RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: "failover"
+
+ - name: Restore RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: "restore"
+
+ - name: Switchover RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: "switchover"
+
+ - name: Reverse RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: "reverse"
+
+ - name: Force switchover RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: "switchover"
+ force: true
+
+ - name: Synchronize RCG
+ register: result
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ rcg_name: "{{ rcg_name }}"
+ rcg_state: "sync"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/replication_pair.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/replication_pair.yml
new file mode 100644
index 000000000..b24313843
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/replication_pair.yml
@@ -0,0 +1,82 @@
+---
+- name: Replication pair operations on PowerFlex array.
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ remote_hostname: '**.**.**.**'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ rcg_name: "test_rcg"
+ rcg_id: "aadc17d900000002"
+
+ tasks:
+ - name: Get replication pair details by name
+ register: result
+ dellemc.powerflex.replication_pair:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pair_name: "test_pair"
+
+ - name: Add replication pair to RCG
+ register: result
+ dellemc.powerflex.replication_pair:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pairs:
+ - source_volume_name: "ans_test_vol"
+ target_volume_name: "ans_env8_vol"
+ copy_type: "OnlineCopy"
+ name: "test_pair"
+ rcg_name: "test_rcg"
+ remote_peer:
+ hostname: "{{ remote_hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Pause Replication pair
+ register: result
+ dellemc.powerflex.replication_pair:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pair_name: "test_pair"
+ pause: true
+
+ - name: Resume Replication pair
+ register: result
+ dellemc.powerflex.replication_pair:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pair_name: "test_pair"
+ pause: false
+
+ - name: Delete replication pair
+ register: result
+ dellemc.powerflex.replication_pair:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pair_name: "test_pair"
+ state: "absent"
+
+ - name: Delete replication pair - Idempotency
+ register: result
+ dellemc.powerflex.replication_pair:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pair_name: "test_pair"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/sdc.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/sdc.yml
new file mode 100644
index 000000000..b796f02da
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/sdc.yml
@@ -0,0 +1,64 @@
+---
+- name: SDC Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ sdc_name: test_sdc
+
+ tasks:
+ - name: Get sdc details using sdc ip
+ register: result
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_ip: "1.1.1.1"
+ state: "present"
+
+ - name: Set sdc id
+ ansible.builtin.set_fact:
+ sdc_id: "{{ result.sdc_details.id }}"
+
+ - name: Rename sdc using sdc id
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_id: "{{ sdc_id }}"
+ sdc_new_name: "{{ sdc_name }}"
+ state: "present"
+
+ - name: Get sdc details using sdc name
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_name: "{{ sdc_name }}"
+ state: "present"
+
+ - name: Set performance profile of sdc using sdc name
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_name: "{{ sdc_name }}"
+ performance_profile: "HighPerformance"
+ state: "present"
+
+ - name: Remove sdc
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_name: "{{ sdc_name }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/sds.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/sds.yml
new file mode 100644
index 000000000..30a9b0906
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/sds.yml
@@ -0,0 +1,149 @@
+---
+- name: SDS Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ host_port: 443
+ sds_name_1: "sds_node_1"
+ sds_name_2: "sds_node_2"
+ sds_name_1_new: "sds_node1_new"
+ protection_domain_name: "Ansible-PD1"
+
+ tasks:
+ - name: Create SDS
+ register: result
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_1 }}"
+ protection_domain_name: "{{ protection_domain_name }}"
+ sds_ip_list:
+ - ip: '**.**.**.**'
+ role: "all"
+ sds_ip_state: "present-in-sds"
+ state: "present"
+
+ - name: Create SDS with all parameters
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_2 }}"
+ fault_set_name: "sample_fault_set"
+ protection_domain_name: "{{ protection_domain_name }}"
+ sds_ip_list:
+ - ip: '**.**.**.**'
+ role: "all"
+ sds_ip_state: "present-in-sds"
+ rmcache_enabled: true
+ rmcache_size: 210
+ state: "present"
+
+ - name: Get SDS details using name
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_2 }}"
+ state: "present"
+
+ - name: Get SDS details using ID
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_id: "{{ result.sds_details.id }}"
+ state: "present"
+
+ - name: Modify SDS attributes using name
+ register: result
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_1 }}"
+ sds_new_name: "{{ sds_name_1_new }}"
+ rfcache_enabled: false
+ rmcache_enabled: true
+ rmcache_size: 256
+ performance_profile: "HighPerformance"
+ state: "present"
+
+ - name: Modify SDS attributes using ID
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_id: "{{ result.sds_details.id }}"
+ sds_new_name: "{{ sds_name_1 }}"
+ rfcache_enabled: true
+ rmcache_enabled: true
+ rmcache_size: 210
+ performance_profile: "Compact"
+ state: "present"
+
+ - name: Add IP and role to an SDS
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_1 }}"
+ sds_ip_list:
+ - ip: '**.**.**.**'
+ role: "sdcOnly"
+ sds_ip_state: "present-in-sds"
+ state: "present"
+
+ - name: Remove IP and role from an SDS
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_1 }}"
+ sds_ip_list:
+ - ip: '**.**.**.**'
+ role: "sdcOnly"
+ sds_ip_state: "absent-in-sds"
+ state: "present"
+
+ - name: Delete SDS using name
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_name: "{{ sds_name_2 }}"
+ state: "absent"
+
+ - name: Delete SDS using ID
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ host_port }}"
+ sds_id: "{{ result.sds_details.id }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/snapshot.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/snapshot.yml
new file mode 100644
index 000000000..33975539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/snapshot.yml
@@ -0,0 +1,130 @@
+---
+- name: Snapshot Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+
+ tasks:
+ - name: Create snapshot
+ register: result
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "ansible_snapshot_1"
+ vol_name: "ansible_volume"
+ state: "present"
+
+ - name: Set snapshot id
+ ansible.builtin.set_fact:
+ snapshot_id: "{{ result.snapshot_details.id }}"
+
+ - name: Create snapshot with retention
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "ansible_snapshot_2"
+ vol_name: "ansible_volume"
+ desired_retention: 2
+ state: "present"
+
+ - name: Get snapshot details using snapshot id
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ state: "present"
+
+ - name: Modify the retention
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "ansible_snapshot_2"
+ desired_retention: 4
+ state: "present"
+
+ - name: Map snapshot to SDC
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ - sdc_id: "663ac0d200000001"
+ allow_multiple_mappings: true
+ sdc_state: "mapped"
+ state: "present"
+
+ - name: Modify the attributes of SDC mapped to snapshot
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ iops_limit: 11
+ bandwidth_limit: 4096
+ - sdc_id: "663ac0d200000001"
+ iops_limit: 20
+ bandwidth_limit: 2048
+ sdc_state: "mapped"
+ state: "present"
+
+ - name: Extend the size of snapshot
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ size: 16
+ state: "present"
+
+ - name: Unmap SDCs from snapshot
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ - sdc_id: "663ac0d200000001"
+ sdc_state: "unmapped"
+ state: "present"
+
+ - name: Rename snapshot
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ snapshot_new_name: "ansible_renamed_snapshot"
+ state: "present"
+
+ - name: Delete snapshot
+ dellemc.powerflex.snapshot:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ remove_mode: "ONLY_ME"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/snapshot_policy.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/snapshot_policy.yml
new file mode 100644
index 000000000..fd83a2a60
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/snapshot_policy.yml
@@ -0,0 +1,215 @@
+---
+- name: Snapshot Policy operations on PowerFlex array.
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ snapshot_policy_name: "Ansible_snap_policy_1"
+ source_volume_name1: "Ansible_volume_4"
+ source_volume_name2: "Ansible_volume_5"
+ snapshot_policy_name_new: "Ansible_snap_policy_1_new"
+
+ tasks:
+ - name: Create a snapshot policy in check mode
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+ access_mode: "READ_WRITE"
+ secure_snapshots: false
+ auto_snapshot_creation_cadence:
+ time: 1
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 20
+ state: "present"
+ check_mode: true
+
+ - name: Create a snapshot policy
+ register: result
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+ access_mode: "READ_WRITE"
+ secure_snapshots: false
+ auto_snapshot_creation_cadence:
+ time: 1
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 20
+ state: "present"
+
+ - name: Get snapshot policy details using name
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+
+ - name: Get snapshot policy details using id
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_id: "{{ result.snapshot_policy_details.id }}"
+
+ - name: Modify a snapshot policy - check mode
+ register: result
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+ auto_snapshot_creation_cadence:
+ time: 2
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 40
+ check_mode: true
+
+ - name: Modify a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+ auto_snapshot_creation_cadence:
+ time: 2
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 40
+
+ - name: Rename a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+ new_name: "{{ snapshot_policy_name_new }}"
+
+ - name: Add source volume - check mode
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ source_volume:
+ - name: "{{ source_volume_name1 }}"
+ - name: "{{ source_volume_name2 }}"
+ state: "present"
+ check_mode: true
+
+ - name: Add source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ source_volume:
+ - name: "{{ source_volume_name1 }}"
+ - name: "{{ source_volume_name2 }}"
+ state: "present"
+
+ - name: Remove source volume - check mode
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ source_volume:
+ - name: "{{ source_volume_name1 }}"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+ - name: "{{ source_volume_name2 }}"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+ check_mode: true
+
+ - name: Remove source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ source_volume:
+ - name: "{{ source_volume_name1 }}"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+ - name: "{{ source_volume_name2 }}"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+
+ - name: Pause snapshot policy - check mode
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ pause: true
+ check_mode: true
+
+ - name: Pause snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ pause: true
+
+ - name: Resume snapshot policy - check mode
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ pause: false
+ check_mode: true
+
+ - name: Resume snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ pause: false
+
+ - name: Delete snapshot policy - check mode
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ state: "absent"
+ check_mode: true
+
+ - name: Delete snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_policy_name: "{{ snapshot_policy_name_new }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml
new file mode 100644
index 000000000..7e7860a0a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/storagepool.yml
@@ -0,0 +1,61 @@
+---
+- name: Storage Pool Module Operations on PowerFlex
+ hosts: localhost
+ connection: local
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ pool_name: "ansible_test_pool6"
+ protection_domain_name: "domain1"
+
+ tasks:
+ - name: Create a new Storage pool
+ register: result
+ dellemc.powerflex.storagepool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ storage_pool_name: "{{ pool_name }}"
+ protection_domain_name: "{{ protection_domain_name }}"
+ media_type: "HDD"
+ state: "present"
+
+ - name: Set pool id
+ ansible.builtin.set_fact:
+ pool_id: "{{ result.storage_pool_details.id }}"
+
+ - name: Rename Storage pool by id
+ dellemc.powerflex.storagepool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ storage_pool_id: "{{ pool_id }}"
+ storage_pool_new_name: "new_ansible_pool"
+ state: "present"
+
+ - name: Restore the name of Storage pool by id
+ dellemc.powerflex.storagepool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ storage_pool_id: "{{ pool_id }}"
+ storage_pool_new_name: "{{ pool_name }}"
+ state: "present"
+
+
+ - name: Modify a Storage pool by name
+ dellemc.powerflex.storagepool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ storage_pool_name: "ansible_test_pool"
+ protection_domain_name: "{{ protection_domain_name }}"
+ use_rmcache: true
+ use_rfcache: true
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/modules/volume.yml b/ansible_collections/dellemc/powerflex/playbooks/modules/volume.yml
new file mode 100644
index 000000000..da5f255bb
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/modules/volume.yml
@@ -0,0 +1,133 @@
+---
+- name: Volume operations on PowerFlex array.
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ hostname: 'x.x.x.x'
+ username: 'admin'
+ password: 'Password'
+ validate_certs: false
+ protection_domain_name: "domain1"
+ storage_pool_name: "pool1"
+ snapshot_policy_name: "sample_snap_policy_1"
+ vol_name: "sample_ansible_volume_20"
+
+ tasks:
+ - name: Create a volume
+ register: result
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ storage_pool_name: "{{ storage_pool_name }}"
+ protection_domain_name: "{{ protection_domain_name }}"
+ snapshot_policy_name: "{{ snapshot_policy_name }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ - sdc_id: "663ac0d200000001"
+ allow_multiple_mappings: true
+ sdc_state: "mapped"
+ size: 8
+ state: "present"
+
+ - name: Set volume id
+ ansible.builtin.set_fact:
+ vol_id: "{{ result.volume_details.id }}"
+
+ - name: Get volume details using volume id
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ state: "present"
+
+ - name: Get volume details using volume name
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ state: "present"
+
+ - name: Modify the size
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ size: 16
+ state: "present"
+
+ - name: Map volume to SDC and remove snapshot policy
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ - sdc_id: "663ac0d200000001"
+ allow_multiple_mappings: true
+ snapshot_policy_name: ""
+ auto_snap_remove_type: "remove"
+ sdc_state: "mapped"
+ state: "present"
+
+ - name: Modify the attributes of SDC mapped to volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ iops_limit: 11
+ bandwidth_limit: 4096
+ - sdc_id: "663ac0d200000001"
+ iops_limit: 20
+ bandwidth_limit: 2048
+ allow_multiple_mappings: true
+ sdc_state: "mapped"
+ state: "present"
+
+ - name: Unmap SDCs from volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ sdc:
+ - sdc_ip: '**.**.**.**'
+ - sdc_id: "663ac0d200000001"
+ sdc_state: "unmapped"
+ state: "present"
+
+ - name: Rename volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ vol_new_name: "ansible_renamed_volume"
+ state: "present"
+
+ - name: Delete volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ delete_snapshots: true
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/group_vars/all b/ansible_collections/dellemc/powerflex/playbooks/roles/group_vars/all
new file mode 100644
index 000000000..1031958fc
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/group_vars/all
@@ -0,0 +1,51 @@
+powerflex_common_file_install_location: "/var/tmp"
+powerflex_common_esxi_files_location: "/tmp/"
+powerflex_common_win_package_location: "C:\\Windows\\Temp"
+# powerflex sdc params
+powerflex_sdc_driver_sync_repo_address: 'ftp://ftp.emc.com/'
+powerflex_sdc_driver_sync_repo_user: 'QNzgdxXix'
+powerflex_sdc_driver_sync_repo_password: 'Aw3wFAwAq3'
+powerflex_sdc_driver_sync_repo_local_dir: '/bin/emc/scaleio/scini_sync/driver_cache/'
+powerflex_sdc_driver_sync_user_private_rsa_key_src: ''
+powerflex_sdc_driver_sync_user_private_rsa_key_dest: '/bin/emc/scaleio/scini_sync/scini_key'
+powerflex_sdc_driver_sync_repo_public_rsa_key_src: ''
+powerflex_sdc_driver_sync_repo_public_rsa_key_dest: '/bin/emc/scaleio/scini_sync/scini_repo_key.pub'
+powerflex_sdc_driver_sync_module_sigcheck: 1
+powerflex_sdc_driver_sync_emc_public_gpg_key_src: ../../../files/RPM-GPG-KEY-powerflex_2.0.*.0
+powerflex_sdc_driver_sync_emc_public_gpg_key_dest: '/bin/emc/scaleio/scini_sync/emc_key.pub'
+powerflex_sdc_driver_sync_sync_pattern: .*
+powerflex_sdc_state: present
+# powerflex mdm role params
+powerflex_mdm_state: present
+powerflex_mdm_password: 'Password123'
+powerflex_mdm_cert_password: 'Password123!'
+i_am_sure: 1
+powerflex_mdm_virtual_ip: ''
+# powerflex lia params
+powerflex_lia_state: present
+powerflex_lia_token: Cluster1!
+# powerflex tb params
+powerflex_tb_state: present
+powerflex_tb_cert_password: "{{ powerflex_mdm_cert_password }}"
+# powerflex sds params
+powerflex_sds_number: 1
+powerflex_sds_disks: { ansible_available_disks: ['/dev/sdb'] }
+powerflex_sds_disks_type: HDD
+powerflex_sds_protection_domain: domain1
+powerflex_sds_storage_pool: pool1
+powerflex_sds_role: all
+powerflex_sds_device_media_type: HDD
+powerflex_sds_device_name: '/dev/sdb'
+powerflex_sds_external_acceleration_type: ReadAndWrite
+powerflex_sds_state: present
+# powerflex webui params
+powerflex_webui_state: present
+# powerflex gateway role params
+powerflex_gateway_state: present
+powerflex_gateway_admin_password: Password123
+powerflex_gateway_http_port: 80
+powerflex_gateway_https_port: 443
+powerflex_gateway_is_redundant: false
+# powerflex sdr params
+powerflex_protection_domain_name: domain1
+powerflex_storage_pool_name: pool1
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/host_vars/node2 b/ansible_collections/dellemc/powerflex/playbooks/roles/host_vars/node2
new file mode 100644
index 000000000..20a36d660
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/host_vars/node2
@@ -0,0 +1,11 @@
+# SDC params
+powerflex_sdc_name: sdc_test
+powerflex_sdc_performance_profile: Compact
+#TB params
+powerflex_tb_primary_name: primary_tb
+powerflex_tb_secondary_name: secondary_tb
+powerflex_tb_cluster_mode: "ThreeNodes"
+powerflex_protection_domain_name: "domain1"
+powerflex_fault_sets: ['fs1','fs2','fs3']
+powerflex_media_type: 'SSD' # When version is R3
+powerflex_storage_pool_name: "pool1"
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/inventory b/ansible_collections/dellemc/powerflex/playbooks/roles/inventory
new file mode 100644
index 000000000..24fc6f241
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/inventory
@@ -0,0 +1,41 @@
+node0 ansible_host=10.1.1.1 ansible_port=22 ansible_ssh_pass=password ansible_user=root
+node1 ansible_host=10.x.x.x ansible_port=22 ansible_ssh_pass=password ansible_user=root
+node2 ansible_host=10.x.x.y ansible_port=22 ansible_ssh_pass=password ansible_user=root
+
+[activemq]
+node0
+node1
+
+[lia]
+node0
+node1
+node2
+
+[mdm]
+node0
+node1
+
+[gateway]
+node2
+
+[tb]
+node2
+
+[config]
+node1
+
+[sdc]
+node2
+
+[sds]
+node0
+node1
+node2
+
+[webui]
+node1
+
+[sdr]
+node0
+node1
+node2
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/site.yml b/ansible_collections/dellemc/powerflex/playbooks/roles/site.yml
new file mode 100644
index 000000000..7f325d4d5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/site.yml
@@ -0,0 +1,64 @@
+---
+- name: "Install PowerFlex Common"
+ hosts: all
+ roles:
+ - powerflex_common
+
+- name: Install and configure PowerFlex MDM
+ hosts: mdm
+ roles:
+ - powerflex_mdm
+
+- name: Install and configure PowerFlex gateway
+ hosts: gateway
+ roles:
+ - powerflex_gateway
+
+- name: Install and configure PowerFlex TB
+ hosts: tb
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_tb
+
+- name: Configure protection domain, fault set and storage pool.
+ hosts: config
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_config
+
+- name: Install and configure PowerFlex Web UI
+ hosts: webui
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_webui
+
+- name: Install and configure PowerFlex SDC
+ hosts: sdc
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdc
+
+- name: Install and configure PowerFlex LIA
+ hosts: lia
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_lia
+
+- name: Install and configure PowerFlex SDS
+ hosts: sds
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sds
+
+- name: Install PowerFlex SDR
+ hosts: sdr
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdr
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/site_powerflex45.yml b/ansible_collections/dellemc/powerflex/playbooks/roles/site_powerflex45.yml
new file mode 100644
index 000000000..bd75f6bbe
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/site_powerflex45.yml
@@ -0,0 +1,59 @@
+---
+- name: "Install PowerFlex Common"
+ hosts: all
+ roles:
+ - powerflex_common
+
+- name: Install and configure PowerFlex ActiveMQ
+ hosts: activemq
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_activemq
+
+- name: Install and configure PowerFlex LIA
+ hosts: lia
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_lia
+
+- name: Install and configure PowerFlex MDM
+ hosts: mdm
+ roles:
+ - powerflex_mdm
+
+- name: Install and configure PowerFlex TB
+ hosts: tb
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_tb
+
+- name: Configure protection domain, fault set and storage pool.
+ hosts: config
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_config
+
+- name: Install and configure PowerFlex SDC
+ hosts: sdc
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdc
+
+- name: Install and configure PowerFlex SDS
+ hosts: sds
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sds
+
+- name: Install PowerFlex SDR
+ hosts: sdr
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdr
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex.yml b/ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex.yml
new file mode 100644
index 000000000..464cca789
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex.yml
@@ -0,0 +1,64 @@
+---
+- name: Uninstall PowerFlex SDC
+ hosts: sdc
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdc
+ vars:
+ powerflex_sdc_state: absent
+
+- name: Uninstall PowerFlex SDS
+ hosts: sds
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sds
+ vars:
+ powerflex_sds_state: absent
+
+- name: Uninstall PowerFlex SDR
+ hosts: sdr
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdr
+ vars:
+ powerflex_sdr_state: absent
+
+- name: Uninstall PowerFlex LIA
+ hosts: lia
+ roles:
+ - powerflex_lia
+ vars:
+ powerflex_lia_state: absent
+
+- name: Uninstall PowerFlex web UI
+ hosts: webui
+ roles:
+ - powerflex_webui
+ vars:
+ powerflex_webui_state: absent
+
+- name: Uninstall PowerFlex TB
+ hosts: tb
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_tb
+ vars:
+ powerflex_tb_state: absent
+
+- name: Uninstall PowerFlex gateway
+ hosts: gateway
+ roles:
+ - powerflex_gateway
+ vars:
+ powerflex_gateway_state: absent
+
+- name: Uninstall PowerFlex MDM
+ hosts: mdm
+ roles:
+ - powerflex_mdm
+ vars:
+ powerflex_mdm_state: absent
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex45.yml b/ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex45.yml
new file mode 100644
index 000000000..2d36b5bc0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/uninstall_powerflex45.yml
@@ -0,0 +1,61 @@
+---
+- name: Uninstall PowerFlex SDC
+ hosts: sdc
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdc
+ vars:
+ powerflex_sdc_state: absent
+
+- name: Uninstall PowerFlex SDS
+ hosts: sds
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sds
+ vars:
+ powerflex_sds_state: absent
+
+- name: Uninstall PowerFlex SDR
+ hosts: sdr
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_sdr
+ vars:
+ powerflex_sdr_state: absent
+
+- name: Uninstall PowerFlex TB
+ hosts: tb
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_tb
+ vars:
+ powerflex_tb_state: absent
+
+- name: Uninstall PowerFlex MDM
+ hosts: mdm
+ roles:
+ - powerflex_mdm
+ vars_files:
+ - vars_files/connection.yml
+ vars:
+ powerflex_mdm_state: absent
+
+- name: Uninstall PowerFlex LIA
+ hosts: lia
+ roles:
+ - powerflex_lia
+ vars:
+ powerflex_lia_state: absent
+
+- name: Uninstall PowerFlex ActiveMQ
+ hosts: activemq
+ vars:
+ powerflex_activemq_state: absent
+ vars_files:
+ - vars_files/connection.yml
+ roles:
+ - powerflex_activemq
diff --git a/ansible_collections/dellemc/powerflex/playbooks/roles/vars_files/connection.yml b/ansible_collections/dellemc/powerflex/playbooks/roles/vars_files/connection.yml
new file mode 100644
index 000000000..98bc8526b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/playbooks/roles/vars_files/connection.yml
@@ -0,0 +1,8 @@
+---
+# common params
+hostname: 10.XX.XX.XX
+username: 'user'
+password: 'password'
+validate_certs: false
+port: 443
+timeout: 120
diff --git a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py
index 349680345..0c0e0d9e1 100644
--- a/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py
+++ b/ansible_collections/dellemc/powerflex/plugins/doc_fragments/powerflex.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2020, Dell Technologies.
+# Copyright: (c) 2024, Dell Technologies.
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
from __future__ import absolute_import, division, print_function
@@ -48,12 +48,12 @@ class ModuleDocFragment(object):
- Time after which connection will get terminated.
- It is to be mentioned in seconds.
type: int
- required: False
+ required: false
default: 120
requirements:
- - A Dell PowerFlex storage system version 3.5 or later.
- - Ansible-core 2.12 or later.
- - PyPowerFlex 1.6.0.
+ - A Dell PowerFlex storage system version 3.6 or later.
+ - Ansible-core 2.14 or later.
+ - PyPowerFlex 1.9.0.
- Python 3.9, 3.10 or 3.11.
notes:
- The modules present in the collection named as 'dellemc.powerflex'
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/__init__.py
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py
new file mode 100644
index 000000000..b7ca3ec9a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/configuration.py
@@ -0,0 +1,121 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('configuration')
+
+
+class Configuration:
+
+ """
+ The configuration SDK class with shared configuration operations.
+ """
+
+ def __init__(self, powerflex_conn, module):
+ """
+ Initialize the configuration class
+ :param powerflex_conn: The PowerFlex connection object
+ :param module: Ansible module object
+ """
+ self.module = module
+ self.powerflex_conn = powerflex_conn
+
+ def get_protection_domain(
+ self, protection_domain_name=None, protection_domain_id=None
+ ):
+ """
+ Get protection domain details
+ :param protection_domain_name: Name of the protection domain
+ :param protection_domain_id: ID of the protection domain
+ :return: Protection domain details if it exists
+ :rtype: dict
+ """
+
+ name_or_id = (
+ protection_domain_id if protection_domain_id else protection_domain_name
+ )
+
+ try:
+ if protection_domain_id:
+ pd_details = self.powerflex_conn.protection_domain.get(
+ filter_fields={"id": protection_domain_id}
+ )
+
+ else:
+ pd_details = self.powerflex_conn.protection_domain.get(
+ filter_fields={"name": protection_domain_name}
+ )
+
+ if len(pd_details) == 0:
+ error_msg = (
+ "Unable to find the protection domain with " "'%s'." % name_or_id
+ )
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ return pd_details[0]
+
+ except Exception as e:
+ error_msg = (
+ "Failed to get the protection domain '%s' with "
+ "error '%s'" % (name_or_id, str(e))
+ )
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_fault_set(self, fault_set_name=None, fault_set_id=None, protection_domain_id=None):
+ """Get fault set details
+ :param fault_set_name: Name of the fault set
+ :param fault_set_id: Id of the fault set
+ :param protection_domain_id: ID of the protection domain
+ :return: Fault set details
+ :rtype: dict
+ """
+ name_or_id = fault_set_id if fault_set_id \
+ else fault_set_name
+ try:
+ fs_details = {}
+ if fault_set_id:
+ fs_details = self.powerflex_conn.fault_set.get(
+ filter_fields={'id': name_or_id})
+
+ if fault_set_name:
+ fs_details = self.powerflex_conn.fault_set.get(
+ filter_fields={'name': name_or_id, 'protectionDomainId': protection_domain_id})
+
+ if not fs_details:
+ msg = f"Unable to find the fault set with {name_or_id}"
+ LOG.info(msg)
+ return None
+
+ return fs_details[0]
+
+ except Exception as e:
+ error_msg = f"Failed to get the fault set '{name_or_id}' with error '{str(e)}'"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_associated_sds(self, fault_set_id=None):
+ """Get associated SDS to a fault set
+ :param fault_set_id: Id of the fault set
+ :return: Associated SDS details
+ :rtype: dict
+ """
+ try:
+ if fault_set_id:
+ sds_details = self.powerflex_conn.fault_set.get_sdss(
+ fault_set_id=fault_set_id)
+
+ return sds_details
+
+ except Exception as e:
+ error_msg = f"Failed to get the associated SDS with error '{str(e)}'"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
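A minimal usage sketch for the shared Configuration helper added above. Only Configuration, get_protection_domain() and get_fault_set() come from the new file; the wrapper function, its arguments and the assumption that callers already hold a PowerFlex connection and an AnsibleModule instance are illustrative.

    from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
        import Configuration

    def resolve_fault_set(powerflex_conn, module, fault_set_name, protection_domain_name):
        # Resolve the protection domain first, then look up the fault set by name
        # inside that domain. get_fault_set() returns None when the fault set does
        # not exist yet, so callers can branch between create and modify flows.
        config = Configuration(powerflex_conn, module)
        pd = config.get_protection_domain(protection_domain_name=protection_domain_name)
        return config.get_fault_set(fault_set_name=fault_set_name,
                                    protection_domain_id=pd['id'])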
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py
new file mode 100644
index 000000000..0cfb2659f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/libraries/powerflex_base.py
@@ -0,0 +1,45 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('powerflex_base')
+
+
+class PowerFlexBase:
+
+ '''PowerFlex Base Class'''
+
+ def __init__(self, ansible_module, ansible_module_params):
+ """
+ Initialize the powerflex base class
+
+ :param ansible_module: Ansible module class
+ :type ansible_module: AnsibleModule
+ :param ansible_module_params: Parameters for ansible module class
+ :type ansible_module_params: dict
+ """
+ self.module_params = utils.get_powerflex_gateway_host_parameters()
+ ansible_module_params['argument_spec'].update(self.module_params)
+
+ # Initialize the ansible module
+ self.module = ansible_module(
+ **ansible_module_params
+ )
+
+ utils.ensure_required_libs(self.module)
+ self.result = {"changed": False}
+
+ try:
+ self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
+ self.module.params)
+ LOG.info("Got the PowerFlex system connection object instance")
+ except Exception as e:
+ LOG.error(str(e))
+ self.module.fail_json(msg=str(e))
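A minimal sketch of how a module can build on the new PowerFlexBase class; the class name and argument spec below are illustrative assumptions (the fault_set module added later in this patch is the real in-tree example).

    from ansible.module_utils.basic import AnsibleModule
    from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \
        import PowerFlexBase

    class ExampleModule(PowerFlexBase):
        def __init__(self):
            # PowerFlexBase merges the common gateway host parameters into this
            # spec, creates the AnsibleModule and opens the PowerFlex connection.
            ansible_module_params = {
                'argument_spec': dict(resource_name=dict(type='str')),
                'supports_check_mode': True
            }
            super().__init__(AnsibleModule, ansible_module_params)

        def perform_module_operation(self):
            # self.module, self.powerflex_conn and self.result are set by the base class.
            self.module.exit_json(**self.result)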
diff --git a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py
index 8503aeb0c..94024d498 100644
--- a/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py
+++ b/ansible_collections/dellemc/powerflex/plugins/module_utils/storage/dell/utils.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2021, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
from __future__ import absolute_import, division, print_function
@@ -17,14 +17,7 @@ from ansible.module_utils.basic import missing_required_lib
"""import PyPowerFlex lib"""
try:
from PyPowerFlex import PowerFlexClient
- from PyPowerFlex.objects.sds import Sds
- from PyPowerFlex.objects import protection_domain
- from PyPowerFlex.objects import storage_pool
- from PyPowerFlex.objects import sdc
- from PyPowerFlex.objects import volume
- from PyPowerFlex.objects import system
- from PyPowerFlex.objects.system import SnapshotDef
-
+ from PyPowerFlex.objects.system import SnapshotDef # pylint: disable=unused-import
HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = True, None
except ImportError:
HAS_POWERFLEX_SDK, POWERFLEX_SDK_IMP_ERR = False, traceback.format_exc()
@@ -40,7 +33,7 @@ except ImportError:
"""importing dateutil"""
try:
- import dateutil.relativedelta
+ import dateutil.relativedelta # noqa # pylint: disable=unused-import
HAS_DATEUTIL, DATEUTIL_IMP_ERR = True, None
except ImportError:
HAS_DATEUTIL, DATEUTIL_IMP_ERR = False, traceback.format_exc()
@@ -87,10 +80,10 @@ def ensure_required_libs(module):
exception=PKG_RSRC_IMP_ERR)
if not HAS_POWERFLEX_SDK:
- module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.6.0 or above"),
+ module.fail_json(msg=missing_required_lib("PyPowerFlex V 1.9.0 or above"),
exception=POWERFLEX_SDK_IMP_ERR)
- min_ver = '1.6.0'
+ min_ver = '1.9.0'
try:
curr_version = pkg_resources.require("PyPowerFlex")[0].version
supported_version = (parse_version(curr_version) >= parse_version(min_ver))
@@ -184,3 +177,19 @@ def is_invalid_name(name):
regexp = re.compile(r'^[a-zA-Z0-9!@#$%^~*_-]*$')
if not regexp.search(name):
return True
+
+
+def get_time_minutes(time, time_unit):
+ """Convert the given time to minutes"""
+
+ if time is not None and time > 0:
+ if time_unit in ('Hour'):
+ return time * 60
+ elif time_unit in ('Day'):
+ return time * 60 * 24
+ elif time_unit in ('Week'):
+ return time * 60 * 24 * 7
+ else:
+ return time
+ else:
+ return 0
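The new get_time_minutes() helper normalizes a cadence value to minutes; a few worked examples of the expected return values (computed from the code above, not captured from a live system):

    get_time_minutes(1, 'Hour')     # 60
    get_time_minutes(2, 'Day')      # 2880  (2 * 60 * 24)
    get_time_minutes(1, 'Week')     # 10080 (60 * 24 * 7)
    get_time_minutes(30, 'Minute')  # 30    (unrecognized units pass through unchanged)
    get_time_minutes(None, 'Hour')  # 0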
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/device.py b/ansible_collections/dellemc/powerflex/plugins/modules/device.py
index a321315e3..e83353185 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/device.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/device.py
@@ -109,6 +109,14 @@ options:
choices: ['present', 'absent']
required: true
type: str
+ force:
+ description:
+ - Using the Force flag to add a device.
+ - Use this flag to overwrite existing data on the device.
+ - Use this flag with caution, because all data on the device will be
+ destroyed.
+ type: bool
+ default: false
notes:
- The value for device_id is generated only after successful addition of the
device.
@@ -135,6 +143,22 @@ EXAMPLES = r'''
protection_domain_name: "domain1"
external_acceleration_type: "ReadAndWrite"
state: "present"
+- name: Add a device with force flag
+ dellemc.powerflex.device:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ media_type: "HDD"
+ device_name: "device2"
+ storage_pool_name: "pool1"
+ protection_domain_name: "domain1"
+ external_acceleration_type: "ReadAndWrite"
+ force: true
+ state: "present"
- name: Get device details using device_id
dellemc.powerflex.device:
hostname: "{{hostname}}"
@@ -166,23 +190,23 @@ EXAMPLES = r'''
state: "present"
- name: Remove a device using device_id
dellemc.powerflex.device:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
- device_id: "76eb7e2f00010000"
- state: "absent"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ device_id: "76eb7e2f00010000"
+ state: "absent"
- name: Remove a device using (current_pathname, sds_id)
dellemc.powerflex.device:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
- current_pathname: "/dev/sdb"
- sds_name: "node1"
- state: "absent"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ current_pathname: "/dev/sdb"
+ sds_name: "node1"
+ state: "absent"
'''
RETURN = r'''
@@ -715,12 +739,11 @@ class PowerFlexDevice(object):
self.powerflex_conn.device.create(
current_pathname=current_pathname,
- sds_id=sds_id,
- acceleration_pool_id=acceleration_pool_id,
+ sds_id=sds_id, acceleration_pool_id=acceleration_pool_id,
external_acceleration_type=external_acceleration_type,
- media_type=media_type,
- name=device_name,
- storage_pool_id=storage_pool_id)
+ media_type=media_type, name=device_name,
+ storage_pool_id=storage_pool_id,
+ force=self.module.params['force'])
return True
except Exception as e:
error_msg = "Adding device %s operation failed with " \
@@ -1076,21 +1099,15 @@ def get_powerflex_device_parameters():
"""This method provide parameter required for the device module on
PowerFlex"""
return dict(
- current_pathname=dict(),
- device_name=dict(),
- device_id=dict(),
- sds_name=dict(),
- sds_id=dict(),
- storage_pool_name=dict(),
- storage_pool_id=dict(),
- acceleration_pool_id=dict(),
- acceleration_pool_name=dict(),
- protection_domain_name=dict(),
- protection_domain_id=dict(),
- external_acceleration_type=dict(choices=['Invalid', 'None', 'Read',
- 'Write', 'ReadAndWrite']),
+ current_pathname=dict(), device_name=dict(), device_id=dict(),
+ sds_name=dict(), sds_id=dict(), storage_pool_name=dict(),
+ storage_pool_id=dict(), acceleration_pool_id=dict(),
+ acceleration_pool_name=dict(), protection_domain_name=dict(),
+ protection_domain_id=dict(), external_acceleration_type=dict(
+ choices=['Invalid', 'None', 'Read', 'Write', 'ReadAndWrite']),
media_type=dict(choices=['HDD', 'SSD', 'NVDIMM']),
- state=dict(required=True, type='str', choices=['present', 'absent'])
+ state=dict(required=True, type='str', choices=['present', 'absent']),
+ force=dict(type='bool', default=False)
)
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py b/ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py
new file mode 100644
index 000000000..bfa926dd6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/fault_set.py
@@ -0,0 +1,380 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2024, Dell Technologies
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing Fault Sets on Dell Technologies (Dell) PowerFlex"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: fault_set
+version_added: '2.2.0'
+short_description: Manage Fault Sets on Dell PowerFlex
+description:
+- Managing fault sets on a PowerFlex storage system includes creating,
+ getting details, renaming and deleting a fault set.
+author:
+- Carlos Tronco (@ctronco) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+extends_documentation_fragment:
+ - dellemc.powerflex.powerflex
+options:
+ fault_set_name:
+ description:
+ - Name of the Fault Set.
+ - Mutually exclusive with I(fault_set_id).
+ type: str
+ fault_set_id:
+ description:
+ - ID of the Fault Set.
+ - Mutually exclusive with I(fault_set_name).
+ type: str
+ protection_domain_name:
+ description:
+ - Name of protection domain.
+ - Mutually exclusive with I(protection_domain_id).
+ type: str
+ protection_domain_id:
+ description:
+ - ID of the protection domain.
+ - Mutually exclusive with I(protection_domain_name).
+ type: str
+ fault_set_new_name:
+ description:
+ - New name of the fault set.
+ type: str
+ state:
+ description:
+ - State of the Fault Set.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - The I(check_mode) is supported.
+ - When I(fault_set_name) is provided, I(protection_domain_name)
+ or I(protection_domain_id) must be provided.
+'''
+
+
+EXAMPLES = r'''
+
+- name: Create Fault Set on Protection Domain
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_name: "{{ fault_set_name }}"
+ protection_domain_name: "{{ pd_name }}"
+ state: present
+
+- name: Rename Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_name: "{{ fault_set_name }}"
+ fault_set_new_name: "{{ fault_set_new_name }}"
+ state: present
+
+- name: Get details of a Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_id: "{{ fault_set_id }}"
+ state: present
+
+- name: Delete Fault Set
+ dellemc.powerflex.fault_set:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ fault_set_id: "{{ fault_set_id }}"
+ state: absent
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: 'false'
+
+fault_set_details:
+ description: Details of fault set.
+ returned: always
+ type: dict
+ contains:
+ protectionDomainId:
+ description: Unique identifier of the protection domain.
+ type: str
+ protectionDomainName:
+ description: Name of the protection domain.
+ type: str
+ name:
+ description: Name of the fault set.
+ type: str
+ id:
+ description: Unique identifier of the fault set.
+ type: str
+ SDS:
+      description: List of SDS associated with the fault set.
+ type: list
+ elements: dict
+ links:
+ description: Fault set links.
+ type: list
+ contains:
+ href:
+ description: Fault Set instance URL.
+ type: str
+ rel:
+ description: Relationship of fault set with different
+ entities.
+ type: str
+ sample: {
+ "protectionDomainId": "da721a8300000000",
+ "protectionDomainName": "sample-pd",
+ "name": "fs_001",
+ "id": "eb44b70500000000",
+ "links": []
+ }
+
+'''
+
+
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell import (
+ utils,
+)
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \
+ import PowerFlexBase
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
+ import Configuration
+from ansible.module_utils.basic import AnsibleModule
+
+
+LOG = utils.get_logger("fault_set")
+
+
+class PowerFlexFaultSet(PowerFlexBase):
+ """Class with FaultSet operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+
+ mutually_exclusive = [
+ ["fault_set_name", "fault_set_id"],
+ ["protection_domain_name", "protection_domain_id"],
+ ]
+ required_one_of = [["fault_set_name", "fault_set_id"]]
+
+ ansible_module_params = {
+ 'argument_spec': get_powerflex_fault_set_parameters(),
+ 'supports_check_mode': True,
+ 'mutually_exclusive': mutually_exclusive,
+ 'required_one_of': required_one_of
+ }
+ super().__init__(AnsibleModule, ansible_module_params)
+
+ self.result = dict(
+ changed=False,
+ fault_set_details={}
+ )
+
+ def get_protection_domain(
+ self, protection_domain_name=None, protection_domain_id=None
+ ):
+ """Get the details of a protection domain in a given PowerFlex storage
+ system"""
+ return Configuration(self.powerflex_conn, self.module).get_protection_domain(
+ protection_domain_name=protection_domain_name, protection_domain_id=protection_domain_id)
+
+ def get_associated_sds(
+ self, fault_set_id=None
+ ):
+ """Get the details of SDS associated to given fault set in a given PowerFlex storage
+ system"""
+ return Configuration(self.powerflex_conn, self.module).get_associated_sds(
+ fault_set_id=fault_set_id)
+
+ def create_fault_set(self, fault_set_name, protection_domain_id):
+ """
+ Create Fault Set
+ :param fault_set_name: Name of the fault set
+ :type fault_set_name: str
+ :param protection_domain_id: ID of the protection domain
+ :type protection_domain_id: str
+ :return: Boolean indicating if create operation is successful
+ """
+ try:
+ if not self.module.check_mode:
+ msg = (f"Creating fault set with name: {fault_set_name} on "
+ f"protection domain with id: {protection_domain_id}")
+ LOG.info(msg)
+ self.powerflex_conn.fault_set.create(
+ name=fault_set_name, protection_domain_id=protection_domain_id
+ )
+ return self.get_fault_set(
+ fault_set_name=fault_set_name,
+ protection_domain_id=protection_domain_id)
+
+ except Exception as e:
+ error_msg = (f"Create fault set {fault_set_name} operation failed "
+ f"with error {str(e)}")
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def get_fault_set(self, fault_set_name=None, fault_set_id=None, protection_domain_id=None):
+ """Get fault set details
+ :param fault_set_name: Name of the fault set
+ :param fault_set_id: Id of the fault set
+ :param protection_domain_id: ID of the protection domain
+ :return: Fault set details
+ :rtype: dict
+ """
+ return Configuration(self.powerflex_conn, self.module).get_fault_set(
+ fault_set_name=fault_set_name, fault_set_id=fault_set_id, protection_domain_id=protection_domain_id)
+
+ def is_rename_required(self, fault_set_details, fault_set_params):
+ """To get the details of the fields to be modified."""
+
+ if fault_set_params['fault_set_new_name'] is not None and \
+ fault_set_params['fault_set_new_name'] != fault_set_details['name']:
+ return True
+
+ return False
+
+ def rename_fault_set(self, fault_set_id,
+ new_name):
+ """Perform rename operation on a fault set"""
+
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.fault_set.rename(
+ fault_set_id=fault_set_id,
+ name=new_name)
+ return self.get_fault_set(
+ fault_set_id=fault_set_id)
+ except Exception as e:
+ msg = (f'Failed to rename the fault set instance '
+ f'with error {str(e)}')
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def delete_fault_set(self, fault_set_id):
+ """Delete the Fault Set"""
+ try:
+ if not self.module.check_mode:
+ LOG.info(msg=f"Removing Fault Set {fault_set_id}")
+ self.powerflex_conn.fault_set.delete(fault_set_id)
+ LOG.info("returning None")
+ return None
+ return self.get_fault_set(
+ fault_set_id=fault_set_id)
+ except Exception as e:
+ errormsg = f"Removing Fault Set {fault_set_id} failed with error {str(e)}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_parameters(self, fault_set_params):
+ params = [fault_set_params['fault_set_name'], fault_set_params['fault_set_new_name']]
+ for param in params:
+ if param is not None and len(param.strip()) == 0:
+ error_msg = "Provide valid value for name for the " \
+ "creation/modification of the fault set."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+ if fault_set_params['fault_set_name'] is not None and \
+ fault_set_params['protection_domain_id'] is None and fault_set_params['protection_domain_name'] is None:
+ error_msg = "Provide protection_domain_id/protection_domain_name with fault_set_name."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+
+def get_powerflex_fault_set_parameters():
+ """This method provide parameter required for the Ansible Fault Set module on
+ PowerFlex"""
+ return dict(
+ fault_set_name=dict(),
+ fault_set_id=dict(),
+ protection_domain_name=dict(),
+ protection_domain_id=dict(),
+ fault_set_new_name=dict(),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+
+class FaultSetExitHandler():
+ def handle(self, fault_set_obj, fault_set_details):
+ fault_set_obj.result["fault_set_details"] = fault_set_details
+ if fault_set_details:
+ fault_set_obj.result["fault_set_details"]["protectionDomainName"] = \
+ fault_set_obj.get_protection_domain(
+ protection_domain_id=fault_set_details["protectionDomainId"])["name"]
+ fault_set_obj.result["fault_set_details"]["SDS"] = \
+ fault_set_obj.get_associated_sds(
+ fault_set_id=fault_set_details['id'])
+ fault_set_obj.module.exit_json(**fault_set_obj.result)
+
+
+class FaultSetDeleteHandler():
+ def handle(self, fault_set_obj, fault_set_params, fault_set_details):
+ if fault_set_params['state'] == 'absent' and fault_set_details:
+ fault_set_details = fault_set_obj.delete_fault_set(fault_set_details['id'])
+ fault_set_obj.result['changed'] = True
+
+ FaultSetExitHandler().handle(fault_set_obj, fault_set_details)
+
+
+class FaultSetRenameHandler():
+ def handle(self, fault_set_obj, fault_set_params, fault_set_details):
+ if fault_set_params['state'] == 'present' and fault_set_details:
+ is_rename_required = fault_set_obj.is_rename_required(fault_set_details, fault_set_params)
+ if is_rename_required:
+ fault_set_details = fault_set_obj.rename_fault_set(fault_set_id=fault_set_details['id'],
+ new_name=fault_set_params['fault_set_new_name'])
+ fault_set_obj.result['changed'] = True
+
+ FaultSetDeleteHandler().handle(fault_set_obj, fault_set_params, fault_set_details)
+
+
+class FaultSetCreateHandler():
+ def handle(self, fault_set_obj, fault_set_params, fault_set_details, pd_id):
+ if fault_set_params['state'] == 'present' and not fault_set_details:
+ fault_set_details = fault_set_obj.create_fault_set(fault_set_name=fault_set_params['fault_set_name'],
+ protection_domain_id=pd_id)
+ fault_set_obj.result['changed'] = True
+
+ FaultSetRenameHandler().handle(fault_set_obj, fault_set_params, fault_set_details)
+
+
+class FaultSetHandler():
+ def handle(self, fault_set_obj, fault_set_params):
+ fault_set_obj.validate_parameters(fault_set_params=fault_set_params)
+ pd_id = None
+ if fault_set_params['protection_domain_id'] or fault_set_params['protection_domain_name']:
+ pd_id = fault_set_obj.get_protection_domain(
+ protection_domain_id=fault_set_params['protection_domain_id'],
+ protection_domain_name=fault_set_params['protection_domain_name'])['id']
+ fault_set_details = fault_set_obj.get_fault_set(fault_set_id=fault_set_params['fault_set_id'],
+ fault_set_name=fault_set_params['fault_set_name'],
+ protection_domain_id=pd_id)
+ FaultSetCreateHandler().handle(fault_set_obj, fault_set_params, fault_set_details, pd_id)
+
+
+def main():
+ """ Create PowerFlex fault set object and perform action on it
+ based on user input from playbook."""
+ obj = PowerFlexFaultSet()
+ FaultSetHandler().handle(obj, obj.module.params)
+
+
+if __name__ == '__main__':
+ main()
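Supplementary sketch (not an upstream example): the documentation above allows addressing the protection domain through I(protection_domain_id) instead of I(protection_domain_name), but the EXAMPLES only show the name form. A create task using the ID form, reusing the sample ID from the RETURN block, might look like this.

- name: Create Fault Set using protection domain ID (illustrative sketch)
  dellemc.powerflex.fault_set:
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    validate_certs: "{{ validate_certs }}"
    fault_set_name: "fs_001"
    protection_domain_id: "da721a8300000000"
    state: present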
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/info.py b/ansible_collections/dellemc/powerflex/plugins/modules/info.py
index ff1401d63..33f3a8ad8 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/info.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/info.py
@@ -1,6 +1,6 @@
-#!/usr/bin/python
+# !/usr/bin/python
-# Copyright: (c) 2021, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
"""Ansible module for Gathering information about Dell Technologies (Dell) PowerFlex"""
@@ -21,12 +21,16 @@ description:
- Gathering information about Dell PowerFlex storage system includes
getting the api details, list of volumes, SDSs, SDCs, storage pools,
protection domains, snapshot policies, and devices.
+- Gathering information about Dell PowerFlex Manager includes getting the
+ list of managed devices, deployments and service templates.
extends_documentation_fragment:
- dellemc.powerflex.powerflex
author:
- Arindam Datta (@dattaarindam) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
options:
gather_subset:
@@ -42,8 +46,13 @@ options:
- Devices - C(device).
- Replication consistency groups - C(rcg).
- Replication pairs - C(replication_pair).
+ - Fault Sets - C(fault_set).
+ - Service templates - C(service_template).
+ - Managed devices - C(managed_device).
+ - Deployments - C(deployment).
choices: [vol, storage_pool, protection_domain, sdc, sds,
- snapshot_policy, device, rcg, replication_pair]
+ snapshot_policy, device, rcg, replication_pair,
+ fault_set, service_template, managed_device, deployment]
type: list
elements: str
filters:
@@ -62,16 +71,63 @@ options:
filter_operator:
description:
- Operation to be performed on filter key.
+ - Choice I('contains') is supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
type: str
- choices: [equal]
+ choices: [equal, contains]
required: true
filter_value:
description:
- Value of the filter key.
type: str
required: true
+ limit:
+ description:
+ - Page limit.
+ - Supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
+ type: int
+ default: 50
+ offset:
+ description:
+ - Pagination offset.
+ - Supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
+ type: int
+ default: 0
+ sort:
+ description:
+ - Sort the returned components based on specified field.
+ - Supported for gather_subset keys I(service_template), I(managed_device), I(deployment).
+    - The supported sort keys for the gather_subset can be found in the PowerFlex Manager API documentation on developer.dell.com.
+ type: str
+ include_devices:
+ description:
+ - Include devices in response.
+ - Applicable when gather_subset is I(deployment).
+ type: bool
+ default: true
+ include_template:
+ description:
+ - Include service templates in response.
+ - Applicable when gather_subset is I(deployment).
+ type: bool
+ default: true
+ full:
+ description:
+ - Specify if response is full or brief.
+ - Applicable when gather_subset is I(deployment), I(service_template).
+ - For I(deployment) specify to use full templates including resources in response.
+ type: bool
+ default: false
+ include_attachments:
+ description:
+ - Include attachments.
+ - Applicable when gather_subset is I(service_template).
+ type: bool
+ default: true
notes:
- The I(check_mode) is supported.
+  - The supported filter keys for the gather_subset can be found in the PowerFlex Manager API documentation on developer.dell.com.
+  - The I(filters), I(sort), I(limit) and I(offset) options will be ignored when more than one I(gather_subset) is specified along with
+    I(service_template), I(managed_device) or I(deployment).
'''
EXAMPLES = r'''
@@ -91,6 +147,7 @@ EXAMPLES = r'''
- device
- rcg
- replication_pair
+ - fault_set
- name: Get a subset list of PowerFlex volumes
dellemc.powerflex.info:
@@ -104,6 +161,35 @@ EXAMPLES = r'''
- filter_key: "name"
filter_operator: "equal"
filter_value: "ansible_test"
+
+- name: Get deployment and resource provisioning info
+ dellemc.powerflex.info:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - managed_device
+ - deployment
+ - service_template
+
+- name: Get deployment with filter, sort, pagination
+ dellemc.powerflex.info:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - deployment
+ filters:
+ - filter_key: "name"
+ filter_operator: "contains"
+ filter_value: "partial"
+ sort: name
+ limit: 10
+ offset: 10
+ include_devices: true
+ include_template: true
'''
RETURN = r'''
@@ -1147,19 +1233,557 @@ Replication_pairs:
"replicationConsistencyGroupId": "e2ce036b00000002",
"userRequestedPauseTransmitInitCopy": false
}
+Fault_Sets:
+ description: Details of fault sets.
+ returned: always
+ type: list
+ contains:
+ protectionDomainId:
+ description: The ID of the protection domain.
+ type: str
+ name:
+      description: Name of the fault set.
+ type: str
+ id:
+      description: ID of the fault set.
+ type: str
+ sample: [
+ {
+ "protectionDomainId": "da721a8300000000",
+ "protectionDomainName": "fault_set_1",
+ "name": "at1zbs1t6cp2sds1d1fs1",
+ "SDS": [],
+ "id": "eb44b70500000000",
+ "links": [
+ { "rel": "self", "href": "/api/instances/FaultSet::eb44b70500000000" },
+ {
+ "rel": "/api/FaultSet/relationship/Statistics",
+ "href": "/api/instances/FaultSet::eb44b70500000000/relationships/Statistics"
+ },
+ {
+ "rel": "/api/FaultSet/relationship/Sds",
+ "href": "/api/instances/FaultSet::eb44b70500000000/relationships/Sds"
+ },
+ {
+ "rel": "/api/parent/relationship/protectionDomainId",
+ "href": "/api/instances/ProtectionDomain::da721a8300000000"
+ }
+ ]
+ },
+ {
+ "protectionDomainId": "da721a8300000000",
+ "protectionDomainName": "fault_set_2",
+ "name": "at1zbs1t6cp2sds1d1fs3",
+ "SDS": [],
+ "id": "eb44b70700000002",
+ "links": [
+ { "rel": "self", "href": "/api/instances/FaultSet::eb44b70700000002" },
+ {
+ "rel": "/api/FaultSet/relationship/Statistics",
+ "href": "/api/instances/FaultSet::eb44b70700000002/relationships/Statistics"
+ },
+ {
+ "rel": "/api/FaultSet/relationship/Sds",
+ "href": "/api/instances/FaultSet::eb44b70700000002/relationships/Sds"
+ },
+ {
+ "rel": "/api/parent/relationship/protectionDomainId",
+ "href": "/api/instances/ProtectionDomain::da721a8300000000"
+ }
+ ]
+ }
+ ]
+ManagedDevices:
+ description: Details of all devices from inventory.
+ returned: when I(gather_subset) is I(managed_device)
+ type: list
+ contains:
+ deviceType:
+ description: Device Type.
+ type: str
+ serviceTag:
+ description: Service Tag.
+ type: str
+ serverTemplateId:
+ description: The ID of the server template.
+ type: str
+ state:
+ description: The state of the device.
+ type: str
+ managedState:
+ description: The managed state of the device.
+ type: str
+ compliance:
+ description: The compliance state of the device.
+ type: str
+ systemId:
+ description: The system ID.
+ type: str
+ sample: [{
+ "refId": "softwareOnlyServer-10.1.1.1",
+ "refType": null,
+ "ipAddress": "10.1.1.1",
+ "currentIpAddress": "10.1.1.1",
+ "serviceTag": "VMware-42 15 a5 f9 65 e6 63 0e-36 79 59 73 7b 3a 68 cd-SW",
+ "model": "VMware Virtual Platform",
+ "deviceType": "SoftwareOnlyServer",
+ "discoverDeviceType": "SOFTWAREONLYSERVER_CENTOS",
+ "displayName": "vpi1011-c1n1",
+ "managedState": "UNMANAGED",
+ "state": "READY",
+ "inUse": false,
+ "serviceReferences": [],
+ "statusMessage": null,
+ "firmwareName": "Default Catalog - PowerFlex 4.5.0.0",
+ "customFirmware": false,
+ "needsAttention": false,
+ "manufacturer": "VMware, Inc.",
+ "systemId": null,
+ "health": "RED",
+ "healthMessage": "Inventory run failed.",
+ "operatingSystem": "N/A",
+ "numberOfCPUs": 0,
+ "cpuType": null,
+ "nics": 0,
+ "memoryInGB": 0,
+ "infraTemplateDate": null,
+ "infraTemplateId": null,
+ "serverTemplateDate": null,
+ "serverTemplateId": null,
+ "inventoryDate": null,
+ "complianceCheckDate": "2024-02-05T18:31:31.213+00:00",
+ "discoveredDate": "2024-02-05T18:31:30.992+00:00",
+ "deviceGroupList": {
+ "paging": null,
+ "deviceGroup": [
+ {
+ "link": null,
+ "groupSeqId": -1,
+ "groupName": "Global",
+ "groupDescription": null,
+ "createdDate": null,
+ "createdBy": "admin",
+ "updatedDate": null,
+ "updatedBy": null,
+ "managedDeviceList": null,
+ "groupUserList": null
+ }
+ ]
+ },
+ "detailLink": {
+ "title": "softwareOnlyServer-10.1.1.1",
+ "href": "/AsmManager/ManagedDevice/softwareOnlyServer-10.1.1.1",
+ "rel": "describedby",
+ "type": null
+ },
+ "credId": "bc97cefb-5eb4-4c20-8e39-d1a2b809c9f5",
+ "compliance": "NONCOMPLIANT",
+ "failuresCount": 0,
+ "chassisId": null,
+ "parsedFacts": null,
+ "config": null,
+ "hostname": "vpi1011-c1n1",
+ "osIpAddress": null,
+ "osAdminCredential": null,
+ "osImageType": null,
+ "lastJobs": null,
+ "puppetCertName": "red_hat-10.1.1.1",
+ "svmAdminCredential": null,
+ "svmName": null,
+ "svmIpAddress": null,
+ "svmImageType": null,
+ "flexosMaintMode": 0,
+ "esxiMaintMode": 0,
+ "vmList": []
+ }]
+Deployments:
+ description: Details of all deployments.
+ returned: when I(gather_subset) is I(deployment)
+ type: list
+ contains:
+ id:
+ description: Deployment ID.
+ type: str
+ deploymentName:
+ description: Deployment name.
+ type: str
+ status:
+ description: The status of deployment.
+ type: str
+ firmwareRepository:
+ description: The firmware repository.
+ type: dict
+ contains:
+ signature:
+ description: The signature details.
+ type: str
+ downloadStatus:
+ description: The download status.
+ type: str
+ rcmapproved:
+ description: If RCM approved.
+ type: bool
+ sample: [{
+ "id": "8aaa80658cd602e0018cda8b257f78ce",
+ "deploymentName": "Test-Update - K",
+ "deploymentDescription": "Test-Update - K",
+ "deploymentValid": null,
+ "retry": false,
+ "teardown": false,
+ "teardownAfterCancel": false,
+ "removeService": false,
+ "createdDate": "2024-01-05T16:53:21.407+00:00",
+ "createdBy": "admin",
+ "updatedDate": "2024-02-11T17:00:05.657+00:00",
+ "updatedBy": "system",
+ "deploymentScheduledDate": null,
+ "deploymentStartedDate": "2024-01-05T16:53:22.886+00:00",
+ "deploymentFinishedDate": null,
+ "serviceTemplate": {
+ "id": "8aaa80658cd602e0018cda8b257f78ce",
+ "templateName": "block-only (8aaa80658cd602e0018cda8b257f78ce)",
+ "templateDescription": "Storage - Software Only deployment",
+ "templateType": "VxRack FLEX",
+ "templateVersion": "4.5.0.0",
+ "templateValid": {
+ "valid": true,
+ "messages": []
+ },
+ "originalTemplateId": "c44cb500-020f-4562-9456-42ec1eb5f9b2",
+ "templateLocked": false,
+ "draft": false,
+ "inConfiguration": false,
+ "createdDate": "2024-01-05T16:53:22.083+00:00",
+ "createdBy": null,
+ "updatedDate": "2024-02-09T06:00:09.602+00:00",
+ "lastDeployedDate": null,
+ "updatedBy": null,
+ "components": [
+ {
+ "id": "6def7edd-bae2-4420-93bf-9ceb051bbb65",
+ "componentID": "component-scaleio-gateway-1",
+ "identifier": null,
+ "componentValid": {
+ "valid": true,
+ "messages": []
+ },
+ "puppetCertName": "scaleio-block-legacy-gateway",
+ "osPuppetCertName": null,
+ "name": "block-legacy-gateway",
+ "type": "SCALEIO",
+ "subType": "STORAGEONLY",
+ "teardown": false,
+ "helpText": null,
+ "managementIpAddress": null,
+ "configFile": null,
+ "serialNumber": null,
+ "asmGUID": "scaleio-block-legacy-gateway",
+ "relatedComponents": {
+ "625b0e17-9b91-4bc0-864c-d0111d42d8d0": "Node (Software Only)",
+ "961a59eb-80c3-4a3a-84b7-2101e9831527": "Node (Software Only)-2",
+ "bca710a5-7cdf-481e-b729-0b53e02873ee": "Node (Software Only)-3"
+ },
+ "resources": [],
+ "refId": null,
+ "cloned": false,
+ "clonedFromId": null,
+ "manageFirmware": false,
+ "brownfield": false,
+ "instances": 1,
+ "clonedFromAsmGuid": null,
+ "ip": null
+ }
+ ],
+ "category": "block-only",
+ "allUsersAllowed": true,
+ "assignedUsers": [],
+ "manageFirmware": true,
+ "useDefaultCatalog": false,
+ "firmwareRepository": null,
+ "licenseRepository": null,
+ "configuration": null,
+ "serverCount": 3,
+ "storageCount": 1,
+ "clusterCount": 1,
+ "serviceCount": 0,
+ "switchCount": 0,
+ "vmCount": 0,
+ "sdnasCount": 0,
+ "brownfieldTemplateType": "NONE",
+ "networks": [
+ {
+ "id": "8aaa80648cd5fb9b018cda46e4e50000",
+ "name": "mgmt",
+ "description": "",
+ "type": "SCALEIO_MANAGEMENT",
+ "vlanId": 850,
+ "static": true,
+ "staticNetworkConfiguration": {
+ "gateway": "10.1.1.1",
+ "subnet": "1.1.1.0",
+ "primaryDns": "10.1.1.1",
+ "secondaryDns": "10.1.1.1",
+ "dnsSuffix": null,
+ "ipRange": [
+ {
+ "id": "8aaa80648cd5fb9b018cda46e5080001",
+ "startingIp": "10.1.1.1",
+ "endingIp": "10.1.1.1",
+ "role": null
+ }
+ ],
+ "ipAddress": null,
+ "staticRoute": null
+ },
+ "destinationIpAddress": "10.1.1.1"
+ }
+ ],
+ "blockServiceOperationsMap": {
+ "scaleio-block-legacy-gateway": {
+ "blockServiceOperationsMap": {}
+ }
+ }
+ },
+ "scheduleDate": null,
+ "status": "complete",
+ "compliant": true,
+ "deploymentDevice": [
+ {
+ "refId": "scaleio-block-legacy-gateway",
+ "refType": null,
+ "logDump": null,
+ "status": null,
+ "statusEndTime": null,
+ "statusStartTime": null,
+ "deviceHealth": "GREEN",
+ "healthMessage": "OK",
+ "compliantState": "COMPLIANT",
+ "brownfieldStatus": "NOT_APPLICABLE",
+ "deviceType": "scaleio",
+ "deviceGroupName": null,
+ "ipAddress": "block-legacy-gateway",
+ "currentIpAddress": "10.1.1.1",
+ "serviceTag": "block-legacy-gateway",
+ "componentId": null,
+ "statusMessage": null,
+ "model": "PowerFlex Gateway",
+ "cloudLink": false,
+ "dasCache": false,
+ "deviceState": "READY",
+ "puppetCertName": "scaleio-block-legacy-gateway",
+ "brownfield": false
+ }
+ ],
+ "vms": null,
+ "updateServerFirmware": true,
+ "useDefaultCatalog": false,
+ "firmwareRepository": {
+ "id": "8aaa80658cd602e0018cd996a1c91bdc",
+ "name": "Intelligent Catalog 45.373.00",
+ "sourceLocation": null,
+ "sourceType": null,
+ "diskLocation": null,
+ "filename": null,
+ "md5Hash": null,
+ "username": null,
+ "password": null,
+ "downloadStatus": null,
+ "createdDate": null,
+ "createdBy": null,
+ "updatedDate": null,
+ "updatedBy": null,
+ "defaultCatalog": false,
+ "embedded": false,
+ "state": null,
+ "softwareComponents": [],
+ "softwareBundles": [],
+ "deployments": [],
+ "bundleCount": 0,
+ "componentCount": 0,
+ "userBundleCount": 0,
+ "minimal": false,
+ "downloadProgress": 0,
+ "extractProgress": 0,
+ "fileSizeInGigabytes": null,
+ "signedKeySourceLocation": null,
+ "signature": null,
+ "custom": false,
+ "needsAttention": false,
+ "jobId": null,
+ "rcmapproved": false
+ },
+ "firmwareRepositoryId": "8aaa80658cd602e0018cd996a1c91bdc",
+ "licenseRepository": null,
+ "licenseRepositoryId": null,
+ "individualTeardown": false,
+ "deploymentHealthStatusType": "green",
+ "assignedUsers": [],
+ "allUsersAllowed": true,
+ "owner": "admin",
+ "noOp": false,
+ "firmwareInit": false,
+ "disruptiveFirmware": false,
+ "preconfigureSVM": false,
+ "preconfigureSVMAndUpdate": false,
+ "servicesDeployed": "NONE",
+ "precalculatedDeviceHealth": null,
+ "lifecycleModeReasons": [],
+ "jobDetails": null,
+ "numberOfDeployments": 0,
+ "operationType": "NONE",
+ "operationStatus": null,
+ "operationData": null,
+ "deploymentValidationResponse": null,
+ "currentStepCount": null,
+ "totalNumOfSteps": null,
+ "currentStepMessage": null,
+ "customImage": "os_sles",
+ "originalDeploymentId": null,
+ "currentBatchCount": null,
+ "totalBatchCount": null,
+ "templateValid": true,
+ "lifecycleMode": false,
+ "vds": false,
+ "scaleUp": false,
+ "brownfield": false,
+ "configurationChange": false
+ }]
+ServiceTemplates:
+ description: Details of all service templates.
+ returned: when I(gather_subset) is I(service_template)
+ type: list
+ contains:
+ templateName:
+ description: Template name.
+ type: str
+ templateDescription:
+ description: Template description.
+ type: str
+ templateType:
+ description: Template type.
+ type: str
+ templateVersion:
+ description: Template version.
+ type: str
+ category:
+ description: The template category.
+ type: str
+ serverCount:
+ description: Server count.
+ type: int
+ sample: [{
+ "id": "2434144f-7795-4245-a04b-6fcb771697d7",
+ "templateName": "Storage- 100Gb",
+ "templateDescription": "Storage Only 4 Node deployment with 100Gb networking",
+ "templateType": "VxRack FLEX",
+ "templateVersion": "4.5-213",
+ "templateValid": {
+ "valid": true,
+ "messages": []
+ },
+ "originalTemplateId": "ff80808177f880fc0177f883bf1e0027",
+ "templateLocked": true,
+ "draft": false,
+ "inConfiguration": false,
+ "createdDate": "2024-01-04T19:47:23.534+00:00",
+ "createdBy": "system",
+ "updatedDate": null,
+ "lastDeployedDate": null,
+ "updatedBy": null,
+ "components": [
+ {
+ "id": "43dec024-85a9-4901-9e8e-fa0d3c417f7b",
+ "componentID": "component-scaleio-gateway-1",
+ "identifier": null,
+ "componentValid": {
+ "valid": true,
+ "messages": []
+ },
+ "puppetCertName": null,
+ "osPuppetCertName": null,
+ "name": "PowerFlex Cluster",
+ "type": "SCALEIO",
+ "subType": "STORAGEONLY",
+ "teardown": false,
+ "helpText": null,
+ "managementIpAddress": null,
+ "configFile": null,
+ "serialNumber": null,
+ "asmGUID": null,
+ "relatedComponents": {
+ "c5c46733-012c-4dca-af9b-af46d73d045a": "Storage Only Node"
+ },
+ "resources": [],
+ "refId": null,
+ "cloned": false,
+ "clonedFromId": null,
+ "manageFirmware": false,
+ "brownfield": false,
+ "instances": 1,
+ "clonedFromAsmGuid": null,
+ "ip": null
+ }
+ ],
+ "category": "Sample Templates",
+ "allUsersAllowed": false,
+ "assignedUsers": [],
+ "manageFirmware": true,
+ "useDefaultCatalog": true,
+ "firmwareRepository": null,
+ "licenseRepository": null,
+ "configuration": null,
+ "serverCount": 4,
+ "storageCount": 0,
+ "clusterCount": 1,
+ "serviceCount": 0,
+ "switchCount": 0,
+ "vmCount": 0,
+ "sdnasCount": 0,
+ "brownfieldTemplateType": "NONE",
+ "networks": [
+ {
+ "id": "ff80808177f8823b0177f8bb82d80005",
+ "name": "flex-data2",
+ "description": "",
+ "type": "SCALEIO_DATA",
+ "vlanId": 105,
+ "static": true,
+ "staticNetworkConfiguration": {
+ "gateway": null,
+ "subnet": "1.1.1.0",
+ "primaryDns": null,
+ "secondaryDns": null,
+ "dnsSuffix": null,
+ "ipRange": null,
+ "ipAddress": null,
+ "staticRoute": null
+ },
+ "destinationIpAddress": "1.1.1.0"
+ }
+ ],
+ "blockServiceOperationsMap": {}
+ }]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
import utils
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
+ import Configuration
+import re
LOG = utils.get_logger('info')
+UNSUPPORTED_SUBSET_FOR_VERSION = 'One or more specified subset is not supported for the PowerFlex version.'
+POWERFLEX_MANAGER_GATHER_SUBSET = {'managed_device', 'deployment', 'service_template'}
+MIN_SUPPORTED_POWERFLEX_MANAGER_VERSION = 4.0
+ERROR_CODES = r'PARSE002|FILTER002|FILTER003'
+
class PowerFlexInfo(object):
"""Class with Info operations"""
- filter_mapping = {'equal': 'eq.'}
+ filter_mapping = {'equal': 'eq', 'contains': 'co'}
def __init__(self):
""" Define all parameters required by this module"""
@@ -1265,7 +1889,7 @@ class PowerFlexInfo(object):
return result_list(sds)
except Exception as e:
- msg = 'Get sds list from powerflex array failed with' \
+ msg = 'Get SDS list from powerflex array failed with' \
' error %s' % (str(e))
LOG.error(msg)
self.module.fail_json(msg=msg)
@@ -1395,19 +2019,24 @@ class PowerFlexInfo(object):
system """
try:
- LOG.info('Getting snapshot schedules list ')
+ LOG.info('Getting snapshot policies list ')
if filter_dict:
- snapshot_schedules = \
+ snapshot_policies = \
self.powerflex_conn.snapshot_policy.get(
filter_fields=filter_dict)
else:
- snapshot_schedules = \
+ snapshot_policies = \
self.powerflex_conn.snapshot_policy.get()
- return result_list(snapshot_schedules)
+ if snapshot_policies:
+ statistics_map = self.powerflex_conn.utility.get_statistics_for_all_snapshot_policies()
+ list_of_snap_pol_ids_in_statistics = statistics_map.keys()
+ for item in snapshot_policies:
+ item['statistics'] = statistics_map[item['id']] if item['id'] in list_of_snap_pol_ids_in_statistics else {}
+ return result_list(snapshot_policies)
except Exception as e:
- msg = 'Get snapshot schedules list from powerflex array failed ' \
+ msg = 'Get snapshot policies list from powerflex array failed ' \
'with error %s' % (str(e))
LOG.error(msg)
self.module.fail_json(msg=msg)
@@ -1431,6 +2060,114 @@ class PowerFlexInfo(object):
LOG.error(msg)
self.module.fail_json(msg=msg)
+ def get_fault_sets_list(self, filter_dict=None):
+ """ Get the list of fault sets on a given PowerFlex storage
+ system """
+
+ try:
+ LOG.info('Getting fault set list ')
+ filter_pd = []
+ if filter_dict:
+ if 'protectionDomainName' in filter_dict.keys():
+ filter_pd = filter_dict['protectionDomainName']
+ del filter_dict['protectionDomainName']
+ fault_sets = self.powerflex_conn.fault_set.get(filter_fields=filter_dict)
+ else:
+ fault_sets = self.powerflex_conn.fault_set.get()
+
+ fault_set_final = []
+ if fault_sets:
+ for fault_set in fault_sets:
+ fault_set['protectionDomainName'] = Configuration(self.powerflex_conn, self.module).get_protection_domain(
+ protection_domain_id=fault_set["protectionDomainId"])["name"]
+ fault_set["SDS"] = Configuration(self.powerflex_conn, self.module).get_associated_sds(
+ fault_set_id=fault_set['id'])
+ fault_set_final.append(fault_set)
+ fault_sets = []
+ for fault_set in fault_set_final:
+ if fault_set['protectionDomainName'] in filter_pd:
+ fault_sets.append(fault_set)
+ if len(filter_pd) != 0:
+ return result_list(fault_sets)
+ return result_list(fault_set_final)
+
+ except Exception as e:
+ msg = 'Get fault set list from powerflex array failed ' \
+ 'with error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def get_managed_devices_list(self):
+ """ Get the list of managed devices on a given PowerFlex Manager system """
+ try:
+ LOG.info('Getting managed devices list ')
+ devices = self.powerflex_conn.managed_device.get(filters=self.populate_filter_list(),
+ limit=self.get_param_value('limit'),
+ offset=self.get_param_value('offset'),
+ sort=self.get_param_value('sort'))
+ return devices
+ except Exception as e:
+ msg = f'Get managed devices from PowerFlex Manager failed with error {str(e)}'
+ return self.handle_error_exit(msg)
+
+ def get_deployments_list(self):
+ """ Get the list of deployments on a given PowerFlex Manager system """
+ try:
+ LOG.info('Getting deployments list ')
+ deployments = self.powerflex_conn.deployment.get(filters=self.populate_filter_list(),
+ sort=self.get_param_value('sort'),
+ limit=self.get_param_value('limit'),
+ offset=self.get_param_value('offset'),
+ include_devices=self.get_param_value('include_devices'),
+ include_template=self.get_param_value('include_template'),
+ full=self.get_param_value('full'))
+ return deployments
+ except Exception as e:
+ msg = f'Get deployments from PowerFlex Manager failed with error {str(e)}'
+ return self.handle_error_exit(msg)
+
+ def get_service_templates_list(self):
+ """ Get the list of service templates on a given PowerFlex Manager system """
+ try:
+ LOG.info('Getting service templates list ')
+ service_templates = self.powerflex_conn.service_template.get(filters=self.populate_filter_list(),
+ sort=self.get_param_value('sort'),
+ offset=self.get_param_value('offset'),
+ limit=self.get_param_value('limit'),
+ full=self.get_param_value('full'),
+ include_attachments=self.get_param_value('include_attachments'))
+ return service_templates
+ except Exception as e:
+ msg = f'Get service templates from PowerFlex Manager failed with error {str(e)}'
+ return self.handle_error_exit(msg)
+
+ def handle_error_exit(self, detailed_message):
+ match = re.search(r"displayMessage=([^']+)", detailed_message)
+ error_message = match.group(1) if match else detailed_message
+ LOG.error(error_message)
+ if re.search(ERROR_CODES, detailed_message):
+ return []
+ self.module.fail_json(msg=error_message)
+
+ def get_param_value(self, param):
+ """
+ Get the value of the given parameter.
+ Args:
+ param (str): The parameter to get the value for.
+ Returns:
+            The value of the parameter if it differs from the default value
+            (and, for integers, is non-negative); otherwise None.
+ """
+ if param in ('sort', 'offset', 'limit') and len(self.module.params.get('gather_subset')) > 1:
+ return None
+
+ default_value = self.module_params.get(param).get('default')
+ param_value = self.module.params.get(param)
+ if (default_value != param_value) and (param_value >= 0 if isinstance(param_value, int) else True):
+ return param_value
+ return None
+
def validate_filter(self, filter_dict):
""" Validate given filter_dict """
@@ -1447,6 +2184,16 @@ class PowerFlexInfo(object):
LOG.error(msg)
self.module.fail_json(msg=msg)
+ def populate_filter_list(self):
+ """Populate the filter list"""
+ if len(self.module.params.get('gather_subset')) > 1:
+ return []
+ filters = self.module.params.get('filters') or []
+ return [
+ f'{self.filter_mapping.get(filter_dict["filter_operator"])},{filter_dict["filter_key"]},{filter_dict["filter_value"]}'
+ for filter_dict in filters
+ ]
+
def get_filters(self, filters):
"""Get the filters to be applied"""
@@ -1454,7 +2201,7 @@ class PowerFlexInfo(object):
for item in filters:
self.validate_filter(item)
f_op = item['filter_operator']
- if self.filter_mapping.get(f_op):
+ if self.filter_mapping.get(f_op) == self.filter_mapping.get("equal"):
f_key = item['filter_key']
f_val = item['filter_value']
if f_key in filter_dict:
@@ -1468,15 +2215,12 @@ class PowerFlexInfo(object):
filter_dict[f_key] = [filter_dict[f_key], f_val]
else:
filter_dict[f_key] = f_val
- else:
- msg = "Given filter operator '{0}' is not supported." \
- "supported operators are : '{1}'".format(
- f_op,
- list(self.filter_mapping.keys()))
- LOG.error(msg)
- self.module.fail_json(msg=msg)
return filter_dict
+ def validate_subset(self, api_version, subset):
+ if float(api_version) < MIN_SUPPORTED_POWERFLEX_MANAGER_VERSION and subset and set(subset).issubset(POWERFLEX_MANAGER_GATHER_SUBSET):
+ self.module.exit_json(msg=UNSUPPORTED_SUBSET_FOR_VERSION, skipped=True)
+
def perform_module_operation(self):
""" Perform different actions on info based on user input
in the playbook """
@@ -1498,8 +2242,13 @@ class PowerFlexInfo(object):
device = []
rcgs = []
replication_pair = []
+ fault_sets = []
+ service_template = []
+ managed_device = []
+ deployment = []
subset = self.module.params['gather_subset']
+ self.validate_subset(api_version, subset)
if subset is not None:
if 'sdc' in subset:
sdc = self.get_sdc_list(filter_dict=filter_dict)
@@ -1519,6 +2268,14 @@ class PowerFlexInfo(object):
rcgs = self.get_replication_consistency_group_list(filter_dict=filter_dict)
if 'replication_pair' in subset:
replication_pair = self.get_replication_pair_list(filter_dict=filter_dict)
+ if 'fault_set' in subset:
+ fault_sets = self.get_fault_sets_list(filter_dict=filter_dict)
+ if 'managed_device' in subset:
+ managed_device = self.get_managed_devices_list()
+ if 'service_template' in subset:
+ service_template = self.get_service_templates_list()
+ if 'deployment' in subset:
+ deployment = self.get_deployments_list()
self.module.exit_json(
Array_Details=array_details,
@@ -1531,7 +2288,11 @@ class PowerFlexInfo(object):
Protection_Domains=protection_domain,
Devices=device,
Replication_Consistency_Groups=rcgs,
- Replication_Pairs=replication_pair
+ Replication_Pairs=replication_pair,
+ Fault_Sets=fault_sets,
+ ManagedDevices=managed_device,
+ ServiceTemplates=service_template,
+ Deployments=deployment
)
@@ -1556,15 +2317,24 @@ def get_powerflex_info_parameters():
return dict(
gather_subset=dict(type='list', required=False, elements='str',
choices=['vol', 'storage_pool',
- 'protection_domain', 'sdc', 'sds',
- 'snapshot_policy', 'device', 'rcg', 'replication_pair']),
+ 'protection_domain', 'sdc', 'sds', 'snapshot_policy',
+ 'device', 'rcg', 'replication_pair', 'fault_set',
+ 'service_template', 'managed_device', 'deployment']),
filters=dict(type='list', required=False, elements='dict',
options=dict(filter_key=dict(type='str', required=True, no_log=False),
filter_operator=dict(
type='str', required=True,
- choices=['equal']),
+ choices=['equal', 'contains']),
filter_value=dict(type='str', required=True)
- )))
+ )),
+ sort=dict(type='str'),
+ limit=dict(type='int', default=50),
+ offset=dict(type='int', default=0),
+ include_devices=dict(type='bool', default=True),
+ include_template=dict(type='bool', default=True),
+ full=dict(type='bool', default=False),
+ include_attachments=dict(type='bool', default=True)
+ )
def main():
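Hedged sketch (not an upstream example): the argument spec above introduces I(full) and I(include_attachments), which apply to the C(service_template) subset, but no example exercises them. A possible task is shown below; the sort key C(templateName) is an assumption taken from the sample return data rather than from the PowerFlex Manager API documentation.

- name: List service templates with full details and attachments (illustrative sketch)
  dellemc.powerflex.info:
    hostname: "{{ hostname }}"
    username: "{{ username }}"
    password: "{{ password }}"
    validate_certs: "{{ validate_certs }}"
    gather_subset:
      - service_template
    full: true
    include_attachments: true
    sort: templateName     # assumption: sort key taken from the sample return data
    limit: 20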
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py
index 084666bc3..90e0bcad0 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/mdm_cluster.py
@@ -145,7 +145,7 @@ notes:
interfaces.
- Parameters I(mdm_name) or I(mdm_id) are not required while modifying performance
profile.
- - For change MDM cluster ownership operation, only changed as True will be
+ - For change MDM cluster ownership operation, only changed as true will be
returned and for idempotency case MDM cluster details will be returned.
- Reinstall all SDC after changing ownership to some newly added MDM.
- To add manager standby MDM, MDM package must be installed with manager
@@ -229,7 +229,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
mdm_name: "mdm_2"
- is_primary: True
+ is_primary: true
state: "present"
- name: Modify performance profile
@@ -262,7 +262,7 @@ EXAMPLES = r'''
port: "{{port}}"
mdm_name: "mdm_1"
virtual_ip_interface:
- - "ens224"
+ - "ens224"
state: "present"
- name: Clear virtual IP interface of the MDM
@@ -273,7 +273,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
mdm_name: "mdm_1"
- clear_interfaces: True
+ clear_interfaces: true
state: "present"
'''
@@ -1052,6 +1052,12 @@ class PowerFlexMdmCluster(object):
if resp is not None:
mdm_cluster_details['perfProfile'] = resp['perfProfile']
+ # Append list of configured MDM IP addresses
+ gateway_configuration_details = self.powerflex_conn.system.\
+ get_gateway_configuration_details()
+ if gateway_configuration_details is not None:
+ mdm_cluster_details['mdmAddresses'] = gateway_configuration_details['mdmAddresses']
+
return mdm_cluster_details
except Exception as e:
@@ -1063,30 +1069,32 @@ class PowerFlexMdmCluster(object):
def check_ip_in_secondarys(self, standby_ip, cluster_details):
"""whether standby IPs present in secondary MDMs"""
- for secondary_mdm in cluster_details['slaves']:
- current_secondary_ips = secondary_mdm['ips']
- for ips in standby_ip:
- if ips in current_secondary_ips:
- LOG.info(self.exist_msg)
- return False
+ if 'slaves' in cluster_details:
+ for secondary_mdm in cluster_details['slaves']:
+ current_secondary_ips = secondary_mdm['ips']
+ for ips in standby_ip:
+ if ips in current_secondary_ips:
+ LOG.info(self.exist_msg)
+ return False
return True
def check_ip_in_tbs(self, standby_ip, cluster_details):
"""whether standby IPs present in tie-breaker MDMs"""
- for tb_mdm in cluster_details['tieBreakers']:
- current_tb_ips = tb_mdm['ips']
- for ips in standby_ip:
- if ips in current_tb_ips:
- LOG.info(self.exist_msg)
- return False
+ if 'tieBreakers' in cluster_details:
+ for tb_mdm in cluster_details['tieBreakers']:
+ current_tb_ips = tb_mdm['ips']
+ for ips in standby_ip:
+ if ips in current_tb_ips:
+ LOG.info(self.exist_msg)
+ return False
return True
def check_ip_in_standby(self, standby_ip, cluster_details):
"""whether standby IPs present in standby MDMs"""
if 'standbyMDMs' in cluster_details:
- for stb_mdm in cluster_details['tieBreakers']:
+ for stb_mdm in cluster_details['standbyMDMs']:
current_stb_ips = stb_mdm['ips']
for ips in standby_ip:
if ips in current_stb_ips:
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py
index 5ffdc6b63..18cb952f0 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/protection_domain.py
@@ -537,6 +537,14 @@ class PowerFlexProtectionDomain(object):
err_msg = msg.format(n_item)
self.module.fail_json(msg=err_msg)
+ if self.module.params['network_limits'] is not None:
+ if self.module.params['network_limits']['overall_limit'] is not None and \
+ self.module.params['network_limits']['overall_limit'] < 0:
+ error_msg = "Overall limit cannot be negative. " \
+ "Provide a valid value "
+ LOG.info(error_msg)
+ self.module.fail_json(msg=error_msg)
+
def is_id_or_new_name_in_create(self):
"""Checking if protection domain id or new names present in create """
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py
index 94ec651c3..b106dfbdc 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/replication_consistency_group.py
@@ -16,7 +16,8 @@ short_description: Manage replication consistency groups on Dell PowerFlex
description:
- Managing replication consistency groups on PowerFlex storage system includes
getting details, creating, modifying, creating snapshots, pause, resume, freeze, unfreeze,
- activate, inactivate and deleting a replication consistency group.
+ activate, failover, reverse, restore, sync, switchover,
+ inactivate and deleting a replication consistency group.
author:
- Trisha Datta (@Trisha-Datta) <ansible.team@dell.com>
- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
@@ -61,15 +62,35 @@ options:
pause:
description:
- Pause or resume the RCG.
+ - This parameter is deprecated. Use rcg_state instead.
+ type: bool
+ rcg_state:
+ description:
+ - Specify an action for RCG.
+ - Failover the RCG.
+ - Reverse the RCG.
+ - Restore the RCG.
+ - Switchover the RCG.
+ - Pause or resume the RCG.
+ - Freeze or unfreeze the RCG.
+ - Synchronize the RCG.
+ choices: ['failover', 'reverse', 'restore',
+ 'switchover', 'sync', 'pause',
+ 'resume', 'freeze', 'unfreeze']
+ type: str
+ force:
+ description:
+ - Force switchover the RCG.
type: bool
freeze:
description:
- Freeze or unfreeze the RCG.
+ - This parameter is deprecated. Use rcg_state instead.
type: bool
pause_mode:
description:
- Pause mode.
- - It is required if pause is set as True.
+ - It is required if pause is set as true.
choices: ['StopDataTransfer', 'OnlyTrackChanges']
type: str
target_volume_access_mode:
@@ -150,7 +171,7 @@ notes:
- Idempotency is not supported for create snapshot operation.
- There is a delay in reflection of final state of RCG after few update operations on RCG.
- In 3.6 and above, the replication consistency group will return back to consistent mode on changing to inconsistent mode
- if consistence barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as True.
+     if a consistency barrier arrives. Hence idempotency on setting to inconsistent mode will return changed as true.
'''
EXAMPLES = r'''
@@ -172,7 +193,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_id: "{{rcg_id}}"
- create_snapshot: True
+ create_snapshot: true
state: "present"
- name: Create a replication consistency group
@@ -205,7 +226,7 @@ EXAMPLES = r'''
rpo: 60
target_volume_access_mode: "ReadOnly"
activity_mode: "Inactive"
- is_consistent: True
+ is_consistent: true
- name: Rename replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -225,7 +246,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- pause: True
+ rcg_state: "pause"
pause_mode: "StopDataTransfer"
- name: Resume replication consistency group
@@ -236,7 +257,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- pause: False
+ rcg_state: "resume"
- name: Freeze replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -246,7 +267,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- freeze: True
+ rcg_state: "freeze"
- name: UnFreeze replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -256,7 +277,57 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
rcg_name: "rcg_test"
- freeze: False
+ rcg_state: "unfreeze"
+
+- name: Failover replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "failover"
+
+- name: Reverse replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "reverse"
+
+- name: Restore replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "restore"
+
+- name: Switchover replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "switchover"
+
+- name: Synchronize replication consistency group
+ dellemc.powerflex.replication_consistency_group:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
+ rcg_name: "rcg_test"
+ rcg_state: "sync"
- name: Delete replication consistency group
dellemc.powerflex.replication_consistency_group:
@@ -442,8 +513,8 @@ class PowerFlexReplicationConsistencyGroup(object):
def get_rcg(self, rcg_name=None, rcg_id=None):
"""Get rcg details
- :param rcg_name: Name of the rcg
- :param rcg_id: ID of the rcg
+ :param rcg_name: Name of the RCG
+ :param rcg_id: ID of the RCG
:return: RCG details
"""
name_or_id = rcg_id if rcg_id else rcg_name
@@ -585,22 +656,22 @@ class PowerFlexReplicationConsistencyGroup(object):
:param rcg_details: RCG details.
:param pause: Pause or resume RCG.
:param pause_mode: Specifies the pause mode if pause is True.
- :return: Boolean indicates if rcg action is successful
+ :return: Boolean indicates if RCG action is successful
"""
if pause and rcg_details['pauseMode'] == 'None':
if not pause_mode:
self.module.fail_json(msg="Specify pause_mode to perform pause on replication consistency group.")
return self.pause(rcg_id, pause_mode)
- if not pause and rcg_details['pauseMode'] != 'None':
+ if not pause and (rcg_details['pauseMode'] != 'None' or rcg_details['failoverType'] in ['Failover', 'Switchover']):
return self.resume(rcg_id)
def freeze_or_unfreeze_rcg(self, rcg_id, rcg_details, freeze):
- """Perform specified rcg action
+ """Perform specified RCG action
:param rcg_id: Unique identifier of the RCG.
:param rcg_details: RCG details.
:param freeze: Freeze or unfreeze RCG.
- :return: Boolean indicates if rcg action is successful
+ :return: Boolean indicates if RCG action is successful
"""
if freeze and rcg_details['freezeState'].lower() == 'unfrozen':
return self.freeze(rcg_id)
@@ -648,6 +719,98 @@ class PowerFlexReplicationConsistencyGroup(object):
LOG.error(errormsg)
self.module.fail_json(msg=errormsg)
+ def failover(self, rcg_id):
+ """Perform failover
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG failover is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.failover(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Failover replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def reverse(self, rcg_id):
+ """Perform reverse
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG reverse is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.reverse(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Reverse replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def restore(self, rcg_id):
+ """Perform restore
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG restore is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.restore(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Restore replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def switchover(self, rcg_id, force):
+ """Perform switchover
+ :param rcg_id: Unique identifier of the RCG.
+ :param force: Force switchover.
+ :return: Boolean indicates if RCG switchover is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.switchover(rcg_id, force)
+ return True
+ except Exception as e:
+ errormsg = f"Switchover replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def perform_rcg_action(self, rcg_id, rcg_details):
+ """Perform failover, reverse, restore or switchover
+ :param rcg_id: Unique identifier of the RCG.
+ :param rcg_details: RCG details.
+ :return: Boolean indicates if RCG action is successful
+ """
+ rcg_state = self.module.params['rcg_state']
+ force = self.module.params['force']
+
+ if rcg_state == 'failover' and rcg_details['failoverType'] != 'Failover':
+ return self.failover(rcg_id)
+
+ if rcg_state == 'switchover' and rcg_details['failoverType'] != 'Switchover':
+ return self.switchover(rcg_id, force)
+
+ if rcg_state == 'reverse' and rcg_details['failoverType']:
+ return self.reverse(rcg_id)
+
+ if rcg_state == 'restore' and rcg_details['failoverType'] != 'None':
+ return self.restore(rcg_id)
+
+ def sync(self, rcg_id):
+ """Perform sync
+ :param rcg_id: Unique identifier of the RCG.
+ :return: Boolean indicates if RCG sync is successful
+ """
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.replication_consistency_group.sync(rcg_id)
+ return True
+ except Exception as e:
+ errormsg = f"Synchronization of replication consistency group {rcg_id} failed with error {e}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
def set_consistency(self, rcg_id, rcg_details, is_consistent):
"""Set rcg to specified mode
:param rcg_id: Unique identifier of the RCG.
@@ -689,7 +852,7 @@ class PowerFlexReplicationConsistencyGroup(object):
def delete_rcg(self, rcg_id):
"""Delete RCG
:param rcg_id: Unique identifier of the RCG.
- :return: Boolean indicates if delete rcg operation is successful
+ :return: Boolean indicates if delete RCG operation is successful
"""
try:
if not self.module.check_mode:
@@ -753,17 +916,55 @@ class PowerFlexReplicationConsistencyGroup(object):
rcg_params['remote_peer']['protection_domain_name'] is not None):
self.module.fail_json(msg='Enter remote protection_domain_name or protection_domain_id to create replication consistency group')
+ def get_pause_and_freeze_value(self):
+ """
+ Get Pause and Freeze values
+ :return: Boolean for pause and freeze
+ :rtype: (bool,bool)
+ """
+ rcg_state = self.module.params['rcg_state']
+ pause = self.module.params['pause']
+ freeze = self.module.params['freeze']
+
+ if pause is not None:
+ self.module.deprecate(
+ msg="Use 'rcg_state' param instead of 'pause'",
+ version="3.0.0",
+ collection_name="dellemc.powerflex"
+ )
+
+ if freeze is not None:
+ self.module.deprecate(
+ msg="Use 'rcg_state' param instead of 'freeze'",
+ version="3.0.0",
+ collection_name="dellemc.powerflex"
+ )
+
+ if rcg_state == 'pause':
+ pause = True
+ if rcg_state == 'resume':
+ pause = False
+ if rcg_state == 'freeze':
+ freeze = True
+ if rcg_state == 'unfreeze':
+ freeze = False
+
+ if self.module.params['pause_mode'] and not pause:
+ self.module.fail_json(msg="Specify rcg_state as 'pause' to pause replication consistency group")
+
+ return pause, freeze
+
def modify_rcg(self, rcg_id, rcg_details):
+ rcg_state = self.module.params['rcg_state']
create_snapshot = self.module.params['create_snapshot']
rpo = self.module.params['rpo']
target_volume_access_mode = self.module.params['target_volume_access_mode']
- pause = self.module.params['pause']
- freeze = self.module.params['freeze']
is_consistent = self.module.params['is_consistent']
activity_mode = self.module.params['activity_mode']
new_rcg_name = self.module.params['new_rcg_name']
changed = False
+ pause, freeze = self.get_pause_and_freeze_value()
if create_snapshot is True:
changed = self.create_rcg_snapshot(rcg_id)
if rpo and rcg_details['rpoInSeconds'] and \
@@ -788,6 +989,11 @@ class PowerFlexReplicationConsistencyGroup(object):
changed = True
if new_rcg_name and self.rename_rcg(rcg_id, rcg_details, new_rcg_name):
changed = True
+ if rcg_state == 'sync' and self.sync(rcg_id):
+ changed = True
+
+ rcg_action_status = self.perform_rcg_action(rcg_id, rcg_details)
+ changed = changed or rcg_action_status
return changed
@@ -800,8 +1006,6 @@ class PowerFlexReplicationConsistencyGroup(object):
for param in params:
if rcg_params[param] and utils.is_invalid_name(rcg_params[param]):
self.module.fail_json(msg='Enter a valid %s' % param)
- if rcg_params['pause_mode'] and rcg_params['pause'] is None:
- self.module.fail_json(msg='Specify pause as True to pause replication consistency group')
except Exception as e:
error_msg = "Validating input parameters failed with " \
"error '%s'" % (str(e))
@@ -879,7 +1083,13 @@ def get_powerflex_replication_consistency_group_parameters():
rpo=dict(type='int'), protection_domain_id=dict(),
protection_domain_name=dict(), new_rcg_name=dict(),
activity_mode=dict(choices=['Active', 'Inactive']),
- pause=dict(type='bool'), freeze=dict(type='bool'),
+ pause=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='dellemc.powerflex'),
+ freeze=dict(type='bool', removed_in_version='3.0.0', removed_from_collection='dellemc.powerflex'),
+ force=dict(type='bool'),
+ rcg_state=dict(choices=['failover', 'reverse',
+ 'restore', 'switchover',
+ 'sync', 'pause', 'resume',
+ 'freeze', 'unfreeze']),
pause_mode=dict(choices=['StopDataTransfer', 'OnlyTrackChanges']),
target_volume_access_mode=dict(choices=['ReadOnly', 'NoAccess']),
is_consistent=dict(type='bool'),
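The hunk above replaces the deprecated boolean 'pause' and 'freeze' parameters with a single 'rcg_state' choice (failover, reverse, restore, switchover, sync, pause, resume, freeze, unfreeze). A minimal playbook sketch of the new style, assuming the module's usual 'rcg_name' identifier parameter (not shown in this hunk) and the connection variables used in the other examples:

    - name: Pause replication consistency group using rcg_state
      dellemc.powerflex.replication_consistency_group:
        hostname: "{{ hostname }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: "{{ validate_certs }}"
        port: "{{ port }}"
        rcg_name: "rcg_test"              # assumed identifier parameter
        rcg_state: "pause"                # replaces the deprecated 'pause: true'
        pause_mode: "StopDataTransfer"    # pause_mode requires the RCG to be paused (rcg_state: pause)

    - name: Switch over the replication consistency group
      dellemc.powerflex.replication_consistency_group:
        hostname: "{{ hostname }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: "{{ validate_certs }}"
        port: "{{ port }}"
        rcg_name: "rcg_test"
        rcg_state: "switchover"
        force: false                      # optional; passed to switchover() by perform_rcg_action()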
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py b/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py
index c95455023..1bd69f225 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/replication_pair.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-# Copyright: (c) 2023, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
""" Ansible module for managing replication pairs on Dell Technologies (Dell) PowerFlex"""
@@ -77,7 +77,7 @@ options:
- Copy type.
choices: ['Identical', 'OnlineCopy', 'OnlineHashCopy', 'OfflineCopy']
type: str
- required: True
+ required: true
name:
description:
- Name of replication pair.
@@ -138,7 +138,6 @@ notes:
'''
EXAMPLES = r'''
-
- name: Get replication pair details
dellemc.powerflex.replication_pair:
hostname: "{{hostname}}"
@@ -176,11 +175,11 @@ EXAMPLES = r'''
copy_type: "OnlineCopy"
name: "pair1"
remote_peer:
- hostname: "{{hostname}}"
- username: "{{username}}"
- password: "{{password}}"
- validate_certs: "{{validate_certs}}"
- port: "{{port}}"
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ port: "{{port}}"
- name: Pause replication pair
dellemc.powerflex.replication_pair:
@@ -190,7 +189,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
pair_name: "pair1"
- pause: True
+ pause: true
- name: Resume replication pair
dellemc.powerflex.replication_pair:
@@ -200,7 +199,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
pair_name: "pair1"
- pause: False
+ pause: false
- name: Delete replication pair
dellemc.powerflex.replication_pair:
@@ -596,7 +595,7 @@ class PowerFlexReplicationPair(object):
def validate_pause(self, params):
if params['pause'] is not None and (not params['pair_id'] and not params['pair_name']):
- self.module.fail_json(msg='Specify either pair_id or pair_name to perform pause or resume of inital copy')
+ self.module.fail_json(msg='Specify either pair_id or pair_name to perform pause or resume of initial copy')
def validate_pause_or_resume(self, pause, replication_pair_details, pair_id):
if not replication_pair_details:
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py
index a2f05a31b..bb13a19a2 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/sdc.py
@@ -46,6 +46,12 @@ options:
description:
- New name of the SDC. Used to rename the SDC.
type: str
+ performance_profile:
+ description:
+ - Define the performance profile as I(Compact) or I(HighPerformance).
+ - The high performance profile configures a predefined set of parameters for very high performance use cases.
+ choices: ['Compact', 'HighPerformance']
+ type: str
state:
description:
- State of the SDC.
@@ -75,6 +81,25 @@ EXAMPLES = r'''
sdc_name: "centos_sdc"
sdc_new_name: "centos_sdc_renamed"
state: "present"
+
+- name: Modify performance profile of SDC using SDC name
+ dellemc.powerflex.sdc:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ sdc_name: "centos_sdc"
+ performance_profile: "Compact"
+ state: "present"
+
+- name: Remove SDC using SDC name
+ dellemc.powerflex.sdc:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ sdc_name: "centos_sdc"
+ state: "absent"
'''
RETURN = r'''
@@ -274,16 +299,54 @@ class PowerFlexSdc(object):
def validate_parameters(self, sdc_name=None, sdc_id=None, sdc_ip=None):
"""Validate the input parameters"""
- if all(param is None for param in [sdc_name, sdc_id, sdc_ip]):
- self.module.fail_json(msg="Please provide sdc_name/sdc_id/sdc_ip "
- "with valid input.")
-
sdc_identifiers = ['sdc_name', 'sdc_id', 'sdc_ip']
for param in sdc_identifiers:
if self.module.params[param] is not None and \
len(self.module.params[param].strip()) == 0:
- error_msg = "Please provide valid %s" % param
- self.module.fail_json(msg=error_msg)
+ msg = f"Please provide valid {param}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def remove(self, sdc_id):
+ """Remove the SDC"""
+ try:
+ LOG.info(msg=f"Removing SDC {sdc_id}")
+ self.powerflex_conn.sdc.delete(sdc_id)
+ return True
+ except Exception as e:
+ errormsg = f"Removing SDC {sdc_id} failed with error {str(e)}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def set_performance_profile(self, sdc_id, performance_profile):
+ """Set performance profile of SDC"""
+ try:
+ LOG.info(msg=f"Setting performance profile of SDC {sdc_id}")
+ self.powerflex_conn.sdc.set_performance_profile(sdc_id, performance_profile)
+ return True
+ except Exception as e:
+ errormsg = f"Modifying performance profile of SDC {sdc_id} failed with error {str(e)}"
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def validate_input(self, sdc_details, sdc_new_name, state, id_ip_name):
+ if state == 'present' and not sdc_details:
+ error_msg = 'Could not find any SDC instance with ' \
+ 'identifier %s.' % id_ip_name
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sdc_new_name and len(sdc_new_name.strip()) == 0:
+ self.module.fail_json(msg="Provide valid SDC name to rename to.")
+
+ def perform_modify(self, sdc_details, sdc_new_name, performance_profile):
+ changed = False
+ if sdc_new_name is not None and sdc_new_name != sdc_details['name']:
+ changed = self.rename_sdc(sdc_details['id'], sdc_new_name)
+
+ if performance_profile and performance_profile != sdc_details['perfProfile']:
+ changed = self.set_performance_profile(sdc_details['id'], performance_profile)
+ return changed
def perform_module_operation(self):
"""
@@ -294,6 +357,7 @@ class PowerFlexSdc(object):
sdc_id = self.module.params['sdc_id']
sdc_ip = self.module.params['sdc_ip']
sdc_new_name = self.module.params['sdc_new_name']
+ performance_profile = self.module.params['performance_profile']
state = self.module.params['state']
# result is a dictionary to contain end state and SDC details
@@ -304,40 +368,22 @@ class PowerFlexSdc(object):
)
self.validate_parameters(sdc_name, sdc_id, sdc_ip)
-
sdc_details = self.get_sdc(sdc_name=sdc_name, sdc_id=sdc_id,
sdc_ip=sdc_ip)
- if sdc_name:
- id_ip_name = sdc_name
- elif sdc_ip:
- id_ip_name = sdc_ip
- else:
- id_ip_name = sdc_id
+ id_ip_name = sdc_name or sdc_ip or sdc_id
- if state == 'present' and not sdc_details:
- error_msg = 'Could not find any SDC instance with ' \
- 'identifier %s.' % id_ip_name
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ self.validate_input(sdc_details, sdc_new_name, state, id_ip_name)
if state == 'absent' and sdc_details:
- error_msg = 'Removal of SDC is not allowed through Ansible ' \
- 'module.'
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
-
- if state == 'present' and sdc_details and sdc_new_name is not None:
- if len(sdc_new_name.strip()) == 0:
- self.module.fail_json(msg="Please provide valid SDC name.")
-
- changed = self.rename_sdc(sdc_details['id'], sdc_new_name)
+ changed = self.remove(sdc_details['id'])
- if changed:
- sdc_name = sdc_new_name
+ if state == 'present' and sdc_details:
+ changed = self.perform_modify(sdc_details, sdc_new_name, performance_profile)
- if state == 'present':
- result['sdc_details'] = self.get_sdc(sdc_name=sdc_name,
- sdc_id=sdc_id, sdc_ip=sdc_ip)
+ if changed:
+ sdc_details = self.get_sdc(sdc_name=sdc_new_name or sdc_name,
+ sdc_id=sdc_id, sdc_ip=sdc_ip)
+ result['sdc_details'] = sdc_details
result['changed'] = changed
self.module.exit_json(**result)
@@ -349,7 +395,7 @@ def get_powerflex_sdc_parameters():
sdc_id=dict(),
sdc_ip=dict(),
sdc_name=dict(),
- sdc_new_name=dict(),
+ sdc_new_name=dict(), performance_profile=dict(choices=['Compact', 'HighPerformance']),
state=dict(required=True, type='str', choices=['present', 'absent'])
)
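With the refactor above, the sdc module resolves the identifier once (sdc_name or sdc_ip or sdc_id) and applies rename and performance-profile updates in a single perform_modify() pass, so both can be requested together. A hedged sketch combining them, using only parameters shown in this diff:

    - name: Rename an SDC and set its performance profile in one task
      dellemc.powerflex.sdc:
        hostname: "{{ hostname }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: "{{ validate_certs }}"
        sdc_name: "centos_sdc"
        sdc_new_name: "centos_sdc_renamed"
        performance_profile: "HighPerformance"
        state: "present"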
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/sds.py b/ansible_collections/dellemc/powerflex/plugins/modules/sds.py
index 91c287769..b0d3045ec 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/sds.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/sds.py
@@ -1,6 +1,6 @@
#!/usr/bin/python
-# Copyright: (c) 2021, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
""" Ansible module for managing SDS on Dell Technologies (Dell) PowerFlex"""
@@ -19,6 +19,7 @@ description:
modifying attributes of SDS, and deleting SDS.
author:
- Rajshree Khare (@khareRajshree) <ansible.team@dell.com>
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
extends_documentation_fragment:
- dellemc.powerflex.powerflex
options:
@@ -96,6 +97,16 @@ options:
- Default value by API is C(HighPerformance).
choices: ['Compact', 'HighPerformance']
type: str
+ fault_set_name:
+ description:
+ - Name of the fault set.
+ - Mutually exclusive with I(fault_set_id).
+ type: str
+ fault_set_id:
+ description:
+ - Unique identifier of the fault set.
+ - Mutually exclusive with I(fault_set_name).
+ type: str
state:
description:
- State of the SDS.
@@ -114,7 +125,7 @@ notes:
'sdsOnly').
- SDS can be created with RF cache disabled, but, be aware that the RF cache
is not always updated. In this case, the user should re-try the operation.
- - The I(check_mode) is not supported.
+ - The I(check_mode) is supported.
'''
EXAMPLES = r'''
@@ -142,6 +153,7 @@ EXAMPLES = r'''
port: "{{port}}"
sds_name: "node1"
protection_domain_name: "domain1"
+ fault_set_name: "faultset1"
sds_ip_list:
- ip: "198.10.xxx.xxx"
role: "sdcOnly"
@@ -479,12 +491,16 @@ sds_details:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell\
import utils
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.powerflex_base \
+ import PowerFlexBase
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.libraries.configuration \
+ import Configuration
import copy
LOG = utils.get_logger('sds')
-class PowerFlexSDS(object):
+class PowerFlexSDS(PowerFlexBase):
"""Class with SDS operations"""
def __init__(self):
@@ -493,29 +509,27 @@ class PowerFlexSDS(object):
self.module_params.update(get_powerflex_sds_parameters())
mut_ex_args = [['sds_name', 'sds_id'],
- ['protection_domain_name', 'protection_domain_id']]
+ ['protection_domain_name', 'protection_domain_id'],
+ ['fault_set_name', 'fault_set_id']]
required_together_args = [['sds_ip_list', 'sds_ip_state']]
required_one_of_args = [['sds_name', 'sds_id']]
# initialize the Ansible module
- self.module = AnsibleModule(
- argument_spec=self.module_params,
- supports_check_mode=False,
- mutually_exclusive=mut_ex_args,
- required_together=required_together_args,
- required_one_of=required_one_of_args)
-
- utils.ensure_required_libs(self.module)
-
- try:
- self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
- self.module.params)
- LOG.info("Got the PowerFlex system connection object instance")
- except Exception as e:
- LOG.error(str(e))
- self.module.fail_json(msg=str(e))
+ ansible_module_params = {
+ 'argument_spec': get_powerflex_sds_parameters(),
+ 'supports_check_mode': True,
+ 'mutually_exclusive': mut_ex_args,
+ 'required_one_of': required_one_of_args,
+ 'required_together': required_together_args
+ }
+ super().__init__(AnsibleModule, ansible_module_params)
+
+ self.result = dict(
+ changed=False,
+ sds_details={}
+ )
def validate_rmcache_size_parameter(self, rmcache_enabled, rmcache_size):
"""Validate the input parameters"""
@@ -571,40 +585,24 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- def get_protection_domain(self, protection_domain_name=None,
- protection_domain_id=None):
- """Get protection domain details
- :param protection_domain_name: Name of the protection domain
+ def get_protection_domain(
+ self, protection_domain_name=None, protection_domain_id=None
+ ):
+ """Get the details of a protection domain in a given PowerFlex storage
+ system"""
+ return Configuration(self.powerflex_conn, self.module).get_protection_domain(
+ protection_domain_name=protection_domain_name, protection_domain_id=protection_domain_id)
+
+ def get_fault_set(self, fault_set_name=None, fault_set_id=None, protection_domain_id=None):
+ """Get fault set details
+ :param fault_set_name: Name of the fault set
+ :param fault_set_id: Id of the fault set
:param protection_domain_id: ID of the protection domain
- :return: Protection domain details
+ :return: Fault set details
:rtype: dict
"""
- name_or_id = protection_domain_id if protection_domain_id \
- else protection_domain_name
- try:
- pd_details = None
- if protection_domain_id:
- pd_details = self.powerflex_conn.protection_domain.get(
- filter_fields={'id': protection_domain_id})
-
- if protection_domain_name:
- pd_details = self.powerflex_conn.protection_domain.get(
- filter_fields={'name': protection_domain_name})
-
- if not pd_details:
- error_msg = "Unable to find the protection domain with " \
- "'%s'. Please enter a valid protection domain " \
- "name/id." % name_or_id
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
-
- return pd_details[0]
-
- except Exception as e:
- error_msg = "Failed to get the protection domain '%s' with " \
- "error '%s'" % (name_or_id, str(e))
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ return Configuration(self.powerflex_conn, self.module).get_fault_set(
+ fault_set_name=fault_set_name, fault_set_id=fault_set_id, protection_domain_id=protection_domain_id)
def restructure_ip_role_dict(self, sds_ip_list):
"""Restructure IP role dict
@@ -619,8 +617,41 @@ class PowerFlexSDS(object):
new_sds_ip_list.append({"SdsIp": item})
return new_sds_ip_list
- def create_sds(self, protection_domain_id, sds_ip_list, sds_ip_state,
- sds_name, rmcache_enabled=None, rmcache_size=None):
+ def validate_create(self, protection_domain_id, sds_ip_list, sds_ip_state, sds_name,
+ sds_id, sds_new_name, rmcache_enabled=None, rmcache_size=None,
+ fault_set_id=None):
+
+ if sds_name is None or len(sds_name.strip()) == 0:
+ error_msg = "Please provide valid sds_name value for " \
+ "creation of SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if protection_domain_id is None:
+ error_msg = "Protection Domain is a mandatory parameter " \
+ "for creating an SDS. Please enter a valid value."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sds_ip_list is None or len(sds_ip_list) == 0:
+ error_msg = "Please provide valid sds_ip_list values for " \
+ "creation of SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sds_ip_state is not None and sds_ip_state != "present-in-sds":
+ error_msg = "Incorrect IP state given for creation of SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ if sds_id:
+ error_msg = "Creation of SDS is allowed using sds_name " \
+ "only, sds_id given."
+ LOG.info(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def create_sds(self, protection_domain_id, sds_ip_list, sds_ip_state, sds_name,
+ sds_id, sds_new_name, rmcache_enabled=None, rmcache_size=None, fault_set_id=None):
"""Create SDS
:param protection_domain_id: ID of the Protection Domain
:type protection_domain_id: str
@@ -636,62 +667,53 @@ class PowerFlexSDS(object):
:type rmcache_enabled: bool
:param rmcache_size: Read RAM cache size (in MB)
:type rmcache_size: int
+ :param fault_set_id: ID of the Fault Set
+ :type fault_set_id: str
:return: Boolean indicating if create operation is successful
"""
try:
- if sds_name is None or len(sds_name.strip()) == 0:
- error_msg = "Please provide valid sds_name value for " \
- "creation of SDS."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
- if protection_domain_id is None:
- error_msg = "Protection Domain is a mandatory parameter " \
- "for creating a SDS. Please enter a valid value."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
-
- if sds_ip_list is None or len(sds_ip_list) == 0:
- error_msg = "Please provide valid sds_ip_list values for " \
- "creation of SDS."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ # Validate input parameters before creating the SDS
+ self.validate_create(protection_domain_id=protection_domain_id,
+ sds_ip_list=sds_ip_list, sds_ip_state=sds_ip_state,
+ sds_name=sds_name, sds_id=sds_id, sds_new_name=sds_new_name,
+ rmcache_enabled=rmcache_enabled, rmcache_size=rmcache_size,
+ fault_set_id=fault_set_id)
- if sds_ip_state is not None and sds_ip_state != "present-in-sds":
- error_msg = "Incorrect IP state given for creation of SDS."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
+ self.validate_ip_parameter(sds_ip_list)
- # Restructure IP-role parameter format
- if sds_ip_list and sds_ip_state == "present-in-sds":
- sds_ip_list = self.restructure_ip_role_dict(sds_ip_list)
-
- if rmcache_size is not None:
- self.validate_rmcache_size_parameter(rmcache_enabled,
- rmcache_size)
- # set rmcache size in KB
- rmcache_size = rmcache_size * 1024
-
- create_params = ("protection_domain_id: %s,"
- " sds_ip_list: %s,"
- " sds_name: %s,"
- " rmcache_enabled: %s, "
- " rmcache_size_KB: %s"
- % (protection_domain_id, sds_ip_list,
- sds_name, rmcache_enabled, rmcache_size))
- LOG.info("Creating SDS with params: %s", create_params)
-
- self.powerflex_conn.sds.create(
- protection_domain_id=protection_domain_id,
- sds_ips=sds_ip_list,
- name=sds_name,
- rmcache_enabled=rmcache_enabled,
- rmcache_size_in_kb=rmcache_size)
- return True
+ if not self.module.check_mode:
+ if sds_ip_list and sds_ip_state == "present-in-sds":
+ sds_ip_list = self.restructure_ip_role_dict(sds_ip_list)
+
+ if rmcache_size is not None:
+ self.validate_rmcache_size_parameter(rmcache_enabled=rmcache_enabled,
+ rmcache_size=rmcache_size)
+ # set rmcache size in KB
+ rmcache_size = rmcache_size * 1024
+
+ create_params = ("protection_domain_id: %s,"
+ " sds_ip_list: %s,"
+ " sds_name: %s,"
+ " rmcache_enabled: %s, "
+ " rmcache_size_KB: %s, "
+ " fault_set_id: %s"
+ % (protection_domain_id, sds_ip_list,
+ sds_name, rmcache_enabled, rmcache_size,
+ fault_set_id))
+ LOG.info("Creating SDS with params: %s", create_params)
+
+ self.powerflex_conn.sds.create(
+ protection_domain_id=protection_domain_id,
+ sds_ips=sds_ip_list,
+ name=sds_name,
+ rmcache_enabled=rmcache_enabled,
+ rmcache_size_in_kb=rmcache_size,
+ fault_set_id=fault_set_id)
+ return self.get_sds_details(sds_name=sds_name)
except Exception as e:
- error_msg = "Create SDS '%s' operation failed with error '%s'" \
- % (sds_name, str(e))
+ error_msg = f"Create SDS {sds_name} operation failed with error {str(e)}"
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
@@ -716,21 +738,20 @@ class PowerFlexSDS(object):
"""
modify_dict = {}
- if sds_new_name is not None:
- if len(sds_new_name.strip()) == 0:
- error_msg = "Please provide valid SDS name."
- LOG.error(error_msg)
- self.module.fail_json(msg=error_msg)
- if sds_new_name != sds_details['name']:
- modify_dict['name'] = sds_new_name
+ if sds_new_name is not None and \
+ sds_new_name != sds_details['name']:
+ modify_dict['name'] = sds_new_name
- if rfcache_enabled is not None and \
- sds_details['rfcacheEnabled'] != rfcache_enabled:
- modify_dict['rfcacheEnabled'] = rfcache_enabled
+ param_input = dict()
+ param_input['rfcacheEnabled'] = rfcache_enabled
+ param_input['rmcacheEnabled'] = rmcache_enabled
+ param_input['perfProfile'] = performance_profile
- if rmcache_enabled is not None and \
- sds_details['rmcacheEnabled'] != rmcache_enabled:
- modify_dict['rmcacheEnabled'] = rmcache_enabled
+ param_list = ['rfcacheEnabled', 'rmcacheEnabled', 'perfProfile']
+ for param in param_list:
+ if param_input[param] is not None and \
+ sds_details[param] != param_input[param]:
+ modify_dict[param] = param_input[param]
if rmcache_size is not None:
self.validate_rmcache_size_parameter(rmcache_enabled,
@@ -748,10 +769,6 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- if performance_profile is not None and \
- sds_details['perfProfile'] != performance_profile:
- modify_dict['perfProfile'] = performance_profile
-
return modify_dict
def modify_sds_attributes(self, sds_id, modify_dict,
@@ -772,41 +789,42 @@ class PowerFlexSDS(object):
" updated is '%s'." % (str(modify_dict))
LOG.info(msg)
- if 'name' in modify_dict:
- self.powerflex_conn.sds.rename(sds_id, modify_dict['name'])
- msg = "The name of the SDS is updated to '%s' successfully." \
- % modify_dict['name']
- LOG.info(msg)
+ if not self.module.check_mode:
+ if 'name' in modify_dict:
+ self.powerflex_conn.sds.rename(sds_id, modify_dict['name'])
+ msg = "The name of the SDS is updated to '%s' successfully." \
+ % modify_dict['name']
+ LOG.info(msg)
- if 'rfcacheEnabled' in modify_dict:
- self.powerflex_conn.sds.set_rfcache_enabled(
- sds_id, modify_dict['rfcacheEnabled'])
- msg = "The use RFcache is updated to '%s' successfully." \
- % modify_dict['rfcacheEnabled']
- LOG.info(msg)
+ if 'rfcacheEnabled' in modify_dict:
+ self.powerflex_conn.sds.set_rfcache_enabled(
+ sds_id, modify_dict['rfcacheEnabled'])
+ msg = "The use RFcache is updated to '%s' successfully." \
+ % modify_dict['rfcacheEnabled']
+ LOG.info(msg)
- if 'rmcacheEnabled' in modify_dict:
- self.powerflex_conn.sds.set_rmcache_enabled(
- sds_id, modify_dict['rmcacheEnabled'])
- msg = "The use RMcache is updated to '%s' successfully." \
- % modify_dict['rmcacheEnabled']
- LOG.info(msg)
+ if 'rmcacheEnabled' in modify_dict:
+ self.powerflex_conn.sds.set_rmcache_enabled(
+ sds_id, modify_dict['rmcacheEnabled'])
+ msg = "The use RMcache is updated to '%s' successfully." \
+ % modify_dict['rmcacheEnabled']
+ LOG.info(msg)
- if 'rmcacheSizeInMB' in modify_dict:
- self.powerflex_conn.sds.set_rmcache_size(
- sds_id, modify_dict['rmcacheSizeInMB'])
- msg = "The size of RMcache is updated to '%s' successfully." \
- % modify_dict['rmcacheSizeInMB']
- LOG.info(msg)
+ if 'rmcacheSizeInMB' in modify_dict:
+ self.powerflex_conn.sds.set_rmcache_size(
+ sds_id, modify_dict['rmcacheSizeInMB'])
+ msg = "The size of RMcache is updated to '%s' successfully." \
+ % modify_dict['rmcacheSizeInMB']
+ LOG.info(msg)
- if 'perfProfile' in modify_dict:
- self.powerflex_conn.sds.set_performance_parameters(
- sds_id, modify_dict['perfProfile'])
- msg = "The performance profile is updated to '%s'" \
- % modify_dict['perfProfile']
- LOG.info(msg)
+ if 'perfProfile' in modify_dict:
+ self.powerflex_conn.sds.set_performance_parameters(
+ sds_id, modify_dict['perfProfile'])
+ msg = "The performance profile is updated to '%s'" \
+ % modify_dict['perfProfile']
+ LOG.info(msg)
- return True
+ return self.get_sds_details(sds_id=sds_id)
except Exception as e:
if create_flag:
error_msg = "Create SDS is successful, but failed to update" \
@@ -818,50 +836,39 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- def identify_ip_role(self, sds_ip_list, sds_details, sds_ip_state):
- """Identify IPs before addition/removal
- :param sds_ip_list: List of one or more IP addresses and
- their roles
- :type sds_ip_list: list[dict]
- :param sds_details: SDS details
- :type sds_details: dict
- :param sds_ip_state: State of IP in SDS
- :type sds_ip_state: str
- :return: List containing the key-value pairs of IP-role for an
- SDS
- :rtype: list[dict]
- """
+ def identify_ip_role_add(self, sds_ip_list, sds_details, sds_ip_state):
+ # identify IPs to add or roles to update
+
existing_ip_role_list = sds_details['ipList']
+ update_role = []
+ ips_to_add = []
+
+ # identify IPs to add
+ existing_ip_list = []
+ if existing_ip_role_list:
+ for ip in existing_ip_role_list:
+ existing_ip_list.append(ip['ip'])
+ for given_ip in sds_ip_list:
+ ip = given_ip['ip']
+ if ip not in existing_ip_list:
+ ips_to_add.append(given_ip)
+ LOG.info("IP(s) to be added: %s", ips_to_add)
+
+ if len(ips_to_add) != 0:
+ for ip in ips_to_add:
+ sds_ip_list.remove(ip)
+
+ # identify IPs whose role needs to be updated
+ update_role = [ip for ip in sds_ip_list
+ if ip not in existing_ip_role_list]
+ LOG.info("Role update needed for: %s", update_role)
+ return ips_to_add, update_role
+
+ def identify_ip_role_remove(self, sds_ip_list, sds_details, sds_ip_state):
+ # identify IPs to remove
- # identify IPs to add or roles to update
- if sds_ip_state == "present-in-sds":
- update_role = []
- ips_to_add = []
-
- # identify IPs to add
- existing_ip_list = []
- if existing_ip_role_list:
- for ip in existing_ip_role_list:
- existing_ip_list.append(ip['ip'])
- for given_ip in sds_ip_list:
- ip = given_ip['ip']
- if ip not in existing_ip_list:
- ips_to_add.append(given_ip)
- LOG.info("IP(s) to be added: %s", ips_to_add)
-
- if len(ips_to_add) != 0:
- for ip in ips_to_add:
- sds_ip_list.remove(ip)
-
- # identify IPs whose role needs to be updated
- update_role = [ip for ip in sds_ip_list
- if ip not in existing_ip_role_list]
- LOG.info("Role update needed for: %s", update_role)
-
- return ips_to_add, update_role
-
- elif sds_ip_state == "absent-in-sds":
- # identify IPs to remove
+ existing_ip_role_list = sds_details['ipList']
+ if sds_ip_state == "absent-in-sds":
ips_to_remove = [ip for ip in existing_ip_role_list
if ip in sds_ip_list]
if len(ips_to_remove) != 0:
@@ -869,7 +876,7 @@ class PowerFlexSDS(object):
return ips_to_remove
else:
LOG.info("IP(s) do not exists.")
- return False, None
+ return []
def add_ip(self, sds_id, sds_ip_list):
"""Add IP to SDS
@@ -881,10 +888,11 @@ class PowerFlexSDS(object):
:return: Boolean indicating if add IP operation is successful
"""
try:
- for ip in sds_ip_list:
- LOG.info("IP to add: %s", ip)
- self.powerflex_conn.sds.add_ip(sds_id=sds_id, sds_ip=ip)
- LOG.info("IP added successfully.")
+ if not self.module.check_mode:
+ for ip in sds_ip_list:
+ LOG.info("IP to add: %s", ip)
+ self.powerflex_conn.sds.add_ip(sds_id=sds_id, sds_ip=ip)
+ LOG.info("IP added successfully.")
return True
except Exception as e:
error_msg = "Add IP to SDS '%s' operation failed with " \
@@ -902,15 +910,16 @@ class PowerFlexSDS(object):
:return: Boolean indicating if add IP operation is successful
"""
try:
- LOG.info("Role updates for: %s", sds_ip_list)
- if len(sds_ip_list) != 0:
- for ip in sds_ip_list:
- LOG.info("ip-role: %s", ip)
- self.powerflex_conn.sds.set_ip_role(sds_id, ip['ip'],
- ip['role'])
- msg = "The role '%s' for IP '%s' is updated " \
- "successfully." % (ip['role'], ip['ip'])
- LOG.info(msg)
+ if not self.module.check_mode:
+ LOG.info("Role updates for: %s", sds_ip_list)
+ if len(sds_ip_list) != 0:
+ for ip in sds_ip_list:
+ LOG.info("ip-role: %s", ip)
+ self.powerflex_conn.sds.set_ip_role(sds_id, ip['ip'],
+ ip['role'])
+ msg = "The role '%s' for IP '%s' is updated " \
+ "successfully." % (ip['role'], ip['ip'])
+ LOG.info(msg)
return True
except Exception as e:
error_msg = "Update role of IP for SDS '%s' operation failed " \
@@ -928,10 +937,11 @@ class PowerFlexSDS(object):
:return: Boolean indicating if remove IP operation is successful
"""
try:
- for ip in sds_ip_list:
- LOG.info("IP to remove: %s", ip)
- self.powerflex_conn.sds.remove_ip(sds_id=sds_id, ip=ip['ip'])
- LOG.info("IP removed successfully.")
+ if not self.module.check_mode:
+ for ip in sds_ip_list:
+ LOG.info("IP to remove: %s", ip)
+ self.powerflex_conn.sds.remove_ip(sds_id=sds_id, ip=ip['ip'])
+ LOG.info("IP removed successfully.")
return True
except Exception as e:
error_msg = "Remove IP from SDS '%s' operation failed with " \
@@ -946,145 +956,16 @@ class PowerFlexSDS(object):
:return: Boolean indicating if delete operation is successful
"""
try:
- self.powerflex_conn.sds.delete(sds_id)
- return True
+ if not self.module.check_mode:
+ self.powerflex_conn.sds.delete(sds_id)
+ return None
+ return self.get_sds_details(sds_id=sds_id)
except Exception as e:
error_msg = "Delete SDS '%s' operation failed with error '%s'" \
% (sds_id, str(e))
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
- def perform_module_operation(self):
- """
- Perform different actions on SDS based on parameters passed in
- the playbook
- """
- sds_name = self.module.params['sds_name']
- sds_id = self.module.params['sds_id']
- sds_new_name = self.module.params['sds_new_name']
- protection_domain_name = self.module.params['protection_domain_name']
- protection_domain_id = self.module.params['protection_domain_id']
- rfcache_enabled = self.module.params['rfcache_enabled']
- rmcache_enabled = self.module.params['rmcache_enabled']
- rmcache_size = self.module.params['rmcache_size']
- sds_ip_list = copy.deepcopy(self.module.params['sds_ip_list'])
- sds_ip_state = self.module.params['sds_ip_state']
- performance_profile = self.module.params['performance_profile']
- state = self.module.params['state']
-
- # result is a dictionary to contain end state and SDS details
- changed = False
- result = dict(
- changed=False,
- sds_details={}
- )
-
- # get SDS details
- sds_details = self.get_sds_details(sds_name, sds_id)
- if sds_details:
- sds_id = sds_details['id']
- msg = "Fetched the SDS details %s" % (str(sds_details))
- LOG.info(msg)
-
- # get Protection Domain ID from name
- if protection_domain_name:
- pd_details = self.get_protection_domain(protection_domain_name)
- if pd_details:
- protection_domain_id = pd_details['id']
- msg = "Fetched the protection domain details with id '%s', " \
- "name '%s'" % (protection_domain_id, protection_domain_name)
- LOG.info(msg)
-
- # create operation
- create_changed = False
- if state == 'present' and not sds_details:
- if sds_id:
- error_msg = "Creation of SDS is allowed using sds_name " \
- "only, sds_id given."
- LOG.info(error_msg)
- self.module.fail_json(msg=error_msg)
-
- if sds_new_name:
- error_msg = "sds_new_name parameter is not supported " \
- "during creation of a SDS. Try renaming the " \
- "SDS after the creation."
- LOG.info(error_msg)
- self.module.fail_json(msg=error_msg)
-
- self.validate_ip_parameter(sds_ip_list)
-
- create_changed = self.create_sds(protection_domain_id,
- sds_ip_list, sds_ip_state,
- sds_name, rmcache_enabled,
- rmcache_size)
- if create_changed:
- sds_details = self.get_sds_details(sds_name)
- sds_id = sds_details['id']
- msg = "SDS created successfully, fetched SDS details %s"\
- % (str(sds_details))
- LOG.info(msg)
-
- # checking if basic SDS parameters are modified or not
- modify_dict = {}
- if sds_details and state == 'present':
- modify_dict = self.to_modify(sds_details, sds_new_name,
- rfcache_enabled, rmcache_enabled,
- rmcache_size, performance_profile)
- msg = "Parameters to be modified are as follows: %s"\
- % (str(modify_dict))
- LOG.info(msg)
-
- # modify operation
- modify_changed = False
- if modify_dict and state == 'present':
- LOG.info("Modify SDS params.")
- modify_changed = self.modify_sds_attributes(sds_id, modify_dict,
- create_changed)
-
- # get updated SDS details
- sds_details = self.get_sds_details(sds_id=sds_id)
-
- # add IPs to SDS
- # update IP's role for an SDS
- add_ip_changed = False
- update_role_changed = False
- if sds_details and state == 'present' \
- and sds_ip_state == "present-in-sds":
- self.validate_ip_parameter(sds_ip_list)
- ips_to_add, roles_to_update = self.identify_ip_role(
- sds_ip_list, sds_details, sds_ip_state)
- if ips_to_add:
- add_ip_changed = self.add_ip(sds_id, ips_to_add)
- if roles_to_update:
- update_role_changed = self.update_role(sds_id,
- roles_to_update)
-
- # remove IPs from SDS
- remove_ip_changed = False
- if sds_details and state == 'present' \
- and sds_ip_state == "absent-in-sds":
- self.validate_ip_parameter(sds_ip_list)
- ips_to_remove = self.identify_ip_role(sds_ip_list, sds_details,
- sds_ip_state)
- if ips_to_remove:
- remove_ip_changed = self.remove_ip(sds_id, ips_to_remove)
-
- # delete operation
- delete_changed = False
- if sds_details and state == 'absent':
- delete_changed = self.delete_sds(sds_id)
-
- if create_changed or modify_changed or add_ip_changed \
- or update_role_changed or remove_ip_changed or delete_changed:
- changed = True
-
- # Returning the updated SDS details
- if state == 'present':
- sds_details = self.show_output(sds_id)
- result['sds_details'] = sds_details
- result['changed'] = changed
- self.module.exit_json(**result)
-
def show_output(self, sds_id):
"""Show SDS details
:param sds_id: ID of the SDS
@@ -1115,6 +996,14 @@ class PowerFlexSDS(object):
rmcache_size_mb = sds_details[0]['rmcacheSizeInKb'] / 1024
sds_details[0]['rmcacheSizeInMb'] = int(rmcache_size_mb)
+ # Append fault set name
+ if 'faultSetId' in sds_details[0] \
+ and sds_details[0]['faultSetId']:
+ fs_details = self.get_fault_set(
+ fault_set_id=sds_details[0]['faultSetId'],
+ protection_domain_id=sds_details[0]['protectionDomainId'])
+ sds_details[0]['faultSetName'] = fs_details['name']
+
return sds_details[0]
except Exception as e:
@@ -1123,6 +1012,15 @@ class PowerFlexSDS(object):
LOG.error(error_msg)
self.module.fail_json(msg=error_msg)
+ def validate_parameters(self, sds_params):
+ params = [sds_params['sds_name'], sds_params['sds_new_name']]
+ for param in params:
+ if param is not None and len(param.strip()) == 0:
+ error_msg = "Provide valid value for name for the " \
+ "creation/modification of the SDS."
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
def get_powerflex_sds_parameters():
"""This method provide parameter required for the SDS module on
@@ -1145,15 +1043,137 @@ def get_powerflex_sds_parameters():
rmcache_enabled=dict(type='bool'),
rmcache_size=dict(type='int'),
performance_profile=dict(choices=['Compact', 'HighPerformance']),
+ fault_set_name=dict(),
+ fault_set_id=dict(),
state=dict(required=True, type='str', choices=['present', 'absent'])
)
+class SDSExitHandler():
+ def handle(self, sds_obj, sds_details):
+ if sds_details:
+ sds_obj.result["sds_details"] = sds_obj.show_output(sds_id=sds_details['id'])
+ else:
+ sds_obj.result["sds_details"] = None
+ sds_obj.module.exit_json(**sds_obj.result)
+
+
+class SDSDeleteHandler():
+ def handle(self, sds_obj, sds_params, sds_details):
+ if sds_params['state'] == 'absent' and sds_details:
+ sds_details = sds_obj.delete_sds(sds_details['id'])
+ sds_obj.result['changed'] = True
+
+ SDSExitHandler().handle(sds_obj, sds_details)
+
+
+class SDSRemoveIPHandler():
+ def handle(self, sds_obj, sds_params, sds_details, sds_ip_list):
+ if sds_params['state'] == 'present' and sds_details:
+ # remove IPs from SDS
+ remove_ip_changed = False
+ if sds_params['sds_ip_state'] == "absent-in-sds":
+ sds_obj.validate_ip_parameter(sds_ip_list)
+ ips_to_remove = sds_obj.identify_ip_role_remove(sds_ip_list, sds_details,
+ sds_params['sds_ip_state'])
+ if ips_to_remove:
+ remove_ip_changed = sds_obj.remove_ip(sds_details['id'], ips_to_remove)
+
+ if remove_ip_changed:
+ sds_obj.result['changed'] = True
+
+ SDSDeleteHandler().handle(sds_obj, sds_params, sds_details)
+
+
+class SDSAddIPHandler():
+ def handle(self, sds_obj, sds_params, sds_details, sds_ip_list):
+ if sds_params['state'] == 'present' and sds_details:
+ # add IPs to SDS
+ # update IP's role for an SDS
+ add_ip_changed = False
+ update_role_changed = False
+ if sds_params['sds_ip_state'] == "present-in-sds":
+ sds_obj.validate_ip_parameter(sds_ip_list)
+ ips_to_add, roles_to_update = sds_obj.identify_ip_role_add(
+ sds_ip_list, sds_details, sds_params['sds_ip_state'])
+ if ips_to_add:
+ add_ip_changed = sds_obj.add_ip(sds_details['id'], ips_to_add)
+ if roles_to_update:
+ update_role_changed = sds_obj.update_role(sds_details['id'],
+ roles_to_update)
+
+ if add_ip_changed or update_role_changed:
+ sds_obj.result['changed'] = True
+
+ SDSRemoveIPHandler().handle(sds_obj, sds_params, sds_details, sds_ip_list)
+
+
+class SDSModifyHandler():
+ def handle(self, sds_obj, sds_params, sds_details, create_flag, sds_ip_list):
+ if sds_params['state'] == 'present' and sds_details:
+ modify_dict = sds_obj.to_modify(sds_details=sds_details,
+ sds_new_name=sds_params['sds_new_name'],
+ rfcache_enabled=sds_params['rfcache_enabled'],
+ rmcache_enabled=sds_params['rmcache_enabled'],
+ rmcache_size=sds_params['rmcache_size'],
+ performance_profile=sds_params['performance_profile'])
+ if modify_dict:
+ sds_details = sds_obj.modify_sds_attributes(sds_id=sds_details['id'],
+ modify_dict=modify_dict,
+ create_flag=create_flag)
+ sds_obj.result['changed'] = True
+
+ SDSAddIPHandler().handle(sds_obj, sds_params, sds_details, sds_ip_list)
+
+
+class SDSCreateHandler():
+ def handle(self, sds_obj, sds_params, sds_details, protection_domain_id, fault_set_id):
+ create_flag = False
+ sds_ip_list = copy.deepcopy(sds_params['sds_ip_list'])
+ if sds_params['state'] == 'present' and not sds_details:
+ sds_details = sds_obj.create_sds(sds_name=sds_params['sds_name'],
+ sds_id=sds_params['sds_id'],
+ sds_new_name=sds_params['sds_new_name'],
+ protection_domain_id=protection_domain_id,
+ sds_ip_list=sds_ip_list,
+ sds_ip_state=sds_params['sds_ip_state'],
+ rmcache_enabled=sds_params['rmcache_enabled'],
+ rmcache_size=sds_params['rmcache_size'],
+ fault_set_id=fault_set_id)
+ sds_obj.result['changed'] = True
+ create_flag = True
+
+ SDSModifyHandler().handle(sds_obj, sds_params, sds_details, create_flag, sds_ip_list)
+
+
+class SDSHandler():
+ def handle(self, sds_obj, sds_params):
+ sds_details = sds_obj.get_sds_details(sds_params['sds_name'], sds_params['sds_id'])
+ sds_obj.validate_parameters(sds_params=sds_params)
+ protection_domain_id = None
+ if sds_params['protection_domain_id'] or sds_params['protection_domain_name']:
+ protection_domain_id = sds_obj.get_protection_domain(
+ protection_domain_id=sds_params['protection_domain_id'],
+ protection_domain_name=sds_params['protection_domain_name'])['id']
+ fault_set_id = None
+ if sds_params['fault_set_name'] or sds_params['fault_set_id']:
+ fault_set_details = sds_obj.get_fault_set(fault_set_name=sds_params['fault_set_name'],
+ fault_set_id=sds_params['fault_set_id'],
+ protection_domain_id=protection_domain_id)
+ if fault_set_details is None:
+ error_msg = "The specified Fault set is not in the specified Protection Domain."
+ LOG.error(error_msg)
+ sds_obj.module.fail_json(msg=error_msg)
+ else:
+ fault_set_id = fault_set_details['id']
+ SDSCreateHandler().handle(sds_obj, sds_params, sds_details, protection_domain_id, fault_set_id)
+
+
def main():
- """ Create PowerFlex SDS object and perform actions on it
- based on user input from playbook"""
+ """ Create PowerFlex SDS object and perform action on it
+ based on user input from playbook."""
obj = PowerFlexSDS()
- obj.perform_module_operation()
+ SDSHandler().handle(obj, obj.module.params)
if __name__ == '__main__':
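The sds module is reworked above from a single perform_module_operation() into a chain of handlers (SDSHandler -> SDSCreateHandler -> SDSModifyHandler -> SDSAddIPHandler -> SDSRemoveIPHandler -> SDSDeleteHandler -> SDSExitHandler) and now supports check mode and fault sets. A hedged sketch of previewing an SDS creation inside a fault set without applying changes, using only parameters shown in this diff plus the standard check_mode task keyword:

    - name: Preview SDS creation in a fault set (check mode, no changes applied)
      dellemc.powerflex.sds:
        hostname: "{{ hostname }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: "{{ validate_certs }}"
        port: "{{ port }}"
        sds_name: "node1"
        protection_domain_name: "domain1"
        fault_set_name: "faultset1"
        sds_ip_list:
          - ip: "198.10.xxx.xxx"
            role: "sdcOnly"
        sds_ip_state: "present-in-sds"
        state: "present"
      check_mode: true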
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py
index 69caea075..0cc41c50e 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot.py
@@ -150,7 +150,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_name: "ansible_snapshot"
vol_name: "ansible_volume"
- read_only: False
+ read_only: false
desired_retention: 2
state: "present"
@@ -171,9 +171,9 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_id: "fe6cb28200000007"
sdc:
- - sdc_ip: "198.10.xxx.xxx"
- - sdc_id: "663ac0d200000001"
- allow_multiple_mappings: True
+ - sdc_ip: "198.10.xxx.xxx"
+ - sdc_id: "663ac0d200000001"
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
@@ -185,13 +185,13 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_id: "fe6cb28200000007"
sdc:
- - sdc_ip: "198.10.xxx.xxx"
- iops_limit: 11
- bandwidth_limit: 4096
- - sdc_id: "663ac0d200000001"
- iops_limit: 20
- bandwidth_limit: 2048
- allow_multiple_mappings: True
+ - sdc_ip: "198.10.xxx.xxx"
+ iops_limit: 11
+ bandwidth_limit: 4096
+ - sdc_id: "663ac0d200000001"
+ iops_limit: 20
+ bandwidth_limit: 2048
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py
new file mode 100644
index 000000000..af2084e55
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/snapshot_policy.py
@@ -0,0 +1,828 @@
+#!/usr/bin/python
+
+# Copyright: (c) 2023, Dell Technologies
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+""" Ansible module for managing snapshot policies on Dell Technologies (Dell) PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = r'''
+module: snapshot_policy
+version_added: '1.7.0'
+short_description: Manage snapshot policies on Dell PowerFlex
+description:
+- Managing snapshot policies on PowerFlex storage system includes
+ creating, getting details, modifying attributes, adding a source volume,
+ removing a source volume and deleting a snapshot policy.
+author:
+- Trisha Datta (@trisha-dell) <ansible.team@dell.com>
+extends_documentation_fragment:
+ - dellemc.powerflex.powerflex
+options:
+ snapshot_policy_name:
+ description:
+ - The name of the snapshot policy.
+ - It is unique across the PowerFlex array.
+ - Mutually exclusive with I(snapshot_policy_id).
+ type: str
+ snapshot_policy_id:
+ description:
+ - The unique identifier of the snapshot policy.
+ - Except for the create operation, all other operations can be performed
+ using I(snapshot_policy_id).
+ - Mutually exclusive with I(snapshot_policy_name).
+ type: str
+ auto_snapshot_creation_cadence:
+ description:
+ - The auto snapshot creation cadence of the snapshot policy.
+ type: dict
+ suboptions:
+ time:
+ description:
+ - The time between creation of two snapshots.
+ type: int
+ unit:
+ description:
+ - The unit of the auto snapshot creation cadence.
+ type: str
+ choices: ["Minute", "Hour", "Day", "Week"]
+ default: "Minute"
+ num_of_retained_snapshots_per_level:
+ description:
+ - Number of retained snapshots per level.
+ type: list
+ elements: int
+ new_name:
+ description:
+ - New name of the snapshot policy.
+ type: str
+ access_mode:
+ description:
+ - Access mode of the snapshot policy.
+ choices: ['READ_WRITE', 'READ_ONLY']
+ type: str
+ secure_snapshots:
+ description:
+ - Whether to secure snapshots or not.
+ - Used only in the create operation.
+ type: bool
+ source_volume:
+ description:
+ - The source volume details to be added or removed.
+ type: list
+ elements: dict
+ suboptions:
+ id:
+ description:
+ - The unique identifier of the source volume
+ to be added or removed.
+ - Mutually exclusive with I(name).
+ type: str
+ name:
+ description:
+ - The name of the source volume to be added or removed.
+ - Mutually exclusive with I(id).
+ type: str
+ auto_snap_removal_action:
+ description:
+ - Ways to handle the snapshots created by the policy (auto snapshots).
+ - Must be provided when I(state) is set to C('absent').
+ choices: ['Remove', 'Detach']
+ type: str
+ detach_locked_auto_snapshots:
+ description:
+ - Whether to detach the locked auto snapshots during removal of source volume.
+ type: bool
+ state:
+ description:
+ - The state of the source volume.
+ - When C(present), source volume will be added to the snapshot policy.
+ - When C(absent), source volume will be removed from the snapshot policy.
+ type: str
+ choices: ['present', 'absent']
+ default: 'present'
+ pause:
+ description:
+ - Whether to pause or resume the snapshot policy.
+ type: bool
+ state:
+ description:
+ - State of the snapshot policy.
+ choices: ['present', 'absent']
+ default: 'present'
+ type: str
+notes:
+ - The I(check_mode) is supported.
+'''
+
+EXAMPLES = r'''
+- name: Create a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ access_mode: "READ_WRITE"
+ secure_snapshots: false
+ auto_snapshot_creation_cadence:
+ time: 1
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 20
+ state: "present"
+
+- name: Get snapshot policy details using name
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+
+- name: Get snapshot policy details using id
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_id: "snapshot_policy_id_1"
+
+- name: Modify a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ auto_snapshot_creation_cadence:
+ time: 2
+ unit: "Hour"
+ num_of_retained_snapshots_per_level:
+ - 40
+
+- name: Rename a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ new_name: "snapshot_policy_name_1_new"
+
+- name: Add source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name_1"
+ source_volume:
+ - name: "source_volume_name_1"
+ - id: "source_volume_id_2"
+ state: "present"
+
+- name: Remove source volume
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ source_volume:
+ - name: "source_volume_name_1"
+ auto_snap_removal_action: 'Remove'
+ state: "absent"
+ - id: "source_volume_id_2"
+ auto_snap_removal_action: 'Remove'
+ detach_locked_auto_snapshots: true
+ state: "absent"
+
+- name: Pause a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ pause: true
+
+- name: Resume a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "{{snapshot_policy_name}}"
+ pause: false
+
+- name: Delete a snapshot policy
+ dellemc.powerflex.snapshot_policy:
+ hostname: "{{hostname}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ snapshot_policy_name: "snapshot_policy_name"
+ state: "absent"
+'''
+
+RETURN = r'''
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: 'false'
+snapshot_policy_details:
+ description: Details of the snapshot policy.
+ returned: When snapshot policy exists
+ type: dict
+ contains:
+ autoSnapshotCreationCadenceInMin:
+ description: The snapshot rule of the snapshot policy.
+ type: int
+ id:
+ description: The ID of the snapshot policy.
+ type: str
+ lastAutoSnapshotCreationFailureReason:
+ description: The reason for the failure of the last auto snapshot creation.
+ type: str
+ name:
+ description: Name of the snapshot policy.
+ type: str
+ lastAutoSnapshotFailureInFirstLevel:
+ description: Whether the last auto snapshot in first level failed.
+ type: bool
+ maxVTreeAutoSnapshots:
+ description: Maximum number of VTree auto snapshots.
+ type: int
+ nextAutoSnapshotCreationTime:
+ description: The time of creation of the next auto snapshot.
+ type: int
+ numOfAutoSnapshots:
+ description: Number of auto snapshots.
+ type: int
+ numOfCreationFailures:
+ description: Number of creation failures.
+ type: int
+ numOfExpiredButLockedSnapshots:
+ description: Number of expired but locked snapshots.
+ type: int
+ numOfLockedSnapshots:
+ description: Number of locked snapshots.
+ type: int
+ numOfRetainedSnapshotsPerLevel:
+ description: Number of snapshots retained per level.
+ type: list
+ numOfSourceVolumes:
+ description: Number of source volumes.
+ type: int
+ secureSnapshots:
+ description: Whether the snapshots are secured.
+ type: bool
+ snapshotAccessMode:
+ description: Access mode of the snapshots.
+ type: str
+ snapshotPolicyState:
+ description: State of the snapshot policy.
+ type: str
+ systemId:
+ description: Unique identifier of the PowerFlex system.
+ type: str
+ timeOfLastAutoSnapshot:
+ description: Time of the last auto snapshot creation.
+ type: str
+ timeOfLastAutoSnapshotCreationFailure:
+ description: Time of the failure of the last auto snapshot creation.
+ type: str
+ statistics:
+ description: Statistics details of the snapshot policy.
+ type: dict
+ contains:
+ autoSnapshotVolIds:
+ description: Volume Ids of all the auto snapshots.
+ type: list
+ expiredButLockedSnapshotsIds:
+ description: Ids of expired but locked snapshots.
+ type: list
+ numOfAutoSnapshots:
+ description: Number of auto snapshots.
+ type: int
+ numOfExpiredButLockedSnapshots:
+ description: Number of expired but locked snapshots.
+ type: int
+ numOfSrcVols:
+ description: Number of source volumes.
+ type: int
+ srcVolIds:
+ description: Ids of the source volumes.
+ type: list
+
+ sample: {
+ "autoSnapshotCreationCadenceInMin": 120,
+ "id": "15ae842800000004",
+ "lastAutoSnapshotCreationFailureReason": "NR",
+ "lastAutoSnapshotFailureInFirstLevel": false,
+ "links": [
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004",
+ "rel": "self"
+ },
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004/relationships/Statistics",
+ "rel": "/api/SnapshotPolicy/relationship/Statistics"
+ },
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004/relationships/SourceVolume",
+ "rel": "/api/SnapshotPolicy/relationship/SourceVolume"
+ },
+ {
+ "href": "/api/instances/SnapshotPolicy::15ae842800000004/relationships/AutoSnapshotVolume",
+ "rel": "/api/SnapshotPolicy/relationship/AutoSnapshotVolume"
+ },
+ {
+ "href": "/api/instances/System::0e7a082862fedf0f",
+ "rel": "/api/parent/relationship/systemId"
+ }
+ ],
+ "maxVTreeAutoSnapshots": 40,
+ "name": "Sample_snapshot_policy_1",
+ "nextAutoSnapshotCreationTime": 1683709201,
+ "numOfAutoSnapshots": 0,
+ "numOfCreationFailures": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfLockedSnapshots": 0,
+ "numOfRetainedSnapshotsPerLevel": [
+ 40
+ ],
+ "numOfSourceVolumes": 0,
+ "secureSnapshots": false,
+ "snapshotAccessMode": "ReadWrite",
+ "snapshotPolicyState": "Active",
+ "statistics": {
+ "autoSnapshotVolIds": [],
+ "expiredButLockedSnapshotsIds": [],
+ "numOfAutoSnapshots": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfSrcVols": 0,
+ "srcVolIds": []
+ },
+ "systemId": "0e7a082862fedf0f",
+ "timeOfLastAutoSnapshot": 0,
+ "timeOfLastAutoSnapshotCreationFailure": 0
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('snapshot_policy')
+
+
+class PowerFlexSnapshotPolicy(object):
+ """Class with snapshot policies operations"""
+
+ def __init__(self):
+ """ Define all parameters required by this module"""
+ self.module_params = utils.get_powerflex_gateway_host_parameters()
+ self.module_params.update(get_powerflex_snapshot_policy_parameters())
+
+ mut_ex_args = [['snapshot_policy_name', 'snapshot_policy_id']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mut_ex_args)
+
+ utils.ensure_required_libs(self.module)
+
+ self.result = dict(
+ changed=False,
+ snapshot_policy_details={}
+ )
+
+ try:
+ self.powerflex_conn = utils.get_powerflex_gateway_host_connection(
+ self.module.params)
+ LOG.info("Got the PowerFlex system connection object instance")
+ except Exception as e:
+ LOG.error(str(e))
+ self.module.fail_json(msg=str(e))
+
+ def get_snapshot_policy(self, snap_pol_id=None, snap_pol_name=None):
+ """Get snapshot policy details
+ :param snap_pol_name: Name of the snapshot policy.
+ :param snap_pol_id: ID of the snapshot policy.
+ :return: snapshot policy details
+ """
+ try:
+ snap_pol_details = None
+ if snap_pol_id:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.get(
+ filter_fields={'id': snap_pol_id})
+
+ if snap_pol_name:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.get(
+ filter_fields={'name': snap_pol_name})
+
+ if not snap_pol_details:
+ msg = "Unable to find the snapshot policy."
+ LOG.info(msg)
+ return None
+
+ # Append statistics
+ statistics = self.powerflex_conn.snapshot_policy.get_statistics(snap_pol_details[0]['id'])
+ snap_pol_details[0]['statistics'] = statistics if statistics else {}
+ return snap_pol_details[0]
+
+ except Exception as e:
+ errormsg = f'Failed to get the snapshot policy with error {str(e)}'
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def create_snapshot_policy(self, auto_snapshot_creation_cadence_in_min, num_of_retained_snapshots_per_level,
+ access_mode, secure_snapshots, snapshot_policy_name=None):
+ """Create snapshot_policy
+ :param auto_snapshot_creation_cadence_in_min: The auto snapshot creation cadence of the snapshot policy.
+ :param num_of_retained_snapshots_per_level: Number of retained snapshots per level.
+ :param access_mode: Access mode of the snapshot policy.
+ :param secure_snapshots: Whether to secure snapshots or not.
+ :param snapshot_policy_name: Name of the snapshot policy.
+ :return: Id of the snapshot policy, if created.
+ """
+ try:
+ if not self.module.check_mode:
+ policy_id = self.powerflex_conn.snapshot_policy.create(
+ auto_snap_creation_cadence_in_min=auto_snapshot_creation_cadence_in_min,
+ retained_snaps_per_level=num_of_retained_snapshots_per_level, name=snapshot_policy_name,
+ snapshotAccessMode=access_mode, secureSnapshots=secure_snapshots)
+ return policy_id
+
+ except Exception as e:
+ errormsg = f'Creation of snapshot policy failed with error {str(e)}'
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def delete_snapshot_policy(self, snap_pol_id):
+ """Delete snapshot policy
+ :param snap_pol_id: The unique identifier of the snapshot policy.
+ :return: Details of the snapshot policy.
+ """
+
+ try:
+ if not self.module.check_mode:
+ self.powerflex_conn.snapshot_policy.delete(snap_pol_id)
+ return self.get_snapshot_policy(snap_pol_id=snap_pol_id)
+
+ except Exception as e:
+ errormsg = (f'Deletion of snapshot policy {snap_pol_id} '
+ f'failed with error {str(e)}')
+ LOG.error(errormsg)
+ self.module.fail_json(msg=errormsg)
+
+ def get_volume(self, vol_name=None, vol_id=None):
+ """Get volume details
+ :param vol_name: Name of the volume
+ :param vol_id: ID of the volume
+ :return: Details of volume if exist.
+ """
+
+ id_or_name = vol_id if vol_id else vol_name
+
+ try:
+ if vol_name:
+ volume_details = self.powerflex_conn.volume.get(
+ filter_fields={'name': vol_name})
+ else:
+ volume_details = self.powerflex_conn.volume.get(
+ filter_fields={'id': vol_id})
+
+ if len(volume_details) == 0:
+ error_msg = f"Volume with identifier {id_or_name} not found"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ # Append snapshot policy name and id
+ if volume_details[0]['snplIdOfSourceVolume'] is not None:
+ snap_policy_id = volume_details[0]['snplIdOfSourceVolume']
+ volume_details[0]['snapshotPolicyId'] = snap_policy_id
+ volume_details[0]['snapshotPolicyName'] = \
+ self.get_snapshot_policy(snap_policy_id)['name']
+
+ return volume_details[0]
+
+ except Exception as e:
+ error_msg = (f"Failed to get the volume {id_or_name}"
+ f" with error {str(e)}")
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def manage_source_volume(self, snap_pol_details, vol_details, source_volume_element):
+ """Adding or removing a source volume
+        :param snap_pol_details: Details of the snapshot policy.
+ :param vol_details: Details of the volume.
+ :param source_volume_element: The index of the source volume in the
+ list of volumes to be added/removed.
+ :return: Boolean indicating whether volume is added/removed.
+ """
+ try:
+ if self.module.params['source_volume'][source_volume_element]['state'] == 'present' and \
+ vol_details['snplIdOfSourceVolume'] != snap_pol_details['id']:
+ if not self.module.check_mode:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.add_source_volume(
+ snapshot_policy_id=snap_pol_details['id'],
+ volume_id=vol_details['id'])
+ LOG.info("Source volume successfully added")
+ return True
+
+ elif self.module.params['source_volume'][source_volume_element]['state'] == 'absent' and \
+ vol_details['snplIdOfSourceVolume'] == snap_pol_details['id']:
+ if not self.module.check_mode:
+ snap_pol_details = self.powerflex_conn.snapshot_policy.remove_source_volume(
+ snapshot_policy_id=snap_pol_details['id'],
+ volume_id=vol_details['id'],
+ auto_snap_removal_action=self.module.params['source_volume'][source_volume_element]['auto_snap_removal_action'],
+ detach_locked_auto_snaps=self.module.params['source_volume'][source_volume_element]['detach_locked_auto_snapshots'])
+ LOG.info("Source volume successfully removed")
+ return True
+
+ except Exception as e:
+ error_msg = f"Failed to manage the source volume {vol_details['id']} with error {str(e)}"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def pause_snapshot_policy(self, snap_pol_details):
+ """Pausing or resuming a snapshot policy.
+        :param snap_pol_details: Details of the snapshot policy.
+        :return: Boolean indicating whether the snapshot policy was paused or resumed.
+ """
+ try:
+ if self.module.params['pause'] and \
+ snap_pol_details['snapshotPolicyState'] != "Paused":
+ if not self.module.check_mode:
+ self.powerflex_conn.snapshot_policy.pause(
+ snapshot_policy_id=snap_pol_details['id'])
+ LOG.info("Snapshot policy successfully paused.")
+ return True
+
+ elif not self.module.params['pause'] and \
+ snap_pol_details['snapshotPolicyState'] == "Paused":
+ if not self.module.check_mode:
+ self.powerflex_conn.snapshot_policy.resume(
+ snapshot_policy_id=snap_pol_details['id'])
+ LOG.info("Snapshot policy successfully resumed.")
+ return True
+
+ except Exception as e:
+ error_msg = f"Failed to pause/resume {snap_pol_details['id']} with error {str(e)}"
+ LOG.error(error_msg)
+ self.module.fail_json(msg=error_msg)
+
+ def to_modify(self, snap_pol_details, auto_snapshot_creation_cadence_in_min, num_of_retained_snapshots_per_level, new_name):
+ """Whether to modify the snapshot policy or not
+ :param snap_pol_details: Details of the snapshot policy.
+ :param auto_snapshot_creation_cadence_in_min: Snapshot rule of the policy.
+ :param num_of_retained_snapshots_per_level: Retention rule of the policy.
+ :param new_name: The new name of the snapshot policy.
+ :return: Dictionary containing the attributes of
+ snapshot policy which are to be updated
+ """
+ modify_dict = {}
+
+        if self.module.params['auto_snapshot_creation_cadence'] is not None and \
+ snap_pol_details['autoSnapshotCreationCadenceInMin'] != auto_snapshot_creation_cadence_in_min:
+ modify_dict['auto_snapshot_creation_cadence_in_min'] = auto_snapshot_creation_cadence_in_min
+
+ if num_of_retained_snapshots_per_level is not None and \
+ snap_pol_details['numOfRetainedSnapshotsPerLevel'] != num_of_retained_snapshots_per_level:
+ modify_dict['num_of_retained_snapshots_per_level'] = num_of_retained_snapshots_per_level
+
+ if new_name is not None:
+ if len(new_name.strip()) == 0:
+ self.module.fail_json(
+                    msg="Provide a valid snapshot policy name.")
+ if new_name != snap_pol_details['name']:
+ modify_dict['new_name'] = new_name
+
+ return modify_dict
+
+ def modify_snapshot_policy(self, snap_pol_details, modify_dict):
+ """
+ Modify the snapshot policy attributes
+ :param snap_pol_details: Details of the snapshot policy
+ :param modify_dict: Dictionary containing the attributes of
+ snapshot policy which are to be updated
+ :return: True, if the operation is successful
+ """
+ try:
+ msg = (f"Dictionary containing attributes which are to be"
+ f" updated is {str(modify_dict)}.")
+ LOG.info(msg)
+ if not self.module.check_mode:
+ if 'new_name' in modify_dict:
+ self.powerflex_conn.snapshot_policy.rename(snap_pol_details['id'],
+ modify_dict['new_name'])
+                    msg = (f"The name of the snapshot policy is updated"
+                           f" to {modify_dict['new_name']} successfully.")
+ LOG.info(msg)
+
+ if 'auto_snapshot_creation_cadence_in_min' in modify_dict and \
+ 'num_of_retained_snapshots_per_level' not in modify_dict:
+ self.powerflex_conn.snapshot_policy.modify(
+ snapshot_policy_id=snap_pol_details['id'],
+ auto_snap_creation_cadence_in_min=modify_dict['auto_snapshot_creation_cadence_in_min'],
+ retained_snaps_per_level=snap_pol_details['numOfRetainedSnapshotsPerLevel'])
+ msg = f"The snapshot rule is updated to {modify_dict['auto_snapshot_creation_cadence_in_min']}"
+ LOG.info(msg)
+
+ elif 'auto_snapshot_creation_cadence_in_min' not in modify_dict and 'num_of_retained_snapshots_per_level' in modify_dict:
+ self.powerflex_conn.snapshot_policy.modify(
+ snapshot_policy_id=snap_pol_details['id'],
+ auto_snap_creation_cadence_in_min=snap_pol_details['autoSnapshotCreationCadenceInMin'],
+ retained_snaps_per_level=modify_dict['num_of_retained_snapshots_per_level'])
+ msg = f"The retention rule is updated to {modify_dict['num_of_retained_snapshots_per_level']}"
+ LOG.info(msg)
+
+ elif 'auto_snapshot_creation_cadence_in_min' in modify_dict and 'num_of_retained_snapshots_per_level' in modify_dict:
+ self.powerflex_conn.snapshot_policy.modify(
+ snapshot_policy_id=snap_pol_details['id'],
+ auto_snap_creation_cadence_in_min=modify_dict['auto_snapshot_creation_cadence_in_min'],
+ retained_snaps_per_level=modify_dict['num_of_retained_snapshots_per_level'])
+ msg = (f"The snapshot rule is updated to {modify_dict['auto_snapshot_creation_cadence_in_min']}"
+ f" and the retention rule is updated to {modify_dict['num_of_retained_snapshots_per_level']}")
+ LOG.info(msg)
+
+ return True
+
+ except Exception as e:
+ err_msg = (f"Failed to update the snapshot policy {snap_pol_details['id']}"
+ f" with error {str(e)}")
+ LOG.error(err_msg)
+ self.module.fail_json(msg=err_msg)
+
+
+def get_access_mode(access_mode):
+ """
+ :param access_mode: Access mode of the snapshot policy
+ :return: The enum for the access mode
+ """
+
+ access_mode_dict = {
+ "READ_WRITE": "ReadWrite",
+ "READ_ONLY": "ReadOnly"
+ }
+ return access_mode_dict.get(access_mode)
+
+
+def get_powerflex_snapshot_policy_parameters():
+    """This method provides the parameters required for the snapshot
+    policy module on PowerFlex"""
+ return dict(
+ snapshot_policy_name=dict(), snapshot_policy_id=dict(),
+ new_name=dict(),
+ access_mode=dict(choices=['READ_WRITE', 'READ_ONLY']),
+ secure_snapshots=dict(type='bool'),
+ auto_snapshot_creation_cadence=dict(type='dict', options=dict(
+ time=dict(type='int'),
+ unit=dict(choices=['Minute', 'Hour', 'Day', 'Week'],
+ default='Minute'))),
+ num_of_retained_snapshots_per_level=dict(type='list', elements='int'),
+ source_volume=dict(type='list', elements='dict', options=dict(
+ id=dict(), name=dict(),
+ auto_snap_removal_action=dict(choices=['Remove', 'Detach']),
+ detach_locked_auto_snapshots=dict(type='bool'),
+ state=dict(default='present', choices=['present', 'absent']))),
+ pause=dict(type='bool'),
+ state=dict(default='present', choices=['present', 'absent'])
+ )
+
+
+class SnapshotPolicyCreateHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details, access_mode, auto_snapshot_creation_cadence_in_min):
+ if con_params['state'] == 'present' and not snapshot_policy_details:
+ if con_params['snapshot_policy_id']:
+ con_object.module.fail_json(msg="Creation of snapshot "
+ "policy is allowed "
+ "using snapshot_policy_name only, "
+ "snapshot_policy_id given.")
+
+ snap_pol_id = con_object.create_snapshot_policy(snapshot_policy_name=con_params['snapshot_policy_name'],
+ access_mode=access_mode,
+ secure_snapshots=con_params['secure_snapshots'],
+ auto_snapshot_creation_cadence_in_min=auto_snapshot_creation_cadence_in_min,
+ num_of_retained_snapshots_per_level=con_params['num_of_retained_snapshots_per_level'])
+ con_object.result['changed'] = True
+ if snap_pol_id:
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+
+ msg = (f"snapshot policy created successfully, fetched "
+ f"snapshot_policy details {str(snapshot_policy_details)}")
+ LOG.info(msg)
+
+ SnapshotPolicyModifyHandler().handle(con_object, con_params, snapshot_policy_details,
+ auto_snapshot_creation_cadence_in_min)
+
+
+class SnapshotPolicyModifyHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details, auto_snapshot_creation_cadence_in_min):
+ modify_dict = {}
+ if con_params['state'] == 'present' and snapshot_policy_details:
+ modify_dict = con_object.to_modify(
+ snap_pol_details=snapshot_policy_details, new_name=con_params['new_name'],
+ auto_snapshot_creation_cadence_in_min=auto_snapshot_creation_cadence_in_min,
+ num_of_retained_snapshots_per_level=con_params['num_of_retained_snapshots_per_level'])
+ msg = (f"Parameters to be modified are as"
+ f" follows: {str(modify_dict)}")
+ LOG.info(msg)
+ if modify_dict and con_params['state'] == 'present':
+ con_object.result['changed'] = con_object.modify_snapshot_policy(snap_pol_details=snapshot_policy_details,
+ modify_dict=modify_dict)
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_id=snapshot_policy_details.get("id"))
+ SnapshotPolicySourceVolumeHandler().handle(con_object, con_params, snapshot_policy_details)
+
+
+class SnapshotPolicySourceVolumeHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details):
+ if snapshot_policy_details and con_params['state'] == 'present' and con_params['source_volume'] is not None:
+ for source_volume_element in range(len(con_params['source_volume'])):
+ if not (con_params['source_volume'][source_volume_element]['id'] or
+ con_params['source_volume'][source_volume_element]['name']):
+ con_object.module.fail_json(
+ msg="Either id or name of source volume needs to be "
+ "passed with state of source volume")
+
+ elif con_params['source_volume'][source_volume_element]['id'] and \
+ con_params['source_volume'][source_volume_element]['name']:
+ con_object.module.fail_json(
+ msg="id and name of source volume are mutually exclusive")
+
+ elif con_params['source_volume'][source_volume_element]['id'] or \
+ con_params['source_volume'][source_volume_element]['name']:
+ volume_details = con_object.get_volume(vol_id=con_params['source_volume'][source_volume_element]['id'],
+ vol_name=con_params['source_volume'][source_volume_element]['name'])
+ con_object.result['changed'] = con_object.manage_source_volume(snap_pol_details=snapshot_policy_details,
+ vol_details=volume_details,
+ source_volume_element=source_volume_element)
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+
+ SnapshotPolicyPauseHandler().handle(con_object, con_params, snapshot_policy_details)
+
+
+class SnapshotPolicyPauseHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details):
+ if con_params["state"] == "present" and con_params["pause"] is not None:
+ con_object.result['changed'] = \
+ con_object.pause_snapshot_policy(snap_pol_details=snapshot_policy_details)
+ snapshot_policy_details = \
+ con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+ SnapshotPolicyDeleteHandler().handle(con_object, con_params, snapshot_policy_details)
+
+
+class SnapshotPolicyDeleteHandler():
+ def handle(self, con_object, con_params, snapshot_policy_details):
+ if con_params['state'] == 'absent' and snapshot_policy_details:
+ snapshot_policy_details = con_object.delete_snapshot_policy(
+ snap_pol_id=snapshot_policy_details.get("id"))
+ con_object.result['changed'] = True
+ SnapshotPolicyExitHandler().handle(con_object, snapshot_policy_details)
+
+
+class SnapshotPolicyExitHandler():
+ def handle(self, con_object, snapshot_policy_details):
+ con_object.result['snapshot_policy_details'] = snapshot_policy_details
+ con_object.module.exit_json(**con_object.result)
+
+
+class SnapshotPolicyHandler():
+ def handle(self, con_object, con_params):
+ access_mode = get_access_mode(con_params['access_mode'])
+ snapshot_policy_details = con_object.get_snapshot_policy(snap_pol_name=con_params['snapshot_policy_name'],
+ snap_pol_id=con_params['snapshot_policy_id'])
+ auto_snapshot_creation_cadence_in_min = None
+ if snapshot_policy_details:
+ auto_snapshot_creation_cadence_in_min = snapshot_policy_details['autoSnapshotCreationCadenceInMin']
+ msg = f"Fetched the snapshot policy details {str(snapshot_policy_details)}"
+ LOG.info(msg)
+ if con_params['auto_snapshot_creation_cadence'] is not None:
+ auto_snapshot_creation_cadence_in_min = utils.get_time_minutes(time=con_params['auto_snapshot_creation_cadence']['time'],
+ time_unit=con_params['auto_snapshot_creation_cadence']['unit'])
+ SnapshotPolicyCreateHandler().handle(con_object, con_params, snapshot_policy_details,
+ access_mode, auto_snapshot_creation_cadence_in_min)
+
+
+def main():
+ """ Create PowerFlex snapshot policy object and perform action on it
+ based on user input from playbook"""
+ obj = PowerFlexSnapshotPolicy()
+ SnapshotPolicyHandler().handle(obj, obj.module.params)
+
+
+if __name__ == '__main__':
+ main()
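For reference, here is a minimal playbook sketch for the snapshot policy module added above, assembled only from the argument spec in get_powerflex_snapshot_policy_parameters(); the connection options (hostname, username, password, validate_certs) are assumed to come from utils.get_powerflex_gateway_host_parameters(), which is outside this hunk, and all values are illustrative:

```yaml
- name: Snapshot policy usage sketch (illustrative, not from the upstream docs)
  hosts: localhost
  tasks:
    - name: Create a snapshot policy and attach a source volume
      dellemc.powerflex.snapshot_policy:
        hostname: "{{ hostname }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: false
        snapshot_policy_name: "ansible_snap_policy"
        access_mode: "READ_WRITE"
        secure_snapshots: false
        auto_snapshot_creation_cadence:
          time: 1
          unit: "Hour"
        num_of_retained_snapshots_per_level:
          - 20
        source_volume:
          - name: "sample_volume"
            state: "present"
        state: "present"
```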
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py
index ca343212d..9c8bb1d4a 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/storagepool.py
@@ -88,7 +88,6 @@ notes:
'''
EXAMPLES = r'''
-
- name: Get the details of storage pool by name
dellemc.powerflex.storagepool:
hostname: "{{hostname}}"
@@ -127,8 +126,8 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
storage_pool_name: "ansible_test_pool"
protection_domain_id: "1c957da800000000"
- use_rmcache: True
- use_rfcache: True
+ use_rmcache: true
+ use_rfcache: true
state: "present"
- name: Rename storage pool by id
diff --git a/ansible_collections/dellemc/powerflex/plugins/modules/volume.py b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py
index 9c1e1cd29..0fc301831 100644
--- a/ansible_collections/dellemc/powerflex/plugins/modules/volume.py
+++ b/ansible_collections/dellemc/powerflex/plugins/modules/volume.py
@@ -126,7 +126,7 @@ options:
description:
- Specifies whether to allow or not allow multiple mappings.
- If the volume is mapped to one SDC then for every new mapping
- I(allow_multiple_mappings) has to be passed as True.
+ I(allow_multiple_mappings) has to be passed as true.
type: bool
sdc:
description:
@@ -175,10 +175,10 @@ options:
type: str
delete_snapshots:
description:
- - If C(True), the volume and all its dependent snapshots will be deleted.
- - If C(False), only the volume will be deleted.
+ - If C(true), the volume and all its dependent snapshots will be deleted.
+ - If C(false), only the volume will be deleted.
- It can be specified only when the I(state) is C(absent).
- - It defaults to C(False), if not specified.
+ - It defaults to C(false), if not specified.
type: bool
state:
description:
@@ -203,7 +203,7 @@ EXAMPLES = r'''
protection_domain_name: "pd_1"
vol_type: "THICK_PROVISIONED"
compression_type: "NORMAL"
- use_rmcache: True
+ use_rmcache: true
size: 16
state: "present"
@@ -215,7 +215,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- allow_multiple_mappings: True
+ allow_multiple_mappings: true
sdc:
- sdc_id: "92A304DB-EFD7-44DF-A07E-D78134CC9764"
access_mode: "READ_WRITE"
@@ -251,7 +251,7 @@ EXAMPLES = r'''
iops_limit: 20
- sdc_ip: "198.10.xxx.xxx"
access_mode: "READ_ONLY"
- allow_multiple_mappings: True
+ allow_multiple_mappings: true
sdc_state: "mapped"
state: "present"
@@ -286,7 +286,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- delete_snapshots: False
+ delete_snapshots: false
state: "absent"
- name: Delete the Volume and all its dependent snapshots
@@ -297,7 +297,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
port: "{{port}}"
vol_name: "sample_volume"
- delete_snapshots: True
+ delete_snapshots: true
state: "absent"
'''
diff --git a/ansible_collections/dellemc/powerflex/requirements.txt b/ansible_collections/dellemc/powerflex/requirements.txt
index d0fb0f636..863b61efb 100644
--- a/ansible_collections/dellemc/powerflex/requirements.txt
+++ b/ansible_collections/dellemc/powerflex/requirements.txt
@@ -1,4 +1,2 @@
PyPowerFlex
-requests>=2.23.0
-python-dateutil>=2.8.0
-setuptools
+python-dateutil>=2.8.2
diff --git a/ansible_collections/dellemc/powerflex/roles/README.md b/ansible_collections/dellemc/powerflex/roles/README.md
new file mode 100644
index 000000000..e6f8c426a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/README.md
@@ -0,0 +1,117 @@
+## Supported Roles
+
+The following roles are available in the `dellemc.powerflex` roles directory:
+
+```
+.
+├── powerflex_common
+├── powerflex_mdm
+├── powerflex_gateway
+├── powerflex_sdc
+├── powerflex_sdr
+├── powerflex_lia
+├── powerflex_tb
+├── powerflex_sds
+├── powerflex_config
+├── powerflex_activemq
+
+```
+
+## Role Descriptions
+
+Below is a brief description of each role; a usage sketch follows the table.
+
+<table>
+<thead>
+ <tr>
+ <th>Role Name</th>
+ <th>Description</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>powerflex_common</td>
+ <td>Role to manage the common operations of Powerflex.</td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway</td>
+ <td>Role to manage the installation and uninstallation of Powerflex Gateway.</td>
+ </tr>
+ <tr>
+ <td>powerflex_lia</td>
+ <td>Role to manage the installation and uninstallation of Powerflex LIA.</td>
+ </tr>
+ <tr>
+ <td>powerflex_mdm</td>
+ <td>Role to manage the installation and uninstallation of Powerflex MDM.</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc</td>
+ <td>Role to manage the installation and uninstallation of Powerflex SDC.</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdr</td>
+    <td>Role to manage the installation and uninstallation of PowerFlex SDR.</td>
+ </tr>
+ <tr>
+ <td>powerflex_sds</td>
+ <td>Role to manage the installation and uninstallation of Powerflex SDS.</td>
+ </tr>
+ <tr>
+ <td>powerflex_tb</td>
+ <td>Role to manage the installation and uninstallation of Powerflex TB.</td>
+ </tr>
+ <tr>
+ <td>powerflex_webui</td>
+ <td>Role to manage the installation and uninstallation of Powerflex Web UI.</td>
+ </tr>
+ <tr>
+ <td>powerflex_config</td>
+ <td>Role to configure the protection domain, fault set and storage pool.</td>
+ </tr>
+ <tr>
+    <td>powerflex_activemq</td>
+ <td>Role to manage the installation and uninstallation of Powerflex ActiveMQ.</td>
+ </tr>
+</tbody>
+</table>
+
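+A typical play maps an inventory group to one of these roles through `ansible.builtin.import_role`; the exact variables each role accepts are documented in that role's own README. A minimal sketch, with the group name and connection variables assumed from the molecule scenarios in this collection:
+
+```
+- hosts: activemq
+  vars_files:
+    - vars_files/connection.yml
+  tasks:
+    - name: Install PowerFlex ActiveMQ
+      ansible.builtin.import_role:
+        name: powerflex_activemq
+      vars:
+        powerflex_activemq_state: present
+```
+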
+## Acronyms and their full forms
+Below is a list of the full forms of the acronyms referred to throughout the documentation.
+
+<table>
+<thead>
+ <tr>
+ <th>Acronym</th>
+ <th>Full Form</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>LIA</td>
+ <td>Lightweight Installation Agent.</td>
+ </tr>
+ <tr>
+ <td>MDM</td>
+ <td>Metadata Manager.</td>
+ </tr>
+ <tr>
+ <td>SDC</td>
+ <td>Storage Data Client.</td>
+ </tr>
+ <tr>
+ <td>SDR</td>
+ <td>Storage Data Replicator.</td>
+ </tr>
+ <tr>
+ <td>SDS</td>
+ <td>Storage Data Server.</td>
+ </tr>
+ <tr>
+ <td>TB</td>
+ <td>Tie Breaker.</td>
+ </tr>
+</tbody>
+</table>
diff --git a/ansible_collections/dellemc/powerflex/roles/molecule.yml b/ansible_collections/dellemc/powerflex/roles/molecule.yml
new file mode 100644
index 000000000..425cea9d1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/molecule.yml
@@ -0,0 +1,25 @@
+---
+dependency:
+ name: galaxy
+driver:
+ name: docker
+platforms:
+ - name: centos
+ image: quay.io/centos/centos:stream8
+ pre_build_image: true
+ volumes:
+ - /tmp:/tmp
+provisioner:
+ name: ansible
+ inventory:
+ links:
+ hosts: ../../../../playbooks/roles/inventory
+ group_vars: ../../../../playbooks/roles/group_vars/
+ host_vars: ../../../../playbooks/roles/host_vars/
+verifier:
+ name: ansible
+scenario:
+ test_sequence:
+ - check
+ - converge
+ - idempotence
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/README.md
new file mode 100644
index 000000000..bdaf5efac
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/README.md
@@ -0,0 +1,155 @@
+# powerflex_activemq
+
+Role to manage the installation and uninstallation of Powerflex ActiveMQ.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Notes](#notes)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex host.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex host.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex host.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>The port of the PowerFlex host.</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>timeout</td>
+ <td>false</td>
+ <td>Time after which connection will get terminated.</td>
+ <td></td>
+ <td>int</td>
+ <td>120</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>true</td>
+    <td>Location of the installation and RPM GPG files to be installed.
+    <br>Provide the installation software package that is compatible with the operating system of the node. The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>path</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_activemq_state</td>
+ <td>false</td>
+    <td>Specifies the state of ActiveMQ.
+    <br>C(present) installs ActiveMQ and C(absent) uninstalls it.</td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install and configure PowerFlex ActiveMQ
+ ansible.builtin.import_role:
+ name: powerflex_activemq
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_activemq_state: present
+
+ - name: Uninstall powerflex ActiveMQ
+ ansible.builtin.import_role:
+ name: powerflex_activemq
+ vars:
+ powerflex_activemq_state: absent
+
+```
+## Notes
+- Supported in PowerFlex version 4.x and above
+
+## Usage instructions
+----
+### To install all dependency packages, including ActiveMQ, on the node:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall ActiveMQ:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Author Information
+------------------
+
+Dell Technologies<br>
+Pavan Mudunuri (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/defaults/main.yml
new file mode 100644
index 000000000..646a6bf28
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+file_glob_name: activemq
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_activemq_mdm_ips }}"
+powerflex_activemq_state: 'present'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/argument_specs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/argument_specs.yml
new file mode 100644
index 000000000..4a793e5e9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/argument_specs.yml
@@ -0,0 +1,48 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of Powerflex ActiveMQ.
+ description:
+ - Role to manage the installation and uninstallation of Powerflex ActiveMQ.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: IP or FQDN of the PowerFlex host.
+ username:
+ required: true
+ type: str
+ description: The username of the PowerFlex host.
+ password:
+ required: true
+ type: str
+ description: The password of the PowerFlex host.
+ port:
+ type: int
+ description: Port of the PowerFlex host.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: false
+ timeout:
+ description: Time after which connection will get terminated.
+ type: int
+ default: 120
+ powerflex_common_file_install_location:
+ description:
+ - Location of installation, compatible installation software package
+ based on the operating system of the node.
+ - The files can be downloaded from the Dell Product support page for PowerFlex software.
+ type: path
+ default: /var/tmp
+ powerflex_activemq_state:
+ description:
+ - Specifies the state of the ActiveMQ.
+ - present will install the ActiveMQ if not already installed.
+ - absent will uninstall the ActiveMQ if installed.
+ type: str
+ choices: ['absent', 'present']
+ default: present
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/main.yml
new file mode 100644
index 000000000..7d16f052e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/meta/main.yml
@@ -0,0 +1,27 @@
+---
+galaxy_info:
+ author: Pavan Mudunuri
+ description: Role to manage the installation and uninstallation of Powerflex ActiveMQ.
+ company: Dell Technologies
+ role_name: powerflex_activemq
+ namespace: dellemc
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.14.0"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/converge.yml
new file mode 100644
index 000000000..be4d31ed7
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/converge.yml
@@ -0,0 +1,31 @@
+---
+- name: Molecule Test for installation of ActiveMQ
+ hosts: activemq
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Install and configure Powerflex ActiveMQ
+ ansible.builtin.import_role:
+ name: powerflex_activemq
+ vars:
+ powerflex_activemq_state: present
+
+ - name: Verifying install package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying installation package in normal mode
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: Verifying installation package in Idempotency mode
+ ansible.builtin.assert:
+ that: >
+ "'Nothing to do' in powerflex_common_install_package_output.results[0]" or
+ "'is already installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and not powerflex_common_install_package_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_install/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/converge.yml
new file mode 100644
index 000000000..3507299d0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/converge.yml
@@ -0,0 +1,67 @@
+---
+- name: Molecule Test for uninstallation of ActiveMQ
+ hosts: activemq
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Uninstall powerflex ActiveMQ
+ ansible.builtin.import_role:
+ name: powerflex_activemq
+ vars:
+ powerflex_activemq_state: absent
+
+ - name: Verifying uninstall package in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Removed:' in powerflex_activemq_uninstall_output.results[0].results[0]"
+ when:
+ - not ansible_check_mode
+ - powerflex_activemq_uninstall_output.changed
+ - ansible_distribution in ("RedHat", "CentOS", "SLES", "Rocky")
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_activemq_uninstall_output.msg == "Check mode: No changes made"
+ when:
+ - ansible_check_mode
+ - ansible_distribution in ("RedHat", "CentOS", "SLES", "Rocky")
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that: >
+ "'Nothing to do' in powerflex_activemq_uninstall_output.results[0].msg" or
+ "'EMC-ScaleIO-activemq is not installed' in powerflex_activemq_uninstall_output.results[0].results[0]"
+ when:
+ - not ansible_check_mode
+ - not powerflex_activemq_uninstall_output.changed
+ - ansible_distribution in ("RedHat", "CentOS", "SLES", "Rocky")
+
+ - name: Verifying uninstall package in check mode for ubuntu
+ ansible.builtin.assert:
+ that:
+ - powerflex_activemq_uninstall_deb_output.results[0].msg == "Check mode: No changes made"
+ - powerflex_activemq_uninstall_deb_output.changed
+ when:
+ - ansible_check_mode
+ - ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in normal mode for ubuntu
+ ansible.builtin.assert:
+ that:
+ - "'Removed:' in powerflex_activemq_uninstall_deb_output.results[0].results[0]"
+ when:
+ - not ansible_check_mode
+ - powerflex_activemq_uninstall_deb_output.changed
+ - ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in Idempotency for ubuntu
+ ansible.builtin.assert:
+ that: >
+ "'Nothing to do' in powerflex_activemq_uninstall_deb_output.results[0].msg" or
+ "'is not installed' in powerflex_activemq_uninstall_deb_output.results[0].results[0]"
+ when:
+ - not ansible_check_mode
+ - not powerflex_activemq_uninstall_deb_output.changed
+ - ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/molecule/activemq_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/install_activemq.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/install_activemq.yml
new file mode 100644
index 000000000..8b0dd44d7
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/install_activemq.yml
@@ -0,0 +1,30 @@
+---
+- name: Set facts for MDM counts
+ ansible.builtin.set_fact:
+ mdm_count: "{{ groups['mdm'] | length }}"
+
+- name: Set facts for powerflex_activemq_primary_ip
+ ansible.builtin.set_fact:
+ powerflex_activemq_primary_ip: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }}"
+
+- name: Set facts for powerflex_activemq_secondary_ip
+ ansible.builtin.set_fact:
+ powerflex_activemq_secondary_ip: "{{ hostvars[groups['mdm'][1]]['ansible_host'] }}"
+
+- name: Set facts for powerflex_activemq_tertiary_ip
+ ansible.builtin.set_fact:
+ powerflex_activemq_tertiary_ip: "{{ hostvars[groups['mdm'][2]]['ansible_host'] }}"
+ when: mdm_count | int > 2
+
+- name: Set facts for powerflex_activemq_mdm_ips if mdm_count is 2
+ ansible.builtin.set_fact:
+ powerflex_activemq_mdm_ips: "{{ powerflex_activemq_primary_ip }},{{ powerflex_activemq_secondary_ip }}"
+ when: mdm_count | int == 2
+
+- name: Set facts for powerflex_activemq_mdm_ips if mdm_count is more than 2
+ ansible.builtin.set_fact:
+ powerflex_activemq_mdm_ips: "{{ powerflex_activemq_primary_ip }},{{ powerflex_activemq_secondary_ip }},{{ powerflex_activemq_tertiary_ip }}"
+ when: mdm_count | int > 2
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/main.yml
new file mode 100644
index 000000000..1f3a708b6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install ActiveMQ
+ ansible.builtin.include_tasks: install_activemq.yml
+ when: powerflex_activemq_state == 'present'
+
+- name: Uninstall ActiveMQ
+ ansible.builtin.include_tasks: uninstall_activemq.yml
+ when: powerflex_activemq_state == 'absent'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/uninstall_activemq.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/uninstall_activemq.yml
new file mode 100644
index 000000000..1f45e94c0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/tasks/uninstall_activemq.yml
@@ -0,0 +1,20 @@
+---
+- name: Uninstall package
+ register: powerflex_activemq_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-activemq
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES", "Rocky")
+
+- name: Uninstall deb package
+ register: powerflex_activemq_uninstall_deb_output
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-activemq
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/vars/main.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_activemq/vars/main.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_common/README.md
new file mode 100644
index 000000000..f681f6ac4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/README.md
@@ -0,0 +1,3 @@
+# powerflex_common
+
+Role to manage the common operations of Powerflex.
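+
+This role is not usually run on its own: the component roles (gateway, SDC, SDS, ActiveMQ, and so on) include its task files to copy and install the PowerFlex packages, and `install_powerflex.yml` then dispatches to the matching `install_packages_<distribution>.yml`. A minimal sketch of that hand-off, as used by the other roles in this collection:
+
+```
+- name: Include install_powerflex.yml
+  ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+```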
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/defaults/main.yml
new file mode 100644
index 000000000..b0ea37ec6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+powerflex_common_file_install_location: "/var/tmp"
+powerflex_common_esxi_files_location: "/tmp/"
+powerflex_common_win_package_location: "C:\\Windows\\Temp"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/meta/main.yml
new file mode 100644
index 000000000..bbe74f9e0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/meta/main.yml
@@ -0,0 +1,41 @@
+---
+galaxy_info:
+ author: Jennifer John
+ description: The role helps to manage the common functions of PowerFlex.
+ company: Dell Technologies
+ role_name: powerflex_common
+ namespace: dellemc
+ # If the issue tracker for your role is not on github, uncomment the
+ # next line and provide a value
+ # issue_tracker_url: http://example.com/issue/tracker
+
+ # Choose a valid license ID from https://spdx.org - some suggested licenses:
+ # - BSD-3-Clause (default)
+ # - MIT
+ # - GPL-2.0-or-later
+ # - GPL-3.0-only
+ # - Apache-2.0
+ # - CC-BY-4.0
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+ # List tags for your role here, one per line. A tag is a keyword that describes
+ # and categorizes the role. Users find roles by searching for tags. Be sure to
+ # remove the '[]' above, if you add tags to this list.
+ #
+ # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+ # Maximum 20 tags per role.
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_CentOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_CentOS.yml
new file mode 100644
index 000000000..33922039b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_CentOS.yml
@@ -0,0 +1,5 @@
+---
+- name: Install pre-requisite java
+ ansible.builtin.package:
+ name: "java-1.8.0-openjdk-devel"
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_RedHat.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_RedHat.yml
new file mode 100644
index 000000000..33922039b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_RedHat.yml
@@ -0,0 +1,5 @@
+---
+- name: Install pre-requisite java
+ ansible.builtin.package:
+ name: "java-1.8.0-openjdk-devel"
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Rocky.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Rocky.yml
new file mode 100644
index 000000000..33922039b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Rocky.yml
@@ -0,0 +1,5 @@
+---
+- name: Install pre-requisite java
+ ansible.builtin.package:
+ name: "java-1.8.0-openjdk-devel"
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_SLES.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_SLES.yml
new file mode 100644
index 000000000..484286982
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_SLES.yml
@@ -0,0 +1,5 @@
+---
+- name: Install pre-requisite java
+ ansible.builtin.package:
+ name: "java-11-openjdk-headless"
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Ubuntu.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Ubuntu.yml
new file mode 100644
index 000000000..97de45222
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_java_Ubuntu.yml
@@ -0,0 +1,10 @@
+---
+- name: Install pre-requisite binutils
+ ansible.builtin.package:
+ name: "binutils"
+ state: "present"
+
+- name: Install pre-requisite java
+ ansible.builtin.package:
+ name: "openjdk-8-jdk"
+ state: "present"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_CentOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_CentOS.yml
new file mode 100644
index 000000000..35d08f875
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_CentOS.yml
@@ -0,0 +1,36 @@
+---
+- name: Copy files
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "/var/tmp/"
+ mode: "0644"
+ register: powerflex_common_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*"
+ - "{{ powerflex_common_file_install_location }}/{{ file_gpg_name }}*"
+
+- name: List the rpm file
+ register: powerflex_common_package_file
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.rpm"
+
+- name: List the rpm gpg file
+ register: powerflex_common_package_gpg
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "{{ file_gpg_name }}*"
+
+- name: Import gpg key
+ ansible.builtin.rpm_key:
+ state: present
+ key: "{{ powerflex_common_package_gpg.files[0].path }}"
+ when: powerflex_common_package_gpg.files | length > 0
+
+- name: Install package
+ register: powerflex_common_install_package_output
+ environment: "{{ powerflex_role_environment }}"
+ ansible.builtin.package:
+ name: "{{ powerflex_common_package_file.files[0].path }}"
+ state: "present"
+ disable_gpg_check: "{{ powerflex_gateway_disable_gpg_check | default(false) }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_RedHat.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_RedHat.yml
new file mode 100644
index 000000000..380c9e81d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_RedHat.yml
@@ -0,0 +1,36 @@
+---
+- name: Copy files
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "/var/tmp/"
+ mode: "0644"
+ register: powerflex_common_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*.rpm"
+ - "{{ powerflex_common_file_install_location }}/{{ file_gpg_name }}*"
+
+- name: List the rpm file
+ register: powerflex_common_package_file
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.rpm"
+
+- name: List the rpm gpg file
+ register: powerflex_common_package_gpg
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "{{ file_gpg_name }}*"
+
+- name: Import gpg key
+ ansible.builtin.rpm_key:
+ state: present
+ key: "{{ powerflex_common_package_gpg.files[0].path }}"
+ when: powerflex_common_package_gpg.files | length > 0
+
+- name: Install package
+ register: powerflex_common_install_package_output
+ environment: "{{ powerflex_role_environment }}"
+ ansible.builtin.package:
+ name: "{{ powerflex_common_package_file.files[0].path }}"
+ state: "present"
+ disable_gpg_check: "{{ powerflex_gateway_disable_gpg_check | default(false) }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Rocky.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Rocky.yml
new file mode 100644
index 000000000..35d08f875
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Rocky.yml
@@ -0,0 +1,36 @@
+---
+- name: Copy files
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "/var/tmp/"
+ mode: "0644"
+ register: powerflex_common_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*"
+ - "{{ powerflex_common_file_install_location }}/{{ file_gpg_name }}*"
+
+- name: List the rpm file
+ register: powerflex_common_package_file
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.rpm"
+
+- name: List the rpm gpg file
+ register: powerflex_common_package_gpg
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "{{ file_gpg_name }}*"
+
+- name: Import gpg key
+ ansible.builtin.rpm_key:
+ state: present
+ key: "{{ powerflex_common_package_gpg.files[0].path }}"
+ when: powerflex_common_package_gpg.files | length > 0
+
+- name: Install package
+ register: powerflex_common_install_package_output
+ environment: "{{ powerflex_role_environment }}"
+ ansible.builtin.package:
+ name: "{{ powerflex_common_package_file.files[0].path }}"
+ state: "present"
+ disable_gpg_check: "{{ powerflex_gateway_disable_gpg_check | default(false) }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_SLES.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_SLES.yml
new file mode 100644
index 000000000..380c9e81d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_SLES.yml
@@ -0,0 +1,36 @@
+---
+- name: Copy files
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "/var/tmp/"
+ mode: "0644"
+ register: powerflex_common_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*.rpm"
+ - "{{ powerflex_common_file_install_location }}/{{ file_gpg_name }}*"
+
+- name: List the rpm file
+ register: powerflex_common_package_file
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.rpm"
+
+- name: List the rpm gpg file
+ register: powerflex_common_package_gpg
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "{{ file_gpg_name }}*"
+
+- name: Import gpg key
+ ansible.builtin.rpm_key:
+ state: present
+ key: "{{ powerflex_common_package_gpg.files[0].path }}"
+ when: powerflex_common_package_gpg.files | length > 0
+
+- name: Install package
+ register: powerflex_common_install_package_output
+ environment: "{{ powerflex_role_environment }}"
+ ansible.builtin.package:
+ name: "{{ powerflex_common_package_file.files[0].path }}"
+ state: "present"
+ disable_gpg_check: "{{ powerflex_gateway_disable_gpg_check | default(false) }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Ubuntu.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Ubuntu.yml
new file mode 100644
index 000000000..77f794572
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_Ubuntu.yml
@@ -0,0 +1,51 @@
+---
+- name: Copy files
+ ansible.builtin.unarchive:
+ src: "{{ item }}"
+ dest: "/var/tmp/"
+ mode: "0644"
+ register: powerflex_common_ubuntu_tar_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*.tar"
+
+- name: Get powerflex_common_siob_file # noqa: no-handler
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.siob"
+ register: powerflex_common_siob_file
+ when: powerflex_common_ubuntu_tar_file.changed
+
+- name: Execute chmod siob_extract # noqa: no-handler
+ ansible.builtin.file:
+ path: "/var/tmp/siob_extract"
+ mode: "0755"
+ when: powerflex_common_ubuntu_tar_file.changed
+
+- name: Execute the siob_extract # noqa: no-handler
+ ansible.builtin.command: /var/tmp/siob_extract "{{ powerflex_common_siob_file.files[0].path }}" chdir="/var/tmp"
+ when: powerflex_common_ubuntu_tar_file.changed
+ register: powerflex_common_siob_extract_output
+ changed_when: powerflex_common_siob_extract_output.rc == 0
+
+- name: Copy deb file
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "/var/tmp"
+ mode: "0644"
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*.deb"
+ when: powerflex_common_ubuntu_tar_file.skipped
+
+- name: List the deb file
+ register: powerflex_common_package_file
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.deb"
+
+- name: Install package
+ register: powerflex_common_install_package_output
+ environment: "{{ powerflex_role_environment }}"
+ ansible.builtin.package:
+ deb: "{{ powerflex_common_package_file.files[0].path }}"
+ state: "present"
+ disable_gpg_check: "{{ powerflex_gateway_disable_gpg_check | default(false) }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_VMkernel.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_VMkernel.yml
new file mode 100644
index 000000000..3291fe8f6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_VMkernel.yml
@@ -0,0 +1,50 @@
+---
+- name: Get the acceptance level of the ESXi node
+ register: powerflex_common_get_acceptance_output
+ ansible.builtin.shell: >
+ esxcli software acceptance get
+ changed_when: powerflex_common_get_acceptance_output.stdout != 'PartnerSupported'
+
+- name: Set the acceptance level to PartnerSupported
+ register: powerflex_common_set_acceptance_output
+ ansible.builtin.shell: >
+ esxcli software acceptance set --level=PartnerSupported
+ when: powerflex_common_get_acceptance_output.stdout != 'PartnerSupported'
+ changed_when: powerflex_common_get_acceptance_output.stdout != 'PartnerSupported'
+
+- name: Copy Esxi component and rpm files
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "{{ powerflex_common_esxi_files_location }}"
+ mode: "0644"
+ register: powerflex_common_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*"
+ - "{{ powerflex_common_file_install_location }}/{{ file_gpg_name }}*"
+
+- name: List the zip file
+ register: powerflex_common_package_file
+ ansible.builtin.find:
+ paths: "{{ powerflex_common_esxi_files_location }}"
+ patterns: "*{{ file_glob_name }}*.zip"
+
+- name: Install SDC package for ESXi
+ register: powerflex_common_install_package_output
+ ansible.builtin.shell: >
+ esxcli software vib install -d {{ powerflex_common_package_file.files[0].path }} --no-sig-check
+ ignore_errors: true
+ changed_when: "'Reboot Required: true' in powerflex_common_install_package_output.stdout"
+
+- name: Reboot ESXi host
+ register: powerflex_common_reboot_output
+ ansible.builtin.reboot:
+ reboot_timeout: 500
+ msg: "Rebooting the ESXi host."
+ when: "'Reboot Required: true' in powerflex_common_install_package_output.stdout"
+ changed_when: powerflex_common_reboot_output.rebooted
+
+- name: Ensure the driver is loaded for SDC after reboot
+ register: powerflex_common_loaded_driver_output
+ ansible.builtin.shell: >
+ set -o pipefail && esxcli software vib list | grep sdc
+ changed_when: powerflex_common_loaded_driver_output.stdout_lines | length == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_WindowsOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_WindowsOS.yml
new file mode 100644
index 000000000..6b8e4b8e8
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_packages_WindowsOS.yml
@@ -0,0 +1,20 @@
+---
+- name: Copy files
+ ansible.windows.win_copy:
+ src: "{{ item }}"
+ dest: "{{ powerflex_common_win_package_location }}"
+ register: powerflex_common_file
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/*{{ file_glob_name }}*.msi"
+
+- name: List the msi file
+ ansible.windows.win_find:
+ paths: "{{ powerflex_common_win_package_location }}"
+ patterns: "*{{ file_glob_name }}*.msi"
+ register: powerflex_common_package_file
+
+- name: Install package
+ register: powerflex_common_install_package_output
+ ansible.windows.win_command: >
+ msiexec.exe /i "{{ powerflex_common_package_file.files[0].path }}" MDM_IP="{{ powerflex_role_environment.MDM_IP }}" /q
+ ignore_errors: true
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_powerflex.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_powerflex.yml
new file mode 100644
index 000000000..98ae4b0c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/install_powerflex.yml
@@ -0,0 +1,8 @@
+---
+- name: Set fact # noqa var-naming[no-role-prefix]
+ ansible.builtin.set_fact:
+ ansible_distribution: "WindowsOS"
+ when: " 'Windows' in ansible_distribution"
+
+- name: Include installation file based on distribution
+ ansible.builtin.include_tasks: "install_packages_{{ ansible_distribution }}.yml"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/main.yml
new file mode 100644
index 000000000..b83b1c148
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/tasks/main.yml
@@ -0,0 +1,14 @@
+---
+- name: Set fact # noqa var-naming[no-role-prefix]
+ ansible.builtin.set_fact:
+ ansible_distribution: "WindowsOS"
+ when: " 'Windows' in ansible_distribution"
+
+- name: Include vars
+ ansible.builtin.include_vars: "../vars/{{ ansible_distribution }}.yml"
+
+- name: Install required packages # noqa package-latest
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: latest
+ with_items: "{{ powerflex_common_packages }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/CentOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/CentOS.yml
new file mode 100644
index 000000000..cadab0571
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/CentOS.yml
@@ -0,0 +1,4 @@
+---
+powerflex_common_packages:
+ - numactl
+ - libaio
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/RedHat.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/RedHat.yml
new file mode 100644
index 000000000..03cedf354
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/RedHat.yml
@@ -0,0 +1,6 @@
+---
+powerflex_common_packages:
+ - numactl
+ - libaio
+ - python3
+ - binutils
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Rocky.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Rocky.yml
new file mode 100644
index 000000000..03cedf354
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Rocky.yml
@@ -0,0 +1,6 @@
+---
+powerflex_common_packages:
+ - numactl
+ - libaio
+ - python3
+ - binutils
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/SLES.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/SLES.yml
new file mode 100644
index 000000000..0f15a62a6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/SLES.yml
@@ -0,0 +1,4 @@
+---
+powerflex_common_packages:
+ - python3
+ - libapr1
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Ubuntu.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Ubuntu.yml
new file mode 100644
index 000000000..6c5f371ec
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/Ubuntu.yml
@@ -0,0 +1,5 @@
+---
+powerflex_common_packages:
+ - numactl
+ - libaio1
+ - ldap-utils
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/VMkernel.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/VMkernel.yml
new file mode 100644
index 000000000..3cc003a2a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/VMkernel.yml
@@ -0,0 +1,2 @@
+---
+powerflex_common_packages: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/WindowsOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/WindowsOS.yml
new file mode 100644
index 000000000..3cc003a2a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_common/vars/WindowsOS.yml
@@ -0,0 +1,2 @@
+---
+powerflex_common_packages: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_config/README.md
new file mode 100644
index 000000000..987773d88
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/README.md
@@ -0,0 +1,165 @@
+# powerflex_config
+
+Role to configure the protection domain, fault set and storage pool.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Usage instructions](#usage-instructions)
+* [Notes](#notes)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
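+
+If the collection is not already available on the control node, it can be pulled in through a standard Ansible Galaxy requirements file (a minimal sketch; the file name and layout follow the usual Galaxy conventions and are not mandated by this role):
+
+```
+# requirements.yml -- install with: ansible-galaxy collection install -r requirements.yml
+collections:
+  - name: dellemc.powerflex
+```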
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex host.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex host.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex host.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>Port of the PowerFlex host.</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>powerflex_protection_domain_name</td>
+ <td>false</td>
+ <td>Name of the protection domain.<br></td>
+ <td></td>
+ <td>str</td>
+ <td>config_protection_domain</td>
+ </tr>
+ <tr>
+ <td>powerflex_fault_sets</td>
+ <td>false</td>
+ <td>List of fault sets.<br></td>
+ <td></td>
+ <td>list</td>
+ <td>['fs1','fs2','fs3']</td>
+ </tr>
+ <tr>
+ <td>powerflex_media_type</td>
+ <td>false</td>
+ <td>Media type of the storage pool.<br></td>
+ <td>'SSD', 'HDD', 'TRANSITIONAL'</td>
+ <td>str</td>
+ <td>SSD</td>
+ </tr>
+ <tr>
+ <td>powerflex_storage_pool_name</td>
+ <td>false</td>
+ <td>Name of the storage pool.<br></td>
+ <td></td>
+ <td>str</td>
+ <td>config_storage_pool</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Configuration of protection domain, fault set and storage pool.
+ ansible.builtin.import_role:
+ name: "powerflex_config"
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_protection_domain_name: "protection_domain"
+ powerflex_fault_sets:
+ - 'fs1'
+ - 'fs2'
+ - 'fs3'
+ powerflex_media_type: 'SSD'
+ powerflex_storage_pool_name: "storage_pool"
+
+```
+
+## Usage instructions
+----
+### To configure the protection domain and storage pool:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Notes
+----
+
+- As a prerequisite, the PowerFlex gateway must be installed.
+- TRANSITIONAL media type is supported only during modification; a sketch of such a modification follows below.
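+
+A hedged sketch of switching an existing storage pool to TRANSITIONAL media. The module and parameters mirror the ones this role already uses in its tasks; the pool and protection domain names shown here are placeholders:
+
+```
+ - name: Modify media type of an existing storage pool
+   dellemc.powerflex.storagepool:
+     hostname: "{{ hostname }}"
+     username: "{{ username }}"
+     password: "{{ password }}"
+     validate_certs: "{{ validate_certs }}"
+     storage_pool_name: "config_storage_pool"
+     protection_domain_name: "config_protection_domain"
+     media_type: "TRANSITIONAL"
+     state: "present"
+```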
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Felix Stephen A (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/defaults/main.yml
new file mode 100644
index 000000000..0cb687ecb
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+# defaults file for powerflex_config
+powerflex_protection_domain_name: "domain1"
+powerflex_fault_sets: ['fs1', 'fs2', 'fs3']
+powerflex_media_type: 'SSD' # When version is R3
+powerflex_storage_pool_name: "pool1"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/argument_specs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/argument_specs.yml
new file mode 100644
index 000000000..bd94c306f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/argument_specs.yml
@@ -0,0 +1,50 @@
+---
+argument_specs:
+ main:
+ short_description: Role to configure the protection domain, fault set and storage pool.
+ description:
+ - Role to configure the protection domain, fault set and storage pool.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: IP or FQDN of the PowerFlex gateway.
+ username:
+ required: true
+ type: str
+ description: The username of the PowerFlex gateway.
+ password:
+ required: true
+ type: str
+ description: The password of the PowerFlex gateway.
+ port:
+ type: int
+ description: Port of the PowerFlex gateway.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: false
+ timeout:
+ description: Timeout.
+ type: int
+ default: 120
+ powerflex_protection_domain_name:
+ type: str
+ description: Name of the protection domain.
+ default: 'config_protection_domain'
+ powerflex_fault_sets:
+ description: List of fault sets.
+ type: list
+ default: ['fs1', 'fs2', 'fs3']
+ powerflex_media_type:
+ description: Media type of the storage pool.
+ type: str
+ choices: ['SSD', 'HDD', 'TRANSITIONAL']
+ default: 'SSD'
+ powerflex_storage_pool_name:
+ description: Name of the storage pool.
+ type: str
+ default: 'config_storage_pool'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/main.yml
new file mode 100644
index 000000000..9924409c8
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/meta/main.yml
@@ -0,0 +1,25 @@
+---
+galaxy_info:
+ author: Felix Stephen A
+ description: Role to configure the protection domain, fault set and storage pool.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ role_name: powerflex_config
+ namespace: dellemc
+
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/converge.yml
new file mode 100644
index 000000000..1fb1c2425
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/converge.yml
@@ -0,0 +1,47 @@
+---
+- name: Molecule Test for Configuring Protection Domain
+ hosts: mdm
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Run Config role
+ ansible.builtin.import_role:
+ name: "powerflex_config"
+ register: powerflex_config_result
+
+ - name: "Verifying protection domain creation"
+ ansible.builtin.assert:
+ that:
+ - powerflex_config_add_pd_output.protection_domain_details.name == powerflex_protection_domain_name
+ when: not ansible_check_mode and powerflex_config_add_pd_output.changed
+
+ - name: "Verifying storage pool R2 creation"
+ ansible.builtin.assert:
+ that:
+ - powerflex_config_storage_pool_output.storage_pool_details.name == powerflex_storage_pool_name
+ when: not ansible_check_mode and powerflex_config_storage_pool_output.changed
+
+ - name: "Verifying storage pool R3 creation"
+ ansible.builtin.assert:
+ that:
+ - powerflex_config_storage_pool_output.storage_pool_details.name == powerflex_storage_pool_name
+ when: not ansible_check_mode and powerflex_config_storage_pool_output.changed and powerflex_media_type is not none
+
+ - name: "Verifying protection domain creation in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_config_add_pd_output.protection_domain_details.name == powerflex_protection_domain_name
+ when: not ansible_check_mode and powerflex_config_add_pd_output.changed
+
+ - name: "Verifying storage pool R2 creation in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_config_storage_pool_output.storage_pool_details.name == powerflex_storage_pool_name
+ when: not ansible_check_mode and powerflex_config_storage_pool_output.changed
+
+ - name: "Verifying storage pool R3 creation in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_config_storage_pool_output.storage_pool_details.name == powerflex_storage_pool_name
+ when: not ansible_check_mode and powerflex_config_storage_pool_output.changed and powerflex_media_type is not none
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/molecule/configure_protection_domain/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml
new file mode 100644
index 000000000..f9340f0fd
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/tasks/main.yml
@@ -0,0 +1,79 @@
+---
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: "present"
+ register: powerflex_config_mdm_ip_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - PowerFlex version and MDM primary hostname
+ ansible.builtin.set_fact:
+ powerflex_config_array_version: "{{ powerflex_config_mdm_ip_result.mdm_cluster_details.master.versionInfo[1] }}"
+ powerflex_config_mdm_primary_hostname: "{{ hostvars[groups['mdm'][0]]['inventory_hostname'] }}"
+
+- name: Login to primary MDM of PowerFlex 3.6
+ ansible.builtin.command: scli --login --username {{ username }} --password "{{ password }}"
+ run_once: true
+ register: powerflex_config_login_output
+ changed_when: powerflex_config_login_output.rc == 0
+ delegate_to: "{{ powerflex_config_mdm_primary_hostname }}"
+ when: powerflex_config_array_version == '3'
+
+- name: Login to primary MDM of PowerFlex 4.5
+ ansible.builtin.command: scli --login --username {{ username }} --management_system_ip {{ hostname }} --password "{{ password }}"
+ run_once: true
+ register: powerflex_config_login_output
+ changed_when: powerflex_config_login_output.rc == 0
+ delegate_to: "{{ powerflex_config_mdm_primary_hostname }}"
+ when: powerflex_config_array_version == '4'
+
+- name: Create the protection domain
+ dellemc.powerflex.protection_domain:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ protection_domain_name: "{{ powerflex_protection_domain_name }}"
+ is_active: "{{ is_active | default(omit) }}"
+ network_limits: "{{ network_limits | default(omit) }}"
+ rf_cache_limits: "{{ rf_cache_limits | default(omit) }}"
+ state: "present"
+ register: powerflex_config_add_pd_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Add fault set
+ ansible.builtin.command: scli --add_fault_set --protection_domain_name "{{ powerflex_protection_domain_name }}" --fault_set_name "{{ item }}"
+ with_items: "{{ powerflex_fault_sets }}"
+ run_once: true
+ delegate_to: "{{ powerflex_config_mdm_primary_hostname }}"
+ register: powerflex_config_add_fs_output
+ ignore_errors: true
+ changed_when: powerflex_config_add_fs_output.rc == 0
+ when:
+ - powerflex_fault_sets is defined
+
+- name: Create a new storage pool.
+ dellemc.powerflex.storagepool:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ storage_pool_name: "{{ powerflex_storage_pool_name }}"
+ protection_domain_name: "{{ powerflex_protection_domain_name }}"
+ media_type: "{{ powerflex_media_type | default(omit) }}"
+ state: "present"
+ register: powerflex_config_storage_pool_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Enable zero-padding
+ ansible.builtin.command: >
+ scli --modify_zero_padding_policy --protection_domain_name
+ {{ powerflex_protection_domain_name }} --storage_pool_name {{ powerflex_storage_pool_name }} --enable_zero_padding
+ run_once: true
+ register: powerflex_config_enable_zero_padding_output
+ changed_when: powerflex_config_enable_zero_padding_output.rc == 0
+ delegate_to: "{{ powerflex_config_mdm_primary_hostname }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_config/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_config/vars/main.yml
new file mode 100644
index 000000000..ac3d9b959
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_config/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for powerflex_config
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/README.md
new file mode 100644
index 000000000..eac43ab78
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/README.md
@@ -0,0 +1,160 @@
+# powerflex_gateway
+
+Role to manage the installation and uninstallation of Powerflex Gateway.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Notes](#notes)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>false</td>
+ <td>Location of the installation files. Use the installation software package that is compatible with the operating system of the node.
+ <br> The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>path</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_is_redundant</td>
+ <td>false</td>
+ <td>Is the gateway redundant (will install keepalived).<br></td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_admin_password</td>
+ <td>true</td>
+ <td>Admin password for the Powerflex gateway.<br></td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_http_port</td>
+ <td>false</td>
+ <td>Powerflex gateway HTTP port.<br></td>
+ <td></td>
+ <td>int</td>
+ <td>80</td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_https_port</td>
+ <td>false</td>
+ <td>Powerflex gateway HTTPS port.<br></td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_virtual_ip</td>
+ <td>false</td>
+ <td>Virtual IP address of Powerflex gateway.<br></td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_virtual_interface</td>
+ <td>false</td>
+ <td>Virtual interface of Powerflex gateway.<br></td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_state</td>
+ <td>false</td>
+ <td>Specify state of gateway.<br></td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+ <tr>
+ <td>powerflex_gateway_skip_java</td>
+ <td>false</td>
+ <td>Specify whether to install Java or not.<br></td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install and configure powerflex gateway
+ ansible.builtin.import_role:
+ name: powerflex_gateway
+ vars:
+ powerflex_common_file_install_location: "/opt/scaleio/rpm"
+ powerflex_gateway_admin_password: password
+ powerflex_gateway_state: present
+
+ - name: Uninstall powerflex gateway
+ ansible.builtin.import_role:
+ name: powerflex_gateway
+ vars:
+ powerflex_gateway_state: absent
+
+```
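+
+A hedged sketch of a redundant (keepalived-backed) gateway installation, using only variables documented in the table above; the virtual IP and interface values are placeholders for your environment:
+
+```
+ - name: Install a redundant powerflex gateway
+   ansible.builtin.import_role:
+     name: powerflex_gateway
+   vars:
+     powerflex_common_file_install_location: "/opt/scaleio/rpm"
+     powerflex_gateway_admin_password: password
+     powerflex_gateway_is_redundant: true
+     powerflex_gateway_virtual_ip: "192.168.0.100"
+     powerflex_gateway_virtual_interface: "eth1"
+     powerflex_gateway_state: present
+```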
+
+## Notes
+- Supported only in PowerFlex version 3.6.
+
+## Usage instructions
+----
+### To install all dependency packages, including gateway, on node:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+
+### To uninstall gateway:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
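+
+A minimal YAML inventory sketch (host names are placeholders; the group names match what the role's tasks expect -- the gateway tasks read MDM addresses from an `mdm` group and the role itself targets a `gateway` group):
+
+```
+all:
+  children:
+    mdm:
+      hosts:
+        mdm-node-1:
+        mdm-node-2:
+    gateway:
+      hosts:
+        gateway-node-1:
+```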
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Bhavneet Sharma (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/defaults/main.yml
new file mode 100644
index 000000000..011911837
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/defaults/main.yml
@@ -0,0 +1,17 @@
+---
+# Is the gateway redundant (will install keepalived)
+powerflex_gateway_is_redundant: false
+# Virtual IP if redundant
+powerflex_gateway_virtual_ip: ''
+# Interface to attach the virtual ip
+powerflex_gateway_virtual_interface: eth1
+# Skip Java installation for PowerFlex (assume it's on the system)
+powerflex_gateway_skip_java: false
+# Disable GPG check to install PowerFlex gateway
+powerflex_gateway_disable_gpg_check: true
+powerflex_gateway_user_properties_file: /opt/emc/scaleio/gateway/webapps/ROOT/WEB-INF/classes/gatewayUser.properties
+powerflex_gateway_catalina_properties_file: /opt/emc/scaleio/gateway/conf/catalina.properties
+file_glob_name: gateway
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_mdm_ips }}"
+ GATEWAY_ADMIN_PASSWORD: "{{ powerflex_gateway_admin_password }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/handlers/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/handlers/main.yml
new file mode 100644
index 000000000..ae7f1163f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/handlers/main.yml
@@ -0,0 +1,6 @@
+---
+- name: Restart keepalived
+ ansible.builtin.service:
+ name: "keepalived"
+ state: "restarted"
+ enabled: true
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/argument_specs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/argument_specs.yml
new file mode 100644
index 000000000..2fe8f150e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/argument_specs.yml
@@ -0,0 +1,47 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of
+ PowerFlex gateway
+ description:
+ - Role to manage the installation and uninstallation of PowerFlex gateway.
+ options:
+ powerflex_common_file_install_location:
+ type: path
+ description:
+ - Location of installation, compatible installation software package
+ based on the operating system of the node.
+ - The files can be downloaded from the Dell Product support page for
+ the PowerFlex software.
+ default: '/var/tmp'
+ powerflex_gateway_is_redundant:
+ type: bool
+ description: Is the gateway redundant (will install keepalived)
+ default: false
+ powerflex_gateway_admin_password:
+ required: true
+ type: str
+ description: Admin password for the PowerFlex gateway.
+ powerflex_gateway_http_port:
+ type: int
+ description: PowerFlex gateway HTTP port.
+ default: 80
+ powerflex_gateway_https_port:
+ type: int
+ description: PowerFlex gateway HTTPS port.
+ default: 443
+ powerflex_gateway_virtual_ip:
+ type: str
+ description: Virtual IP address of PowerFlex gateway.
+ powerflex_gateway_virtual_interface:
+ type: str
+ description: Virtual interface of PowerFlex gateway.
+ powerflex_gateway_state:
+ type: str
+ description: State of the PowerFlex gateway.
+ choices: ['present', 'absent']
+ default: 'present'
+ powerflex_gateway_skip_java:
+ type: bool
+ description: Specifies whether to install java or not.
+ default: false
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/main.yml
new file mode 100644
index 000000000..11db01ec5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/meta/main.yml
@@ -0,0 +1,28 @@
+---
+galaxy_info:
+ role_name: powerflex_gateway
+ namespace: dellemc
+ author: Bhavneet Sharma
+ description: Role to manage the installation and uninstallation of Powerflex gateway
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+
+dependencies:
+ - role: powerflex_common
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/converge.yml
new file mode 100644
index 000000000..92183f3dc
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/converge.yml
@@ -0,0 +1,29 @@
+---
+- name: Install gateway
+ hosts: gateway
+ gather_facts: true
+ tasks:
+ - name: Install and configure powerflex gateway
+ ansible.builtin.import_role:
+ name: "powerflex_gateway"
+ vars:
+ powerflex_gateway_state: present
+ register: powerflex_gateway_result_molecule
+
+ - name: Verifying installation package in check mode
+ ansible.builtin.assert:
+ that:
+ - " 'No changes made, but would have if not in check mode' in powerflex_common_install_package_output.msg"
+ when: ansible_check_mode
+
+ - name: Verifying installation package
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: Verifying installation package in idempotency
+ ansible.builtin.assert:
+ that:
+ - " 'Nothing to do' in powerflex_common_install_package_output.msg"
+ when: not ansible_check_mode and not powerflex_common_install_package_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/converge.yml
new file mode 100644
index 000000000..258173d9a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/converge.yml
@@ -0,0 +1,32 @@
+---
+- name: Install gateway with invalid rpm path
+ hosts: gateway
+ gather_facts: true
+ tasks:
+ - name: Install and configure powerflex gateway with no rpm
+ ansible.builtin.import_role:
+ name: "powerflex_gateway"
+ vars:
+ powerflex_common_file_install_location: "/opt/empty"
+ powerflex_gateway_state: present
+ ignore_errors: true
+ register: powerflex_gateway_install_config_no_rpm_result
+
+ - name: Verifying failure of install package with respect to no rpm file
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
+
+ - name: Install and configure powerflex gateway with wrong file path
+ ansible.builtin.import_role:
+ name: "powerflex_gateway"
+ vars:
+ powerflex_common_file_install_location: "/opt/aaab"
+ powerflex_gateway_state: present
+ ignore_errors: true
+ register: powerflex_gateway_install_config_wrong_path_result
+
+ - name: Verifying failure of install package with wrong file path
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_installation_invalid_path_rpm/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/converge.yml
new file mode 100644
index 000000000..6342d5f25
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/converge.yml
@@ -0,0 +1,48 @@
+---
+- name: Uninstall gateway
+ hosts: gateway
+ gather_facts: true
+ tasks:
+ - name: Uninstall powerflex gateway
+ ansible.builtin.import_role:
+ name: "powerflex_gateway"
+ vars:
+ powerflex_gateway_state: 'absent'
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_gateway_uninstall_output.msg == "Check mode: No changes made, but would have if not in check mode"
+ - powerflex_gateway_uninstall_output.changed is true
+ when: ansible_check_mode and ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in converge
+ ansible.builtin.assert:
+ that:
+ - " 'Removed:' in powerflex_gateway_uninstall_output.results[0].results[0]"
+ when: not ansible_check_mode and powerflex_gateway_uninstall_output.changed and ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_gateway_uninstall_output.results[0].msg == 'Nothing to do'
+ when: not ansible_check_mode and not powerflex_gateway_uninstall_output.changed and ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_gateway_uninstall_deb_output.msg == "Check mode: No changes made, but would have if not in check mode"
+ - powerflex_gateway_uninstall_deb_output.changed is true
+ when: ansible_check_mode and ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in converge
+ ansible.builtin.assert:
+ that:
+ - " 'Removed:' in powerflex_gateway_uninstall_deb_output.results[0].results[0]"
+ when: not ansible_check_mode and powerflex_gateway_uninstall_deb_output.changed and ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_gateway_uninstall_deb_output.results[0].msg == 'Nothing to do'
+ when: not ansible_check_mode and not powerflex_gateway_uninstall_deb_output.changed and ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/molecule/gateway_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_gateway.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_gateway.yml
new file mode 100644
index 000000000..58bbd1a08
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_gateway.yml
@@ -0,0 +1,52 @@
+---
+- name: Set Fact the powerflex_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_mdm_ips: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }},{{ hostvars[groups['mdm'][1]]['ansible_host'] }}"
+
+- name: Install java
+ ansible.builtin.include_tasks: "../../powerflex_common/tasks/install_java_{{ ansible_distribution }}.yml"
+ when: not powerflex_gateway_skip_java
+
+- name: Set gateway admin password
+ ansible.builtin.set_fact:
+ token: "{{ powerflex_gateway_admin_password }}"
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Include install_keepalived.yml
+ ansible.builtin.include_tasks: install_keepalived.yml
+ when: powerflex_gateway_is_redundant | bool
+
+- name: Configure gateway with MDM addresses
+ ansible.builtin.lineinfile:
+ name: "{{ powerflex_gateway_user_properties_file }}"
+ regexp: '^mdm.ip.addresses'
+ line: "mdm.ip.addresses={{ powerflex_mdm_ips }}"
+ ignore_errors: "{{ ansible_check_mode }}"
+
+- name: Configure gateway to accept certificates
+ ansible.builtin.lineinfile:
+ name: "{{ powerflex_gateway_user_properties_file }}"
+ regexp: '^security.bypass_certificate_check'
+ line: "security.bypass_certificate_check=true"
+ ignore_errors: "{{ ansible_check_mode }}"
+
+- name: Configure gateway http port
+ ansible.builtin.lineinfile:
+ name: "{{ powerflex_gateway_catalina_properties_file }}"
+ regexp: '^http.port'
+ line: "http.port={{ powerflex_gateway_http_port }}"
+ ignore_errors: "{{ ansible_check_mode }}"
+
+- name: Configure gateway https port
+ ansible.builtin.lineinfile:
+ name: "{{ powerflex_gateway_catalina_properties_file }}"
+ regexp: '^ssl.port'
+ line: "ssl.port={{ powerflex_gateway_https_port }}"
+ ignore_errors: "{{ ansible_check_mode }}"
+
+- name: Restart service PowerFlex Gateway
+ ansible.builtin.service:
+ name: scaleio-gateway.service
+ state: restarted
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml
new file mode 100644
index 000000000..df6fd9dac
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/install_keepalived.yml
@@ -0,0 +1,28 @@
+---
+- name: Include vars
+ ansible.builtin.include_vars: "../vars/{{ ansible_distribution }}.yml"
+
+- name: Install required packages
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: present
+ with_items: "{{ keepalived_packages }}"
+
+- name: Set the priority of keepalived
+ ansible.builtin.set_fact:
+ keepalived_priority: 100
+ run_once: true
+
+- name: Set the priority of keepalived if not defined
+ ansible.builtin.set_fact:
+ keepalived_priority: 101
+ when: keepalived_priority is not defined
+
+- name: Configure keepalived
+ ansible.builtin.template:
+ src: keepalived.conf.j2
+ dest: "{{ keepalived_config_file_location }}/keepalived.conf"
+ mode: '0600'
+ owner: root
+ group: root
+ notify: Restart keepalived
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/main.yml
new file mode 100644
index 000000000..80af948e2
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install Powerflex gateway
+ ansible.builtin.include_tasks: install_gateway.yml
+ when: powerflex_gateway_state == "present"
+
+- name: Uninstall Powerflex gateway
+ ansible.builtin.include_tasks: uninstall_gateway.yml
+ when: powerflex_gateway_state == "absent"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/uninstall_gateway.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/uninstall_gateway.yml
new file mode 100644
index 000000000..39645d5a3
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/tasks/uninstall_gateway.yml
@@ -0,0 +1,20 @@
+---
+- name: Uninstall package
+ register: powerflex_gateway_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-gateway
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ register: powerflex_gateway_uninstall_deb_output
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-gateway
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/templates/keepalived.conf.j2 b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/templates/keepalived.conf.j2
new file mode 100644
index 000000000..b9896823e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/templates/keepalived.conf.j2
@@ -0,0 +1,18 @@
+vrrp_script chk_gateway { # Requires keepalived-1.1.13
+ script "killall -0 scaleio-gateway-wd.bash" # cheaper than pidof
+ interval 2 # check every 2 seconds
+ weight 2 # add 2 points of prio if OK
+}
+
+vrrp_instance gateway_vi_1 {
+ interface {{ powerflex_gateway_virtual_interface }}
+ state MASTER
+ virtual_router_id 51
+ priority {{ keepalived_priority }} # 101 on master, 100 on backup
+ virtual_ipaddress {
+ {{ powerflex_gateway_virtual_ip }}
+ }
+ track_script {
+ chk_gateway
+ }
+}
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/CentOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/CentOS.yml
new file mode 100644
index 000000000..cbb2739c1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/CentOS.yml
@@ -0,0 +1,4 @@
+---
+keepalived_packages:
+ - keepalived
+keepalived_config_file_location: /etc/keepalived
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/RedHat.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/RedHat.yml
new file mode 100644
index 000000000..cbb2739c1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/RedHat.yml
@@ -0,0 +1,4 @@
+---
+keepalived_packages:
+ - keepalived
+keepalived_config_file_location: /etc/keepalived
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/SLES.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/SLES.yml
new file mode 100644
index 000000000..cbb2739c1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/SLES.yml
@@ -0,0 +1,4 @@
+---
+keepalived_packages:
+ - keepalived
+keepalived_config_file_location: /etc/keepalived
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/Ubuntu.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/Ubuntu.yml
new file mode 100644
index 000000000..cbb2739c1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/Ubuntu.yml
@@ -0,0 +1,4 @@
+---
+keepalived_packages:
+ - keepalived
+keepalived_config_file_location: /etc/keepalived
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/main.yml
new file mode 100644
index 000000000..6de60b1d3
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_gateway/vars/main.yml
@@ -0,0 +1,6 @@
+---
+file_glob_name: gateway
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_mdm_ips }}"
+ GATEWAY_ADMIN_PASSWORD: "{{ powerflex_gateway_admin_password }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/README.md
new file mode 100644
index 000000000..2ae19c9a1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/README.md
@@ -0,0 +1,170 @@
+# powerflex_lia
+
+Role to manage the installation and uninstallation of Powerflex LIA.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td>10.1.1.1</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td>admin</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td>password</td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>Port of the PowerFlex gateway.</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>timeout</td>
+ <td>false</td>
+ <td>Timeout</td>
+ <td></td>
+ <td>int</td>
+ <td>120</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>true</td>
+ <td>Location of the installation and RPM GPG files to be installed.
+ <br>Use the installation software package that is compatible with the operating system of the node. The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>str</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_lia_token</td>
+ <td>true</td>
+ <td>LIA password for node management.</td>
+ <td></td>
+ <td>str</td>
+ <td>Cluster1!</td>
+ </tr>
+ <tr>
+ <td>powerflex_lia_state</td>
+ <td>false</td>
+ <td>Specify state of LIA.<br></td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install and configure PowerFlex LIA
+ ansible.builtin.import_role:
+ name: powerflex_lia
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_lia_token: "Cluster1!"
+ powerflex_lia_state: present
+
+ - name: Uninstall powerflex LIA
+ ansible.builtin.import_role:
+ name: powerflex_lia
+ vars:
+ powerflex_lia_state: absent
+
+```
+
+## Usage instructions
+----
+### To install all dependency packages, including LIA, on node:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall LIA:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Pavan Mudunuri (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/defaults/main.yml
new file mode 100644
index 000000000..d69a82e0a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/defaults/main.yml
@@ -0,0 +1,5 @@
+---
+file_glob_name: lia
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_lia_mdm_ips }}"
+ TOKEN: "{{ powerflex_lia_token }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/meta/main.yml
new file mode 100644
index 000000000..ea089a471
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/meta/main.yml
@@ -0,0 +1,27 @@
+---
+galaxy_info:
+ author: Pavan Mudunuri
+ description: Role to manage the installation and uninstallation of Powerflex LIA.
+ company: Dell Technologies
+ role_name: powerflex_lia
+ namespace: dellemc
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.14.0"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/converge.yml
new file mode 100644
index 000000000..7774d935b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/converge.yml
@@ -0,0 +1,30 @@
+---
+- name: Molecule Test for installation of LIA
+ hosts: lia
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Install and configure Powerflex LIA
+ ansible.builtin.import_role:
+ name: powerflex_lia
+ vars:
+ powerflex_lia_state: present
+
+ - name: Verifying install package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying installation package in normal mode
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: Verifying installation package in Idempotency mode
+ ansible.builtin.assert:
+ that:
+ - "'Nothing to do' in powerflex_common_install_package_output.msg"
+ when: not ansible_check_mode and not powerflex_common_install_package_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_install/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/converge.yml
new file mode 100644
index 000000000..d517ac2b6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/converge.yml
@@ -0,0 +1,34 @@
+---
+- name: Molecule Test for installation of LIA with invalid rpm path, rpm file
+ hosts: lia
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Install and configure powerflex LIA with no rpm
+ ansible.builtin.import_role:
+ name: powerflex_lia
+ vars:
+ powerflex_common_file_install_location: "/opt/empty"
+ powerflex_lia_state: present
+ register: powerflex_lia_no_rpm_result
+ ignore_errors: true
+
+ - name: Verifying failure of install package with respect to no rpm file in normal mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length | int == 0
+
+ - name: Install and configure powerflex lia with wrong file path
+ ansible.builtin.import_role:
+ name: powerflex_lia
+ vars:
+ powerflex_common_file_install_location: "/opt/aaab"
+ powerflex_lia_state: present
+ register: powerflex_lia_wrong_file_path_result
+ ignore_errors: true
+
+ - name: Verifying failure of install package with wrong file path in normal mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length | int == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_installation_invalid_path_rpm/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/converge.yml
new file mode 100644
index 000000000..531359673
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/converge.yml
@@ -0,0 +1,30 @@
+---
+- name: Molecule Test for uninstallation of LIA
+ hosts: lia
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Uninstall powerflex lia
+ ansible.builtin.import_role:
+ name: powerflex_lia
+ vars:
+ powerflex_lia_state: 'absent'
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_lia_uninstall_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying uninstall package in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Removed: EMC-ScaleIO-lia' in powerflex_lia_uninstall_output.results[0].results[0]"
+ when: not ansible_check_mode and powerflex_lia_uninstall_output.changed
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'Nothing to do' in powerflex_lia_uninstall_output.results[0].msg"
+ when: not ansible_check_mode and not powerflex_lia_uninstall_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/molecule/lia_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml
new file mode 100644
index 000000000..4b987b80e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/install_lia.yml
@@ -0,0 +1,22 @@
+---
+- name: Set facts for MDM counts
+ ansible.builtin.set_fact:
+ mdm_count: "{{ groups['mdm'] | length }}"
+
+- name: Set facts for powerflex_lia_tertiary_ip
+ ansible.builtin.set_fact:
+ powerflex_lia_tertiary_ip: "{{ hostvars[groups['mdm'][2]]['ansible_host'] }}"
+ when: mdm_count | int > 2
+
+- name: Set facts for powerflex_lia_mdm_ips if mdm_count is 2
+ ansible.builtin.set_fact:
+ powerflex_lia_mdm_ips: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }},{{ hostvars[groups['mdm'][1]]['ansible_host'] }}"
+ when: mdm_count | int == 2
+
+- name: Set facts for powerflex_lia_mdm_ips if mdm_count is more than 2
+ ansible.builtin.set_fact:
+ powerflex_lia_mdm_ips: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }},{{ hostvars[groups['mdm'][1]]['ansible_host'] }},{{ powerflex_lia_tertiary_ip }}"
+ when: mdm_count | int > 2
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/main.yml
new file mode 100644
index 000000000..f04a3ff75
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install LIA
+ ansible.builtin.include_tasks: install_lia.yml
+ when: powerflex_lia_state == 'present'
+
+- name: Uninstall LIA
+ ansible.builtin.include_tasks: uninstall_lia.yml
+ when: powerflex_lia_state == 'absent'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/uninstall_lia.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/uninstall_lia.yml
new file mode 100644
index 000000000..3cac9b41d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/tasks/uninstall_lia.yml
@@ -0,0 +1,19 @@
+---
+- name: Uninstall package
+ register: powerflex_lia_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-lia
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-lia
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_lia/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/vars/main.yml
new file mode 100644
index 000000000..6877f3723
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_lia/vars/main.yml
@@ -0,0 +1,6 @@
+---
+file_glob_name: lia
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_lia_mdm_ips }}"
+ TOKEN: "{{ powerflex_lia_token }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/README.md
new file mode 100644
index 000000000..aa54a27a0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/README.md
@@ -0,0 +1,142 @@
+# powerflex_mdm
+
+Role to manage the installation and uninstallation of Powerflex MDM.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Usage instructions](#usage-instructions)
+* [Notes](#notes)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+ansible.posix
+community.general
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>true</td>
+ <td>Location of the installation and RPM GPG files to be installed.
+ <br> Use the installation software package that is compatible with the operating system of the node.
+ <br> The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>str</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_mdm_password</td>
+ <td>true</td>
+ <td>Password for mdm cluster.<br></td>
+ <td></td>
+ <td>str</td>
+ <td>Password123</td>
+ </tr>
+ <tr>
+ <td>powerflex_mdm_state</td>
+ <td>false</td>
+ <td>Specify state of MDM.<br></td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+ <tr>
+ <td>powerflex_mdm_virtual_ip</td>
+ <td>false</td>
+ <td>Virtual IP address of MDM.<br></td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_mdm_cert_password</td>
+ <td>false</td>
+ <td>The password to generate the certificate cli.
+ <br>Required while installing MDM for PowerFlex 4.x.<br></td>
+ <td></td>
+ <td>str</td>
+ <td>Password123!</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: "Install and configure powerflex mdm"
+ ansible.builtin.import_role:
+ name: "powerflex_mdm"
+ vars:
+ powerflex_common_file_install_location: "/opt/scaleio/rpm"
+ powerflex_mdm_password: password
+ powerflex_mdm_state: present
+
+ - name: "Uninstall powerflex mdm"
+ ansible.builtin.import_role:
+ name: "powerflex_mdm"
+ vars:
+ powerflex_mdm_state: absent
+
+```
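+
+For PowerFlex 4.x installations, the certificate password documented in the table above is also required; a hedged variant of the same example (values are placeholders):
+
+```
+ - name: "Install and configure powerflex mdm (PowerFlex 4.x)"
+   ansible.builtin.import_role:
+     name: "powerflex_mdm"
+   vars:
+     powerflex_common_file_install_location: "/opt/scaleio/rpm"
+     powerflex_mdm_password: password
+     powerflex_mdm_cert_password: "Password123!"
+     powerflex_mdm_state: present
+```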
+
+## Usage instructions
+----
+### To install all dependency packages, including mdm, on node:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall mdm:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Notes
+- The ```community.general``` collection must be installed for MDM installation on SLES OS.
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Bhavneet Sharma (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/defaults/main.yml
new file mode 100644
index 000000000..340da8bb8
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# Skip Java installation for powerflex (assume it's on the system)
+powerflex_skip_java: false
+powerflex_mdm_primary_hostname: ''
+powerflex_mdm_primary_ip: ''
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_mdm_ips }}"
+ MDM_ROLE_IS_MANAGER: 1
+file_glob_name: mdm
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/argument_spec.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/argument_spec.yml
new file mode 100644
index 000000000..be80c1970
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/argument_spec.yml
@@ -0,0 +1,28 @@
+---
+argument_specs:
+ main:
+ version_added: "1.8.0"
+ short_description: Role to manage the installation and uninstallation of Powerflex MDM
+ description:
+ - Role to manage the installation and uninstallation of Powerflex MDM.
+ options:
+ powerflex_mdm_state:
+ required: true
+ type: str
+ description: State of the Powerflex MDM.
+ powerflex_mdm_password:
+ required: true
+ type: str
+ description: Password for the Powerflex MDM.
+ powerflex_common_file_install_location:
+ required: true
+ type: str
+ description: Common file installation location.
+ powerflex_mdm_virtual_ip:
+ type: str
+ description: Virtual IP address of MDM.
+ powerflex_mdm_cert_password:
+ type: str
+ description:
+ - Password to generate cli certificate for MDM.
+ - Required while installing MDM for PowerFlex 4.x.
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/main.yml
new file mode 100644
index 000000000..889114f46
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/meta/main.yml
@@ -0,0 +1,29 @@
+---
+galaxy_info:
+ author: Bhavneet Sharma
+ description: Role to manage the installation and uninstallation of Powerflex MDM.
+ company: Dell Technologies
+ role_name: powerflex_mdm
+ namespace: dellemc
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.14.0"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+dependencies:
+ - role: powerflex_common
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/converge.yml
new file mode 100644
index 000000000..707de56fe
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/converge.yml
@@ -0,0 +1,63 @@
+---
+- name: MDM installation
+ hosts: mdm
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ tasks:
+ - name: Install common packages
+ ansible.builtin.import_role:
+ name: powerflex_common
+
+ - name: Install and configure Powerflex MDM
+ ansible.builtin.import_role:
+ name: powerflex_mdm
+ vars:
+ powerflex_mdm_state: present
+
+ - name: Verifying install package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying installation package in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: Verifying add primary mdm in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully created the MDM Cluster' in powerflex_mdm_add_primary_output.stdout"
+ when: not ansible_check_mode and powerflex_mdm_add_primary_output.changed
+
+ - name: Verifying add secondary mdm in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully added a standby MDM' in powerflex_mdm_add_secondary_output.stdout"
+ when: not ansible_check_mode and powerflex_mdm_add_secondary_output.changed
+
+ - name: Verifying add tertiary mdm in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully added a standby MDM' in powerflex_mdm_add_tertiary_output.stdout"
+ when: not ansible_check_mode and powerflex_mdm_add_tertiary_output.changed
+
+ - name: Verifying primary mdm configuration in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'The Primary MDM is already configured' in powerflex_mdm_add_primary_output.stderr_lines[0]"
+ when: not ansible_check_mode and not powerflex_mdm_add_primary_output.changed
+
+ - name: Verifying secondary mdm configuration in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'An MDM with the same name already exists' in powerflex_mdm_add_secondary_output.stderr_lines[0]"
+ when: not ansible_check_mode and not powerflex_mdm_add_secondary_output.changed
+
+ - name: Verifying tertiary mdm configuration in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'An MDM with the same name already exists' in powerflex_mdm_add_tertiary_output.stderr_lines[0]"
+ when: not ansible_check_mode and powerflex_mdm_tertiary_ip is defined and not powerflex_mdm_add_tertiary_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/converge.yml
new file mode 100644
index 000000000..ede8baf5a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/converge.yml
@@ -0,0 +1,53 @@
+---
+- name: MDM uninstallation
+ hosts: mdm
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ tasks:
+ - name: Uninstall powerflex MDM
+ ansible.builtin.import_role:
+ name: powerflex_mdm
+ vars:
+ powerflex_mdm_state: 'absent'
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_mdm_uninstall_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying remove secondary mdm in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully removed the standby MDM' in powerflex_mdm_remove_secondary.stdout"
+ when: not ansible_check_mode and powerflex_mdm_remove_secondary.changed
+
+ - name: Verifying remove tertiary mdm in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Successfully removed the standby MDM' in powerflex_mdm_remove_tertiary.stdout"
+ when: not ansible_check_mode and powerflex_mdm_tertiary_ip is defined and powerflex_mdm_remove_tertiary.changed
+
+ - name: Verifying uninstall package in normal mode
+ ansible.builtin.assert:
+ that:
+ - "'Removed: EMC-ScaleIO-mdm' in powerflex_mdm_uninstall_output.results[0].results[0]"
+ when: not ansible_check_mode and powerflex_mdm_uninstall_output.changed
+
+ - name: Verifying remove secondary mdm in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'No such file or directory' in powerflex_mdm_remove_secondary.msg"
+ when: not ansible_check_mode and not powerflex_mdm_remove_secondary.changed
+
+ - name: Verifying remove tertiary mdm in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'No such file or directory' in powerflex_mdm_remove_tertiary.msg"
+ when: not ansible_check_mode and powerflex_mdm_tertiary_ip is defined and not powerflex_mdm_remove_tertiary.changed
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - "'Nothing to do' in powerflex_mdm_uninstall_output.results[0].msg"
+ when: not ansible_check_mode and not powerflex_mdm_uninstall_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/molecule/mdm_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/add_certs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/add_certs.yml
new file mode 100644
index 000000000..f517d5229
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/add_certs.yml
@@ -0,0 +1,168 @@
+---
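+# Certificate flow for PowerFlex 4.x: generate the management CA, CLI, and MDM certificates on the primary MDM,
+# fetch them to the controller and copy them to the secondary/tertiary MDMs, register the CA on each MDM,
+# restart the mdm service on all MDM nodes, and finally clean up the fetched files on the controller.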
+- name: Generate CA certificate
+ register: powerflex_mdm_generate_mgmt_ca_cert
+ ansible.builtin.command: python3 certificate_generator_MDM_USER.py --generate_ca mgmt_ca.pem
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ changed_when: powerflex_mdm_generate_mgmt_ca_cert.rc == 0
+
+- name: Create CLI certificate
+ register: powerflex_mdm_generate_cli_cert
+ ansible.builtin.command: >
+ python3 certificate_generator_MDM_USER.py --generate_cli cli_certificate.p12 -CA mgmt_ca.pem --password {{ powerflex_mdm_cert_password }}
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ changed_when: powerflex_mdm_generate_cli_cert.rc == 0
+
+- name: Create MDM certificate
+ register: powerflex_mdm_generate_mdm_cert
+ ansible.builtin.command: python3 certificate_generator_MDM_USER.py --generate_mdm mdm_certificate.pem -CA mgmt_ca.pem
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ changed_when: powerflex_mdm_generate_mdm_cert.rc == 0
+
+- name: Create additional MDM certificates
+ register: powerflex_mdm_generate_additional_mdm_cert
+ ansible.builtin.command: python3 certificate_generator_MDM_USER.py --generate_mdm sec_mdm_certificate.pem -CA mgmt_ca.pem
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ changed_when: powerflex_mdm_generate_additional_mdm_cert.rc == 0
+
+- name: Fetch all certs to localhost
+ register: powerflex_mdm_fetch_certs
+ ansible.builtin.fetch:
+ src: /opt/emc/scaleio/mdm/cfg/{{ item }}
+ dest: /tmp/
+ flat: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ with_items:
+ - sec_mdm_certificate.pem
+ - cli_certificate.p12
+ - mgmt_ca.pem
+
+- name: Copy MDM certificates to Secondary manager MDM node
+ register: powerflex_mdm_copy_additional_certs_to_secondary
+ ansible.builtin.copy:
+ src: /tmp/sec_mdm_certificate.pem
+ dest: /opt/emc/scaleio/mdm/cfg/mdm_certificate.pem
+ mode: preserve
+ delegate_to: "{{ powerflex_mdm_secondary_hostname }}"
+ when: powerflex_mdm_secondary_ip is defined
+
+- name: Copy CLI certificates to Secondary manager MDM node
+ register: powerflex_mdm_copy_cli_certs_to_secondary
+ ansible.builtin.copy:
+ src: /tmp/cli_certificate.p12
+ dest: /opt/emc/scaleio/mdm/cfg/cli_certificate.p12
+ mode: preserve
+ delegate_to: "{{ powerflex_mdm_secondary_hostname }}"
+ when: powerflex_mdm_secondary_ip is defined
+
+- name: Copy mgmt_ca.pem certificates to Secondary manager MDM node
+ register: powerflex_mdm_copy_mgmt_certs_to_secondary
+ ansible.builtin.copy:
+ src: /tmp/mgmt_ca.pem
+ dest: /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem
+ mode: preserve
+ delegate_to: "{{ powerflex_mdm_secondary_hostname }}"
+ when: powerflex_mdm_secondary_ip is defined
+
+- name: Copy MDM certificates to Tertiary manager MDM node
+ register: powerflex_mdm_copy_additional_certs_to_tertiary
+ ansible.builtin.copy:
+ src: /tmp/sec_mdm_certificate.pem
+ dest: /opt/emc/scaleio/mdm/cfg/mdm_certificate.pem
+ mode: preserve
+ delegate_to: "{{ powerflex_mdm_tertiary_hostname }}"
+ when: powerflex_mdm_tertiary_ip is defined
+
+- name: Copy CLI certificates to Tertiary manager MDM node
+ register: powerflex_mdm_copy_cli_certs_to_tertiary
+ ansible.builtin.copy:
+ src: /tmp/cli_certificate.p12
+ dest: /opt/emc/scaleio/mdm/cfg/cli_certificate.p12
+ mode: preserve
+ delegate_to: "{{ powerflex_mdm_tertiary_hostname }}"
+ when: powerflex_mdm_tertiary_ip is defined
+
+- name: Copy mgmt_ca.pem certificates to Tertiary manager MDM node
+ register: powerflex_mdm_copy_mgmt_certs_to_tertiary
+ ansible.builtin.copy:
+ src: /tmp/mgmt_ca.pem
+ dest: /opt/emc/scaleio/mdm/cfg/mgmt_ca.pem
+ mode: preserve
+ delegate_to: "{{ powerflex_mdm_tertiary_hostname }}"
+ when: powerflex_mdm_tertiary_ip is defined
+
+- name: Add CA certificate on primary MDM
+ register: powerflex_mdm_add_mgmt_cert_to_ca_primary
+ ansible.builtin.command: scli --add_certificate --certificate_file mgmt_ca.pem
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ changed_when: powerflex_mdm_add_mgmt_cert_to_ca_primary.rc == 0
+
+- name: Add CA certificate on secondary MDM
+ register: powerflex_mdm_add_mgmt_cert_to_ca_secondary
+ ansible.builtin.command: scli --add_certificate --certificate_file mgmt_ca.pem
+ delegate_to: "{{ powerflex_mdm_secondary_hostname }}"
+ when: powerflex_mdm_secondary_ip is defined
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ changed_when: powerflex_mdm_add_mgmt_cert_to_ca_secondary.rc == 0
+
+- name: Add CA certificate on tertiary MDM
+ register: powerflex_mdm_add_mgmt_cert_to_ca_tertiary
+ ansible.builtin.command: scli --add_certificate --certificate_file mgmt_ca.pem
+ delegate_to: "{{ powerflex_mdm_tertiary_hostname }}"
+ when: powerflex_mdm_tertiary_ip is defined
+ args:
+ chdir: /opt/emc/scaleio/mdm/cfg
+ changed_when: powerflex_mdm_add_mgmt_cert_to_ca_tertiary.rc == 0
+
+- name: Start MDM service on primary MDM
+ register: powerflex_mdm_start_service_primary
+ ansible.builtin.service:
+ name: "mdm.service"
+ state: "restarted"
+ enabled: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+
+- name: Start MDM service on secondary MDM
+ register: powerflex_mdm_start_service_secondary
+ ansible.builtin.service:
+ name: "mdm.service"
+ state: "restarted"
+ enabled: true
+ delegate_to: "{{ powerflex_mdm_secondary_hostname }}"
+ when: powerflex_mdm_secondary_ip is defined
+
+- name: Start MDM service on tertiary MDM
+ register: powerflex_mdm_start_service_tertiary
+ ansible.builtin.service:
+ name: "mdm.service"
+ state: "restarted"
+ enabled: true
+ delegate_to: "{{ powerflex_mdm_tertiary_hostname }}"
+ when: powerflex_mdm_tertiary_ip is defined
+
+- name: Check MDM service status
+ register: powerflex_mdm_check_service
+ ansible.builtin.command: systemctl status mdm.service
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ changed_when: powerflex_mdm_check_service.rc == 0
+
+- name: Delete certificates from localhost
+ register: powerflex_mdm_delete_localhost_certs
+ ansible.builtin.file:
+ path: /tmp/{{ item }}
+ state: absent
+ with_items:
+ - sec_mdm_certificate.pem
+ - cli_certificate.p12
+ - mgmt_ca.pem
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_mdm.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_mdm.yml
new file mode 100644
index 000000000..76bbba5a6
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_mdm.yml
@@ -0,0 +1,32 @@
+---
+- name: Include the mdm_set_facts.yml
+ ansible.builtin.include_tasks: "mdm_set_facts.yml"
+
+- name: Include vars
+ ansible.builtin.include_vars: "../vars/{{ ansible_distribution }}.yml"
+
+- name: Set kernel.shmmax prerequisite on RHEL 6 based OS
+ ansible.posix.sysctl:
+ name: kernel.shmmax
+ value: 209715200
+ when: ansible_os_family == "RedHat" and ansible_distribution_major_version == "6"
+
+- name: List the rpm file
+ register: powerflex_mdm_package_file_version
+ ansible.builtin.find:
+ paths: "{{ powerflex_common_file_install_location }}"
+ patterns: "*{{ file_glob_name }}*.rpm"
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
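+# The regex below captures the major version from the MDM package file name (for example, a file name
+# containing "mdm-3.6" yields "3"); that value selects the 3.x or 4.x installation flow further down.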
+- name: Extract file versions
+ ansible.builtin.set_fact:
+ version: "{{ powerflex_mdm_package_file_version.files[0].path | regex_search('mdm-(\\d+)', '\\1') }}"
+ when: powerflex_mdm_package_file_version.files | length > 0
+
+- name: Install MDM for PowerFlex below 4.x
+ ansible.builtin.include_tasks: install_powerflex3x_mdm.yml
+ when: version[0] < "4"
+
+- name: Install MDM for PowerFlex 4.x
+ ansible.builtin.include_tasks: install_powerflex4x_mdm.yml
+ when: version[0] >= "4"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex3x_mdm.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex3x_mdm.yml
new file mode 100644
index 000000000..178bd8696
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex3x_mdm.yml
@@ -0,0 +1,128 @@
+---
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Wait for MDM to be active
+ ansible.builtin.wait_for:
+ port: 9011
+ state: started
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+
+- name: Add primary MDM with virtual ip
+ ansible.builtin.command: >
+ scli --create_mdm_cluster
+ --master_mdm_ip {{ powerflex_mdm_primary_ip }}
+ --master_mdm_management_ip {{ powerflex_mdm_primary_ip }}
+ --master_mdm_name {{ powerflex_mdm_primary_hostname }}
+ --master_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --cluster_virtual_ip {{ powerflex_mdm_virtual_ip }}
+ --accept_license --approve_certificate
+ run_once: true
+ register: powerflex_mdm_add_primary_output
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ until: powerflex_mdm_add_primary_output.rc == 0 or powerflex_mdm_add_primary_output.rc == 7
+ ignore_errors: true
+ when:
+ - powerflex_mdm_virtual_ip is defined
+ - powerflex_mdm_virtual_ip | length > 0
+ changed_when: powerflex_mdm_add_primary_output.rc == 0
+
+- name: Add primary MDM without virtual ip
+ ansible.builtin.command: >
+ scli --create_mdm_cluster
+ --master_mdm_ip {{ powerflex_mdm_primary_ip }}
+ --master_mdm_management_ip {{ powerflex_mdm_primary_ip }}
+ --master_mdm_name {{ powerflex_mdm_primary_hostname }}
+ --master_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --accept_license --approve_certificate
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ until: powerflex_mdm_add_primary_output.rc == 0 or powerflex_mdm_add_primary_output.rc == 7
+ register: powerflex_mdm_add_primary_output
+ ignore_errors: true
+ when:
+ - powerflex_mdm_virtual_ip is not defined or powerflex_mdm_virtual_ip | length == 0
+ changed_when: powerflex_mdm_add_primary_output.rc == 0
+
+- name: Wait for MDM to be active
+ ansible.builtin.wait_for:
+ port: 6611
+ state: started
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+
+- name: Initial login to primary MDM
+ ansible.builtin.command: scli --login --username admin --password admin
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ register: powerflex_mdm_initial_login
+ changed_when: powerflex_mdm_initial_login.rc == 0
+
+- name: Login with new password primary MDM
+ ansible.builtin.command: >
+ scli --login --username admin --password "{{ powerflex_mdm_password }}"
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ when: powerflex_mdm_initial_login.rc == 7
+ changed_when: powerflex_mdm_initial_login.rc == 0
+
+- name: Set password for MDM cluster
+ ansible.builtin.command: >
+ scli --set_password --old_password admin
+ --new_password "{{ powerflex_mdm_password }}"
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ when: powerflex_mdm_initial_login.rc == 0
+ changed_when: powerflex_mdm_initial_login.rc == 0
+
+- name: Secondary node login
+ ansible.builtin.command: >
+ scli --login --mdm_ip {{ powerflex_mdm_primary_ip }}
+ --username admin --password {{ powerflex_mdm_password }} --approve_certificate
+ run_once: true
+ register: powerflex_mdm_secondary_login
+ changed_when: powerflex_mdm_secondary_login.rc == 0
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+
+- name: Add secondary MDM
+ ansible.builtin.command: >
+ scli --add_standby_mdm
+ --new_mdm_ip {{ powerflex_mdm_secondary_ip }}
+ --mdm_role manager
+ --new_mdm_name {{ powerflex_mdm_secondary_hostname }}
+ --new_mdm_management_ip {{ powerflex_mdm_secondary_ip }}
+ --new_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --approve_certificate
+ run_once: true
+ register: powerflex_mdm_add_secondary_output
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ changed_when: powerflex_mdm_add_secondary_output.rc == 0
+
+- name: Tertiary node login
+ ansible.builtin.command: >
+ scli --login --mdm_ip {{ powerflex_mdm_primary_ip }} --username admin
+ --password {{ powerflex_mdm_password }} --approve_certificate
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_tertiary_hostname }}"
+ when: powerflex_mdm_tertiary_ip is defined
+ register: powerflex_mdm_tertiary_login
+ changed_when: powerflex_mdm_tertiary_login.rc == 0
+
+- name: Add tertiary MDM
+ ansible.builtin.command: >
+ scli --add_standby_mdm
+ --new_mdm_ip {{ powerflex_mdm_tertiary_ip }}
+ --mdm_role manager
+ --new_mdm_name {{ powerflex_mdm_tertiary_hostname }}
+ --new_mdm_management_ip {{ powerflex_mdm_tertiary_ip }}
+ --new_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --approve_certificate
+ run_once: true
+ register: powerflex_mdm_add_tertiary_output
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ when: powerflex_mdm_tertiary_ip is defined
+ changed_when: powerflex_mdm_add_tertiary_output.rc == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml
new file mode 100644
index 000000000..67164337d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/install_powerflex4x_mdm.yml
@@ -0,0 +1,101 @@
+---
+- name: Install java
+ ansible.builtin.include_tasks: "../../powerflex_common/tasks/install_java_{{ ansible_distribution }}.yml"
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Include the add_certs.yml
+ ansible.builtin.include_tasks: add_certs.yml
+
+- name: Add primary MDM with virtual ip
+ ansible.builtin.command: >
+ scli --create_mdm_cluster
+ --primary_mdm_ip {{ powerflex_mdm_primary_ip }}
+ --primary_mdm_management_ip {{ powerflex_mdm_primary_ip }}
+ --primary_mdm_name {{ powerflex_mdm_primary_hostname }}
+ --primary_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --cluster_virtual_ip {{ powerflex_mdm_virtual_ip }}
+ --accept_license --approve_certificate
+ run_once: true
+ register: powerflex_mdm_add_primary_output
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ until: powerflex_mdm_add_primary_output.rc == 0 or powerflex_mdm_add_primary_output.rc == 7
+ ignore_errors: true
+ when:
+ - powerflex_mdm_virtual_ip is defined
+ - powerflex_mdm_virtual_ip | length > 0
+ changed_when: powerflex_mdm_add_primary_output.rc == 0
+
+- name: Add primary MDM without virtual ip
+ ansible.builtin.command: >
+ scli --create_mdm_cluster
+ --primary_mdm_ip {{ powerflex_mdm_primary_ip }}
+ --primary_mdm_management_ip {{ powerflex_mdm_primary_ip }}
+ --primary_mdm_name {{ powerflex_mdm_primary_hostname }}
+ --primary_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --accept_license --approve_certificate
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ until: powerflex_mdm_add_primary_output.rc == 0 or powerflex_mdm_add_primary_output.rc == 7
+ register: powerflex_mdm_add_primary_output
+ ignore_errors: true
+ when:
+ - powerflex_mdm_virtual_ip is not defined or powerflex_mdm_virtual_ip | length == 0
+ changed_when: powerflex_mdm_add_primary_output.rc == 0
+
+- name: Wait for MDM to be active
+ ansible.builtin.wait_for:
+ port: 8611
+ state: started
+ run_once: true
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+
+- name: Login to primary MDM node
+ register: powerflex_mdm_secondary_login
+ ansible.builtin.command: >
+ scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ powerflex_mdm_cert_password }}
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ run_once: true
+ when: powerflex_mdm_secondary_ip is defined
+ changed_when: powerflex_mdm_secondary_login.rc == 0
+
+- name: Add secondary MDM
+ ansible.builtin.command: >
+ scli --add_standby_mdm
+ --new_mdm_ip {{ powerflex_mdm_secondary_ip }}
+ --mdm_role manager
+ --new_mdm_name {{ powerflex_mdm_secondary_hostname }}
+ --new_mdm_management_ip {{ powerflex_mdm_secondary_ip }}
+ --new_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --approve_certificate
+ run_once: true
+ register: powerflex_mdm_add_secondary_output
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ changed_when: powerflex_mdm_add_secondary_output.rc == 0
+
+- name: Login to Primary MDM node
+ register: powerflex_mdm_tertiary_login
+ ansible.builtin.command: >
+ scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ powerflex_mdm_cert_password }}
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ run_once: true
+ when: powerflex_mdm_tertiary_ip is defined
+ changed_when: powerflex_mdm_tertiary_login.rc == 0
+
+- name: Add tertiary MDM
+ ansible.builtin.command: >
+ scli --add_standby_mdm
+ --new_mdm_ip {{ powerflex_mdm_tertiary_ip }}
+ --mdm_role manager
+ --new_mdm_name {{ powerflex_mdm_tertiary_hostname }}
+ --new_mdm_management_ip {{ powerflex_mdm_tertiary_ip }}
+ --new_mdm_virtual_ip_interface {{ ansible_default_ipv4.interface }}
+ --approve_certificate
+ run_once: true
+ register: powerflex_mdm_add_tertiary_output
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ when: powerflex_mdm_tertiary_ip is defined
+ changed_when: powerflex_mdm_add_tertiary_output.rc == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/main.yml
new file mode 100644
index 000000000..a5e2703ac
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install and Configure MDM cluster
+ ansible.builtin.include_tasks: install_mdm.yml
+ when: powerflex_mdm_state == "present"
+
+- name: Uninstall MDM cluster
+ ansible.builtin.include_tasks: remove_mdm.yml
+ when: powerflex_mdm_state == "absent"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/mdm_set_facts.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/mdm_set_facts.yml
new file mode 100644
index 000000000..c24b889eb
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/mdm_set_facts.yml
@@ -0,0 +1,40 @@
+---
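+# MDM roles are derived from host order in the [mdm] inventory group: the first host becomes the
+# primary MDM, the second the secondary, and an optional third the tertiary.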
+- name: Set facts for MDM counts
+ ansible.builtin.set_fact:
+ mdm_count: "{{ groups['mdm'] | length }}"
+
+- name: Set facts for powerflex_mdm_primary_ip
+ ansible.builtin.set_fact:
+ powerflex_mdm_primary_ip: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }}"
+
+- name: Set facts for powerflex_mdm_primary_hostname
+ ansible.builtin.set_fact:
+ powerflex_mdm_primary_hostname: "{{ hostvars[groups['mdm'][0]]['inventory_hostname'] }}"
+
+- name: Set facts for powerflex_mdm_secondary_ip
+ ansible.builtin.set_fact:
+ powerflex_mdm_secondary_ip: "{{ hostvars[groups['mdm'][1]]['ansible_host'] }}"
+
+- name: Set facts for powerflex_mdm_secondary_hostname
+ ansible.builtin.set_fact:
+ powerflex_mdm_secondary_hostname: "{{ hostvars[groups['mdm'][1]]['inventory_hostname'] }}"
+
+- name: Set facts for powerflex_mdm_tertiary_ip
+ ansible.builtin.set_fact:
+ powerflex_mdm_tertiary_ip: "{{ hostvars[groups['mdm'][2]]['ansible_host'] }}"
+ when: mdm_count | int > 2
+
+- name: Set facts for powerflex_mdm_tertiary_hostname
+ ansible.builtin.set_fact:
+ powerflex_mdm_tertiary_hostname: "{{ hostvars[groups['mdm'][2]]['inventory_hostname'] }}"
+ when: mdm_count | int > 2
+
+- name: Set facts for powerflex_mdm_ips if mdm_count is 2
+ ansible.builtin.set_fact:
+ powerflex_mdm_ips: "{{ powerflex_mdm_secondary_ip }},{{ powerflex_mdm_primary_ip }}"
+ when: mdm_count | int == 2
+
+- name: Set facts for powerflex_mdm_ips if mdm_count is more than 2
+ ansible.builtin.set_fact:
+ powerflex_mdm_ips: "{{ powerflex_mdm_secondary_ip }},{{ powerflex_mdm_primary_ip }},{{ powerflex_mdm_tertiary_ip }}"
+ when: mdm_count | int > 2
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/remove_mdm.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/remove_mdm.yml
new file mode 100644
index 000000000..5c200c5e3
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/remove_mdm.yml
@@ -0,0 +1,58 @@
+---
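+# Uninstall flow: determine the PowerFlex major version from the MDM package file under /var/tmp,
+# log in to the cluster (scli username/password for 3.x, management system login for 4.x),
+# remove the standby MDMs, and then uninstall the MDM packages via uninstall_mdm.yml.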
+- name: Include the mdm_set_facts.yml
+ ansible.builtin.include_tasks: mdm_set_facts.yml
+
+- name: List the rpm file
+ register: powerflex_mdm_package_file_version
+ ansible.builtin.find:
+ paths: "/var/tmp/"
+ patterns: "*{{ file_glob_name }}*.rpm"
+
+- name: Extract file versions
+ ansible.builtin.set_fact:
+ version: "{{ powerflex_mdm_package_file_version.files[0].path | regex_search('mdm-(\\d+)', '\\1') }}"
+ when: powerflex_mdm_package_file_version.files | length > 0
+
+- name: MDM Cluster login below PowerFlex 4.x
+ ansible.builtin.command: >
+ scli --login --mdm_ip {{ powerflex_mdm_primary_ip }}
+ --username admin --password {{ powerflex_mdm_password }} --approve_certificate
+ run_once: true
+ ignore_errors: true
+ register: powerflex_mdm_cluster_login
+ changed_when: powerflex_mdm_cluster_login.rc == 0
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ when: version[0] < "4"
+
+- name: Login to primary MDM node for PowerFlex 4.x
+ register: powerflex_mdm_primary_login
+ ansible.builtin.command: >
+ scli --login --username {{ username }} --management_system_ip {{ hostname }} --password "{{ password }}"
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ run_once: true
+ ignore_errors: true
+ changed_when: powerflex_mdm_primary_login.rc == 0
+ when: version[0] >= "4"
+
+- name: Remove secondary MDM
+ ansible.builtin.command: >
+ scli --remove_standby_mdm --remove_mdm_ip {{ powerflex_mdm_secondary_ip }}
+ run_once: true
+ register: powerflex_mdm_remove_secondary
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ when: powerflex_mdm_secondary_ip is defined
+ changed_when: powerflex_mdm_remove_secondary.rc == 0
+
+- name: Remove tertiary MDM
+ ansible.builtin.command: >
+ scli --remove_standby_mdm --remove_mdm_ip {{ powerflex_mdm_tertiary_ip }}
+ run_once: true
+ register: powerflex_mdm_remove_tertiary
+ delegate_to: "{{ powerflex_mdm_primary_hostname }}"
+ ignore_errors: true
+ when: powerflex_mdm_tertiary_ip is defined
+ changed_when: powerflex_mdm_remove_tertiary.rc == 0
+
+- name: Include uninstall_mdm.yml
+ ansible.builtin.include_tasks: uninstall_mdm.yml
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/uninstall_mdm.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/uninstall_mdm.yml
new file mode 100644
index 000000000..a4302cb6d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/tasks/uninstall_mdm.yml
@@ -0,0 +1,19 @@
+---
+- name: Uninstall MDM package
+ register: powerflex_mdm_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-mdm
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-mdm
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/CentOS.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/CentOS.yml
new file mode 100644
index 000000000..1f3617cf0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/CentOS.yml
@@ -0,0 +1,5 @@
+---
+powerflex_mdm_packages:
+ - bash-completion
+ - python2
+ - binutils
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/RedHat.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/RedHat.yml
new file mode 100644
index 000000000..1f3617cf0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/RedHat.yml
@@ -0,0 +1,5 @@
+---
+powerflex_mdm_packages:
+ - bash-completion
+ - python2
+ - binutils
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/SLES.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/SLES.yml
new file mode 100644
index 000000000..f71b51d9a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/SLES.yml
@@ -0,0 +1,3 @@
+---
+powerflex_mdm_packages:
+ - python3
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/Ubuntu.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/Ubuntu.yml
new file mode 100644
index 000000000..3d64b947c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/Ubuntu.yml
@@ -0,0 +1,4 @@
+---
+powerflex_mdm_packages:
+ - bash-completion
+ - python2.7
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/main.yml
new file mode 100644
index 000000000..94678a3cf
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_mdm/vars/main.yml
@@ -0,0 +1,6 @@
+---
+file_glob_name: mdm
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_mdm_ips }}"
+ MDM_ROLE_IS_MANAGER: 1
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/README.md
new file mode 100644
index 000000000..5006cb6d4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/README.md
@@ -0,0 +1,311 @@
+# powerflex_sdc
+
+Role to manage the installation and uninstallation of Powerflex SDC.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Notes](#notes)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+pywinrm==0.4.3
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+ansible.windows
+```
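+
+These can typically be installed from Ansible Galaxy, for example:
+```
+ansible-galaxy collection install dellemc.powerflex ansible.windows
+```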
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex gateway</td>
+ <td></td>
+ <td>str</td>
+ <td>10.1.1.1</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex gateway</td>
+ <td></td>
+ <td>str</td>
+ <td>admin</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex gateway</td>
+ <td></td>
+ <td>str</td>
+ <td>password</td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>Port</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>timeout</td>
+ <td>false</td>
+ <td>Timeout</td>
+ <td></td>
+ <td>int</td>
+ <td>120</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>true</td>
+ <td>Location of installation and rpm gpg files to be installed.
+ <br>Provide the installation software package that is required for, and compatible with, the operating system of the node.
+ <br>The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>str</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_win_package_location</td>
+ <td>false</td>
+ <td>Location of the SDC Windows package on the node.
+ <br>The SDC Windows package is copied to this location during installation.</td>
+ <td></td>
+ <td>str</td>
+ <td>C:\\Windows\\Temp</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_repo_address</td>
+ <td>false</td>
+ <td>Repository address for the kernel modules</td>
+ <td></td>
+ <td>str</td>
+ <td>ftp://ftp.emc.com/</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_repo_user</td>
+ <td>false</td>
+ <td>Username for the repository</td>
+ <td></td>
+ <td>str</td>
+ <td>QNzgdxXix</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_repo_password</td>
+ <td>false</td>
+ <td>Password for the repository</td>
+ <td></td>
+ <td>str</td>
+ <td>Aw3wFAwAq3</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_repo_local_dir</td>
+ <td>false</td>
+ <td>Local cache of the repository</td>
+ <td></td>
+ <td>str</td>
+ <td>/bin/emc/scaleio/scini_sync/driver_cache/</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_user_private_rsa_key_src</td>
+ <td>false</td>
+ <td>Private ssh RSA key source (if using sftp protocol)</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_user_private_rsa_key_dest</td>
+ <td>false</td>
+ <td>Private ssh RSA key destination</td>
+ <td></td>
+ <td>str</td>
+ <td>/bin/emc/scaleio/scini_sync/scini_key</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_repo_public_rsa_key_src</td>
+ <td>false</td>
+ <td>Public ssh RSA key source (if using sftp protocol)</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_repo_public_rsa_key_dest</td>
+ <td>false</td>
+ <td>Public ssh RSA key destination</td>
+ <td></td>
+ <td>str</td>
+ <td>/bin/emc/scaleio/scini_sync/scini_repo_key.pub</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_module_sigcheck</td>
+ <td>false</td>
+ <td>If signature check is required</td>
+ <td></td>
+ <td>str</td>
+ <td>1</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_emc_public_gpg_key_src</td>
+ <td>false</td>
+ <td>Location of the signature file</td>
+ <td></td>
+ <td>str</td>
+ <td>{{ powerflex_common_file_install_location }}/files/RPM-GPG-KEY-ScaleIO_2.0.*.0</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_emc_public_gpg_key_dest</td>
+ <td>false</td>
+ <td>Destination of the signature file</td>
+ <td></td>
+ <td>str</td>
+ <td>/bin/emc/scaleio/scini_sync/emc_key.pub</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_driver_sync_sync_pattern</td>
+ <td>false</td>
+ <td>Repo sync pattern</td>
+ <td></td>
+ <td>str</td>
+ <td>.*</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_name</td>
+ <td>false</td>
+ <td>New name to assign to the SDC<br></td>
+ <td></td>
+ <td>str</td>
+ <td>sdc_test</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_performance_profile</td>
+ <td>false</td>
+ <td>Performance profile of SDC<br></td>
+ <td></td>
+ <td>str</td>
+ <td>Compact</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_esxi_guid</td>
+ <td>false</td>
+ <td>Specifies the unique GUID for the ESXi SDC node.
+ <br>It is required only for ESXi nodes.
+ <br>To configure an ESXi node as an SDC, generate one GUID per server.
+ <br>Freely available online tools can generate these strings.
+ <br>If your value differs from the default, update it in defaults/main.yml.</td>
+ <td></td>
+ <td>str</td>
+ <td>d422ecab-af6f-4e0c-a059-333ac89cfb42</td>
+ </tr>
+ <tr>
+ <td>powerflex_sdc_state</td>
+ <td>false</td>
+ <td>Specify state of SDC<br></td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install and configure powerflex SDC
+ ansible.builtin.import_role:
+ name: powerflex_sdc
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_common_file_install_location: "/opt/scaleio/rpm"
+ powerflex_sdc_name: sdc_test
+ powerflex_sdc_performance_profile: Compact
+ powerflex_sdc_state: present
+
+ - name: Uninstall powerflex SDC
+ ansible.builtin.import_role:
+ name: powerflex_sdc
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_sdc_state: absent
+
+```
+
+## Notes
+- Generate a GUID using https://www.guidgenerator.com/online-guid-generator.aspx with the default GUID settings; a local alternative is sketched below.
+- Adding an ESXi server as an SDC requires two server reboots.
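+
+If the online generator is not convenient, a GUID can also be produced locally on any host with Python 3 available, for example:
+```
+python3 -c "import uuid; print(uuid.uuid4())"
+```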
+
+## Usage instructions
+----
+### To install all dependency packages, including SDC, on node:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall SDC:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Jennifer John (ansible.team@Dell.com) 2023 <br>
+Bhavneet Sharma (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml
new file mode 100644
index 000000000..5801c0ced
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/defaults/main.yml
@@ -0,0 +1,21 @@
+---
+powerflex_common_win_package_location: "C:\\Windows\\Temp"
+powerflex_sdc_driver_sync_repo_address: 'ftp://ftp.emc.com/'
+powerflex_sdc_driver_sync_repo_user: 'QNzgdxXix'
+powerflex_sdc_driver_sync_repo_password: 'Aw3wFAwAq3'
+powerflex_sdc_driver_sync_repo_local_dir: '/bin/emc/scaleio/scini_sync/driver_cache/'
+powerflex_sdc_driver_sync_user_private_rsa_key_src: ''
+powerflex_sdc_driver_sync_user_private_rsa_key_dest: '/bin/emc/scaleio/scini_sync/scini_key'
+powerflex_sdc_driver_sync_repo_public_rsa_key_src: ''
+powerflex_sdc_driver_sync_repo_public_rsa_key_dest: '/bin/emc/scaleio/scini_sync/scini_repo_key.pub'
+powerflex_sdc_driver_sync_module_sigcheck: 1
+powerflex_sdc_driver_sync_emc_public_gpg_key_src: ../../../files/RPM-GPG-KEY-powerflex_2.0.*.0
+powerflex_sdc_driver_sync_emc_public_gpg_key_dest: '/bin/emc/scaleio/scini_sync/emc_key.pub'
+powerflex_sdc_driver_sync_sync_pattern: .*
+powerflex_sdc_state: present
+powerflex_sdc_name: sdc_test
+powerflex_sdc_performance_profile: Compact
+file_glob_name: sdc
+i_am_sure: 1
+powerflex_sdc_esxi_guid: "d422ecab-af6f-4e0c-a059-333ac89cfb42"
+powerflex_role_environment:
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/handlers/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/handlers/main.yml
new file mode 100644
index 000000000..ecdcc3384
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/handlers/main.yml
@@ -0,0 +1,5 @@
+---
+- name: Restart scini
+ ansible.builtin.service:
+ name: "scini"
+ state: "restarted"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/meta/main.yml
new file mode 100644
index 000000000..2535678e4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/meta/main.yml
@@ -0,0 +1,25 @@
+---
+galaxy_info:
+ author: Jennifer John
+ description: The role helps to manage the installation of SDC.
+ company: Dell Technologies
+ role_name: powerflex_sdc
+ namespace: dellemc
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/converge.yml
new file mode 100644
index 000000000..8f5049642
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/converge.yml
@@ -0,0 +1,118 @@
+---
+- name: Volume creation
+ hosts: localhost
+ vars_files:
+ - ../var_values.yml
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ tasks:
+ - name: Create a volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ vol_name: "{{ vol_name }}"
+ storage_pool_name: "{{ storage_pool_name }}"
+ protection_domain_name: "{{ protection_domain_name }}"
+ size: "{{ vol_size }}"
+ state: "present"
+ register: volume_output
+
+- name: Install SDC
+ hosts: sdc
+ vars_files:
+ - ../var_values.yml
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ tasks:
+ - name: "Install and configure powerflex SDC"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_sdc_name: "{{ sdc_name }}"
+ powerflex_sdc_state: present
+ register: powerflex_sdc_result_molecule
+
+- name: Mapping SDC to volume
+ hosts: localhost
+ vars_files:
+ - ../var_values.yml
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: Map a SDC to volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ vol_name: "{{ vol_name }}"
+ allow_multiple_mappings: true
+ sdc:
+ - sdc_name: "{{ sdc_name }}"
+ access_mode: "{{ access_mode }}"
+ sdc_state: "mapped"
+ state: "present"
+
+- name: Uninstall SDC
+ hosts: sdc
+ vars_files:
+ - ../var_values.yml
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: "Uninstall powerflex SDC"
+ register: powerflex_sdc_uninstall_outputs
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ ignore_errors: true
+ vars:
+ powerflex_sdc_name: "{{ sdc_name }}"
+ powerflex_sdc_state: 'absent'
+
+ - name: "Verifying failure of sdc removal"
+ ansible.builtin.assert:
+ that:
+ - "'SDC has mapped volume(s)' in powerflex_sdc_remove_output.msg"
+
+- name: Unmapping and Removing SDC
+ hosts: localhost
+ vars_files:
+ - ../var_values.yml
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: Unmap a SDC to volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ vol_name: "{{ vol_name }}"
+ sdc:
+ - sdc_name: "{{ sdc_name }}"
+ sdc_state: "unmapped"
+ state: "present"
+
+ - name: Delete the Volume
+ dellemc.powerflex.volume:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ vol_name: "{{ vol_name }}"
+ delete_snapshots: false
+ state: "absent"
+
+ - name: Remove the SDC
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ sdc_name: "{{ sdc_name }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_install_map_volume_uninstall_negative/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/converge.yml
new file mode 100644
index 000000000..4687e5ffc
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/converge.yml
@@ -0,0 +1,62 @@
+---
+- name: Converge
+ hosts: sdc
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: "Install and configure powerflex SDC"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_sdc_state: present
+ register: powerflex_sdc_result_molecule
+
+ - name: "Verifying installation package"
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when:
+ - not ansible_check_mode and powerflex_common_install_package_output.changed
+ - " 'WindowsOS' not in ansible_distribution "
+ - " 'VMkernel' not in ansible_distribution "
+
+ - name: "Verifying installation package on windows"
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.rc == 0
+ when:
+ - " 'WindowsOS' in ansible_distribution "
+ - not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: "Verifying installation package on ESXi"
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.rc == 0
+ when:
+ - " 'VMkernel' in ansible_distribution "
+ - not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: "Verifying performance profile value"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_performance_profile_output.sdc_details.perfProfile == powerflex_sdc_performance_profile
+ when: not ansible_check_mode and powerflex_sdc_performance_profile_output.changed
+
+ - name: "Verifying rename value"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_rename_output.sdc_details.name == powerflex_sdc_name
+ when: not ansible_check_mode and powerflex_sdc_performance_profile_output.changed
+
+ - name: "Verifying performance profile value in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_performance_profile_output.sdc_details.perfProfile == powerflex_sdc_performance_profile
+ when: not ansible_check_mode and not powerflex_sdc_performance_profile_output.changed
+
+ - name: "Verifying rename value in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_rename_output.sdc_details.name == powerflex_sdc_name
+ when: not ansible_check_mode and not powerflex_sdc_performance_profile_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/converge.yml
new file mode 100644
index 000000000..bb06afe18
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/converge.yml
@@ -0,0 +1,54 @@
+---
+- name: Converge
+ hosts: sdc
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: "Install and configure powerflex SDC with no rpm"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_common_file_install_location: "/opt/empty"
+ powerflex_sdc_state: present
+ register: powerflex_sdc_result_molecule
+ ignore_errors: true
+
+ - name: "Verifying failure of install package with respect to no rpm file"
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
+
+ - name: "Install and configure powerflex SDC with wrong file path"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_common_file_install_location: "/opt/aaab"
+ powerflex_sdc_state: present
+ register: powerflex_sdc_result_molecule
+ ignore_errors: true
+
+ - name: "Verifying failure of install package with wrong file path"
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
+
+ - name: "Install and configure powerflex SDC with wrong rpm version"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_common_file_install_location: "/opt/wrong_rpm"
+ powerflex_sdc_state: present
+ ignore_errors: true
+ register: powerflex_sdc_wrong_rpm_version
+
+ - name: "Verifying failure of install package with wrong rpm version"
+ ansible.builtin.assert:
+ that:
+ - " 'Depsolve Error occurred: ' in powerflex_common_install_package_output.msg"
+
+ - name: "Uninstall powerflex SDC"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_sdc_state: absent
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_installation_invalid_path_rpm/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/converge.yml
new file mode 100644
index 000000000..a11ce0ec4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/converge.yml
@@ -0,0 +1,57 @@
+---
+- name: Converge
+ hosts: sdc
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: "Uninstall powerflex SDC"
+ register: powerflex_sdc_uninstall_outputs
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_sdc_state: 'absent'
+
+ - name: "Verifying uninstall package in check mode"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_uninstall_output.msg == "Check mode: No changes made, but would have if not in check mode"
+ - powerflex_sdc_uninstall_output.changed is true
+ when: ansible_check_mode
+
+ - name: "Verifying remove the sdc"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_remove_output.sdc_details is none
+ when: not ansible_check_mode and powerflex_sdc_remove_output.changed
+
+ - name: "Verifying uninstall package in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_uninstall_output.results[0].msg == 'Nothing to do'
+ when:
+ - not ansible_check_mode and not powerflex_sdc_uninstall_output.changed
+ - " 'WindowsOS' not in ansible_distribution "
+ - " 'VMkernel' not in ansible_distribution "
+
+ - name: "Verifying uninstall package in Idempotency for Windows node"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_uninstall_output.msg == 'All items skipped'
+ when:
+ - not ansible_check_mode and not powerflex_sdc_uninstall_output.changed
+ - " 'WindowsOS' in ansible_distribution "
+
+ - name: "Verifying uninstall package in Idempotency for ESXi node"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_uninstall_output.msg == 'All items skipped'
+ when:
+ - not ansible_check_mode and not powerflex_sdc_uninstall_output.changed
+ - " 'VMkernel' in ansible_distribution "
+
+ - name: "Verifying remove the sdc in Idempotency"
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdc_remove_output.sdc_details is none
+ when: not ansible_check_mode and not powerflex_sdc_remove_output.changed and powerflex_sdc_remove_output.sdc_details is defined
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/sdc_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/var_values.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/var_values.yml
new file mode 100644
index 000000000..91e203e08
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/var_values.yml
@@ -0,0 +1,7 @@
+---
+vol_name: "sdc_test_vol_1"
+storage_pool_name: "pool1"
+protection_domain_name: "domain1"
+vol_size: 8
+sdc_name: sdc_test_demo_1
+access_mode: "READ_WRITE"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/converge.yml
new file mode 100644
index 000000000..86583fd34
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/converge.yml
@@ -0,0 +1,20 @@
+---
+- name: Converge
+ hosts: sdc
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: false
+ tasks:
+ - name: "Install and configure powerflex SDC with wrong credentials"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_sdc_state: present
+ ignore_unreachable: true
+ ignore_errors: true
+ register: powerflex_sdc_wrong_credentials_output
+
+ - name: "Verifying failure of install package with wrong credentials"
+ ansible.builtin.assert:
+ that:
+ - " 'failed. The error was: error while evaluating conditional' in powerflex_sdc_driver_sync_output.msg"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/inventory b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/inventory
new file mode 100644
index 000000000..1974d7ad4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/inventory
@@ -0,0 +1,4 @@
+node0 ansible_host=10.2.2.2 ansible_port=22 ansible_ssh_pass=wrongpassword ansible_user=root
+
+[sdc]
+node0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/molecule.yml
new file mode 100644
index 000000000..805f92879
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_credentials/molecule.yml
@@ -0,0 +1,11 @@
+---
+provisioner:
+ name: ansible
+ inventory:
+ links:
+ hosts: inventory
+ group_vars: ../../../../playbooks/roles/group_vars/
+ host_vars: ../../../../playbooks/roles/host_vars/
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/converge.yml
new file mode 100644
index 000000000..1ebda8c05
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/converge.yml
@@ -0,0 +1,20 @@
+---
+- name: Converge
+ hosts: sdc
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: false
+ tasks:
+ - name: "Install and configure powerflex SDC with wrong SDC IP"
+ ansible.builtin.import_role:
+ name: "powerflex_sdc"
+ vars:
+ powerflex_sdc_state: present
+ ignore_unreachable: true
+ ignore_errors: true
+ register: powerflex_sdc_wrong_ip_output
+
+ - name: "Verifying failure of install package with wrong SDC IP"
+ ansible.builtin.assert:
+ that:
+ " 'failed. The error was: error while evaluating conditional' in powerflex_sdc_driver_sync_output.msg"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/inventory b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/inventory
new file mode 100644
index 000000000..845add1d5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/inventory
@@ -0,0 +1,4 @@
+node0 ansible_host=10.2.2.0 ansible_port=22 ansible_ssh_pass=password ansible_user=root
+
+[sdc]
+node0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/molecule.yml
new file mode 100644
index 000000000..805f92879
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/molecule/wrong_sdc_ip/molecule.yml
@@ -0,0 +1,11 @@
+---
+provisioner:
+ name: ansible
+ inventory:
+ links:
+ hosts: inventory
+ group_vars: ../../../../playbooks/roles/group_vars/
+ host_vars: ../../../../playbooks/roles/host_vars/
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/configure_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/configure_sdc.yml
new file mode 100644
index 000000000..453fe43d5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/configure_sdc.yml
@@ -0,0 +1,28 @@
+---
+- name: Rename the SDC
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_ip: "{{ ansible_host }}"
+ sdc_new_name: "{{ powerflex_sdc_name }}"
+ state: "present"
+ register: powerflex_sdc_rename_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+ when: powerflex_sdc_name is defined and powerflex_sdc_name | length > 0
+
+- name: Set performance profile of SDC
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_ip: "{{ ansible_host }}"
+ performance_profile: "{{ powerflex_sdc_performance_profile }}"
+ state: "present"
+ register: powerflex_sdc_performance_profile_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+ when:
+ - powerflex_sdc_performance_profile is defined
+ - powerflex_sdc_performance_profile | length > 0
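
Both tasks above run only when their variable is defined and non-empty, so callers opt in per setting. A minimal invocation sketch with both optional settings supplied (the host group, SDC name, and profile value are illustrative assumptions; connection variables such as hostname, username, password, and validate_certs are expected from a vars file, as in the molecule scenarios):

```
- name: Configure SDC name and performance profile (illustrative)
  hosts: sdc
  tasks:
    - name: Install and configure PowerFlex SDC
      ansible.builtin.import_role:
        name: dellemc.powerflex.powerflex_sdc
      vars:
        powerflex_sdc_state: present
        powerflex_sdc_name: "sdc_node_01"               # assumed example name
        powerflex_sdc_performance_profile: "Compact"    # assumed example profile value
```
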
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml
new file mode 100644
index 000000000..9b75321c3
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/install_sdc.yml
@@ -0,0 +1,75 @@
+---
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: "present"
+ register: powerflex_sdc_mdm_ip_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - powerflex_sdc_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_sdc_mdm_ips: "{{ powerflex_sdc_mdm_ip_result.mdm_cluster_details.mdmAddresses | join(',') }}"
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Register SDC and set MDM IP addresses via register_esxi_sdc.yml
+ ansible.builtin.include_tasks: register_esxi_sdc.yml
+ when: ansible_distribution == "VMkernel"
+
+- name: Include configure_sdc.yml
+ ansible.builtin.include_tasks: configure_sdc.yml
+
+- name: Copy powerflex_sdc_driver_sync_user_private_rsa_key_src for driver_sync.conf
+ ansible.builtin.copy:
+ src: "{{ powerflex_sdc_driver_sync_user_private_rsa_key_src }}"
+ dest: "{{ powerflex_sdc_driver_sync_user_private_rsa_key_dest }}"
+ mode: "0600"
+ owner: "root"
+ group: "root"
+ when:
+ - powerflex_sdc_driver_sync_user_private_rsa_key_src is defined
+ - powerflex_sdc_driver_sync_user_private_rsa_key_src | length > 0
+ - " 'WindowsOS' not in ansible_distribution"
+
+- name: Copy powerflex_sdc_driver_sync_repo_public_rsa_key_src for driver_sync.conf
+ ansible.builtin.copy:
+ src: "{{ powerflex_sdc_driver_sync_repo_public_rsa_key_src }}"
+ dest: "{{ powerflex_sdc_driver_sync_repo_public_rsa_key_dest }}"
+ mode: "0600"
+ owner: "root"
+ group: "root"
+ when:
+ - powerflex_sdc_driver_sync_repo_public_rsa_key_src is defined
+ - powerflex_sdc_driver_sync_repo_public_rsa_key_src | length > 0
+ - " 'WindowsOS' not in ansible_distribution"
+
+- name: Copy powerflex_sdc_driver_sync_emc_public_gpg_key_src for driver_sync.conf
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "{{ powerflex_sdc_driver_sync_emc_public_gpg_key_dest }}"
+ mode: "0600"
+ owner: "root"
+ group: "root"
+ with_fileglob:
+ - "{{ powerflex_sdc_driver_sync_emc_public_gpg_key_src }}"
+ when:
+ - powerflex_sdc_driver_sync_emc_public_gpg_key_src is defined
+ - powerflex_sdc_driver_sync_emc_public_gpg_key_src | length > 0
+ - " 'WindowsOS' not in ansible_distribution"
+
+- name: Copy driver_sync.conf template in place
+ register: powerflex_sdc_driver_sync_output
+ ansible.builtin.template:
+ src: "driver_sync.conf.j2"
+ dest: "/bin/emc/scaleio/scini_sync/driver_sync.conf"
+ mode: "0600"
+ owner: "root"
+ group: "root"
+ notify: restart scini
+ when:
+ - ansible_distribution != "VMkernel"
+ - " 'WindowsOS' not in ansible_distribution"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/main.yml
new file mode 100644
index 000000000..ec81fb21f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install SDC
+ ansible.builtin.include_tasks: install_sdc.yml
+ when: powerflex_sdc_state == 'present'
+
+- name: Remove SDC
+ ansible.builtin.include_tasks: remove_sdc.yml
+ when: powerflex_sdc_state == 'absent'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml
new file mode 100644
index 000000000..0b06fc8cd
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/register_esxi_sdc.yml
@@ -0,0 +1,47 @@
+---
+- name: Register SDC and Set MDM IP addresses
+ register: powerflex_sdc_register_set_sdc_mdm
+ ansible.builtin.shell: >
+ esxcli system module parameters set -m scini -p "IoctlIniGuidStr={{ powerflex_sdc_esxi_guid }}
+ IoctlMdmIPStr={{ powerflex_sdc_mdm_ips }} bBlkDevIsPdlActive=1 blkDevPdlTimeoutMillis=60000"
+ changed_when: powerflex_sdc_register_set_sdc_mdm.rc == 0
+
+- name: Reboot ESXi host
+ register: powerflex_sdc_reboot_node
+ ansible.builtin.reboot:
+ reboot_timeout: 500
+ msg: "Rebooting the ESXi host."
+ when:
+ - powerflex_sdc_register_set_sdc_mdm.rc == 0
+ - "'Reboot Required: true' in powerflex_common_install_package_output.stdout"
+ changed_when: powerflex_sdc_reboot_node
+
+- name: Ensure the driver is loaded for SDC
+ register: powerflex_sdc_driver_loaded
+ ansible.builtin.shell: >
+ set -o pipefail && vmkload_mod -l | grep scini
+ changed_when: powerflex_sdc_driver_loaded.stdout_lines | length == 0
+
+- name: Verify ESXi SDC connection with MDMs
+ ansible.builtin.copy:
+ src: "{{ item }}"
+ dest: "{{ powerflex_common_esxi_files_location }}"
+ mode: "0755"
+ register: powerflex_sdc_drv_cfg_file_output
+ with_fileglob:
+ - "{{ powerflex_common_file_install_location }}/drv_cfg*"
+
+- name: List the drv_cfg file
+ register: powerflex_sdc_drv_cfg_file
+ ansible.builtin.find:
+ paths: "{{ powerflex_common_esxi_files_location }}"
+ patterns: "*drv_cfg*"
+
+- name: Execute drv_cfg command
+ register: powerflex_sdc_drv_cfg_output
+ ansible.builtin.command: ./drv_cfg --query_mdm
+ args:
+ chdir: "{{ powerflex_common_esxi_files_location }}"
+ when: powerflex_sdc_drv_cfg_file.files | length > 0
+ changed_when:
+ - "'Retrieved 1 mdm(s)' not in powerflex_sdc_drv_cfg_output.stdout"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/remove_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/remove_sdc.yml
new file mode 100644
index 000000000..b8e9b5492
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/remove_sdc.yml
@@ -0,0 +1,20 @@
+---
+- name: Set fact # noqa var-naming[no-role-prefix]
+ ansible.builtin.set_fact:
+ ansible_distribution: "WindowsOS"
+ when: " 'Windows' in ansible_distribution"
+
+- name: Include uninstall_sdc.yml
+ ansible.builtin.include_tasks: uninstall_sdc.yml
+
+- name: Remove the SDC
+ dellemc.powerflex.sdc:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sdc_name: "{{ powerflex_sdc_name }}"
+ state: "absent"
+ register: powerflex_sdc_remove_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+ when: powerflex_sdc_name is defined and powerflex_sdc_name | length > 0
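
Removal therefore always runs the package uninstall include, while the MDM-side removal only fires when powerflex_sdc_name is set. A minimal invocation sketch under that assumption (the SDC name is illustrative; connection variables are expected from a vars file, as in the molecule scenarios):

```
- name: Remove PowerFlex SDC (illustrative)
  hosts: sdc
  tasks:
    - name: Uninstall SDC packages and deregister from the MDM
      ansible.builtin.import_role:
        name: dellemc.powerflex.powerflex_sdc
      vars:
        powerflex_sdc_state: absent
        powerflex_sdc_name: "sdc_node_01"   # assumed name; omit to skip the MDM-side removal
```
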
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_esxi_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_esxi_sdc.yml
new file mode 100644
index 000000000..b3c3c68df
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_esxi_sdc.yml
@@ -0,0 +1,30 @@
+---
+- name: Get the name of SDC driver installed
+ register: powerflex_sdc_installed_driver_list
+ ansible.builtin.shell: >
+ set -o pipefail && esxcli software vib list | grep sdc
+ changed_when: powerflex_sdc_installed_driver_list.stdout_lines | length == 0
+ ignore_errors: true
+
+- name: Remove the SDC driver from the ESXi host
+ register: powerflex_sdc_remove_driver_output
+ ansible.builtin.shell: >
+ esxcli software vib remove -n {{ powerflex_sdc_installed_driver_list.stdout.split()[0] }}
+ changed_when: "'Reboot Required: true' in powerflex_sdc_remove_driver_output.stdout"
+ when: powerflex_sdc_installed_driver_list.stdout_lines | length != 0
+
+- name: Reboot ESXi host
+ register: powerflex_sdc_remove_driver_reboot_output
+ ansible.builtin.reboot:
+ reboot_timeout: 450
+ msg: "Rebooting the ESXi host."
+ when:
+ - powerflex_sdc_installed_driver_list.stdout_lines | length != 0
+ changed_when: powerflex_sdc_remove_driver_reboot_output.rebooted
+
+- name: List the SDC driver installed
+ ansible.builtin.shell: >
+ set -o pipefail && esxcli software vib list | grep sdc
+ register: powerflex_sdc_drivers_list
+ changed_when: powerflex_sdc_drivers_list.stdout_lines | length != 0
+ failed_when: powerflex_sdc_drivers_list.stdout_lines | length != 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_sdc.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_sdc.yml
new file mode 100644
index 000000000..e46a32683
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/tasks/uninstall_sdc.yml
@@ -0,0 +1,40 @@
+---
+- name: Uninstall package
+ register: powerflex_sdc_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-sdc
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES", "Rocky")
+
+- name: Uninstall deb package
+ register: powerflex_sdc_uninstall_deb_output
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-sdc
+ when: ansible_distribution == "Ubuntu"
+
+- name: Include uninstall_esxi_sdc.yml to uninstall the esxi sdc
+ ansible.builtin.include_tasks: uninstall_esxi_sdc.yml
+ when: ansible_distribution == "VMkernel"
+
+- name: List the msi files
+ ansible.windows.win_find:
+ paths: "{{ powerflex_common_win_package_location }}"
+ patterns: "*{{ file_glob_name }}*.msi"
+ register: powerflex_sdc_msi_package_files
+ when: ansible_distribution == "WindowsOS"
+
+- name: Uninstall win package
+ register: powerflex_sdc_uninstall_win_output
+ ansible.windows.win_package:
+ path: "{{ item.path }}"
+ state: absent
+ with_items:
+ - "{{ powerflex_sdc_msi_package_files.files }}"
+ when: ansible_distribution == "WindowsOS"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/templates/driver_sync.conf.j2 b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/templates/driver_sync.conf.j2
new file mode 100644
index 000000000..08541128e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/templates/driver_sync.conf.j2
@@ -0,0 +1,31 @@
+###############################################################################
+# driver_sync Configuration file
+# Everything after a '#' until the end of the line is ignored
+###############################################################################
+
+# Repository address, prefixed by protocol
+repo_address = {{ powerflex_sdc_driver_sync_repo_address }}
+
+# Repository user (valid for ftp/sftp protocol)
+repo_user = {{ powerflex_sdc_driver_sync_repo_user }}
+
+# Repository password (valid for ftp protocol)
+repo_password = {{ powerflex_sdc_driver_sync_repo_password }}
+
+# Local directory for modules
+local_dir = {{ powerflex_sdc_driver_sync_repo_local_dir }}
+
+# User's RSA private key file (sftp protocol)
+user_private_rsa_key = {{ powerflex_sdc_driver_sync_user_private_rsa_key_dest }}
+
+# Repository host public key (sftp protocol)
+repo_public_rsa_key = {{ powerflex_sdc_driver_sync_repo_public_rsa_key_dest }}
+
+# Should the fetched modules' signatures be checked [0, 1]
+module_sigcheck = {{ powerflex_sdc_driver_sync_module_sigcheck }}
+
+# EMC public signature key (needed when module_sigcheck is 1)
+emc_public_gpg_key = {{ powerflex_sdc_driver_sync_emc_public_gpg_key_dest }}
+
+# Sync pattern (regular expression) for massive retrieve
+sync_pattern = {{ powerflex_sdc_driver_sync_sync_pattern }}
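
The template substitutes the powerflex_sdc_driver_sync_* variables verbatim; a hedged group_vars sketch with one plausible set of values (every value below is illustrative, not a default shipped with the role):

```
# e.g. group_vars/sdc.yml -- illustrative values only
powerflex_sdc_driver_sync_repo_address: "sftp://repo.example.com/scini"
powerflex_sdc_driver_sync_repo_user: "driversync"
powerflex_sdc_driver_sync_repo_password: ""          # unused with sftp
powerflex_sdc_driver_sync_repo_local_dir: "/bin/emc/scaleio/scini_sync/driver_cache/"
powerflex_sdc_driver_sync_user_private_rsa_key_dest: "/bin/emc/scaleio/scini_sync/scini_key"
powerflex_sdc_driver_sync_repo_public_rsa_key_dest: "/bin/emc/scaleio/scini_sync/scini_repo_key.pub"
powerflex_sdc_driver_sync_module_sigcheck: 1
powerflex_sdc_driver_sync_emc_public_gpg_key_dest: "/bin/emc/scaleio/scini_sync/emc_key.pub"
powerflex_sdc_driver_sync_sync_pattern: ".*"
```
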
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/vars/main.yml
new file mode 100644
index 000000000..ad6c0e4e2
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdc/vars/main.yml
@@ -0,0 +1,5 @@
+---
+file_glob_name: sdc
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_sdc_mdm_ips }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/README.md
new file mode 100644
index 000000000..e83491329
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/README.md
@@ -0,0 +1,145 @@
+# powerflex_sdr
+
+Role to manage the installation and uninstallation of PowerFlex SDR.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>false</td>
+ <td>Location of required, compatible installation software package based on the operating system of the node.
+ <br>The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>path</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_protection_domain_name</td>
+ <td>false</td>
+ <td>The name of the protection domain to which the SDR will be added.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_storage_pool_name</td>
+ <td>false</td>
+ <td>The name of the storage pool to which the device will be added.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sdr_repl_journal_capacity_max_ratio</td>
+ <td>false</td>
+ <td>Maximum capacity percentage to be allocated for journal capacity. Range is 0 to 100.</td>
+ <td></td>
+ <td>int</td>
+ <td>10</td>
+ </tr>
+ <tr>
+ <td>powerflex_mdm_password</td>
+ <td>true</td>
+ <td>Password for primary MDM node.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+<tr>
+ <td>powerflex_sdr_state</td>
+ <td>false</td>
+ <td>State of the SDR.</td>
+ <td>present, absent</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install powerflex SDR
+ ansible.builtin.include_role:
+ name: powerflex_sdr
+ vars:
+ powerflex_protection_domain_name: domain1
+ powerflex_storage_pool_name: pool1
+ powerflex_sdr_repl_journal_capacity_max_ratio: 10
+ powerflex_sdr_state: present
+ powerflex_mdm_password: Password111
+
+ - name: Uninstall powerflex SDR
+ ansible.builtin.include_role:
+ name: powerflex_sdr
+ vars:
+ powerflex_mdm_password: Password111
+ powerflex_sdr_state: absent
+
+```
+
+## Usage instructions
+----
+### To install all dependency packages, including SDR, on node:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall SDR:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Abhishek Sinha (ansible.team@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/defaults/main.yml
new file mode 100644
index 000000000..efae8a870
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/defaults/main.yml
@@ -0,0 +1,9 @@
+---
+# defaults file for powerflex_sdr
+file_glob_name: sdr
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_common_file_install_location: "/var/tmp"
+powerflex_sdr_repl_journal_capacity_max_ratio: 10
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_sdr_mdm_primary_ip }}"
+powerflex_sdr_state: present
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/argument_specs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/argument_specs.yml
new file mode 100644
index 000000000..b730ebed4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/argument_specs.yml
@@ -0,0 +1,34 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of Powerflex SDR
+ description:
+ - Role to manage the installation and uninstallation of Powerflex SDR.
+ options:
+ powerflex_common_file_install_location:
+ description:
+ - Location of installation and rpm gpg files to be installed.
+ - The required, compatible installation software package based on the operating system of the node.
+ - The files can be downloaded from the Dell Product support page for PowerFlex software.
+ type: path
+ default: /var/tmp
+ powerflex_sdr_state:
+ description:
+ - Specifies the state of SDR.
+ type: str
+ choices: ['absent', 'present']
+ default: present
+ powerflex_protection_domain_name:
+ description: The name of the protection domain to which the SDR will be added.
+ type: str
+ powerflex_storage_pool_name:
+ description: The name of the storage pool to which the device will be added.
+ type: str
+ powerflex_sdr_repl_journal_capacity_max_ratio:
+ description: Maximum capacity percentage to be allocated for journal capacity.
+ type: int
+ default: 10
+ powerflex_mdm_password:
+ required: true
+ type: str
+ description: Password for the Powerflex MDM.
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/main.yml
new file mode 100644
index 000000000..e0280962c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/meta/main.yml
@@ -0,0 +1,21 @@
+---
+galaxy_info:
+ role_name: powerflex_sdr
+ namespace: dellemc
+ author: Abhishek Sinha
+ description: The role to manage the installation and uninstallation of PowerFlex SDR.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/converge.yml
new file mode 100644
index 000000000..3e4bb51fa
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/converge.yml
@@ -0,0 +1,30 @@
+---
+- name: Molecule Test for installation of SDR
+ hosts: sdr
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Install and configure Powerflex SDR
+ ansible.builtin.import_role:
+ name: powerflex_sdr
+ vars:
+ powerflex_sdr_state: present
+
+ - name: Verifying install package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying installation package in normal mode
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: Verifying installation package in Idempotency mode
+ ansible.builtin.assert:
+ that:
+ - "'Nothing to do' in powerflex_common_install_package_output.msg"
+ when: not ansible_check_mode and not powerflex_common_install_package_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/converge.yml
new file mode 100644
index 000000000..bc5b5d3d5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/converge.yml
@@ -0,0 +1,34 @@
+---
+- name: Molecule Test for installation of SDR with invalid rpm path, rpm file
+ hosts: sdr
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Install and configure powerflex SDR with no rpm
+ ansible.builtin.import_role:
+ name: powerflex_sdr
+ vars:
+ powerflex_common_file_install_location: "/opt/empty"
+ powerflex_sdr_state: present
+ register: powerflex_sdr_no_rpm_result
+ ignore_errors: true
+
+ - name: Verifying failure of install package when no rpm file is present in normal mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length | int == 0
+
+ - name: Install and configure powerflex SDR with wrong file path
+ ansible.builtin.import_role:
+ name: powerflex_sdr
+ vars:
+ powerflex_common_file_install_location: "/opt/aaab"
+ powerflex_sdr_state: present
+ ignore_errors: true
+ register: powerflex_sdr_wrong_path_result
+
+ - name: Verifying failure of install package with wrong file path
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_path_rpm/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/converge.yml
new file mode 100644
index 000000000..17a448963
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/converge.yml
@@ -0,0 +1,20 @@
+---
+- name: Molecule Test for installation of SDR
+ hosts: sdr
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Install and configure powerflex SDR without protection domain
+ vars:
+ error_msg: "Please provide powerflex_protection_domain_name and powerflex_storage_pool_name in parameter for installing SDR."
+ block:
+ - name: Powerflex SDR
+ ansible.builtin.import_role:
+ name: dellemc.powerflex.powerflex_sdr
+ vars:
+ powerflex_sdr_state: present
+ rescue:
+ - name: Verifying failure of install package without protection domain
+ ansible.builtin.assert:
+ that: ansible_failed_result.ansible_facts.powerflex_add_sdr_output.msg == error_msg
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_installation_invalid_pd/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/converge.yml
new file mode 100644
index 000000000..cc28cae33
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/converge.yml
@@ -0,0 +1,66 @@
+---
+- name: SDR uninstallation
+ hosts: sdr
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+
+ tasks:
+ - name: Uninstall powerflex SDR
+ register: powerflex_sdr_uninstall_outputs
+ ansible.builtin.import_role:
+ name: powerflex_sdr
+ vars:
+ powerflex_sdr_state: absent
+
+ - name: Verifying uninstall package in converge
+ ansible.builtin.assert:
+ that:
+ - " 'Removed:' in powerflex_sdr_uninstall_output.results[0].results[0]"
+ when:
+ - not ansible_check_mode
+ - powerflex_sdr_uninstall_output.changed
+ - ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdr_uninstall_output.msg == "Check mode: No changes made"
+ - powerflex_sdr_uninstall_output.changed
+ - ansible_distribution in ("RedHat", "CentOS", "SLES")
+ when: ansible_check_mode
+
+ - name: Verifying removal of the SDR in normal mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_remove_sdr_output.sdr_details is None
+ when:
+ - not ansible_check_mode
+ - powerflex_remove_sdr_output.changed
+ - ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_sdr_uninstall_output.results[0].msg == 'Nothing to do'
+ - ansible_distribution in ("RedHat", "CentOS", "SLES")
+ when:
+ - not ansible_check_mode
+ - not powerflex_sdr_uninstall_output.changed
+ - ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying removal of the SDR in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_remove_sdr_output.msg == "Check mode: No changes made"
+ - powerflex_remove_sdr_output.changed
+ - ansible_distribution == "Ubuntu"
+ when: ansible_check_mode
+
+ - name: Verifying removal of the SDR in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_remove_sdr_output.sdr_details is None
+ when:
+ - not ansible_check_mode
+ - not powerflex_remove_sdr_output.changed
+ - ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/sdr_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/var_values.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/var_values.yml
new file mode 100644
index 000000000..87df25556
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/molecule/var_values.yml
@@ -0,0 +1,3 @@
+---
+powerflex_protection_domain_name: domain1
+powerflex_storage_pool_name: pool1
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml
new file mode 100644
index 000000000..1af345276
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/add_sdr.yml
@@ -0,0 +1,142 @@
+---
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: "present"
+ register: powerflex_sdr_mdm_ip_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - PowerFlex version
+ ansible.builtin.set_fact:
+ powerflex_sdr_array_version: "{{ powerflex_sdr_mdm_ip_result.mdm_cluster_details.master.versionInfo[1] }}"
+
+- name: Check that powerflex_protection_domain_name and powerflex_storage_pool_name are provided
+ ansible.builtin.set_fact:
+ powerflex_add_sdr_output:
+ msg: Please provide powerflex_protection_domain_name and powerflex_storage_pool_name in parameter for installing SDR.
+ failed_when:
+ - powerflex_protection_domain_name is undefined or powerflex_storage_pool_name is undefined
+
+- name: Include the sdr_set_facts.yml
+ ansible.builtin.include_tasks: sdr_set_facts.yml
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Login to mdm for PowerFlex version below 4.x
+ ansible.builtin.command: >
+ scli --login --mdm_ip {{ powerflex_sdr_mdm_primary_ip }}
+ --username admin
+ --password "{{ powerflex_mdm_password }}"
+ --approve_certificate
+ run_once: true
+ register: powerflex_initial_login
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_initial_login.rc == 0
+ no_log: true
+ when: powerflex_sdr_array_version == "3"
+
+- name: Login to mdm for PowerFlex version 4.x
+ ansible.builtin.command: >
+ scli --login --management_system_ip {{ hostname }}
+ --username admin
+ --password "{{ password }}"
+ --approve_certificate
+ run_once: true
+ register: powerflex_initial_login
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_initial_login.rc == 0
+ no_log: true
+ when: powerflex_sdr_array_version != "3"
+
+- name: Output msg of previous task login to mdm
+ ansible.builtin.debug:
+ var: powerflex_initial_login.stdout
+
+- name: Set replication capacity
+ ansible.builtin.command: >
+ scli --set_replication_journal_capacity
+ --protection_domain_name {{ powerflex_protection_domain_name }}
+ --storage_pool_name {{ powerflex_storage_pool_name }}
+ --replication_journal_capacity_max_ratio {{ powerflex_sdr_repl_journal_capacity_max_ratio }}
+ run_once: true
+ register: powerflex_set_replication_capacity
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_set_replication_capacity.rc == 0
+
+- name: Wait for replication capacity to be created
+ ansible.builtin.pause:
+ seconds: 60
+ run_once: true
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+
+- name: Get replication capacity
+ ansible.builtin.command: >
+ scli --query_all_replication_journal_capacity
+ --protection_domain_name {{ powerflex_protection_domain_name }}
+ run_once: true
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ register: powerflex_get_replication_capacity
+ changed_when: powerflex_get_replication_capacity.rc == 0
+
+- name: Checking if SDR already exists or not for PowerFlex version below 4.x
+ ansible.builtin.command: >
+ scli --mdm_ip {{ powerflex_sdr_mdm_primary_ip }}
+ --query_sdr
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ register: check_sdr_exists
+ changed_when: check_sdr_exists.rc == 0
+ failed_when: false
+ when: powerflex_sdr_array_version == "3"
+
+- name: Checking if SDR already exists or not for PowerFlex version 4.x
+ ansible.builtin.command: >
+ scli --management_system_ip {{ hostname }}
+ --query_sdr
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ register: check_sdr_exists
+ changed_when: check_sdr_exists.rc == 0
+ failed_when: false
+ when: powerflex_sdr_array_version != "3"
+
+- name: Skipping SDR addition if it already exists
+ ansible.builtin.debug:
+ msg: "SDR name {{ powerflex_sdr_hostname }} already exists, will skip adding SDR."
+ when: check_sdr_exists.rc == 0
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+
+- name: Adding SDR for PowerFlex version below 4.x
+ ansible.builtin.command: >
+ scli --add_sdr --mdm_ip {{ powerflex_sdr_mdm_primary_ip }}
+ --sdr_ip_role all
+ --sdr_ip {{ powerflex_sdr_ip }}
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ --protection_domain_name {{ powerflex_protection_domain_name }}
+ register: powerflex_add_sdr_output
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_add_sdr_output.rc == 0
+ when: check_sdr_exists.rc == 7 and powerflex_sdr_array_version == "3"
+
+- name: Adding SDR for PowerFlex version 4.x
+ ansible.builtin.command: >
+ scli --add_sdr --management_system_ip {{ hostname }}
+ --sdr_ip_role all
+ --sdr_ip {{ powerflex_sdr_ip }}
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ --protection_domain_name {{ powerflex_protection_domain_name }}
+ register: powerflex_add_sdr_output
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_add_sdr_output.rc == 0
+ when: check_sdr_exists.rc == 7 and powerflex_sdr_array_version != "3"
+
+- name: Logging out of the mdm
+ ansible.builtin.command: scli --logout
+ register: powerflex_mdm_logout
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_mdm_logout.rc == 0
+ run_once: true
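
The add step keys off the scli return code: rc 0 from --query_sdr is treated as already present, rc 7 as not found, and any other code silently skips both the notice and the add because failed_when is disabled on the query. A hedged sketch that makes the decision explicit, which could sit right after the query tasks:

```
- name: Summarise the SDR existence check (illustrative; mirrors the rc handling above)
  ansible.builtin.debug:
    msg: >-
      {{ 'SDR already registered - add will be skipped'
         if check_sdr_exists.rc == 0
         else ('SDR not found - add_sdr will run'
               if check_sdr_exists.rc == 7
               else 'Unexpected scli return code ' ~ check_sdr_exists.rc) }}
```
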
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/main.yml
new file mode 100644
index 000000000..440173b8d
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Add SDR
+ ansible.builtin.include_tasks: add_sdr.yml
+ when: powerflex_sdr_state == 'present'
+
+- name: Remove SDR
+ ansible.builtin.include_tasks: remove_sdr.yml
+ when: powerflex_sdr_state == 'absent'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml
new file mode 100644
index 000000000..3bf33b6ea
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/remove_sdr.yml
@@ -0,0 +1,120 @@
+---
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: "present"
+ register: powerflex_sdr_mdm_ip_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - PowerFlex version
+ ansible.builtin.set_fact:
+ powerflex_sdr_array_version: "{{ powerflex_sdr_mdm_ip_result.mdm_cluster_details.master.versionInfo[1] }}"
+
+- name: Include the sdr_set_facts.yml
+ ansible.builtin.include_tasks: sdr_set_facts.yml
+
+- name: Login to mdm for PowerFlex version below 4.x
+ ansible.builtin.command: >
+ scli --login --mdm_ip {{ powerflex_sdr_mdm_primary_ip }}
+ --username admin
+ --password "{{ powerflex_mdm_password }}"
+ --approve_certificate
+ run_once: true
+ register: powerflex_initial_login
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_initial_login.rc == 0
+ no_log: true
+ when: powerflex_sdr_array_version == "3"
+
+- name: Login to mdm for PowerFlex version 4.x
+ ansible.builtin.command: >
+ scli --login --management_system_ip {{ hostname }}
+ --username admin
+ --password "{{ password }}"
+ --approve_certificate
+ run_once: true
+ register: powerflex_initial_login
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_initial_login.rc == 0
+ no_log: true
+ when: powerflex_sdr_array_version != "3"
+
+- name: Output msg of previous task login to mdm
+ ansible.builtin.debug:
+ var: powerflex_initial_login.stdout
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+
+- name: Checking if SDR already exists or not for PowerFlex version below 4.x
+ ansible.builtin.command: >
+ scli --mdm_ip {{ powerflex_sdr_mdm_primary_ip }}
+ --query_sdr
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ register: check_sdr_exists
+ failed_when: false
+ changed_when: check_sdr_exists.rc == 0
+ when: powerflex_sdr_array_version == "3"
+
+- name: Checking if SDR already exists or not for PowerFlex version 4.x
+ ansible.builtin.command: >
+ scli --management_system_ip {{ hostname }}
+ --query_sdr
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ register: check_sdr_exists
+ failed_when: false
+ changed_when: check_sdr_exists.rc == 0
+ when: powerflex_sdr_array_version != "3"
+
+- name: Skipping SDR removal if it does not exist
+ ansible.builtin.debug:
+ msg: "SDR name {{ powerflex_sdr_hostname }} does not exist, will skip removing SDR."
+ when: check_sdr_exists.rc == 7
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+
+- name: Removing SDR for PowerFlex version below 4.x
+ ansible.builtin.command: >
+ scli --remove_sdr --mdm_ip {{ powerflex_sdr_mdm_primary_ip }}
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ register: powerflex_remove_sdr_output
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_remove_sdr_output.rc == 0
+ when: check_sdr_exists.rc == 0 and powerflex_sdr_array_version == "3"
+
+- name: Removing SDR for PowerFlex version 4.x
+ ansible.builtin.command: >
+ scli --remove_sdr --management_system_ip {{ hostname }}
+ --sdr_name "{{ powerflex_sdr_hostname }}"
+ register: powerflex_remove_sdr_output
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_remove_sdr_output.rc == 0
+ when: check_sdr_exists.rc == 0 and powerflex_sdr_array_version != "3"
+
+- name: Uninstall package
+ register: powerflex_sdr_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-sdr
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-sdr
+ when: ansible_distribution == "Ubuntu"
+
+- name: Logging out of the mdm
+ ansible.builtin.command: scli --logout
+ register: powerflex_mdm_logout
+ run_once: true
+ delegate_to: "{{ powerflex_sdr_mdm_primary_hostname }}"
+ changed_when: powerflex_mdm_logout.rc == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/sdr_set_facts.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/sdr_set_facts.yml
new file mode 100644
index 000000000..086b8b5bd
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/tasks/sdr_set_facts.yml
@@ -0,0 +1,7 @@
+---
+- name: Set facts for powerflex primary ip and hostname for mdm and sdr
+ ansible.builtin.set_fact:
+ powerflex_sdr_mdm_primary_ip: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }}"
+ powerflex_sdr_mdm_primary_hostname: "{{ hostvars[groups['mdm'][0]]['inventory_hostname'] }}"
+ powerflex_sdr_ip: "{{ hostvars[inventory_hostname]['ansible_host'] }}"
+ powerflex_sdr_hostname: "{{ inventory_hostname }}"
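
These facts assume an inventory that defines an mdm group whose first member carries ansible_host, alongside the sdr hosts targeted by the play. A minimal YAML inventory sketch under that assumption (all names and addresses are made up):

```
all:
  children:
    mdm:
      hosts:
        mdm-node-1:
          ansible_host: 10.1.1.10   # becomes powerflex_sdr_mdm_primary_ip
    sdr:
      hosts:
        sdr-node-1:
          ansible_host: 10.1.1.21   # becomes powerflex_sdr_ip for this host
```
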
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/vars/main.yml
new file mode 100644
index 000000000..6a0f1ad81
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sdr/vars/main.yml
@@ -0,0 +1,2 @@
+---
+# vars file for powerflex_sdr
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/README.md
new file mode 100644
index 000000000..af7061108
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/README.md
@@ -0,0 +1,243 @@
+# powerflex_sds
+
+Role to manage the installation and uninstallation of Powerflex SDS.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td>10.1.1.1</td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td>admin</td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td>password</td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>Port of the PowerFlex gateway.</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>timeout</td>
+ <td>false</td>
+ <td>Timeout</td>
+ <td></td>
+ <td>int</td>
+ <td>120</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>true</td>
+ <td>Location of installation and rpm gpg files to be installed.
+ <br>The required, compatible installation software package based on the operating system of the node.
+ <br>The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>str</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_protection_domain</td>
+ <td>true</td>
+ <td>The name of the protection domain to which the SDS will be added.
+ </td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_storage_pool</td>
+ <td>true</td>
+ <td>The name of the storage pool to which the device will be added.
+ </td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_disks</td>
+ <td>true</td>
+ <td>Disks for adding the device.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_role</td>
+ <td>true</td>
+ <td>Role of the SDS.</td>
+ <td>'sdsOnly', 'sdcOnly', 'all'</td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_device_media_type</td>
+ <td>true</td>
+ <td>Media type of the device.</td>
+ <td>'HDD', 'SSD', 'NVDIMM'</td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_device_name</td>
+ <td>true</td>
+ <td>Name of the device added to the SDS.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_external_acceleration_type</td>
+ <td>true</td>
+ <td>External acceleration type of the device added.</td>
+ <td>'Invalid', 'None', 'Read', 'Write', 'ReadAndWrite'</td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>powerflex_sds_fault_set</td>
+ <td>false</td>
+ <td>Fault set to which the SDS will be added.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+<tr>
+ <td>powerflex_sds_state</td>
+ <td>false</td>
+ <td>State of the SDS.</td>
+ <td>present, absent</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: "Install and configure powerflex SDS"
+ ansible.builtin.import_role:
+ name: "powerflex_sds"
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_sds_disks:
+ ansible_available_disks:
+ - '/dev/sdb'
+ powerflex_sds_disks_type: HDD
+ powerflex_sds_protection_domain: domain1
+ powerflex_sds_storage_pool: pool1
+ powerflex_sds_role: all
+ powerflex_sds_device_media_type: HDD
+ powerflex_sds_device_name: '/dev/sdb'
+ powerflex_sds_external_acceleration_type: ReadAndWrite
+ powerflex_sds_state: present
+
+ - name: "Uninstall powerflex SDS"
+ ansible.builtin.import_role:
+ name: "powerflex_sds"
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_sds_state: 'absent'
+
+```
+
+## Usage instructions
+----
+### To install all dependency packages, including SDS, on node:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall SDS:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
+
+## Author Information
+------------------
+
+Dell Technologies
+Trisha Datta (ansible.team@Dell.com) 2023 \ No newline at end of file
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/defaults/main.yml
new file mode 100644
index 000000000..033c766b5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/defaults/main.yml
@@ -0,0 +1,4 @@
+---
+file_glob_name: sds
+i_am_sure: 1
+powerflex_sds_state: present
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/argument_spec.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/argument_spec.yml
new file mode 100644
index 000000000..9b5e44cbe
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/argument_spec.yml
@@ -0,0 +1,88 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of Powerflex SDS.
+ description:
+ - Role to manage the installation and uninstallation of Powerflex SDS.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: IP or FQDN of the PowerFlex gateway.
+ username:
+ required: true
+ type: str
+ description: The username of the PowerFlex gateway.
+ password:
+ required: true
+ type: str
+ description: The password of the PowerFlex gateway.
+ port:
+ type: int
+ description: Port of the PowerFlex gateway.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: false
+ timeout:
+ description: Timeout.
+ type: int
+ default: 120
+ powerflex_common_file_install_location:
+ description:
+ - Location of installation and rpm gpg files to be installed.
+ - The required, compatible installation software package based on the operating system of the node.
+ - The files can be downloaded from the Dell Product support page for PowerFlex software.
+ type: str
+ default: /var/tmp
+ powerflex_sds_state:
+ description:
+ - Specifies the state of SDS.
+ type: str
+ choices: ['absent', 'present']
+ default: present
+ powerflex_sds_protection_domain:
+ required: true
+ description:
+ - The name of the protection domain to which the SDS will be added.
+ type: str
+ powerflex_sds_storage_pool:
+ required: true
+ description:
+ - The name of the storage pool to which the device will be added.
+ type: str
+ powerflex_sds_disks:
+ required: true
+ description:
+ - Disks for adding the device.
+ type: str
+ powerflex_sds_role:
+ required: true
+ description:
+ - Role of the SDS
+ type: str
+ choices: ['sdsOnly', 'sdcOnly', 'all']
+ powerflex_sds_device_media_type:
+ required: true
+ description:
+ - Media type of the device.
+ type: str
+ choices: ['HDD', 'SSD', 'NVDIMM']
+ powerflex_sds_device_name:
+ required: true
+ description:
+ - Name of the SDS device.
+ type: str
+ powerflex_sds_external_acceleration_type:
+ required: true
+ description:
+ - External acceleration type of the device added.
+ type: str
+ choices: ['Invalid', 'None', 'Read', 'Write', 'ReadAndWrite']
+ powerflex_sds_fault_set:
+ description:
+ - Fault set to which the SDS will be added.
+ type: str
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/main.yml
new file mode 100644
index 000000000..999d95e6e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/meta/main.yml
@@ -0,0 +1,24 @@
+---
+galaxy_info:
+ author: Trisha Datta
+ description: The role helps to manage the installation and uninstallation of SDS.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ role_name: powerflex_sds
+ namespace: dellemc
+
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/converge.yml
new file mode 100644
index 000000000..0dc711a2c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/converge.yml
@@ -0,0 +1,55 @@
+---
+- name: SDS installation
+ hosts: sds
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ tasks:
+ - name: Install common packages
+ ansible.builtin.import_role:
+ name: powerflex_common
+ vars:
+ powerflex_sds_state: present
+
+ - name: Install and configure powerflex SDS
+ ansible.builtin.import_role:
+ name: powerflex_sds
+ vars:
+ powerflex_sds_state: present
+
+ - name: Verifying device in normal mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_add_device_output.device_details.mediaType == "{{ powerflex_sds_device_media_type }}"
+ - powerflex_sds_add_device_output.device_details.name == "{{ powerflex_sds_device_name }}"
+ - powerflex_sds_add_device_output.device_details.externalAccelerationType == "{{ powerflex_sds_external_acceleration_type }}"
+ - powerflex_sds_add_device_output.device_details.storagepoolPoolName == "{{ powerflex_sds_storage_pool }}"
+ - powerflex_sds_add_device_output.device_details.protectionDomainName == "{{ powerflex_sds_protection_domain }}"
+ when:
+ - not ansible_check_mode
+ - powerflex_sds_add_device_output.changed
+
+ - name: Verifying install package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_install_package_output.msg == "Check mode: No changes made"
+ when: ansible_check_mode
+
+ - name: Verifying installation package
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when:
+ - not ansible_check_mode
+ - powerflex_common_install_package_output.changed
+
+ - name: Verifying device in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_add_device_output.device_details.mediaType == "{{ powerflex_sds_device_media_type }}"
+ - powerflex_sds_add_device_output.device_details.name == "{{ powerflex_sds_device_name }}"
+ - powerflex_sds_add_device_output.device_details.externalAccelerationType == "{{ powerflex_sds_external_acceleration_type }}"
+ - powerflex_sds_add_device_output.device_details.storagepoolPoolName == "{{ powerflex_sds_storage_pool }}"
+ - powerflex_sds_add_device_output.device_details.protectionDomainName == "{{ powerflex_sds_protection_domain }}"
+ when:
+ - not ansible_check_mode
+ - not powerflex_sds_add_device_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/converge.yml
new file mode 100644
index 000000000..fbb99419a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/converge.yml
@@ -0,0 +1,50 @@
+---
+- name: SDS uninstallation
+ hosts: sds
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ tasks:
+ - name: Uninstall powerflex SDS
+ register: powerflex_sds_uninstall_outputs
+ ansible.builtin.import_role:
+ name: powerflex_sds
+ vars:
+ powerflex_sds_state: absent
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_uninstall_output.msg == "Check mode: No changes made"
+ - powerflex_sds_uninstall_output.changed
+ when: ansible_check_mode
+
+ - name: Verifying removal of the SDS in normal mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_remove_result.sds_details is None
+ when:
+ - not ansible_check_mode
+ - powerflex_sds_remove_result.changed
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_uninstall_output.results[0].msg == 'Nothing to do'
+ when:
+ - not ansible_check_mode
+ - not powerflex_sds_uninstall_output.changed
+
+ - name: Verifying removal of the SDS in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_remove_result.msg == "Check mode: No changes made"
+ - powerflex_sds_remove_result.changed
+ when: ansible_check_mode
+
+ - name: Verifying removal of the SDS in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_sds_remove_result.sds_details is None
+ when:
+ - not ansible_check_mode
+ - not powerflex_sds_remove_result.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/sds_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/var_values.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/var_values.yml
new file mode 100644
index 000000000..b8d4ddc74
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/molecule/var_values.yml
@@ -0,0 +1,11 @@
+---
+powerflex_sds_disks:
+  ansible_available_disks:
+    - '/dev/sdb'
+powerflex_sds_disks_type: HDD
+powerflex_sds_protection_domain: domain1
+powerflex_sds_storage_pool: pool1
+powerflex_sds_role: all
+powerflex_sds_device_media_type: HDD
+powerflex_sds_device_name: '/dev/sdb'
+powerflex_sds_external_acceleration_type: ReadAndWrite
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml
new file mode 100644
index 000000000..8887ff13c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/install_sds.yml
@@ -0,0 +1,112 @@
+---
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: "present"
+ register: powerflex_sds_mdm_ip_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - PowerFlex version
+ ansible.builtin.set_fact:
+ powerflex_sds_array_version: "{{ powerflex_sds_mdm_ip_result.mdm_cluster_details.master.versionInfo[1] }}"
+
+- name: Set fact - powerflex_sds_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_sds_mdm_ips: "{{ powerflex_sds_mdm_ip_result.mdm_cluster_details.mdmAddresses | join(',') }}"
+ powerflex_sds_primary_mdm_hostname: "{{ hostvars[groups['mdm'][0]]['inventory_hostname'] }}"
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Collect only facts about hardware
+ ansible.builtin.setup:
+ gather_subset:
+ - hardware
+
+- name: Find disks
+ ansible.builtin.set_fact:
+ disks: "{{ hostvars[inventory_hostname].ansible_devices.keys() | list }}"
+
+- name: Use local disk variable
+ when: powerflex_sds_disks
+ ansible.builtin.set_fact:
+ disks: "{{ powerflex_sds_disks }}"
+
+- name: Login to MDM for PowerFlex version 4.x
+ ansible.builtin.command: scli --login --management_system_ip {{ hostname }} --username {{ username }} --password {{ password }} --approve_certificate
+ run_once: true
+ register: powerflex_sds_login_output
+ changed_when: powerflex_sds_login_output.rc == 0
+ delegate_to: "{{ powerflex_sds_primary_mdm_hostname }}"
+ when: powerflex_sds_array_version != "3"
+
+- name: Login to MDM for PowerFlex version below 4.x
+ ansible.builtin.command: scli --login --username {{ username }} --password {{ password }} --approve_certificate
+ run_once: true
+ register: powerflex_sds_login_output
+ changed_when: powerflex_sds_login_output.rc == 0
+ delegate_to: "{{ powerflex_sds_primary_mdm_hostname }}"
+ when: powerflex_sds_array_version == "3"
+
+- name: Create SDS
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ sds_name: "{{ inventory_hostname }}"
+ protection_domain_name: "{{ powerflex_sds_protection_domain }}"
+ sds_ip_list:
+ - ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}"
+ role: "{{ powerflex_sds_role }}"
+ sds_ip_state: "present-in-sds"
+ state: "present"
+ register: powerflex_sds_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+ when: powerflex_sds_fault_set is not defined
+
+- name: Add SDS with fault set for PowerFlex version 4.x
+ ansible.builtin.command:
+ scli --add_sds --management_system_ip {{ hostname }}
+ --sds_ip {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}
+ --storage_pool_name {{ powerflex_sds_storage_pool }} --sds_name "{{ inventory_hostname }}"
+ --protection_domain_name {{ powerflex_sds_protection_domain }} --fault_set_name {{ powerflex_sds_fault_set }}
+ register: add_sds
+ changed_when: ('already in use' in add_sds.stderr) or (add_sds.rc == 0)
+ delegate_to: "{{ powerflex_sds_primary_mdm_hostname }}"
+ ignore_errors: true
+ when: powerflex_sds_fault_set is defined and powerflex_sds_array_version != "3"
+
+- name: Add SDS with fault set for PowerFlex version below 4.x
+ ansible.builtin.command:
+ scli --add_sds --mdm_ip {{ powerflex_sds_mdm_ips }}
+ --sds_ip {{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}
+ --storage_pool_name {{ powerflex_sds_storage_pool }} --sds_name "{{ inventory_hostname }}"
+ --protection_domain_name {{ powerflex_sds_protection_domain }} --fault_set_name {{ powerflex_sds_fault_set }}
+ register: add_sds
+ changed_when: ('already in use' in add_sds.stderr) or (add_sds.rc == 0)
+ delegate_to: "{{ powerflex_sds_primary_mdm_hostname }}"
+ ignore_errors: true
+ when: powerflex_sds_fault_set is defined and powerflex_sds_array_version == "3"
+
+- name: Add a device
+ dellemc.powerflex.device:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ current_pathname: "{{ disks.ansible_available_disks | join(',') }}"
+ sds_name: "{{ inventory_hostname }}"
+ media_type: "{{ powerflex_sds_device_media_type }}"
+ device_name: "{{ powerflex_sds_device_name }}"
+ storage_pool_name: "{{ powerflex_sds_storage_pool }}"
+ protection_domain_name: "{{ powerflex_sds_protection_domain }}"
+ external_acceleration_type: "{{ powerflex_sds_external_acceleration_type }}"
+ force: true
+ state: "present"
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+ when: disks.ansible_available_disks | length > 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/main.yml
new file mode 100644
index 000000000..04fe23630
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install SDS
+ ansible.builtin.include_tasks: install_sds.yml
+ when: powerflex_sds_state == 'present'
+
+- name: Remove SDS
+ ansible.builtin.include_tasks: uninstall_sds.yml
+ when: powerflex_sds_state == 'absent'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/uninstall_sds.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/uninstall_sds.yml
new file mode 100644
index 000000000..af5609e57
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/tasks/uninstall_sds.yml
@@ -0,0 +1,31 @@
+---
+- name: Remove SDS
+ dellemc.powerflex.sds:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ sds_name: "{{ inventory_hostname }}"
+ state: "absent"
+ register: powerflex_sds_remove_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Uninstall package
+ register: powerflex_sds_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-sds
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-sds
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_sds/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/vars/main.yml
new file mode 100644
index 000000000..af154c9d9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_sds/vars/main.yml
@@ -0,0 +1,5 @@
+---
+file_glob_name: sds
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_sds_mdm_ips }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/README.md
new file mode 100644
index 000000000..dec61fec9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/README.md
@@ -0,0 +1,210 @@
+# powerflex_tb
+
+Role to manage the installation and uninstallation of the PowerFlex TB (tie breaker).
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Notes](#notes)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
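+
+If the collection is not already available on the control node, it can be installed from Ansible Galaxy, for example:
+
+```
+ansible-galaxy collection install dellemc.powerflex
+```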
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>Port of the PowerFlex gateway.</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>timeout</td>
+ <td>false</td>
+ <td>Timeout.</td>
+ <td></td>
+ <td>int</td>
+ <td>120</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>false</td>
+ <td>Location of the installation and RPM GPG files to be installed.
+ <br>The required, compatible installation software package depends on the operating system of the node.
+ <br>The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>str</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_tb_state</td>
+ <td>false</td>
+ <td>Specify state of TB.<br></td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+ <tr>
+ <td>powerflex_tb_primary_name</td>
+ <td>false</td>
+ <td>Name of the primary TB.<br></td>
+ <td></td>
+ <td>str</td>
+ <td>primary_tb</td>
+ </tr>
+ <tr>
+ <td>powerflex_tb_secondary_name</td>
+ <td>false</td>
+ <td>Name of the secondary TB.<br></td>
+ <td></td>
+ <td>str</td>
+ <td>secondary_tb</td>
+ </tr>
+ <tr>
+ <td>powerflex_tb_cluster_mode</td>
+ <td>false</td>
+ <td>Mode of the cluster.<br></td>
+ <td>ThreeNodes, FiveNodes</td>
+ <td>str</td>
+ <td>ThreeNodes</td>
+ </tr>
+ <tr>
+ <td>powerflex_tb_cert_password</td>
+ <td>false</td>
+ <td>The CLI certificate password for login to the primary MDM.<br></td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install and configure PowerFlex TB
+ ansible.builtin.import_role:
+ name: "powerflex_tb"
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_tb_primary_name: "primary_tb"
+ powerflex_tb_secondary_name: "secondary_tb"
+ powerflex_tb_cluster_mode: "ThreeNodes"
+ powerflex_common_file_install_location: "/var/tmp"
+ powerflex_tb_state: present
+
+ - name: Uninstall powerflex TB
+ ansible.builtin.import_role:
+ name: "powerflex_tb"
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_tb_state: 'absent'
+
+```
+
+## Notes
+----
+
+- As a prerequisite for PowerFlex 3.6, the Gateway must be installed.
+- For PowerFlex 4.x, after installing the TB, perform the initial configuration steps in the PowerFlex Manager GUI. These steps can be found in the Install and Update of Dell PowerFlex 4.x guide on the Dell Support page.
+
+## Usage instructions
+----
+### To install all dependency packages, including TB, on node:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory site_powerflex45.yml
+ ```
+
+### To uninstall TB:
+- PowerFlex 3.6:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+- PowerFlex 4.5:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex45.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
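+
+The role resolves MDM and tie breaker addresses from the C(mdm) and C(tb) inventory groups, so both groups must be present in the inventory. A minimal inventory sketch (host names and IP addresses below are placeholders):
+
+```
+mdm-node-1 ansible_host=192.0.2.11 ansible_user=root
+mdm-node-2 ansible_host=192.0.2.12 ansible_user=root
+tb-node-1 ansible_host=192.0.2.13 ansible_user=root
+
+[mdm]
+mdm-node-1
+mdm-node-2
+
+[tb]
+tb-node-1
+```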
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Ananthu S Kuttattu (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/defaults/main.yml
new file mode 100644
index 000000000..b4b0b5a30
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# defaults file for powerflex_tb
+powerflex_tb_primary_name: primary_tb
+powerflex_tb_secondary_name: secondary_tb
+powerflex_tb_cluster_mode: "ThreeNodes"
+file_glob_name: mdm
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_tb_mdm_ips }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_spec.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_spec.yml
new file mode 100644
index 000000000..f3072df80
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_spec.yml
@@ -0,0 +1,85 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of Powerflex TB.
+ description:
+ - Role to manage the installation and uninstallation of Powerflex TB.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: IP or FQDN of the PowerFlex gateway.
+ username:
+ required: true
+ type: str
+ description: The username of the PowerFlex gateway.
+ password:
+ required: true
+ type: str
+ description: The password of the PowerFlex gateway.
+ port:
+ type: int
+ description: Port of the PowerFlex gateway.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: false
+ timeout:
+ description: Timeout.
+ type: int
+ default: 120
+ powerflex_common_file_install_location:
+ description:
+ - Location of installation and rpm gpg files to be installed.
+ - The required, compatible installation software package depends on the operating system of the node.
+ type: str
+ default: /var/tmp
+ powerflex_tb_state:
+ description:
+ - Specify state of TB.
+ type: str
+ choices: ['absent', 'present']
+ default: present
+ powerflex_tb_primary_name:
+ required: false
+ description:
+ - Name of the primary TB.
+ type: str
+ default: 'primary_tb'
+ powerflex_tb_secondary_name:
+ required: false
+ description:
+ - Name of the secondary TB.
+ type: str
+ default: 'secondary_tb'
+ powerflex_tb_cluster_mode:
+ required: false
+ description:
+ - Mode of the cluster.
+ choices: ['ThreeNodes', 'FiveNodes']
+ type: str
+ default: 'ThreeNodes'
+ powerflex_protection_domain_name:
+ description:
+ - Name of the protection domain.
+ type: str
+ default: 'tb_protection_domain'
+ powerflex_fault_sets:
+ description:
+ - List of fault sets.
+ type: list
+ default: ['fs1', 'fs2', 'fs3']
+ powerflex_media_type:
+ description:
+ - Media type of the storage pool.
+ type: str
+ choices: ['SSD', 'HDD', 'TRANSITIONAL']
+ default: 'SSD'
+ powerflex_storage_pool_name:
+ description:
+ - Name of the storage pool.
+ type: str
+ default: 'tb_storage_pool'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_specs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_specs.yml
new file mode 100644
index 000000000..ac5a3e3ef
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/argument_specs.yml
@@ -0,0 +1,65 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of Powerflex TB.
+ description:
+ - Role to manage the installation and uninstallation of Powerflex TB.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: IP or FQDN of the PowerFlex gateway.
+ username:
+ required: true
+ type: str
+ description: The username of the PowerFlex gateway.
+ password:
+ required: true
+ type: str
+ description: The password of the PowerFlex gateway.
+ port:
+ type: int
+ description: Port of the PowerFlex gateway.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: false
+ timeout:
+ description: Timeout.
+ type: int
+ default: 120
+ powerflex_common_file_install_location:
+ description:
+ - Location of installation and rpm gpg files to be installed.
+ - The required, compatible installation software package depends on the operating system of the node.
+ type: str
+ default: /var/tmp
+ powerflex_tb_state:
+ description:
+ - Specify state of TB.
+ type: str
+ choices: ['absent', 'present']
+ default: present
+ powerflex_tb_primary_name:
+ description:
+ - Name of the primary TB.
+ type: str
+ default: 'primary_tb'
+ powerflex_tb_secondary_name:
+ description:
+ - Name of the secondary TB.
+ type: str
+ default: 'secondary_tb'
+ powerflex_tb_cluster_mode:
+ description:
+ - Mode of the cluster.
+ choices: ['ThreeNodes', 'FiveNodes']
+ type: str
+ default: 'ThreeNodes'
+ powerflex_tb_cert_password:
+ description:
+ - The CLI certificate password for login to the primary MDM.
+ type: str
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/main.yml
new file mode 100644
index 000000000..c3179cd1e
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/meta/main.yml
@@ -0,0 +1,25 @@
+---
+galaxy_info:
+ author: Ananthu S Kuttattu
+ description: Role to manage the installation and uninstallation of Powerflex TB.
+ company: Dell Technologies
+ license: GPL-3.0-only
+ role_name: powerflex_tb
+ namespace: dellemc
+
+ min_ansible_version: "2.14.0"
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/converge.yml
new file mode 100644
index 000000000..77d00ec11
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/converge.yml
@@ -0,0 +1,35 @@
+---
+- name: TB installation
+ hosts: tb
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: Install common packages
+ ansible.builtin.import_role:
+ name: powerflex_common
+
+ - name: "Install PowerFlex TieBreaker"
+ ansible.builtin.import_role:
+ name: "powerflex_tb"
+ vars:
+ powerflex_tb_state: present
+ register: powerflex_tb_results
+
+ - name: "Verifying installation package"
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: "Verifying cluster mode switch from 1 node to 3 node MDM cluster"
+ ansible.builtin.assert:
+ that:
+ - powerflex_tb_cluster_to_three_output.stdout == "Successfully switched the cluster mode."
+ when: not ansible_check_mode and powerflex_tb_cluster_to_three_output.changed and powerflex_tb_cluster_mode == "ThreeNodes"
+
+ - name: "Verifying cluster mode switch from 1 node to 5 node MDM cluster"
+ ansible.builtin.assert:
+ that:
+ - powerflex_tb_cluster_to_five_output.stdout == "Successfully switched the cluster mode."
+ when: not ansible_check_mode and powerflex_tb_cluster_to_five_output.changed and powerflex_tb_cluster_mode == "FiveNodes"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/converge.yml
new file mode 100644
index 000000000..986b270de
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/converge.yml
@@ -0,0 +1,19 @@
+---
+- name: TB uninstallation
+ hosts: tb
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: "Uninstall powerflex TB"
+ register: powerflex_tb_uninstall_outputs
+ ansible.builtin.import_role:
+ name: "powerflex_tb"
+ vars:
+ powerflex_tb_state: 'absent'
+
+ - name: "Verifying remove the TB"
+ ansible.builtin.assert:
+ that:
+ - powerflex_tb_remove_primary_tb_output.stdout == "Successfully removed the standby MDM."
+ when: not ansible_check_mode and powerflex_tb_remove_primary_tb_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/tb_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/var_values.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/var_values.yml
new file mode 100644
index 000000000..01397a639
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/var_values.yml
@@ -0,0 +1,7 @@
+---
+powerflex_tb_primary_name: primary_tb
+powerflex_tb_secondary_name: secondary_tb
+powerflex_tb_cluster_mode: "ThreeNodes"
+powerflex_protection_domain_name: "tb_protection_domain"
+powerflex_fault_sets: ['fs1', 'fs2', 'fs3']
+powerflex_storage_pool_name: "tb_storage_pool"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/converge.yml
new file mode 100644
index 000000000..c9322f319
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/converge.yml
@@ -0,0 +1,20 @@
+---
+- name: Providing incorrect credentials for TB node
+ hosts: tb
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: false
+ tasks:
+ - name: "Install and configure powerflex tb with wrong credentials"
+ ansible.builtin.import_role:
+ name: "powerflex_tb"
+ vars:
+ powerflex_tb_state: present
+ ignore_unreachable: true
+ ignore_errors: true
+ register: powerflex_tb_wrong_credentials_output
+
+ - name: "Verifying failure of install package with wrong credentials"
+ ansible.builtin.assert:
+ that:
+ - " 'Communication error' in powerflex_tb_primary_output.msg"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/inventory b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/inventory
new file mode 100644
index 000000000..391105e2b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/inventory
@@ -0,0 +1,4 @@
+node0 ansible_host=10.2.2.2 ansible_port=22 ansible_ssh_pass=wrongpassword ansible_user=root
+
+[tb]
+node0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/molecule.yml
new file mode 100644
index 000000000..805f92879
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/molecule/wrong_tb_credentials/molecule.yml
@@ -0,0 +1,11 @@
+---
+provisioner:
+ name: ansible
+ inventory:
+ links:
+ hosts: inventory
+ group_vars: ../../../../playbooks/roles/group_vars/
+ host_vars: ../../../../playbooks/roles/host_vars/
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb.yml
new file mode 100644
index 000000000..504b3e920
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb.yml
@@ -0,0 +1,8 @@
+---
+- name: Install TB for PowerFlex below 4.x
+ ansible.builtin.include_tasks: install_tb3x.yml
+ when: powerflex_tb_scli_version[0] == '3'
+
+- name: Install TB for PowerFlex 4.x
+ ansible.builtin.include_tasks: install_tb4x.yml
+ when: powerflex_tb_scli_version[0] >= '4'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb3x.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb3x.yml
new file mode 100644
index 000000000..e602351da
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb3x.yml
@@ -0,0 +1,81 @@
+---
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: "present"
+ register: powerflex_tb_mdm_ip_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - powerflex_tb_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_tb_mdm_ips: "{{ powerflex_tb_mdm_ip_result.mdm_cluster_details.mdmAddresses | join(',') }}"
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Login with password
+ ansible.builtin.command: scli --login --username {{ username }} --password "{{ password }}"
+ run_once: true
+ register: powerflex_tb_login_output
+ changed_when: powerflex_tb_login_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+
+- name: Add primary TB
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ mdm_name: "{{ powerflex_tb_primary_name }}"
+ standby_mdm:
+ mdm_ips:
+ - "{{ powerflex_tb_primary_ip }}"
+ role: "TieBreaker"
+ management_ips:
+ - "{{ powerflex_tb_primary_ip }}"
+ state: "present"
+ register: powerflex_tb_primary_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Add secondary TB
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ mdm_name: "{{ powerflex_tb_secondary_name }}"
+ standby_mdm:
+ mdm_ips:
+ - "{{ powerflex_tb_secondary_ip }}"
+ role: "TieBreaker"
+ management_ips:
+ - "{{ powerflex_tb_secondary_ip }}"
+ state: "present"
+ register: powerflex_tb_secondary_output
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+ when: powerflex_tb_secondary_ip is defined
+
+- name: Switch to cluster three node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "3_node" --add_slave_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}" --add_tb_ip "{{ powerflex_tb_primary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_to_three_output
+ changed_when: powerflex_tb_cluster_to_three_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_primary_output.mdm_cluster_details.clusterMode != "ThreeNodes" and powerflex_tb_cluster_mode == "ThreeNodes"
+
+- name: Switch to cluster five node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "5_node" --add_slave_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}","{{ powerflex_tb_mdm_tertiary_ip }}" --add_tb_ip "{{ powerflex_tb_primary_ip }}","{{ powerflex_tb_secondary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_to_five_output
+ changed_when: powerflex_tb_cluster_to_five_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_primary_output.mdm_cluster_details.clusterMode != "FiveNodes" and powerflex_tb_cluster_mode == "FiveNodes"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml
new file mode 100644
index 000000000..d34857ba4
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/install_tb4x.yml
@@ -0,0 +1,69 @@
+---
+- name: Set fact - powerflex_tb_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_tb_mdm_ips: "{{ powerflex_tb_mdm_primary_ip }},{{ powerflex_tb_mdm_secondary_ip }}"
+ when: powerflex_tb_mdm_count | int == 2
+
+- name: Set fact - powerflex_tb_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_tb_mdm_ips: "{{ powerflex_tb_mdm_primary_ip }},{{ powerflex_tb_mdm_secondary_ip }},{{ powerflex_tb_mdm_tertiary_ip }}"
+ when: powerflex_tb_mdm_count | int > 2
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
+
+- name: Login to primary MDM node
+ ansible.builtin.command: >
+ scli --login --p12_path /opt/emc/scaleio/mdm/cfg/cli_certificate.p12 --p12_password {{ powerflex_tb_cert_password }}
+ run_once: true
+ register: powerflex_tb_login_output
+ changed_when: powerflex_tb_login_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+
+- name: Add primary TB
+ ansible.builtin.command: >
+ scli --add_standby_mdm
+ --new_mdm_ip {{ powerflex_tb_primary_ip }}
+ --mdm_role tb
+ --new_mdm_name {{ powerflex_tb_primary_name }}
+ --new_mdm_management_ip {{ powerflex_tb_primary_ip }}
+ run_once: true
+ register: powerflex_tb_primary_output
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_primary_ip is defined
+ ignore_errors: true
+ changed_when: powerflex_tb_primary_output.rc == 0
+
+- name: Add secondary TB
+ ansible.builtin.command: >
+ scli --add_standby_mdm
+ --new_mdm_ip {{ powerflex_tb_secondary_ip }}
+ --mdm_role tb
+ --new_mdm_name {{ powerflex_tb_secondary_name }}
+ --new_mdm_management_ip {{ powerflex_tb_secondary_ip }}
+ register: powerflex_tb_secondary_output
+ run_once: true
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_secondary_ip is defined
+ ignore_errors: true
+ changed_when: powerflex_tb_secondary_output.rc == 0
+
+- name: Switch to cluster three node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "3_node" --add_secondary_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}" --add_tb_ip "{{ powerflex_tb_primary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_to_three_output
+ changed_when: powerflex_tb_cluster_to_three_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_mdm_cluster_mode[0] != "3_node" and powerflex_tb_cluster_mode == "ThreeNodes"
+
+- name: Switch to cluster five node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "5_node" --add_secondary_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}","{{ powerflex_tb_mdm_tertiary_ip }}" --add_tb_ip "{{ powerflex_tb_primary_ip }}","{{ powerflex_tb_secondary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_to_five_output
+ changed_when: powerflex_tb_cluster_to_five_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_mdm_cluster_mode[0] != "5_node" and powerflex_tb_cluster_mode == "FiveNodes"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/main.yml
new file mode 100644
index 000000000..f98c09ff1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/main.yml
@@ -0,0 +1,31 @@
+---
+- name: Include set_tb_ips.yml
+ ansible.builtin.include_tasks: set_tb_ips.yml
+
+- name: Get scli cluster details
+ ansible.builtin.command: >
+ scli --query_cluster
+ register: powerflex_tb_scli_cluster_details
+ tags: register
+ changed_when: powerflex_tb_scli_cluster_details.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+
+- name: Extract the scli version
+ ansible.builtin.set_fact:
+ powerflex_tb_scli_version: "{{ input_query | ansible.builtin.regex_search('Version: (\\d+)\\.(\\d+)', '\\1\\2') }}"
+ vars:
+ input_query: "{{ powerflex_tb_scli_cluster_details.stdout }}"
+
+- name: Extract the cluster mode
+ ansible.builtin.set_fact:
+ powerflex_tb_mdm_cluster_mode: "{{ input_query | ansible.builtin.regex_search('Mode: (\\w+)', '\\1') }}"
+ vars:
+ input_query: "{{ powerflex_tb_scli_cluster_details.stdout }}"
+
+- name: Install TB
+ ansible.builtin.include_tasks: install_tb.yml
+ when: powerflex_tb_state == 'present'
+
+- name: Uninstall TB
+ ansible.builtin.include_tasks: uninstall_tb.yml
+ when: powerflex_tb_state == 'absent'
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml
new file mode 100644
index 000000000..34c0144d5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/set_tb_ips.yml
@@ -0,0 +1,29 @@
+---
+- name: Set TB count
+ ansible.builtin.set_fact:
+ powerflex_tb_count: "{{ groups['tb'] | length }}"
+ powerflex_tb_mdm_count: "{{ groups['mdm'] | length }}"
+
+- name: Set fact - powerflex_tb_mdm_primary_ip and powerflex_tb_mdm_secondary_ip
+ ansible.builtin.set_fact:
+ powerflex_tb_mdm_primary_ip: "{{ hostvars[groups['mdm'][0]]['ansible_host'] }}"
+ powerflex_tb_mdm_primary_hostname: "{{ hostvars[groups['mdm'][0]]['inventory_hostname'] }}"
+ powerflex_tb_mdm_secondary_ip: "{{ hostvars[groups['mdm'][1]]['ansible_host'] }}"
+ powerflex_tb_mdm_secondary_hostname: "{{ hostvars[groups['mdm'][1]]['inventory_hostname'] }}"
+
+- name: Set fact - powerflex_tb_mdm_tertiary_ip
+ ansible.builtin.set_fact:
+ powerflex_tb_mdm_tertiary_ip: "{{ hostvars[groups['tb'][2]]['ansible_host'] }}"
+ powerflex_tb_mdm_tertiary_hostname: "{{ hostvars[groups['tb'][2]]['inventory_hostname'] }}"
+ when: "powerflex_tb_mdm_count | int > 2"
+
+- name: Set fact - powerflex_tb_primary
+ ansible.builtin.set_fact:
+ powerflex_tb_primary_ip: "{{ hostvars[groups['tb'][0]]['ansible_host'] }}"
+ powerflex_tb_primary_hostname: "{{ hostvars[groups['tb'][0]]['inventory_hostname'] }}"
+
+- name: Set fact - powerflex_tb_secondary
+ ansible.builtin.set_fact:
+ powerflex_tb_secondary_ip: "{{ hostvars[groups['tb'][1]]['ansible_host'] }}"
+ powerflex_tb_secondary_hostname: "{{ hostvars[groups['tb'][1]]['inventory_hostname'] }}"
+ when: "powerflex_tb_count | int > 1"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/uninstall_tb.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/uninstall_tb.yml
new file mode 100644
index 000000000..b08bffed8
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/tasks/uninstall_tb.yml
@@ -0,0 +1,97 @@
+---
+# Switch from three or five to cluster one node for PowerFlex version 3.6
+- name: Login to primary MDM node of PowerFlex version 3.x
+ ansible.builtin.command: scli --login --username {{ username }} --password "{{ password }}"
+ run_once: true
+ register: powerflex_tb_login_output
+ changed_when: powerflex_tb_login_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_scli_version[0] == '3'
+
+- name: Switch cluster mode from three nodes to one node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "1_node" --remove_slave_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}" --remove_tb_ip "{{ powerflex_tb_primary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_to_one_output
+ changed_when: powerflex_tb_cluster_to_one_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_mdm_cluster_mode[0] == "3_node" and powerflex_tb_scli_version[0] == '3'
+
+- name: Switch cluster mode from five nodes to one node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "1_node" --remove_slave_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}","{{ powerflex_tb_mdm_tertiary_ip }}" --remove_tb_ip "{{ powerflex_tb_primary_ip }}","{{ powerflex_tb_secondary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_to_one_output
+ changed_when: powerflex_tb_cluster_to_one_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_mdm_cluster_mode[0] == "5_node" and powerflex_tb_scli_version[0] == '3'
+
+# Switch from three or five to cluster one node for PowerFlex version 4.5
+- name: Login to primary MDM node of PowerFlex version 4.5
+ ansible.builtin.command: >
+ scli --login --management_system_ip {{ hostname }} --username {{ username }} --password {{ password }}
+ run_once: true
+ register: powerflex_tb_login_output
+ changed_when: powerflex_tb_login_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_scli_version[0] >= '4'
+
+- name: Switch cluster mode from three nodes to one node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "1_node" --remove_secondary_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}" --remove_tb_ip "{{ powerflex_tb_primary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_three_to_one_output
+ changed_when: powerflex_tb_cluster_three_to_one_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_mdm_cluster_mode[0] == "3_node" and powerflex_tb_scli_version[0] >= '4'
+
+- name: Switch cluster mode from five nodes to one node
+ ansible.builtin.command: |
+ scli --switch_cluster_mode --cluster_mode "1_node" --remove_secondary_mdm_ip
+ "{{ powerflex_tb_mdm_secondary_ip }}","{{ powerflex_tb_mdm_tertiary_ip }}" --remove_tb_ip "{{ powerflex_tb_primary_ip }}","{{ powerflex_tb_secondary_ip }}"
+ run_once: true
+ register: powerflex_tb_cluster_five_to_one_output
+ changed_when: powerflex_tb_cluster_five_to_one_output.rc == 0
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_mdm_cluster_mode[0] == "5_node" and powerflex_tb_scli_version[0] >= '4'
+
+# Remove the standby MDMs
+- name: Remove primary tb from standby mdm
+ ansible.builtin.command: scli --remove_standby_mdm --remove_mdm_ip "{{ powerflex_tb_primary_ip }}"
+ run_once: true
+ register: powerflex_tb_remove_primary_tb_output
+ changed_when: powerflex_tb_remove_primary_tb_output.rc == 0
+ ignore_errors: true
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+
+- name: Remove secondary tb from standby mdm
+ ansible.builtin.command: scli --remove_standby_mdm --remove_mdm_ip "{{ powerflex_tb_secondary_ip }}"
+ run_once: true
+ register: powerflex_tb_remove_secondary_tb_output
+ changed_when: powerflex_tb_remove_secondary_tb_output.rc == 0
+ ignore_errors: true
+ delegate_to: "{{ powerflex_tb_mdm_primary_hostname }}"
+ when: powerflex_tb_secondary_ip is defined
+
+- name: Uninstall package
+ register: powerflex_tb_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-mdm
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ register: powerflex_tb_uninstall_output
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - emc-scaleio-mdm
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_tb/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/vars/main.yml
new file mode 100644
index 000000000..aa01f740c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_tb/vars/main.yml
@@ -0,0 +1,6 @@
+---
+# vars file for powerflex_tb
+file_glob_name: mdm
+file_gpg_name: RPM-GPG-KEY-ScaleIO
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_tb_mdm_ips }}"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/README.md b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/README.md
new file mode 100644
index 000000000..794eb6b08
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/README.md
@@ -0,0 +1,165 @@
+# powerflex_webui
+
+Role to manage the installation and uninstallation of Powerflex Web UI.
+
+## Table of contents
+
+* [Requirements](#requirements)
+* [Ansible collections](#ansible-collections)
+* [Role Variables](#role-variables)
+* [Examples](#examples)
+* [Notes](#notes)
+* [Usage instructions](#usage-instructions)
+* [Author Information](#author-information)
+
+## Requirements
+
+```
+ansible
+python
+```
+
+## Ansible collections
+
+Collections required to use the role.
+
+```
+dellemc.powerflex
+```
+
+## Role Variables
+
+<table>
+<thead>
+ <tr>
+ <th>Name</th>
+ <th>Required</th>
+ <th>Description</th>
+ <th>Choices</th>
+ <th>Type</th>
+ <th>Default Value</th>
+ </tr>
+</thead>
+<tbody>
+ <tr>
+ <td>hostname</td>
+ <td>true</td>
+ <td>IP or FQDN of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>username</td>
+ <td>true</td>
+ <td>The username of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>password</td>
+ <td>true</td>
+ <td>The password of the PowerFlex gateway.</td>
+ <td></td>
+ <td>str</td>
+ <td></td>
+ </tr>
+ <tr>
+ <td>port</td>
+ <td>false</td>
+ <td>The port of the PowerFlex gateway.</td>
+ <td></td>
+ <td>int</td>
+ <td>443</td>
+ </tr>
+ <tr>
+ <td>validate_certs</td>
+ <td>false</td>
+ <td>If C(false), the SSL certificates will not be validated.<br>Configure C(false) only on personally controlled sites where self-signed certificates are used.</td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>timeout</td>
+ <td>false</td>
+ <td>Time after which connection will get terminated.</td>
+ <td></td>
+ <td>int</td>
+ <td>120</td>
+ </tr>
+ <tr>
+ <td>powerflex_common_file_install_location</td>
+ <td>false</td>
+ <td>Location of the installation files. The compatible installation software package depends on the operating system of the node.
+ <br>The files can be downloaded from the Dell Product support page for PowerFlex software.</td>
+ <td></td>
+ <td>path</td>
+ <td>/var/tmp</td>
+ </tr>
+ <tr>
+ <td>powerflex_webui_skip_java</td>
+ <td>false</td>
+ <td>Specifies whether to install java or not.<br></td>
+ <td></td>
+ <td>bool</td>
+ <td>false</td>
+ </tr>
+ <tr>
+ <td>powerflex_webui_state</td>
+ <td>false</td>
+ <td>Specify state of web UI.
+ <br>present will install the web UI and absent will uninstall the web UI.</td>
+ <td>absent, present</td>
+ <td>str</td>
+ <td>present</td>
+ </tr>
+</tbody>
+</table>
+
+## Examples
+----
+```
+ - name: Install and configure powerflex web UI
+ ansible.builtin.import_role:
+ name: "powerflex_webui"
+ vars:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ powerflex_common_file_install_location: "/opt/scaleio/rpm"
+ powerflex_webui_skip_java: true
+ powerflex_webui_state: present
+
+ - name: Uninstall powerflex web UI
+ ansible.builtin.import_role:
+ name: "powerflex_webui"
+ vars:
+ powerflex_webui_state: absent
+
+```
+## Notes
+- Supported only in PowerFlex version 3.6.
+
+## Usage instructions
+----
+### To install all dependency packages, including web UI, on node:
+ ```
+ ansible-playbook -i inventory site.yml
+ ```
+
+### To uninstall web UI:
+ ```
+ ansible-playbook -i inventory uninstall_powerflex.yml
+ ```
+
+Sample playbooks and inventory can be found in the playbooks directory.
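+
+A minimal playbook sketch that applies the role to hosts in a C(webui) inventory group; the group name and the variables file path are assumptions based on the bundled molecule scenarios:
+
+```
+- name: Install PowerFlex web UI
+  hosts: webui
+  vars_files:
+    # assumed to define hostname, username, password, validate_certs and port
+    - vars_files/connection.yml
+  roles:
+    - powerflex_webui
+```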
+
+## Author Information
+------------------
+
+Dell Technologies <br>
+Trisha Datta (ansible.team@Dell.com) 2023
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/defaults/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/defaults/main.yml
new file mode 100644
index 000000000..fd2b592a8
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/defaults/main.yml
@@ -0,0 +1,6 @@
+---
+powerflex_webui_skip_java: false
+file_glob_name: mgmt-server
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_webui_mdm_ips }}"
+powerflex_webui_state: "present"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/argument_specs.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/argument_specs.yml
new file mode 100644
index 000000000..50aee5bbe
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/argument_specs.yml
@@ -0,0 +1,52 @@
+---
+argument_specs:
+ main:
+ short_description: Role to manage the installation and uninstallation of Powerflex web UI.
+ description:
+ - Role to manage the installation and uninstallation of Powerflex web UI.
+ options:
+ hostname:
+ required: true
+ type: str
+ description: IP or FQDN of the PowerFlex gateway.
+ username:
+ required: true
+ type: str
+ description: The username of the PowerFlex gateway.
+ password:
+ required: true
+ type: str
+ description: The password of the PowerFlex gateway.
+ port:
+ type: int
+ description: Port of the PowerFlex gateway.
+ default: 443
+ validate_certs:
+ description:
+ - If C(false), the SSL certificates will not be validated.
+ - Configure C(false) only on personally controlled sites where self-signed certificates are used.
+ type: bool
+ default: false
+ timeout:
+ description: Time after which connection will get terminated.
+ type: int
+ default: 120
+ powerflex_common_file_install_location:
+ description:
+ - Location of the installation files. The compatible installation software package
+ depends on the operating system of the node.
+ - The files can be downloaded from the Dell Product support page for PowerFlex software.
+ type: path
+ default: /var/tmp
+ powerflex_webui_skip_java:
+ type: bool
+ description: Specifies whether to install java or not.
+ default: false
+ powerflex_webui_state:
+ description:
+ - Specifies the state of the web UI.
+ - present will install the web UI if not already installed.
+ - absent will uninstall the web UI if installed.
+ type: str
+ choices: ['absent', 'present']
+ default: present
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/main.yml
new file mode 100644
index 000000000..2872690ea
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/meta/main.yml
@@ -0,0 +1,29 @@
+---
+galaxy_info:
+ role_name: powerflex_webui
+ author: Trisha Datta
+ namespace: dellemc
+ description: Role to manage the installation and uninstallation of Powerflex WebUI.
+ company: Dell Technologies
+
+ license: GPL-3.0-only
+
+ min_ansible_version: "2.14.0"
+
+ platforms:
+ - name: EL
+ versions:
+ - "9"
+ - "8"
+ - name: Ubuntu
+ versions:
+ - jammy
+
+ - name: SLES
+ versions:
+ - "15SP3"
+ - "15SP4"
+
+ galaxy_tags: []
+dependencies:
+ - role: powerflex_common
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/converge.yml
new file mode 100644
index 000000000..cd2b9c8b5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/converge.yml
@@ -0,0 +1,30 @@
+---
+- name: Converge
+ hosts: webui
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: Install and configure powerflex webui
+ ansible.builtin.import_role:
+ name: "powerflex_webui"
+ vars:
+ powerflex_webui_state: present
+
+ - name: Verifying installation package in check mode
+ ansible.builtin.assert:
+ that:
+ - " 'No changes made, but would have if not in check mode' in powerflex_common_install_package_output.msg"
+ when: ansible_check_mode
+
+ - name: Verifying installation package in converge
+ ansible.builtin.assert:
+ that:
+ - " 'Installed' in powerflex_common_install_package_output.results[0]"
+ when: not ansible_check_mode and powerflex_common_install_package_output.changed
+
+ - name: Verifying installation package in idempotency
+ ansible.builtin.assert:
+ that:
+ - " 'Nothing to do' in powerflex_common_install_package_output.msg"
+ when: not ansible_check_mode and not powerflex_common_install_package_output.changed
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/converge.yml
new file mode 100644
index 000000000..f614a1862
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/converge.yml
@@ -0,0 +1,34 @@
+---
+- name: Converge
+ hosts: webui
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: Install and configure powerflex webui with no rpm
+ ansible.builtin.import_role:
+ name: "powerflex_webui"
+ vars:
+ powerflex_common_file_install_location: "/opt/empty"
+ powerflex_webui_state: present
+ ignore_errors: true
+ register: powerflex_webui_install_config_no_rpm_result
+
+ - name: Verifying failure of install package with respect to no rpm file
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
+
+ - name: Install and configure powerflex webui with wrong file path
+ ansible.builtin.import_role:
+ name: "powerflex_webui"
+ vars:
+ powerflex_common_file_install_location: "/opt/aaab"
+ powerflex_webui_state: present
+ ignore_errors: true
+ register: powerflex_webui_install_config_wrong_path_result
+
+ - name: Verifying failure of install package with wrong file path
+ ansible.builtin.assert:
+ that:
+ - powerflex_common_package_file.files | length == 0
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/molecule.yml
new file mode 100644
index 000000000..93cad84c9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_installation_invalid_path_rpm/molecule.yml
@@ -0,0 +1,4 @@
+---
+scenario:
+ test_sequence:
+ - converge
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/converge.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/converge.yml
new file mode 100644
index 000000000..625f18a32
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/converge.yml
@@ -0,0 +1,48 @@
+---
+- name: Converge
+ hosts: webui
+ vars_files:
+ - ../../../../playbooks/roles/vars_files/connection.yml
+ gather_facts: true
+ tasks:
+ - name: Uninstall powerflex webui
+ ansible.builtin.import_role:
+ name: "powerflex_webui"
+ vars:
+ powerflex_webui_state: 'absent'
+
+ - name: Verifying uninstall package in converge
+ ansible.builtin.assert:
+ that:
+ - " 'Removed:' in powerflex_webui_uninstall_output.results[0].results[0]"
+ when: not ansible_check_mode and powerflex_webui_uninstall_output.changed and ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in check mode
+ ansible.builtin.assert:
+ that:
+ - powerflex_webui_uninstall_output.results[0].msg == "Check mode: No changes made, but would have if not in check mode"
+ when: ansible_check_mode and ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in Idempotency
+ ansible.builtin.assert:
+ that:
+ - powerflex_webui_uninstall_output.results[0].msg == 'Nothing to do'
+ when: not ansible_check_mode and not powerflex_webui_uninstall_output.changed and ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+ - name: Verifying uninstall package in check mode for deb
+ ansible.builtin.assert:
+ that:
+ - powerflex_webui_uninstall_deb_output.results[0].msg == "Check mode: No changes made, but would have if not in check mode"
+ when: ansible_check_mode and ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in converge for deb
+ ansible.builtin.assert:
+ that:
+ - " 'Removed:' in powerflex_webui_uninstall_deb_output.results[0].results[0]"
+ when: not ansible_check_mode and powerflex_webui_uninstall_deb_output.changed and ansible_distribution == "Ubuntu"
+
+ - name: Verifying uninstall package in Idempotency for deb
+ ansible.builtin.assert:
+ that:
+ - powerflex_webui_uninstall_deb_output.results[0].msg == 'Nothing to do'
+ when: not ansible_check_mode and not powerflex_webui_uninstall_deb_output.changed and ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/molecule.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/molecule.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/molecule/webui_uninstallation/molecule.yml
@@ -0,0 +1 @@
+---
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/install_webui.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/install_webui.yml
new file mode 100644
index 000000000..13d58ffac
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/install_webui.yml
@@ -0,0 +1,23 @@
+---
+- name: Check if webui package is available
+ delegate_to: localhost
+ ansible.builtin.find:
+ paths: "{{ powerflex_common_file_install_location }}"
+ patterns: "*{{ file_glob_name }}*"
+
+- name: Get configured MDM IP addresses
+ dellemc.powerflex.mdm_cluster:
+ hostname: "{{ hostname }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ state: present
+ register: powerflex_webui_result
+ delegate_to: "{{ lookup('ansible.builtin.env', 'RUNON', default='localhost') }}"
+
+- name: Set fact - powerflex_webui_mdm_ips
+ ansible.builtin.set_fact:
+ powerflex_webui_mdm_ips: "{{ powerflex_webui_result.mdm_cluster_details.mdmAddresses | join(',') }}"
+
+- name: Include install_powerflex.yml
+ ansible.builtin.include_tasks: ../../powerflex_common/tasks/install_powerflex.yml
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/main.yml
new file mode 100644
index 000000000..417f5d504
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/main.yml
@@ -0,0 +1,8 @@
+---
+- name: Install and Configure PowerFlex webUI
+ ansible.builtin.include_tasks: install_webui.yml
+ when: powerflex_webui_state == "present"
+
+- name: Uninstall PowerFlex webUI
+ ansible.builtin.include_tasks: uninstall_webui.yml
+ when: powerflex_webui_state == "absent"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/uninstall_webui.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/uninstall_webui.yml
new file mode 100644
index 000000000..72d7fd53a
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/tasks/uninstall_webui.yml
@@ -0,0 +1,20 @@
+---
+- name: Uninstall web UI package
+ register: powerflex_webui_uninstall_output
+ environment:
+ I_AM_SURE: "{{ i_am_sure | int }}"
+ ansible.builtin.package:
+ name: "{{ item }}"
+ state: "absent"
+ with_items:
+ - EMC-ScaleIO-mgmt-server
+ when: ansible_distribution in ("RedHat", "CentOS", "SLES")
+
+- name: Uninstall deb package
+ register: powerflex_webui_uninstall_deb_output
+ ansible.builtin.apt:
+ name: "{{ item }}"
+ state: absent
+ with_items:
+ - EMC-ScaleIO-mgmt-server
+ when: ansible_distribution == "Ubuntu"
diff --git a/ansible_collections/dellemc/powerflex/roles/powerflex_webui/vars/main.yml b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/vars/main.yml
new file mode 100644
index 000000000..aba9fecd1
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/roles/powerflex_webui/vars/main.yml
@@ -0,0 +1,5 @@
+---
+file_glob_name: mgmt-server
+powerflex_role_environment:
+ MDM_IP: "{{ powerflex_webui_mdm_ips }}"
+file_gpg_name: RPM-GPG-KEY-ScaleIO
diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt
deleted file mode 100644
index c78903cdf..000000000
--- a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.12.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-plugins/modules/device.py validate-modules:missing-gplv3-license
-plugins/modules/sdc.py validate-modules:missing-gplv3-license
-plugins/modules/sds.py validate-modules:missing-gplv3-license
-plugins/modules/snapshot.py validate-modules:missing-gplv3-license
-plugins/modules/storagepool.py validate-modules:missing-gplv3-license
-plugins/modules/volume.py validate-modules:missing-gplv3-license
-plugins/modules/info.py validate-modules:missing-gplv3-license
-plugins/modules/protection_domain.py validate-modules:missing-gplv3-license
-plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license
-plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license
-plugins/modules/replication_pair.py validate-modules:missing-gplv3-license
diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt
deleted file mode 100644
index c78903cdf..000000000
--- a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.13.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-plugins/modules/device.py validate-modules:missing-gplv3-license
-plugins/modules/sdc.py validate-modules:missing-gplv3-license
-plugins/modules/sds.py validate-modules:missing-gplv3-license
-plugins/modules/snapshot.py validate-modules:missing-gplv3-license
-plugins/modules/storagepool.py validate-modules:missing-gplv3-license
-plugins/modules/volume.py validate-modules:missing-gplv3-license
-plugins/modules/info.py validate-modules:missing-gplv3-license
-plugins/modules/protection_domain.py validate-modules:missing-gplv3-license
-plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license
-plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license
-plugins/modules/replication_pair.py validate-modules:missing-gplv3-license
diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt
index c78903cdf..cb6ef4675 100644
--- a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt
+++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.14.txt
@@ -1,3 +1,18 @@
+plugins/modules/sds.py import-2.7
+plugins/modules/sds.py import-3.5
+plugins/modules/sds.py compile-2.7
+plugins/modules/sds.py compile-3.5
+plugins/modules/info.py import-2.7
+plugins/modules/info.py import-3.5
+plugins/modules/info.py compile-2.7
+plugins/modules/fault_set.py import-2.7
+plugins/modules/fault_set.py import-3.5
+plugins/modules/fault_set.py compile-2.7
+plugins/modules/fault_set.py compile-3.5
+plugins/module_utils/storage/dell/libraries/configuration.py import-2.7
+plugins/module_utils/storage/dell/libraries/configuration.py import-3.5
+plugins/module_utils/storage/dell/libraries/configuration.py compile-2.7
+plugins/module_utils/storage/dell/libraries/configuration.py compile-3.5
plugins/modules/device.py validate-modules:missing-gplv3-license
plugins/modules/sdc.py validate-modules:missing-gplv3-license
plugins/modules/sds.py validate-modules:missing-gplv3-license
@@ -9,3 +24,19 @@ plugins/modules/protection_domain.py validate-modules:missing-gplv3-license
plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license
plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license
plugins/modules/replication_pair.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py validate-modules:missing-gplv3-license
+plugins/modules/fault_set.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py compile-2.7
+plugins/modules/snapshot_policy.py compile-3.5
+plugins/modules/snapshot_policy.py import-2.7
+plugins/modules/snapshot_policy.py import-3.5
+plugins/modules/sdc.py import-2.7
+plugins/modules/sdc.py import-3.5
+plugins/modules/sdc.py compile-2.7
+plugins/modules/sdc.py compile-3.5
+tests/unit/plugins/module_utils/mock_device_api.py compile-2.7
+tests/unit/plugins/module_utils/mock_device_api.py compile-3.5
+plugins/modules/replication_consistency_group.py import-2.7
+plugins/modules/replication_consistency_group.py import-3.5
+plugins/modules/replication_consistency_group.py compile-2.7
+plugins/modules/replication_consistency_group.py compile-3.5
diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..cb6ef4675
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,42 @@
+plugins/modules/sds.py import-2.7
+plugins/modules/sds.py import-3.5
+plugins/modules/sds.py compile-2.7
+plugins/modules/sds.py compile-3.5
+plugins/modules/info.py import-2.7
+plugins/modules/info.py import-3.5
+plugins/modules/info.py compile-2.7
+plugins/modules/fault_set.py import-2.7
+plugins/modules/fault_set.py import-3.5
+plugins/modules/fault_set.py compile-2.7
+plugins/modules/fault_set.py compile-3.5
+plugins/module_utils/storage/dell/libraries/configuration.py import-2.7
+plugins/module_utils/storage/dell/libraries/configuration.py import-3.5
+plugins/module_utils/storage/dell/libraries/configuration.py compile-2.7
+plugins/module_utils/storage/dell/libraries/configuration.py compile-3.5
+plugins/modules/device.py validate-modules:missing-gplv3-license
+plugins/modules/sdc.py validate-modules:missing-gplv3-license
+plugins/modules/sds.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/protection_domain.py validate-modules:missing-gplv3-license
+plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license
+plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license
+plugins/modules/replication_pair.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py validate-modules:missing-gplv3-license
+plugins/modules/fault_set.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py compile-2.7
+plugins/modules/snapshot_policy.py compile-3.5
+plugins/modules/snapshot_policy.py import-2.7
+plugins/modules/snapshot_policy.py import-3.5
+plugins/modules/sdc.py import-2.7
+plugins/modules/sdc.py import-3.5
+plugins/modules/sdc.py compile-2.7
+plugins/modules/sdc.py compile-3.5
+tests/unit/plugins/module_utils/mock_device_api.py compile-2.7
+tests/unit/plugins/module_utils/mock_device_api.py compile-3.5
+plugins/modules/replication_consistency_group.py import-2.7
+plugins/modules/replication_consistency_group.py import-3.5
+plugins/modules/replication_consistency_group.py compile-2.7
+plugins/modules/replication_consistency_group.py compile-3.5
diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt
new file mode 100644
index 000000000..531796f6c
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.16.txt
@@ -0,0 +1,28 @@
+plugins/modules/sds.py import-2.7
+plugins/modules/sds.py compile-2.7
+plugins/module_utils/storage/dell/libraries/configuration.py import-2.7
+plugins/module_utils/storage/dell/libraries/configuration.py compile-2.7
+plugins/modules/device.py validate-modules:missing-gplv3-license
+plugins/modules/sdc.py validate-modules:missing-gplv3-license
+plugins/modules/sds.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/protection_domain.py validate-modules:missing-gplv3-license
+plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license
+plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license
+plugins/modules/replication_pair.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py validate-modules:missing-gplv3-license
+plugins/modules/fault_set.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py compile-2.7
+plugins/modules/snapshot_policy.py import-2.7
+plugins/modules/sdc.py import-2.7
+plugins/modules/sdc.py compile-2.7
+plugins/modules/fault_set.py import-2.7
+plugins/modules/fault_set.py compile-2.7
+tests/unit/plugins/module_utils/mock_device_api.py compile-2.7
+plugins/modules/replication_consistency_group.py import-2.7
+plugins/modules/replication_consistency_group.py compile-2.7
+plugins/modules/info.py compile-2.7
+plugins/modules/info.py import-2.7
diff --git a/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.17.txt b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.17.txt
new file mode 100644
index 000000000..54067647b
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/sanity/ignore-2.17.txt
@@ -0,0 +1,13 @@
+plugins/modules/device.py validate-modules:missing-gplv3-license
+plugins/modules/sdc.py validate-modules:missing-gplv3-license
+plugins/modules/sds.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/protection_domain.py validate-modules:missing-gplv3-license
+plugins/modules/mdm_cluster.py validate-modules:missing-gplv3-license
+plugins/modules/replication_consistency_group.py validate-modules:missing-gplv3-license
+plugins/modules/replication_pair.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot_policy.py validate-modules:missing-gplv3-license
+plugins/modules/fault_set.py validate-modules:missing-gplv3-license
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/__init__.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/__init__.py
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/fail_json.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/fail_json.py
new file mode 100644
index 000000000..d270326b9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/fail_json.py
@@ -0,0 +1,21 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock fail json for PowerFlex Test modules"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class FailJsonException(Exception):
+ def __init__(self, *args):
+ if args:
+ self.message = args[0]
+ else:
+ self.message = None
+
+
+def fail_json(msg, **kwargs):
+ raise FailJsonException(msg)
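fail_json here is a drop-in replacement for AnsibleModule.fail_json: instead of exiting the process it raises FailJsonException, so a failure path becomes something a unit test can catch and assert on. A minimal pytest sketch, assuming only this helper; the message text is borrowed from the device mock purely for illustration:

import pytest

from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.fail_json import (
    FailJsonException,
    fail_json,
)


def test_fail_json_raises_catchable_exception():
    # The helper turns a fatal module exit into an exception with the message attached.
    with pytest.raises(FailJsonException) as exc:
        fail_json(msg="Please enter a valid value for device_name.")
    assert "device_name" in exc.value.message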
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/initial_mock.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/initial_mock.py
new file mode 100644
index 000000000..7409ab4c2
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/initial_mock.py
@@ -0,0 +1,17 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+from mock.mock import MagicMock
+
+utils.get_logger = MagicMock()
+utils.get_powerflex_gateway_host_connection = MagicMock()
+utils.PowerFlexClient = MagicMock()
+
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
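initial_mock patches the PowerFlex SDK entry points in utils at import time, which is why the unit-test base class imports it with a pylint-silenced unused import before anything else. A hedged sketch of that import-order contract; the device module is used only as an example of production code imported after the mocks are active:

# pylint: disable=unused-import
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock  # noqa: F401

# Safe only after the line above: utils.PowerFlexClient and
# utils.get_powerflex_gateway_host_connection are already MagicMocks, so this
# import cannot open a real gateway connection.
from ansible_collections.dellemc.powerflex.plugins.modules import device  # noqa: F401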
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/powerflex_unit_base.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/powerflex_unit_base.py
new file mode 100644
index 000000000..0c06b0cd5
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/libraries/powerflex_unit_base.py
@@ -0,0 +1,40 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+import pytest
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
+from mock.mock import MagicMock
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries. \
+ fail_json import FailJsonException, fail_json
+
+
+class PowerFlexUnitBase:
+
+ '''Powerflex Unit Test Base Class'''
+
+ @pytest.fixture
+ def powerflex_module_mock(self, mocker, module_object):
+ powerflex_module_mock = module_object()
+ powerflex_module_mock.module = MagicMock()
+ powerflex_module_mock.module.fail_json = fail_json
+ powerflex_module_mock.module.check_mode = False
+ return powerflex_module_mock
+
+ def capture_fail_json_call(self, error_msg, module_mock, module_handler=None, invoke_perform_module=False):
+ try:
+ if not invoke_perform_module:
+ module_handler().handle(module_mock, module_mock.module.params)
+ else:
+ module_mock.perform_module_operation()
+ except FailJsonException as fj_object:
+ if error_msg not in fj_object.message:
+ raise AssertionError(fj_object.message)
+
+ def set_module_params(self, module_mock, get_module_args, params):
+ get_module_args.update(params)
+ module_mock.module.params = get_module_args
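PowerFlexUnitBase gives every module test a mocked module object (fail_json wired to the exception helper, check_mode off), a parameter setter, and capture_fail_json_call for asserting on expected error text. A self-contained pytest sketch, assuming pytest and pytest-mock are available as the collection's unit tests require; FakePowerFlexModule is a stand-in defined here for illustration, not a class from the collection:

import pytest

from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base import (
    PowerFlexUnitBase,
)


class FakePowerFlexModule:
    # Minimal stand-in for a real PowerFlex module class.
    def perform_module_operation(self):
        self.module.fail_json(msg="Failed to get the Fault Set")


class TestFakePowerFlexModule(PowerFlexUnitBase):
    @pytest.fixture
    def module_object(self):
        # Consumed by the powerflex_module_mock fixture in the base class.
        return FakePowerFlexModule

    def test_expected_error_is_matched(self, powerflex_module_mock):
        self.set_module_params(powerflex_module_mock, {}, {"state": "present"})
        # Passes because the raised message contains the expected substring.
        self.capture_fail_json_call(
            "Failed to get the Fault Set",
            powerflex_module_mock,
            invoke_perform_module=True,
        )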
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_device_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_device_api.py
new file mode 100644
index 000000000..2a2cf8756
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_device_api.py
@@ -0,0 +1,146 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""
+Mock Api response for Unit tests of Device module on Dell Technologies (Dell) PowerFlex
+"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockDeviceApi:
+ MODULE_UTILS_PATH = "ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils"
+
+ DEVICE_NAME_1 = "ansible_device_1"
+ DEVICE_ID_1 = "3fef781c00080000"
+ PD_ID_1 = "4eeb305100000001"
+ PD_NAME_1 = "domain1"
+ SDS_ID_1 = "6af03fc500000008"
+ SDS_NAME_1 = "ansible_sds_1"
+ SP_ID_1 = "7644c68600000008"
+ SP_NAME_1 = "ansible_sp_1"
+ PATH_1 = "/dev/sdb"
+
+ DEVICE_COMMON_ARGS = {
+ "hostname": "**.***.**.***",
+ "current_pathname": None,
+ "device_id": None,
+ "device_name": None,
+ "sds_name": None,
+ "sds_id": None,
+ "storage_pool_name": None,
+ "storage_pool_id": None,
+ "acceleration_pool_name": None,
+ "acceleration_pool_id": None,
+ "protection_domain_name": None,
+ "protection_domain_id": None,
+ "external_acceleration_type": None,
+ "media_type": None,
+ "state": None
+ }
+
+ DEVICE_GET_LIST = [
+ {
+ "accelerationPoolId": SP_ID_1,
+ "accelerationPoolName": SP_NAME_1,
+ "autoDetectMediaType": "Unknown",
+ "capacityLimitInKb": 124718080,
+ "deviceCurrentPathName": PATH_1,
+ "deviceOriginalPathName": PATH_1,
+ "externalAccelerationType": "ReadAndWrite",
+ "fglNvdimmWriteCacheSize": 16,
+ "id": DEVICE_ID_1,
+ "mediaType": "HDD",
+ "name": DEVICE_NAME_1,
+ "protectionDomainId": PD_ID_1,
+ "protectionDomainName": PD_NAME_1,
+ "sdsId": SDS_ID_1,
+ "sdsName": SDS_NAME_1,
+ "spSdsId": "bfe791ff00080000",
+ "storagePoolId": SP_ID_1,
+ "storagePoolName": SP_NAME_1
+ }
+ ]
+ SDS_DETAILS_1 = [
+ {
+ "name": SDS_NAME_1,
+ "id": SDS_ID_1
+ }
+ ]
+ PD_DETAILS_1 = [
+ {
+ "name": PD_NAME_1,
+ "id": PD_ID_1
+ }
+ ]
+ SP_DETAILS_1 = [
+ {
+ "name": SP_NAME_1,
+ "protectionDomainId": PD_ID_1,
+ "id": SP_ID_1
+ }
+ ]
+ AP_DETAILS_1 = [
+ {
+ "name": SP_NAME_1,
+ "protectionDomainId": PD_ID_1,
+ "id": SP_ID_1
+ }
+ ]
+
+ @staticmethod
+ def get_device_exception_response(response_type):
+ if response_type == 'get_dev_without_SDS':
+ return "sds_name or sds_id is mandatory along with device_name. Please enter a valid value"
+ elif response_type == 'get_device_details_without_path':
+ return "sds_name or sds_id is mandatory along with current_pathname. Please enter a valid value"
+ elif response_type == 'get_device_exception':
+ return "Failed to get the device with error"
+ elif response_type == 'create_id_exception':
+ return "Addition of device is allowed using device_name only, device_id given."
+ elif response_type == 'empty_path':
+ return "Please enter a valid value for current_pathname"
+ elif response_type == 'empty_device_name':
+ return "Please enter a valid value for device_name."
+ elif response_type == 'empty_sds':
+ return "Please enter a valid value for "
+ elif response_type == 'empty_dev_id':
+ return "Please provide valid device_id value to identify a device"
+ elif response_type == 'space_in_name':
+ return "current_pathname or device_name is mandatory along with sds"
+ elif response_type == 'with_required_params':
+ return "Please specify a valid parameter combination to identify a device"
+
+ @staticmethod
+ def get_device_exception_response1(response_type):
+ if response_type == 'modify_exception':
+ return "Modification of device attributes is currently not supported by Ansible modules."
+ elif response_type == 'delete_exception':
+ return f"Remove device '{MockDeviceApi.DEVICE_ID_1}' operation failed with error"
+ elif response_type == 'sds_exception':
+ return f"Unable to find the SDS with '{MockDeviceApi.SDS_NAME_1}'. Please enter a valid SDS name/id."
+ elif response_type == 'pd_exception':
+ return f"Unable to find the protection domain with " \
+ f"'{MockDeviceApi.PD_NAME_1}'. Please enter a valid " \
+ f"protection domain name/id"
+ elif response_type == 'sp_exception':
+ return f"Unable to find the storage pool with " \
+ f"'{MockDeviceApi.SP_NAME_1}'. Please enter a valid " \
+ f"storage pool name/id."
+ elif response_type == 'ap_exception':
+ return f"Unable to find the acceleration pool with " \
+ f"'{MockDeviceApi.SP_NAME_1}'. Please enter a valid " \
+ f"acceleration pool name/id."
+ elif response_type == 'add_exception':
+ return "Adding device ansible_device_1 operation failed with error"
+ elif response_type == 'add_dev_name_exception':
+ return "Please provide valid device_name value for adding a device"
+ elif response_type == 'add_dev_path_exception':
+ return "Current pathname of device is a mandatory parameter for adding a device. Please enter a valid value"
+ elif response_type == 'ext_type_exception':
+ return "Storage Pool ID/name or Acceleration Pool ID/name is mandatory along with external_acceleration_type."
+ elif response_type == 'add_without_pd':
+ return "Protection domain name/id is required to uniquely identify"
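MockDeviceApi keeps both the canned REST payloads and the expected error strings for the device unit tests in one place. A hedged sketch of how a test might combine the two; the build_add_device_args helper is illustrative and not part of the collection:

from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_device_api import MockDeviceApi


def build_add_device_args():
    # Start from the shared argument template and override only what the scenario needs.
    args = dict(MockDeviceApi.DEVICE_COMMON_ARGS)
    args.update(
        device_name=MockDeviceApi.DEVICE_NAME_1,
        sds_name=MockDeviceApi.SDS_NAME_1,
        current_pathname=MockDeviceApi.PATH_1,
        state="present",
    )
    return args


# Expected error strings are looked up by scenario key rather than hard-coded in each test.
expected = MockDeviceApi.get_device_exception_response("get_dev_without_SDS")
assert "sds_name or sds_id is mandatory" in expected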
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fail_json.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fail_json.py
new file mode 100644
index 000000000..8e20402c0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fail_json.py
@@ -0,0 +1,21 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock fail json for PowerFlex Test modules"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class FailJsonException(Exception):
+ def __init__(self, *args):
+ if args:
+ self.message = args[0]
+ else:
+ self.message = None
+
+
+def fail_json(msg):
+ raise FailJsonException(msg)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fault_set_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fault_set_api.py
new file mode 100644
index 000000000..1072888a2
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_fault_set_api.py
@@ -0,0 +1,69 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""
+Mock Api response for Unit tests of fault set module on Dell Technologies (Dell) PowerFlex
+"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockFaultSetApi:
+ FAULT_SET_COMMON_ARGS = {
+ "hostname": "**.***.**.***",
+ "protection_domain_name": None,
+ "protection_domain_id": None,
+ "fault_set_name": None,
+ "fault_set_id": None,
+ "fault_set_new_name": None,
+ "state": None
+ }
+
+ FAULT_SET_GET_LIST = [
+ {
+ "protectionDomainId": "7bd6457000000000",
+ "name": "fault_set_name_1",
+ "id": "fault_set_id_1",
+ "links": []
+ }
+ ]
+
+ PROTECTION_DOMAIN = {
+ "protectiondomain": [
+ {
+ "id": "7bd6457000000000",
+ "name": "test_pd_1",
+ "protectionDomainState": "Active",
+ "overallIoNetworkThrottlingInKbps": 20480,
+ "rebalanceNetworkThrottlingInKbps": 10240,
+ "rebuildNetworkThrottlingInKbps": 10240,
+ "vtreeMigrationNetworkThrottlingInKbps": 10240,
+ "rfcacheEnabled": "false",
+ "rfcacheMaxIoSizeKb": 128,
+ "rfcacheOpertionalMode": "None",
+ "rfcachePageSizeKb": 64,
+ "storagePools": [
+ {
+ "id": "8d1cba1700000000",
+ "name": "pool1"
+ }
+ ]
+ }
+ ]
+ }
+
+ RESPONSE_EXEC_DICT = {
+ 'delete_fault_set_exception': "Removing Fault Set fault_set_id_1 failed with error",
+ 'rename_fault_set_exception': "Failed to rename the fault set instance",
+ 'create_fault_set_exception': "Create fault set test_fs_1 operation failed",
+ 'get_fault_set_exception': "Failed to get the Fault Set",
+ 'create_fault_set_wo_pd_exception': "Provide protection_domain_id/protection_domain_name with fault_set_name.",
+ 'create_fault_set_empty_name_exception': "Provide valid value for name for the creation/modification of the fault set."
+ }
+
+ @staticmethod
+ def get_fault_set_exception_response(response_type):
+ return MockFaultSetApi.RESPONSE_EXEC_DICT.get(response_type, "")
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py
index e2ef01fe7..20de1c1c9 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_info_api.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -13,12 +13,16 @@ from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_
import MockReplicationConsistencyGroupApi
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_replication_pair_api \
import MockReplicationPairApi
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_snapshot_policy_api \
+ import MockSnapshotPolicyApi
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fault_set_api import MockFaultSetApi
__metaclass__ = type
class MockInfoApi:
+ MODULE_UTILS_PATH = "ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils"
INFO_COMMON_ARGS = {
"hostname": "**.***.**.***",
"gather_subset": [],
@@ -219,6 +223,12 @@ class MockInfoApi:
'test_vol_id_1': MockVolumeApi.VOLUME_STATISTICS
}
+ INFO_SNAPSHOT_POLICY_GET_LIST = MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+
+ INFO_SNAPSHOT_POLICY_STATISTICS = {
+ 'test_snap_pol_id_1': MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ }
+
INFO_STORAGE_POOL_GET_LIST = MockStoragePoolApi.STORAGE_POOL_GET_LIST
INFO_STORAGE_POOL_STATISTICS = {
@@ -228,13 +238,90 @@ class MockInfoApi:
RCG_LIST = MockReplicationConsistencyGroupApi.get_rcg_details()
PAIR_LIST = MockReplicationPairApi.get_pair_details()
+ INFO_GET_FAULT_SET_LIST = MockFaultSetApi.FAULT_SET_GET_LIST
+
+ INFO_SDC_GET_LIST = [
+ {
+ "id": "07335d3d00000006",
+ "name": "sdc_1"
+ },
+ {
+ "id": "07335d3c00000005",
+ "name": "sdc_2"
+ },
+ {
+ "id": "0733844a00000003",
+ "name": "sdc_3"
+ }
+ ]
+
+ INFO_SDC_FILTER_LIST = [
+ {
+ "id": "07335d3d00000006",
+ "name": "sdc_1"
+ }
+ ]
+
+ INFO_SDS_GET_LIST = [
+ {
+ "id": "8f3bb0cc00000002",
+ "name": "node0"
+ },
+ {
+ "id": "8f3bb0ce00000000",
+ "name": "node1"
+ },
+ {
+ "id": "8f3bb15300000001",
+ "name": "node22"
+ }
+ ]
+ INFO_GET_PD_LIST = [
+ {
+ "id": "9300e90900000001",
+ "name": "domain2"
+ },
+ {
+ "id": "9300c1f900000000",
+ "name": "domain1"
+ }
+ ]
+ INFO_GET_DEVICE_LIST = [
+ {
+ "id": "b6efa59900000000",
+ "name": "device230"
+ },
+ {
+ "id": "b6efa5fa00020000",
+ "name": "device_node0"
+ },
+ {
+ "id": "b7f3a60900010000",
+ "name": "device22"
+ }
+ ]
+
+ RESPONSE_EXEC_DICT = {
+ 'volume_get_details': "Get volumes list from powerflex array failed with error",
+ 'snapshot_policy_get_details': "Get snapshot policies list from powerflex array failed with error ",
+ 'sp_get_details': "Get storage pool list from powerflex array failed with error ",
+ 'rcg_get_details': "Get replication consistency group list from powerflex array failed with error ",
+ 'replication_pair_get_details': "Get replication pair list from powerflex array failed with error ",
+ 'fault_set_get_details': "Get fault set list from powerflex array failed with error",
+ 'sdc_get_details': "Get SDC list from powerflex array failed with error",
+ 'sds_get_details': "Get SDS list from powerflex array failed with error",
+ 'pd_get_details': "Get protection domain list from powerflex array failed with error",
+ 'device_get_details': "Get device list from powerflex array failed with error",
+ 'get_sds_details_filter_invalid': "Filter should have all keys: 'filter_key, filter_operator, filter_value'",
+ 'get_sds_details_filter_empty': "Filter keys: '['filter_key', 'filter_operator', 'filter_value']' cannot be None",
+ 'invalid_filter_operator_exception': "Given filter operator 'does_not_contain' is not supported.",
+ 'api_exception': "Get API details from Powerflex array failed with error",
+ 'system_exception': "Get array details from Powerflex array failed with error",
+ 'managed_device_get_error': "Get managed devices from PowerFlex Manager failed with error",
+ 'service_template_get_error': "Get service templates from PowerFlex Manager failed with error",
+ 'deployment_get_error': "Get deployments from PowerFlex Manager failed with error"
+ }
+
@staticmethod
def get_exception_response(response_type):
- if response_type == 'volume_get_details':
- return "Get volumes list from powerflex array failed with error "
- elif response_type == 'sp_get_details':
- return "Get storage pool list from powerflex array failed with error "
- elif response_type == 'rcg_get_details':
- return "Get replication consistency group list from powerflex array failed with error "
- elif response_type == 'replication_pair_get_details':
- return "Get replication pair list from powerflex array failed with error "
+ return MockInfoApi.RESPONSE_EXEC_DICT.get(response_type, "")
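The change above replaces the growing if/elif ladder in get_exception_response with a class-level dictionary and a .get() lookup; the same pattern appears in several other mock files in this diff. A small self-contained sketch of the pattern, using an illustrative MiniMock class that is not part of the collection:

class MiniMock:
    RESPONSE_EXEC_DICT = {
        "volume_get_details": "Get volumes list from powerflex array failed with error",
        "sdc_get_details": "Get SDC list from powerflex array failed with error",
    }

    @staticmethod
    def get_exception_response(response_type):
        # .get() with a default keeps unknown keys from raising KeyError,
        # unlike direct indexing such as error_msg[response_type].
        return MiniMock.RESPONSE_EXEC_DICT.get(response_type, "")


assert MiniMock.get_exception_response("sdc_get_details").startswith("Get SDC list")
assert MiniMock.get_exception_response("unknown_key") == ""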
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py
index 60452ecda..bab9a832c 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_protection_domain_api.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -15,54 +15,86 @@ class MockProtectionDomainApi:
MODULE_PATH = 'ansible_collections.dellemc.powerflex.plugins.modules.protection_domain.PowerFlexProtectionDomain'
MODULE_UTILS_PATH = 'ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils'
- PROTECTION_DOMAIN = {
- "protectiondomain": [
- {
- "id": "7bd6457000000000",
- "name": "test_domain",
- "protectionDomainState": "Active",
- "overallIoNetworkThrottlingInKbps": 20480,
- "rebalanceNetworkThrottlingInKbps": 10240,
- "rebuildNetworkThrottlingInKbps": 10240,
- "vtreeMigrationNetworkThrottlingInKbps": 10240,
- "rfcacheEnabled": "false",
- "rfcacheMaxIoSizeKb": 128,
- "rfcacheOpertionalMode": "None",
- "rfcachePageSizeKb": 64,
- "storagePools": [
- {
- "id": "8d1cba1700000000",
- "name": "pool1"
- }
- ]
- }
- ]
- }
- STORAGE_POOL = {
- "storagepool": [
- {
- "protectionDomainId": "7bd6457000000000",
- "rebuildEnabled": True,
- "mediaType": "HDD",
- "name": "pool1",
- "id": "8d1cba1700000000"
- }
- ]
+ PD_COMMON_ARGS = {
+ 'hostname': '**.***.**.***',
+ 'protection_domain_id': None,
+ 'protection_domain_name': None,
+ 'protection_domain_new_name': None,
+ 'is_active': None,
+ 'network_limits': None,
+ 'rf_cache_limits': None,
+ 'state': 'present'
}
+ PD_NAME = 'test_domain'
+ PD_NEW_NAME = 'test_domain_new'
+ PD_ID = '7bd6457000000000'
- @staticmethod
- def modify_pd_with_failed_msg(protection_domain_name):
- return "Failed to update the rf cache limits of protection domain " + protection_domain_name + " with error "
+ PROTECTION_DOMAIN = [
+ {
+ "id": "7bd6457000000000",
+ "name": "test_domain",
+ "protectionDomainState": "Active",
+ "overallIoNetworkThrottlingInKbps": 20480,
+ "rebalanceNetworkThrottlingInKbps": 10240,
+ "rebuildNetworkThrottlingInKbps": 10240,
+ "vtreeMigrationNetworkThrottlingInKbps": 10240,
+ "rfcacheEnabled": "false",
+ "rfcacheMaxIoSizeKb": 128,
+ "rfcacheOpertionalMode": "None",
+ "rfcachePageSizeKb": 64,
+ "storagePools": [
+ {
+ "id": "8d1cba1700000000",
+ "name": "pool1"
+ }
+ ]
+ }
+ ]
- @staticmethod
- def delete_pd_failed_msg(protection_domain_id):
- return "Delete protection domain '" + protection_domain_id + "' operation failed with error ''"
+ PROTECTION_DOMAIN_1 = [
+ {
+ "id": "7bd6457000000000",
+ "name": "test_domain",
+ "protectionDomainState": "Inactive",
+ "overallIoNetworkThrottlingInKbps": 20480,
+ "rebalanceNetworkThrottlingInKbps": 10240,
+ "rebuildNetworkThrottlingInKbps": 10240,
+ "vtreeMigrationNetworkThrottlingInKbps": 10240,
+ "rfcacheEnabled": "false",
+ "rfcacheMaxIoSizeKb": 128,
+ "rfcacheOpertionalMode": "None",
+ "rfcachePageSizeKb": 64,
+ "storagePools": [
+ {
+ "id": "8d1cba1700000000",
+ "name": "pool1"
+ }
+ ]
+ }
+ ]
- @staticmethod
- def rename_pd_failed_msg(protection_domain_name):
- return "Failed to update the protection domain " + protection_domain_name + " with error "
+ STORAGE_POOL = [
+ {
+ "protectionDomainId": "7bd6457000000000",
+ "rebuildEnabled": True,
+ "mediaType": "HDD",
+ "name": "pool1",
+ "id": "8d1cba1700000000"
+ }
+ ]
@staticmethod
- def version_pd_failed_msg():
- return "Getting PyPowerFlex SDK version, failed with Error The 'PyPowerFlex' distribution was " \
- "not found and is required by the application"
+ def get_failed_msgs(response_type):
+ error_msg = {
+ 'get_pd_failed_msg': "Failed to get the protection domain ",
+ 'empty_pd_msg': "Please provide the valid protection_domain_name",
+ 'overall_limit_msg': "Overall limit cannot be negative. Provide a valid value",
+ 'new_name_in_create': "protection_domain_new_name/protection_domain_id are not supported during creation of protection domain",
+ 'create_pd_exception': "operation failed with error",
+ 'rename_pd_exception': "Failed to update the protection domain 7bd6457000000000 with error",
+ 'modify_network_limits_exception': "Failed to update the network limits of protection domain",
+ 'rf_cache_limits_exception': "Failed to update the rf cache limits of protection domain",
+ 'delete_pd_exception': "Delete protection domain '7bd6457000000000' operation failed with error ''",
+ 'get_sp_exception': "Failed to get the storage pools present in protection domain",
+ }
+ return error_msg.get(response_type)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py
index 6671fd875..0867024f5 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_consistency_group_api.py
@@ -23,6 +23,7 @@ class MockReplicationConsistencyGroupApi:
"verifycert": None, "port": None, "protection_domain_name": None,
"protection_domain_id": None},
"target_volume_access_mode": None, "is_consistent": None,
+ "rcg_state": None, "force": None,
"state": None
}
RCG_ID = "aadc17d500000000"
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_pair_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_pair_api.py
index f621db47e..5ac7010cf 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_pair_api.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_replication_pair_api.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2023, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -48,3 +48,12 @@ class MockReplicationPairApi:
def get_volume_details():
return [{"id": "0001",
"name": "volume1"}]
+
+ @staticmethod
+ def get_error_message(response_type):
+ error_msg = {"get_rcg_exception": "Failed to get the replication consistency group 12 with error ",
+ "get_rcg_id_name_error": "Specify either rcg_id or rcg_name to create replication pair",
+ "get_pause_error": "Specify either pair_id or pair_name to perform pause or resume of initial copy",
+ "get_pause_or_resume_error": "Specify a valid pair_name or pair_id to perform pause or resume",
+ "get_volume_exception": "Failed to retrieve volume"}
+ return error_msg[response_type]
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdc_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdc_api.py
new file mode 100644
index 000000000..ba65bc67f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sdc_api.py
@@ -0,0 +1,64 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""
+Mock Api response for Unit tests of SDC module on Dell Technologies (Dell) PowerFlex
+"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockSdcApi:
+ MODULE_UTILS_PATH = "ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell.utils"
+ COMMON_ARGS = {
+ "sdc_id": None,
+ "sdc_ip": None,
+ "sdc_name": None, "sdc_new_name": None,
+ "performance_profile": None,
+ "state": None
+ }
+ SDC_ID = "07335d3d00000006"
+
+ @staticmethod
+ def get_sdc_details():
+ return [{
+ "id": "07335d3d00000006",
+ "installedSoftwareVersionInfo": "R3_6.0.0",
+ "kernelBuildNumber": None,
+ "kernelVersion": "3.10.0",
+ "mapped_volumes": [],
+ "mdmConnectionState": "Disconnected",
+ "memoryAllocationFailure": None,
+ "name": "LGLAP203",
+ "osType": "Linux",
+ "peerMdmId": None,
+ "perfProfile": "HighPerformance",
+ "sdcApproved": True,
+ "sdcApprovedIps": None,
+ "sdcGuid": "F8ECB844-23B8-4629-92BB-B6E49A1744CB",
+ "sdcIp": "N/A",
+ "sdcIps": None,
+ "sdcType": "AppSdc",
+ "sdrId": None,
+ "socketAllocationFailure": None,
+ "softwareVersionInfo": "R3_6.0.0",
+ "systemId": "4a54a8ba6df0690f",
+ "versionInfo": "R3_6.0.0"
+ }]
+
+ RESPONSE_EXEC_DICT = {
+ 'get_sdc_details_empty_sdc_id_exception': "Please provide valid sdc_id",
+ 'get_sdc_details_with_exception': "Failed to get the SDC 07335d3d00000006 with error",
+ 'get_sdc_details_mapped_volumes_with_exception': "Failed to get the volumes mapped to SDC",
+ 'modify_sdc_throws_exception': "Modifying performance profile of SDC 07335d3d00000006 failed with error",
+ 'rename_sdc_empty_new_name_exception': "Provide valid SDC name to rename to.",
+ 'rename_sdc_throws_exception': "Failed to rename SDC",
+ 'remove_sdc_throws_exception': "Removing SDC 07335d3d00000006 failed with error"
+ }
+
+ @staticmethod
+ def get_sdc_exception_response(response_type):
+ return MockSdcApi.RESPONSE_EXEC_DICT.get(response_type, "")
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sds_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sds_api.py
new file mode 100644
index 000000000..60b5f95e9
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_sds_api.py
@@ -0,0 +1,147 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""
+Mock Api response for Unit tests of SDS module on Dell Technologies (Dell) PowerFlex
+"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockSDSApi:
+ SDS_COMMON_ARGS = {
+ "hostname": "**.***.**.***",
+ "sds_name": "test_node0",
+ "sds_id": None,
+ "sds_new_name": None,
+ "sds_ip_list": None,
+ "sds_ip_state": None,
+ "rfcache_enabled": None,
+ "rmcache_enabled": None,
+ "rmcache_size": None,
+ "performance_profile": None,
+ "protection_domain_name": None,
+ "protection_domain_id": None,
+ "fault_set_name": None,
+ "fault_set_id": None,
+ "fault_set_new_name": None,
+ "state": None
+ }
+
+ SDS_GET_LIST = [
+ {
+ "authenticationError": "None",
+ "certificateInfo": None,
+ "configuredDrlMode": "Volatile",
+ "drlMode": "Volatile",
+ "faultSetId": "test_id_1",
+ "fglMetadataCacheSize": 0,
+ "fglMetadataCacheState": "Disabled",
+ "fglNumConcurrentWrites": 1000,
+ "id": "8f3bb0cc00000002",
+ "ipList": [
+ {
+ "ip": "10.47.xxx.xxx",
+ "role": "all"
+ },
+ {
+ "ip": "10.46.xxx.xxx",
+ "role": "sdcOnly"
+ }
+ ],
+ "lastUpgradeTime": 0,
+ "links": [],
+ "maintenanceState": "NoMaintenance",
+ "maintenanceType": "NoMaintenance",
+ "mdmConnectionState": "Connected",
+ "membershipState": "Joined",
+ "name": "test_node0",
+ "numOfIoBuffers": None,
+ "numRestarts": 2,
+ "onVmWare": True,
+ "perfProfile": "HighPerformance",
+ "port": 7072,
+ "protectionDomainId": "9300c1f900000000",
+ "protectionDomainName": "test_domain",
+ "raidControllers": None,
+ "rfcacheEnabled": True,
+ "rfcacheErrorApiVersionMismatch": False,
+ "rfcacheErrorDeviceDoesNotExist": False,
+ "rfcacheErrorInconsistentCacheConfiguration": False,
+ "rfcacheErrorInconsistentSourceConfiguration": False,
+ "rfcacheErrorInvalidDriverPath": False,
+ "rfcacheErrorLowResources": False,
+ "rmcacheEnabled": True,
+ "rmcacheFrozen": False,
+ "rmcacheMemoryAllocationState": "AllocationPending",
+ "rmcacheSizeInKb": 131072,
+ "rmcacheSizeInMb": 128,
+ "sdsConfigurationFailure": None,
+ "sdsDecoupled": None,
+ "sdsReceiveBufferAllocationFailures": None,
+ "sdsState": "Normal",
+ "softwareVersionInfo": "R3_6.0.0"
+ }
+ ]
+
+ PROTECTION_DOMAIN = {
+ "protectiondomain": [
+ {
+ "id": "7bd6457000000000",
+ "name": "test_domain",
+ "protectionDomainState": "Active",
+ "overallIoNetworkThrottlingInKbps": 20480,
+ "rebalanceNetworkThrottlingInKbps": 10240,
+ "rebuildNetworkThrottlingInKbps": 10240,
+ "vtreeMigrationNetworkThrottlingInKbps": 10240,
+ "rfcacheEnabled": "false",
+ "rfcacheMaxIoSizeKb": 128,
+ "rfcacheOpertionalMode": "None",
+ "rfcachePageSizeKb": 64,
+ "storagePools": [
+ {
+ "id": "8d1cba1700000000",
+ "name": "pool1"
+ }
+ ]
+ }
+ ]
+ }
+
+ FAULT_SET_GET_LIST = [
+ {
+ "protectionDomainId": "test_domain",
+ "name": "fault_set_name",
+ "id": "fault_set_id",
+ "links": []
+ }
+ ]
+
+ RESPONSE_EXEC_DICT = {
+ "delete_sds_exception": "Delete SDS '8f3bb0cc00000002' operation failed with error",
+ "rename_sds_exception": "Failed to update the SDS",
+ "create_sds_exception": "Create SDS test_node0 operation failed with error",
+ "get_sds_exception": "Failed to get the SDS",
+ "rmcache_size_exception": "RM cache size can be set only when RM cache is enabled",
+ "create_sds_wo_sds_name": "Please provide valid sds_name value for creation of SDS.",
+ "create_sds_wo_pd": "Protection Domain is a mandatory parameter",
+ "create_sds_wo_sds_ip_list": "Please provide valid sds_ip_list values for " +
+ "creation of SDS.",
+ "create_sds_incorrect_sds_ip_state": "Incorrect IP state given for creation of SDS.",
+ "create_sds_sds_id": "Creation of SDS is allowed using sds_name " +
+ "only, sds_id given.",
+ "create_sds_sds_new_name": "sds_new_name parameter is not supported " +
+ "during creation of a SDS. Try renaming the " +
+ "SDS after the creation.",
+ "rename_sds_empty_exception": "Provide valid value for name for the creation/modification of the SDS.",
+ "add_ip_exception": "Add IP to SDS '8f3bb0cc00000002' operation failed with error ",
+ "remove_ip_exception": "Remove IP from SDS '8f3bb0cc00000002' operation failed with error ",
+ "set_ip_role_exception": "Update role of IP for SDS '8f3bb0cc00000002' operation failed"
+ }
+
+ @staticmethod
+ def get_sds_exception_response(response_type):
+ return MockSDSApi.RESPONSE_EXEC_DICT.get(response_type, "")
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_snapshot_policy_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_snapshot_policy_api.py
new file mode 100644
index 000000000..35dabb9fd
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_snapshot_policy_api.py
@@ -0,0 +1,186 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""
+Mock Api response for Unit tests of snapshot policy module on Dell Technologies (Dell) PowerFlex
+"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+class MockSnapshotPolicyApi:
+ SNAPSHOT_POLICY_COMMON_ARGS = {
+ "hostname": "**.***.**.***",
+ "snapshot_policy_name": None,
+ "snapshot_policy_id": None,
+ "new_name": None,
+ "access_mode": None,
+ "secure_snapshots": None,
+ "auto_snapshot_creation_cadence": {
+ "time": None,
+ "unit": "Minute"},
+ "num_of_retained_snapshots_per_level": None,
+ "source_volume": None,
+ "source_volume_state": None,
+ "pause": None,
+ "state": None
+ }
+
+ SNAPSHOT_POLICY_GET_LIST = [
+ {
+ "autoSnapshotCreationCadenceInMin": 120,
+ "id": "15ae842500000004",
+ "lastAutoSnapshotCreationFailureReason": "NR",
+ "lastAutoSnapshotFailureInFirstLevel": False,
+ "maxVTreeAutoSnapshots": 40,
+ "name": "Ansible_snap_policy_1",
+ "nextAutoSnapshotCreationTime": 1683617581,
+ "numOfAutoSnapshots": 0,
+ "numOfCreationFailures": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfLockedSnapshots": 0,
+ "numOfRetainedSnapshotsPerLevel": [
+ 40
+ ],
+ "numOfSourceVolumes": 0,
+ "secureSnapshots": False,
+ "snapshotAccessMode": "ReadWrite",
+ "snapshotPolicyState": "Active",
+ "systemId": "0e7a082862fedf0f",
+ "timeOfLastAutoSnapshot": 0,
+ "timeOfLastAutoSnapshotCreationFailure": 0
+ }
+ ]
+
+ SNAPSHOT_POLICY_2_GET_LIST = [
+ {
+ "autoSnapshotCreationCadenceInMin": 120,
+ "id": "15ae842500000005",
+ "lastAutoSnapshotCreationFailureReason": "NR",
+ "lastAutoSnapshotFailureInFirstLevel": False,
+ "maxVTreeAutoSnapshots": 40,
+ "name": "testing_2",
+ "nextAutoSnapshotCreationTime": 1683617581,
+ "numOfAutoSnapshots": 0,
+ "numOfCreationFailures": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfLockedSnapshots": 0,
+ "numOfRetainedSnapshotsPerLevel": [
+ 40
+ ],
+ "numOfSourceVolumes": 1,
+ "secureSnapshots": False,
+ "snapshotAccessMode": "ReadWrite",
+ "snapshotPolicyState": "Paused",
+ "systemId": "0e7a082862fedf0f",
+ "timeOfLastAutoSnapshot": 0,
+ "timeOfLastAutoSnapshotCreationFailure": 0
+ }
+ ]
+
+ VOLUME_GET_LIST = [
+ {
+ 'storagePoolId': 'test_pool_id_1',
+ 'dataLayout': 'MediumGranularity',
+ 'vtreeId': 'vtree_id_1',
+ 'sizeInKb': 8388608,
+ 'snplIdOfAutoSnapshot': None,
+ 'volumeType': 'ThinProvisioned',
+ 'consistencyGroupId': None,
+ 'ancestorVolumeId': None,
+ 'notGenuineSnapshot': False,
+ 'accessModeLimit': 'ReadWrite',
+ 'secureSnapshotExpTime': 0,
+ 'useRmcache': False,
+ 'managedBy': 'ScaleIO',
+ 'lockedAutoSnapshot': False,
+ 'lockedAutoSnapshotMarkedForRemoval': False,
+ 'autoSnapshotGroupId': None,
+ 'compressionMethod': 'Invalid',
+ 'pairIds': None,
+ 'timeStampIsAccurate': False,
+ 'mappedSdcInfo': None,
+ 'originalExpiryTime': 0,
+ 'retentionLevels': [
+ ],
+ 'snplIdOfSourceVolume': None,
+ 'volumeReplicationState': 'UnmarkedForReplication',
+ 'replicationJournalVolume': False,
+ 'replicationTimeStamp': 0,
+ 'creationTime': 1655878090,
+ 'name': 'source_volume_name',
+ 'id': 'source_volume_id'
+ }
+ ]
+
+ VOLUME_2_GET_LIST = [
+ {
+ 'storagePoolId': 'test_pool_id_1',
+ 'dataLayout': 'MediumGranularity',
+ 'vtreeId': 'vtree_id_1',
+ 'sizeInKb': 8388608,
+ 'snplIdOfAutoSnapshot': None,
+ 'volumeType': 'ThinProvisioned',
+ 'consistencyGroupId': None,
+ 'ancestorVolumeId': None,
+ 'notGenuineSnapshot': False,
+ 'accessModeLimit': 'ReadWrite',
+ 'secureSnapshotExpTime': 0,
+ 'useRmcache': False,
+ 'managedBy': 'ScaleIO',
+ 'lockedAutoSnapshot': False,
+ 'lockedAutoSnapshotMarkedForRemoval': False,
+ 'autoSnapshotGroupId': None,
+ 'compressionMethod': 'Invalid',
+ 'pairIds': None,
+ 'timeStampIsAccurate': False,
+ 'mappedSdcInfo': None,
+ 'originalExpiryTime': 0,
+ 'retentionLevels': [
+ ],
+ 'snplIdOfSourceVolume': "15ae842500000005",
+ 'volumeReplicationState': 'UnmarkedForReplication',
+ 'replicationJournalVolume': False,
+ 'replicationTimeStamp': 0,
+ 'creationTime': 1655878090,
+ 'name': 'source_volume_name_2',
+ 'id': 'source_volume_id_2'
+ }
+ ]
+
+ SNAPSHOT_POLICY_STATISTICS = {
+ "autoSnapshotVolIds": [],
+ "expiredButLockedSnapshotsIds": [],
+ "numOfAutoSnapshots": 0,
+ "numOfExpiredButLockedSnapshots": 0,
+ "numOfSrcVols": 0,
+ "srcVolIds": []
+ }
+
+ @staticmethod
+ def get_snapshot_policy_exception_response(response_type):
+ if response_type == 'get_vol_details_exception':
+ return "Failed to get the volume source_volume_id_2 with error "
+ elif response_type == 'get_snapshot_policy_details_exception':
+ return "Failed to get the snapshot policy with error "
+ elif response_type == 'create_exception':
+ return "Creation of snapshot policy failed with error "
+ elif response_type == 'create_id_exception':
+ return "Creation of snapshot policy is allowed using snapshot_policy_name only, snapshot_policy_id given."
+ elif response_type == 'delete_exception':
+ return "Deletion of snapshot policy 15ae842500000004 failed with error "
+ elif response_type == 'modify_exception':
+ return "Failed to update the snapshot policy 15ae842500000004 with error "
+ elif response_type == 'source_volume_exception':
+ return "Failed to manage the source volume source_volume_id with error "
+ elif response_type == 'add_source_volume_wo_vol':
+ return "Either id or name of source volume needs to be passed with state of source volume"
+ elif response_type == 'add_source_volume_vol_id_name':
+ return "id and name of source volume are mutually exclusive"
+ elif response_type == 'add_non_existing_source_volume':
+ return "Failed to get the volume non_existing_source_volume_name with error Volume with identifier non_existing_source_volume_name not found"
+ elif response_type == 'pause_exception':
+ return "Failed to pause/resume 15ae842500000004 with error"
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py
index 0246b9dd4..87af1d6eb 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_storagepool_api.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -28,6 +28,7 @@ class MockStoragePoolApi:
STORAGE_POOL_GET_LIST = [
{
'protectionDomainId': '4eeb304600000000',
+ 'protectionDomainName': 'test_pd',
'rebuildEnabled': True,
'dataLayout': 'MediumGranularity',
'persistentChecksumState': 'Protected',
@@ -95,6 +96,149 @@ class MockStoragePoolApi:
}
]
+ STORAGE_POOL_GET_MULTI_LIST = [
+ {
+ 'protectionDomainId': '4eeb304600000000',
+ 'protectionDomainName': 'test_pd',
+ 'rebuildEnabled': True,
+ 'dataLayout': 'MediumGranularity',
+ 'persistentChecksumState': 'Protected',
+ 'addressSpaceUsage': 'Normal',
+ 'externalAccelerationType': 'None',
+ 'rebalanceEnabled': True,
+ 'sparePercentage': 10,
+ 'rmcacheWriteHandlingMode': 'Cached',
+ 'checksumEnabled': False,
+ 'useRfcache': False,
+ 'compressionMethod': 'Invalid',
+ 'fragmentationEnabled': True,
+ 'numOfParallelRebuildRebalanceJobsPerDevice': 2,
+ 'capacityAlertHighThreshold': 80,
+ 'capacityAlertCriticalThreshold': 90,
+ 'capacityUsageState': 'Normal',
+ 'capacityUsageType': 'NetCapacity',
+ 'addressSpaceUsageType': 'DeviceCapacityLimit',
+ 'bgScannerCompareErrorAction': 'ReportAndFix',
+ 'bgScannerReadErrorAction': 'ReportAndFix',
+ 'fglExtraCapacity': None,
+ 'fglOverProvisioningFactor': None,
+ 'fglWriteAtomicitySize': None,
+ 'fglMaxCompressionRatio': None,
+ 'fglPerfProfile': None,
+ 'replicationCapacityMaxRatio': 0,
+ 'persistentChecksumEnabled': True,
+ 'persistentChecksumBuilderLimitKb': 3072,
+ 'persistentChecksumValidateOnRead': False,
+ 'useRmcache': False,
+ 'fglAccpId': None,
+ 'rebuildIoPriorityPolicy': 'limitNumOfConcurrentIos',
+ 'rebalanceIoPriorityPolicy': 'favorAppIos',
+ 'vtreeMigrationIoPriorityPolicy': 'favorAppIos',
+ 'protectedMaintenanceModeIoPriorityPolicy': 'limitNumOfConcurrentIos',
+ 'rebuildIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'rebalanceIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'rebuildIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'rebalanceIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'vtreeMigrationIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'rebuildIoPriorityAppIopsPerDeviceThreshold': None,
+ 'rebalanceIoPriorityAppIopsPerDeviceThreshold': None,
+ 'vtreeMigrationIoPriorityAppIopsPerDeviceThreshold': None,
+ 'protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold': None,
+ 'rebuildIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'rebalanceIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'rebuildIoPriorityQuietPeriodInMsec': None,
+ 'rebalanceIoPriorityQuietPeriodInMsec': None,
+ 'vtreeMigrationIoPriorityQuietPeriodInMsec': None,
+ 'protectedMaintenanceModeIoPriorityQuietPeriodInMsec': None,
+ 'zeroPaddingEnabled': True,
+ 'backgroundScannerMode': 'DataComparison',
+ 'backgroundScannerBWLimitKBps': 3072,
+ 'fglMetadataSizeXx100': None,
+ 'fglNvdimmWriteCacheSizeInMb': None,
+ 'fglNvdimmMetadataAmortizationX100': None,
+ 'mediaType': 'HDD',
+ 'name': 'test_pool',
+ 'id': 'test_pool_id_1'
+ },
+ {
+ 'protectionDomainId': '4eeb304600000002',
+ 'protectionDomainName': 'test_pd_1',
+ 'rebuildEnabled': True,
+ 'dataLayout': 'MediumGranularity',
+ 'persistentChecksumState': 'Protected',
+ 'addressSpaceUsage': 'Normal',
+ 'externalAccelerationType': 'None',
+ 'rebalanceEnabled': True,
+ 'sparePercentage': 10,
+ 'rmcacheWriteHandlingMode': 'Cached',
+ 'checksumEnabled': False,
+ 'useRfcache': False,
+ 'compressionMethod': 'Invalid',
+ 'fragmentationEnabled': True,
+ 'numOfParallelRebuildRebalanceJobsPerDevice': 2,
+ 'capacityAlertHighThreshold': 80,
+ 'capacityAlertCriticalThreshold': 90,
+ 'capacityUsageState': 'Normal',
+ 'capacityUsageType': 'NetCapacity',
+ 'addressSpaceUsageType': 'DeviceCapacityLimit',
+ 'bgScannerCompareErrorAction': 'ReportAndFix',
+ 'bgScannerReadErrorAction': 'ReportAndFix',
+ 'fglExtraCapacity': None,
+ 'fglOverProvisioningFactor': None,
+ 'fglWriteAtomicitySize': None,
+ 'fglMaxCompressionRatio': None,
+ 'fglPerfProfile': None,
+ 'replicationCapacityMaxRatio': 0,
+ 'persistentChecksumEnabled': True,
+ 'persistentChecksumBuilderLimitKb': 3072,
+ 'persistentChecksumValidateOnRead': False,
+ 'useRmcache': False,
+ 'fglAccpId': None,
+ 'rebuildIoPriorityPolicy': 'limitNumOfConcurrentIos',
+ 'rebalanceIoPriorityPolicy': 'favorAppIos',
+ 'vtreeMigrationIoPriorityPolicy': 'favorAppIos',
+ 'protectedMaintenanceModeIoPriorityPolicy': 'limitNumOfConcurrentIos',
+ 'rebuildIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'rebalanceIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'vtreeMigrationIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'protectedMaintenanceModeIoPriorityNumOfConcurrentIosPerDevice': 1,
+ 'rebuildIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'rebalanceIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'vtreeMigrationIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'protectedMaintenanceModeIoPriorityBwLimitPerDeviceInKbps': 10240,
+ 'rebuildIoPriorityAppIopsPerDeviceThreshold': None,
+ 'rebalanceIoPriorityAppIopsPerDeviceThreshold': None,
+ 'vtreeMigrationIoPriorityAppIopsPerDeviceThreshold': None,
+ 'protectedMaintenanceModeIoPriorityAppIopsPerDeviceThreshold': None,
+ 'rebuildIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'rebalanceIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'vtreeMigrationIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'protectedMaintenanceModeIoPriorityAppBwPerDeviceThresholdInKbps': None,
+ 'rebuildIoPriorityQuietPeriodInMsec': None,
+ 'rebalanceIoPriorityQuietPeriodInMsec': None,
+ 'vtreeMigrationIoPriorityQuietPeriodInMsec': None,
+ 'protectedMaintenanceModeIoPriorityQuietPeriodInMsec': None,
+ 'zeroPaddingEnabled': True,
+ 'backgroundScannerMode': 'DataComparison',
+ 'backgroundScannerBWLimitKBps': 3072,
+ 'fglMetadataSizeXx100': None,
+ 'fglNvdimmWriteCacheSizeInMb': None,
+ 'fglNvdimmMetadataAmortizationX100': None,
+ 'mediaType': 'HDD',
+ 'name': 'test_pool',
+ 'id': 'test_pool_id_2'
+ }
+ ]
+
+ PROTECTION_DETAILS = [{"pd_id": "4eeb304600000000", "pd_name": "test_pd"}]
+
+ PROTECTION_DETAILS_1 = [{"id": "4eeb304600000001", "name": "test_pd_name"}]
+
STORAGE_POOL_STATISTICS = {
'backgroundScanFixedReadErrorCount': 0,
'pendingMovingOutBckRebuildJobs': 0,
@@ -461,7 +605,23 @@ class MockStoragePoolApi:
'numOfIncomingVtreeMigrations': 1
}
+ RESPONSE_EXEC_DICT = {
+ "get_details": "Failed to get the storage pool test_pool with error ",
+ "invalid_pd_id": "Entered protection domain id does not match with the storage pool's protection domain id.",
+ "get_pd_exception": "Failed to get the protection domain 4eeb304600000001 with error",
+ "create_storage_pool": "Failed to create the storage pool",
+ "rename_storage_pool": "Failed to update the storage pool",
+ "create_pool_id": "storage_pool_name is missing & name required to create a storage pool",
+ "get_pd_non_exist": "Unable to find the protection domain",
+ "get_multi_details": "More than one storage pool found",
+ "create_wo_pd": "Please provide protection domain details",
+ "create_transitional": "TRANSITIONAL media type is not supported during creation.",
+ "create_pool_name_empty": "Empty or white spaced string provided in storage_pool_name.",
+ "create_pool_new_name": "storage_pool_new_name is passed during creation.",
+ "rename_storage_pool_empty": "Empty/White spaced name is not allowed during renaming of a storage pool.",
+ "delete_storage_pool": "Deleting storage pool is not supported through ansible module."
+ }
+
@staticmethod
def get_exception_response(response_type):
- if response_type == 'get_details':
- return "Failed to get the storage pool test_pool with error "
+ return MockStoragePoolApi.RESPONSE_EXEC_DICT.get(response_type, "")
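The hunk above replaces a growing `if response_type == ...` chain with a single class-level lookup table, so adding a new negative test only requires a new key in `RESPONSE_EXEC_DICT`. A minimal illustration of the resulting behaviour (keys taken from the dictionary above; the snippet itself is not part of the commit):

    # Known keys resolve to the expected error-message prefix.
    msg = MockStoragePoolApi.get_exception_response('delete_storage_pool')
    assert msg == "Deleting storage pool is not supported through ansible module."

    # Unknown keys fall back to an empty string rather than None, so the
    # substring assertions in the tests cannot raise a TypeError.
    assert MockStoragePoolApi.get_exception_response('unknown_key') == ""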
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py
index b05cc84d3..8c264940f 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/module_utils/mock_volume_api.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -61,7 +61,9 @@ class MockVolumeApi:
'originalExpiryTime': 0,
'retentionLevels': [
],
- 'snplIdOfSourceVolume': None,
+ 'snplIdOfSourceVolume': "snplIdOfSourceVolume",
+ 'snapshotPolicyId': 'snapshotPolicyId',
+ 'snapshotPolicyName': 'snapshotPolicyName',
'volumeReplicationState': 'UnmarkedForReplication',
'replicationJournalVolume': False,
'replicationTimeStamp': 0,
@@ -542,7 +544,61 @@ class MockVolumeApi:
'numOfIncomingVtreeMigrations': 0
}
+ SDC_RESPONSE = [
+ {
+ 'id': 'abdfe71b00030001',
+ }
+ ]
+
+ SDC_RESPONSE_EMPTY = []
+
+ GET_STORAGE_POOL = {
+ 'dataLayout': 'MediumGranularity'
+ }
+
+ GET_STORAGE_POOL_FINE = {
+ 'dataLayout': 'FineGranularity',
+ }
+
+ PROTECTION_DETAILS = [{"pd_id": "pd_id", "pd_name": "pd_name"}]
+
+ GET_ID = {"id": "e0d8f6c900000000"}
+ PROTECTION_DETAILS_MULTI = [
+ {"pd_id": "pd_id", "pd_name": "pd_name"},
+ {"pd_id": "pd_id", "pd_name": "pd_name"},
+ ]
+
+ RESPONSE_EXEC_DICT = {
+ 'get_details': "Failed to get the volume test_id_1 with error ",
+ 'get_sds': "Failed to get the SDC sdc_name with error ",
+ 'create_vol_name': "Please provide valid volume name.",
+ 'create_vol_size': "Size is a mandatory parameter",
+ 'create_vol_ctype': "compression_type for volume can only be",
+ 'create_vol_exc': "Create volume vol_name operation failed with error",
+ 'modify_access': "Modify access mode of SDC operation failed",
+ 'modify_limits': "Modify bandwidth/iops limits of SDC",
+ 'delete_volume': "Delete volume vol_id operation failed with",
+ 'val_params_err1': "sdc_id, sdc_ip and sdc_name are mutually exclusive",
+ 'val_params_err2': "cap_unit can be specified along with size only",
+ 'val_params_err3': "To remove/detach snapshot policy, please provide",
+ 'val_params_err4': "delete_snapshots can be specified only when the state",
+ 'modify_volume_exp': "Failed to update the volume",
+ 'to_modify_err1': "To remove/detach a snapshot policy, provide the ",
+ 'snap_pol_id_err': "Entered snapshot policy id does not ",
+ 'snap_pol_name_err': "Entered snapshot policy name does not ",
+ 'pd_id_err': "Entered protection domain id does not ",
+ 'pool_id_err': "Entered storage pool id does ",
+ 'pd_name_err': "Entered protection domain name does ",
+ 'pool_name_err': "Entered storage pool name does ",
+ 'get_pd_exception': "Failed to get the protection domain ",
+ 'get_sp_exception': "Failed to get the snapshot policy ",
+ 'get_spool_error1': "More than one storage pool found with",
+ 'get_spool_error2': "Failed to get the storage pool",
+ 'map_vol_exception': "Mapping volume name to SDC sdc_id1 failed with error",
+ 'unmap': "Unmap SDC sdc_id from volume vol_id failed with error",
+ 'perform_error1': "vol_new_name parameter is not supported during creation of a volume"
+ }
+
@staticmethod
def get_exception_response(response_type):
- if response_type == 'get_details':
- return "Failed to get the volume test_id_1 with error "
+ return MockVolumeApi.RESPONSE_EXEC_DICT.get(response_type, "")
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_device.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_device.py
new file mode 100644
index 000000000..c18204339
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_device.py
@@ -0,0 +1,471 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for Device module on PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_device_api import MockDeviceApi
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fail_json \
+ import FailJsonException, fail_json
+
+utils.get_logger = MagicMock()
+utils.get_powerflex_gateway_host_connection = MagicMock()
+utils.PowerFlexClient = MagicMock()
+
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.powerflex.plugins.modules.device import PowerFlexDevice
+
+
+class TestPowerflexDevice():
+
+ get_module_args = MockDeviceApi.DEVICE_COMMON_ARGS
+
+ @pytest.fixture
+ def device_module_mock(self, mocker):
+ mocker.patch(
+ MockDeviceApi.MODULE_UTILS_PATH + '.PowerFlexClient',
+ new=MockApiException)
+ device_module_mock = PowerFlexDevice()
+ device_module_mock.module.fail_json = fail_json
+ return device_module_mock
+
+ def capture_fail_json_call(self, error_msg, device_module_mock):
+ try:
+ device_module_mock.perform_module_operation()
+ except FailJsonException as fj_object:
+ assert error_msg in fj_object.message
+
+ def test_get_device_detail_using_dev_name_sds_id(self, device_module_mock):
+ self.get_module_args.update({
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "sds_id": MockDeviceApi.SDS_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=MockDeviceApi.DEVICE_GET_LIST)
+ device_module_mock.perform_module_operation()
+ device_module_mock.powerflex_conn.device.get.assert_called()
+
+ def test_get_device_detail_using_path_sds_name(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=MockDeviceApi.DEVICE_GET_LIST)
+ device_module_mock.perform_module_operation()
+ device_module_mock.powerflex_conn.device.get.assert_called()
+
+ def test_get_device_detail_using_dev_id(self, device_module_mock):
+ self.get_module_args.update({
+ "device_id": MockDeviceApi.DEVICE_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=MockDeviceApi.DEVICE_GET_LIST)
+ device_module_mock.perform_module_operation()
+ device_module_mock.powerflex_conn.device.get.assert_called()
+
+ def test_get_device_without_sds(self, device_module_mock):
+ self.get_module_args.update({
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=None)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'get_dev_without_SDS'), device_module_mock)
+
+ def test_get_device_without_sds_with_path(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=None)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'get_device_details_without_path'), device_module_mock)
+
+ def test_get_device_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "device_id": MockDeviceApi.DEVICE_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'get_device_exception'), device_module_mock)
+
+ def test_create_device_with_id(self, device_module_mock):
+ self.get_module_args.update({
+ "device_id": MockDeviceApi.DEVICE_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'create_id_exception'), device_module_mock)
+
+ def test_get_device_with_empty_path(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": "",
+ "sds_id": MockDeviceApi.SDS_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'empty_path'), device_module_mock)
+
+ def test_get_device_with_empty_name(self, device_module_mock):
+ self.get_module_args.update({
+ "device_name": "",
+ "sds_id": MockDeviceApi.SDS_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'empty_device_name'), device_module_mock)
+
+ def test_get_device_with_empty_sds_id(self, device_module_mock):
+ self.get_module_args.update({
+ "device_id": MockDeviceApi.DEVICE_ID_1,
+ "sds_id": "",
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'empty_sds'), device_module_mock)
+
+ def test_get_device_with_empty_sds_name(self, device_module_mock):
+ self.get_module_args.update({
+ "device_id": MockDeviceApi.DEVICE_ID_1,
+ "sds_name": "",
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'empty_sds'), device_module_mock)
+
+ def test_get_device_with_empty_dev_id(self, device_module_mock):
+ self.get_module_args.update({
+ "device_id": "",
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'empty_dev_id'), device_module_mock)
+
+ def test_get_device_with_space_in_name(self, device_module_mock):
+ self.get_module_args.update({
+ "device_name": " ",
+ "sds_id": MockDeviceApi.SDS_ID_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'space_in_name'), device_module_mock)
+
+ def test_get_device_with_space_in_name_with_sds_name(self, device_module_mock):
+ self.get_module_args.update({
+ "device_name": " ",
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'space_in_name'), device_module_mock)
+
+ def test_get_device_without_required_params(self, device_module_mock):
+ self.get_module_args.update({
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response(
+ 'with_required_params'), device_module_mock)
+
+ def test_modify_device_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "media_type": "SSD",
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=MockDeviceApi.DEVICE_GET_LIST)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'modify_exception'), device_module_mock)
+
+ def test_delete_device(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "state": "absent"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=MockDeviceApi.DEVICE_GET_LIST)
+ device_module_mock.perform_module_operation()
+ device_module_mock.powerflex_conn.device.delete.assert_called()
+
+ def test_delete_device_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "state": "absent"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=MockDeviceApi.DEVICE_GET_LIST)
+ device_module_mock.powerflex_conn.device.delete = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'delete_exception'), device_module_mock)
+ device_module_mock.powerflex_conn.device.delete.assert_called()
+
+ def test_get_sds_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "state": "absent"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=False)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'sds_exception'), device_module_mock)
+
+ def test_get_pd_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "storage_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=False)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'pd_exception'), device_module_mock)
+
+ def test_get_sp_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "storage_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockDeviceApi.PD_DETAILS_1)
+ device_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=False)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'sp_exception'), device_module_mock)
+
+ def test_get_acc_pool_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "acceleration_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockDeviceApi.PD_DETAILS_1)
+ device_module_mock.powerflex_conn.acceleration_pool.get = MagicMock(
+ return_value=False)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'ap_exception'), device_module_mock)
+
+ def test_add_device_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "storage_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockDeviceApi.PD_DETAILS_1)
+ device_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=MockDeviceApi.SP_DETAILS_1)
+ device_module_mock.powerflex_conn.device.create = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'add_exception'), device_module_mock)
+ device_module_mock.powerflex_conn.device.create.assert_called()
+
+ def test_add_device_name_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": " ",
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "storage_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockDeviceApi.PD_DETAILS_1)
+ device_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=MockDeviceApi.SP_DETAILS_1)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'add_dev_name_exception'), device_module_mock)
+
+ def test_add_device_path_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": " ",
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "storage_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockDeviceApi.PD_DETAILS_1)
+ device_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=MockDeviceApi.SP_DETAILS_1)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'add_dev_path_exception'), device_module_mock)
+
+ def test_add_device_ext_acc_type_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": " ",
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "protection_domain_name": MockDeviceApi.PD_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockDeviceApi.PD_DETAILS_1)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'ext_type_exception'), device_module_mock)
+
+ def test_add_device_without_pd_exception(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "storage_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=MockDeviceApi.SP_DETAILS_1)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'add_without_pd'), device_module_mock)
+
+ def test_add_device_without_pd_exception_for_acc_pool(self, device_module_mock):
+ self.get_module_args.update({
+ "current_pathname": MockDeviceApi.PATH_1,
+ "sds_name": MockDeviceApi.SDS_NAME_1,
+ "device_name": MockDeviceApi.DEVICE_NAME_1,
+ "media_type": "HDD",
+ "external_acceleration_type": "ReadAndWrite",
+ "acceleration_pool_name": MockDeviceApi.SP_NAME_1,
+ "force": False,
+ "state": "present"
+ })
+ device_module_mock.module.params = self.get_module_args
+ device_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockDeviceApi.SDS_DETAILS_1)
+ device_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=[])
+ device_module_mock.powerflex_conn.acceleration_pool.get = MagicMock(
+ return_value=MockDeviceApi.SP_DETAILS_1)
+ self.capture_fail_json_call(MockDeviceApi.get_device_exception_response1(
+ 'add_without_pd'), device_module_mock)
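The new device tests route failures through `fail_json`/`FailJsonException` from `mock_fail_json` instead of inspecting `module.fail_json.call_args`, which is what lets `capture_fail_json_call` assert on the raised message. That helper module is not part of this hunk; a minimal sketch of the shape these tests assume:

    # Assumed shape of tests/unit/plugins/module_utils/mock_fail_json.py;
    # the helper shipped with the collection may differ in detail.
    class FailJsonException(Exception):
        def __init__(self, msg, **kwargs):
            super().__init__(msg)
            self.message = msg

    def fail_json(msg, **kwargs):
        # Stand-in for AnsibleModule.fail_json: raise instead of exiting the
        # process, so a test can catch the exception and inspect .message.
        raise FailJsonException(msg, **kwargs)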
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_fault_set.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_fault_set.py
new file mode 100644
index 000000000..ea2aa1104
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_fault_set.py
@@ -0,0 +1,215 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for fault set module on PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
+from mock.mock import MagicMock
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fault_set_api import MockFaultSetApi
+from ansible_collections.dellemc.powerflex.plugins.modules.fault_set import \
+ FaultSetHandler
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base \
+ import PowerFlexUnitBase
+
+from ansible_collections.dellemc.powerflex.plugins.modules.fault_set import PowerFlexFaultSet
+
+
+class TestPowerflexFaultSet(PowerFlexUnitBase):
+
+ get_module_args = MockFaultSetApi.FAULT_SET_COMMON_ARGS
+
+ @pytest.fixture
+ def module_object(self):
+ return PowerFlexFaultSet
+
+ def test_get_fault_set_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'fault_set_id': 'fault_set_id_1',
+ 'state': "present"
+ })
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=MockFaultSetApi.FAULT_SET_GET_LIST)
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ FaultSetHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.fault_set.get.assert_called()
+
+ def test_get_fault_set_name_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'fault_set_name': 'fault_set_name_1',
+ 'protection_domain_id': 'test_pd_id_1',
+ 'state': "present"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=MockFaultSetApi.FAULT_SET_GET_LIST)
+ FaultSetHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.fault_set.get.assert_called()
+
+ def test_create_fault_set_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": "test_fs_1",
+ "protection_domain_name": "test_pd_1",
+ "state": "present"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=None)
+ FaultSetHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.fault_set.create.assert_called()
+
+ def test_create_fault_set_wo_pd_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": "test_fs_1",
+ "state": "present"
+ })
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=None)
+ self.capture_fail_json_call(
+ MockFaultSetApi.get_fault_set_exception_response(
+ 'create_fault_set_wo_pd_exception'), powerflex_module_mock, FaultSetHandler)
+
+ def test_create_fault_set_empty_name_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": " ",
+ "protection_domain_name": "test_pd_1",
+ "state": "present"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=None)
+ self.capture_fail_json_call(
+ MockFaultSetApi.get_fault_set_exception_response(
+ 'create_fault_set_empty_name_exception'), powerflex_module_mock, FaultSetHandler)
+
+ def test_create_fault_set_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": "test_fs_1",
+ "protection_domain_name": "test_pd_1",
+ "state": "present"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=None)
+ powerflex_module_mock.powerflex_conn.fault_set.create = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockFaultSetApi.get_fault_set_exception_response(
+ 'create_fault_set_exception'), powerflex_module_mock, FaultSetHandler)
+
+ def test_rename_fault_set_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": 'fault_set_name_1',
+ "protection_domain_name": "test_pd_1",
+ "fault_set_new_name": "fs_new_name",
+ "state": "present"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=MockFaultSetApi.FAULT_SET_GET_LIST[0])
+ FaultSetHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.fault_set.rename.assert_called()
+
+ def test_rename_fault_set_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": 'fault_set_name_1',
+ "protection_domain_name": "test_pd_1",
+ "fault_set_new_name": "fs_new_name",
+ "state": "present"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=MockFaultSetApi.FAULT_SET_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.fault_set.rename = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockFaultSetApi.get_fault_set_exception_response(
+ 'rename_fault_set_exception'), powerflex_module_mock, FaultSetHandler)
+
+ def test_delete_fault_set_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": 'test_fs_1',
+ "protection_domain_name": "test_pd_1",
+ "state": "absent"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=MockFaultSetApi.FAULT_SET_GET_LIST[0])
+ FaultSetHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.fault_set.delete.assert_called()
+
+ def test_delete_fault_set_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "fault_set_name": 'test_fs_1',
+ "protection_domain_name": "test_pd_1",
+ "state": "absent"
+ })
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=MockFaultSetApi.FAULT_SET_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.fault_set.delete = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockFaultSetApi.get_fault_set_exception_response(
+ 'delete_fault_set_exception'), powerflex_module_mock, FaultSetHandler)
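test_fault_set.py is written against `PowerFlexUnitBase` rather than duplicating the fixture and fail_json plumbing in every test class: the subclass only supplies the `module_object` fixture and the common argument dictionary, while connection mocking is handled by the `initial_mock` import seen at the top of the file. The base class is not included in this diff; a rough sketch of the helpers these tests assume it provides:

    # Assumed shape of libraries/powerflex_unit_base.py (not shown in this diff).
    import pytest
    from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fail_json \
        import FailJsonException, fail_json

    class PowerFlexUnitBase:
        @pytest.fixture
        def powerflex_module_mock(self, module_object):
            module_mock = module_object()              # e.g. PowerFlexFaultSet
            module_mock.module.check_mode = False
            module_mock.module.fail_json = fail_json
            return module_mock

        def set_module_params(self, module_mock, base_args, overrides):
            base_args.update(overrides)
            module_mock.module.params = base_args

        def capture_fail_json_call(self, error_msg, module_mock, handler):
            try:
                handler().handle(module_mock, module_mock.module.params)
            except FailJsonException as fj_object:
                assert error_msg in fj_object.message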
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py
index 2bd0ff158..ce5091212 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_info.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -9,14 +9,17 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
from mock.mock import MagicMock
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_info_api import MockInfoApi
-from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
- import MockSDKResponse
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fault_set_api import MockFaultSetApi
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
import MockApiException
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
import utils
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fail_json \
+ import FailJsonException, fail_json
utils.get_logger = MagicMock()
utils.get_powerflex_gateway_host_connection = MagicMock()
@@ -25,6 +28,7 @@ utils.PowerFlexClient = MagicMock()
from ansible.module_utils import basic
basic.AnsibleModule = MagicMock()
from ansible_collections.dellemc.powerflex.plugins.modules.info import PowerFlexInfo
+INVALID_SORT_MSG = 'messageCode=PARSE002 displayMessage=An invalid column name: invalid is entered in the sort list'
class TestPowerflexInfo():
@@ -33,6 +37,9 @@ class TestPowerflexInfo():
@pytest.fixture
def info_module_mock(self, mocker):
+ mocker.patch(
+ MockInfoApi.MODULE_UTILS_PATH + '.PowerFlexClient',
+ new=MockApiException)
info_module_mock = PowerFlexInfo()
info_module_mock.module.check_mode = False
info_module_mock.powerflex_conn.system.api_version = MagicMock(
@@ -41,8 +48,15 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.system.get = MagicMock(
return_value=MockInfoApi.INFO_ARRAY_DETAILS
)
+ info_module_mock.module.fail_json = fail_json
return info_module_mock
+ def capture_fail_json_call(self, error_msg, info_module_mock):
+ try:
+ info_module_mock.perform_module_operation()
+ except FailJsonException as fj_object:
+ assert error_msg in fj_object.message
+
def test_get_volume_details(self, info_module_mock):
self.get_module_args.update({
"gather_subset": ['vol']
@@ -60,6 +74,23 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.volume.get.assert_called()
info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes.assert_called()
+ def test_get_volume_details_filter(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['vol'],
+ "filters": [{
+ "filter_key": "storagePoolId",
+ "filter_operator": "equal",
+ "filter_value": "test_pool_id_1"
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ vol_resp = MockInfoApi.INFO_VOLUME_GET_LIST
+ info_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=vol_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.volume.get.assert_called()
+
def test_get_volume_details_with_exception(self, info_module_mock):
self.get_module_args.update({
"gather_subset": ['vol']
@@ -72,8 +103,8 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.utility.get_statistics_for_all_volumes = MagicMock(
side_effect=MockApiException
)
- info_module_mock.perform_module_operation()
- assert MockInfoApi.get_exception_response('volume_get_details') in info_module_mock.module.fail_json.call_args[1]['msg']
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'volume_get_details'), info_module_mock)
def test_get_sp_details(self, info_module_mock):
self.get_module_args.update({
@@ -92,6 +123,23 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.storage_pool.get.assert_called()
info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools.assert_called()
+ def test_get_sp_details_filter(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['storage_pool'],
+ "filters": [{
+ "filter_key": "id",
+ "filter_operator": "equal",
+ "filter_value": "test_pool_id_1"
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ storage_pool_resp = MockInfoApi.INFO_STORAGE_POOL_GET_LIST
+ info_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storage_pool_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.storage_pool.get.assert_called()
+
def test_get_sp_details_with_exception(self, info_module_mock):
self.get_module_args.update({
"gather_subset": ['storage_pool']
@@ -104,8 +152,8 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.utility.get_statistics_for_all_storagepools = MagicMock(
side_effect=MockApiException
)
- info_module_mock.perform_module_operation()
- assert MockInfoApi.get_exception_response('sp_get_details') in info_module_mock.module.fail_json.call_args[1]['msg']
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'sp_get_details'), info_module_mock)
def test_get_rcg_details(self, info_module_mock):
self.get_module_args.update({
@@ -118,6 +166,22 @@ class TestPowerflexInfo():
info_module_mock.perform_module_operation()
info_module_mock.powerflex_conn.replication_consistency_group.get.assert_called()
+ def test_get_rcg_filter_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['rcg'],
+ "filters": [{
+ "filter_key": "id",
+ "filter_operator": "equal",
+ "filter_value": "aadc17d500000000"
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ rcg_resp = MockInfoApi.RCG_LIST
+ info_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=rcg_resp)
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.replication_consistency_group.get.assert_called()
+
def test_get_rcg_details_throws_exception(self, info_module_mock):
self.get_module_args.update({
"gather_subset": ['rcg']
@@ -126,8 +190,8 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
side_effect=MockApiException
)
- info_module_mock.perform_module_operation()
- assert MockInfoApi.get_exception_response('rcg_get_details') in info_module_mock.module.fail_json.call_args[1]['msg']
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'rcg_get_details'), info_module_mock)
def test_get_replication_pair_details(self, info_module_mock):
self.get_module_args.update({
@@ -147,5 +211,396 @@ class TestPowerflexInfo():
info_module_mock.powerflex_conn.replication_pair.get = MagicMock(
side_effect=MockApiException
)
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'replication_pair_get_details'), info_module_mock)
+
+ def test_get_snapshot_policy_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['snapshot_policy']
+ })
+ info_module_mock.module.params = self.get_module_args
+ snapshot_policy_resp = MockInfoApi.INFO_SNAPSHOT_POLICY_GET_LIST
+ info_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=snapshot_policy_resp
+ )
+ snapshot_policy_stat_resp = MockInfoApi.INFO_SNAPSHOT_POLICY_STATISTICS
+ info_module_mock.powerflex_conn.utility.get_statistics_for_all_snapshot_policies = MagicMock(
+ return_value=snapshot_policy_stat_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.snapshot_policy.get.assert_called()
+ info_module_mock.powerflex_conn.utility.get_statistics_for_all_snapshot_policies.assert_called()
+
+ def test_get_snapshot_policy_details_with_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['snapshot_policy']
+ })
+ info_module_mock.module.params = self.get_module_args
+ snapshot_policy_resp = MockInfoApi.INFO_SNAPSHOT_POLICY_GET_LIST
+ info_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=snapshot_policy_resp
+ )
+ info_module_mock.powerflex_conn.utility.get_statistics_for_all_snapshot_policies = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'snapshot_policy_get_details'), info_module_mock)
+
+ def test_get_sdc_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sdc']
+ })
+ info_module_mock.module.params = self.get_module_args
+ sdc_resp = MockInfoApi.INFO_SDC_FILTER_LIST
+ info_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=sdc_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.sdc.get.assert_called()
+
+ def test_get_sdc_details_filter(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sdc'],
+ "filters": [{
+ "filter_key": "name",
+ "filter_operator": "equal",
+ "filter_value": "sdc_1"
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ sdc_resp = MockInfoApi.INFO_SDC_FILTER_LIST
+        info_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=sdc_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.sdc.get.assert_called()
+
+ def test_get_sdc_details_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sdc']
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.sdc.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'sdc_get_details'), info_module_mock)
+
+ def test_get_sds_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sds']
+ })
+ info_module_mock.module.params = self.get_module_args
+ sds_resp = MockInfoApi.INFO_SDS_GET_LIST
+ info_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=sds_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.sds.get.assert_called()
+
+ def test_get_sds_filter_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sds'],
+ "filters": [
+ {
+ "filter_key": "name",
+ "filter_operator": "equal",
+ "filter_value": "node0",
+ },
+ {
+ "filter_key": "name",
+ "filter_operator": "equal",
+ "filter_value": "node1",
+ },
+ {
+ "filter_key": "id",
+ "filter_operator": "equal",
+ "filter_value": "8f3bb15300000001",
+ }
+ ]
+ })
+ info_module_mock.module.params = self.get_module_args
+ sds_resp = MockInfoApi.INFO_SDS_GET_LIST
+ info_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=sds_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.sds.get.assert_called()
+
+ def test_get_sds_details_filter_invalid(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sds'],
+ "filters": [{
+ "filter_key": "name",
+ "filter_op": "equal",
+ "filter_value": "LGLAP203",
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'get_sds_details_filter_invalid'), info_module_mock)
+
+ def test_get_sds_details_filter_empty(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sds'],
+ "filters": [{
+ "filter_key": "name",
+ "filter_operator": None,
+ "filter_value": "LGLAP203",
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'get_sds_details_filter_empty'), info_module_mock)
+
+ def test_get_sds_details_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['sds']
+ })
+ info_module_mock.module.params = self.get_module_args
+ sds_resp = MockInfoApi.INFO_SDS_GET_LIST
+ info_module_mock.powerflex_conn.sds.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'sds_get_details'), info_module_mock)
+
+ def test_get_pd_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['protection_domain']
+ })
+ info_module_mock.module.params = self.get_module_args
+ pd_resp = MockInfoApi.INFO_GET_PD_LIST
+ info_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.protection_domain.get.assert_called()
+
+ def test_get_pd_filter_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['protection_domain'],
+ "filters": [{
+ "filter_key": "name",
+ "filter_operator": "equal",
+ "filter_value": "domain1",
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ pd_resp = MockInfoApi.INFO_GET_PD_LIST
+ info_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.protection_domain.get.assert_called()
+
+ def test_get_pd_details_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['protection_domain']
+ })
+ info_module_mock.module.params = self.get_module_args
+ pd_resp = MockInfoApi.INFO_GET_PD_LIST
+ info_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'pd_get_details'), info_module_mock)
+
+ def test_get_device_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['device']
+ })
+ info_module_mock.module.params = self.get_module_args
+ device_resp = MockInfoApi.INFO_GET_DEVICE_LIST
+ info_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=device_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.device.get.assert_called()
+
+ def test_get_device_filter_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['device'],
+ "filters": [{
+ "filter_key": "name",
+ "filter_operator": "equal",
+ "filter_value": "device230",
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ device_resp = MockInfoApi.INFO_GET_DEVICE_LIST
+ info_module_mock.powerflex_conn.device.get = MagicMock(
+ return_value=device_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.device.get.assert_called()
+
+ def test_get_device_details_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['device']
+ })
+ info_module_mock.module.params = self.get_module_args
+ device_resp = MockInfoApi.INFO_GET_DEVICE_LIST
+ info_module_mock.powerflex_conn.device.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'device_get_details'), info_module_mock)
+
+ def test_get_fault_set_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['fault_set']
+ })
+ info_module_mock.module.params = self.get_module_args
+ pd_resp = MockFaultSetApi.PROTECTION_DOMAIN
+ info_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp['protectiondomain'])
+ fault_set_resp = MockInfoApi.INFO_GET_FAULT_SET_LIST
+ info_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fault_set_resp
+ )
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.fault_set.get.assert_called()
+
+ def test_get_fault_set_details_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['fault_set']
+ })
+ info_module_mock.module.params = self.get_module_args
+ fault_set_resp = MockInfoApi.INFO_GET_FAULT_SET_LIST
+ info_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'fault_set_get_details'), info_module_mock)
+
+ def test_get_fault_set_details_invalid_filter_operator_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['fault_set'],
+ "filters": [{
+ "filter_key": "name",
+ "filter_operator": "does_not_contain",
+ "filter_value": "LGLAP203",
+ }]
+ })
+ info_module_mock.module.params = self.get_module_args
+ fault_set_resp = MockInfoApi.INFO_GET_FAULT_SET_LIST
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'invalid_filter_operator_exception'), info_module_mock)
+
+ def test_get_fault_set_details_api_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['fault_set']
+ })
+ info_module_mock.module.params = self.get_module_args
+ fault_set_resp = MockInfoApi.INFO_GET_FAULT_SET_LIST
+ info_module_mock.powerflex_conn.system.api_version = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'api_exception'), info_module_mock)
+
+ def test_get_fault_set_details_system_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['fault_set']
+ })
+ info_module_mock.module.params = self.get_module_args
+ fault_set_resp = MockInfoApi.INFO_GET_FAULT_SET_LIST
+ info_module_mock.powerflex_conn.system.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'system_exception'), info_module_mock)
+
+ def test_get_managed_device_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['managed_device']
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.managed_device.get = MagicMock()
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.managed_device.get.assert_called()
+
+ def test_get_managed_device_details_throws_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['managed_device']
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.managed_device.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'managed_device_get_error'), info_module_mock)
+
+ def test_get_service_template_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['service_template']
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.service_template.get = MagicMock()
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.service_template.get.assert_called()
+
+ def test_get_service_template_details_throws_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['service_template']
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.service_template.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'service_template_get_error'), info_module_mock)
+
+ def test_get_deployment_details(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['deployment'],
+ "limit": 20
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.deployment.get = MagicMock()
+ info_module_mock.perform_module_operation()
+ info_module_mock.powerflex_conn.deployment.get.assert_called()
+
+ def test_get_deployment_details_throws_exception(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['deployment']
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.deployment.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockInfoApi.get_exception_response(
+ 'deployment_get_error'), info_module_mock)
+
+ def test_get_deployment_details_throws_exception_invalid_sort(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['deployment'],
+ "sort": 'invalid'
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.powerflex_conn.deployment.get = MagicMock(
+ side_effect=MockApiException(INVALID_SORT_MSG)
+ )
+ info_module_mock.perform_module_operation()
+ assert info_module_mock.get_deployments_list() == []
+
+ def test_get_with_multiple_gather_subset(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['deployment', 'service_template'],
+ "sort": 'name', "filters": [{"filter_key": "name", "filter_operator": "equal", "filter_value": "rack"}],
+ })
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.perform_module_operation()
+ assert info_module_mock.populate_filter_list() == []
+ assert info_module_mock.get_param_value('sort') is None
+
+ def test_get_with_invalid_offset_and_limit_for_subset(self, info_module_mock):
+ self.get_module_args.update({
+ "gather_subset": ['deployment'],
+ "limit": -1, "offset": -1
+ })
+ info_module_mock.module.params = self.get_module_args
info_module_mock.perform_module_operation()
- assert MockInfoApi.get_exception_response('replication_pair_get_details') in info_module_mock.module.fail_json.call_args[1]['msg']
+ assert info_module_mock.get_param_value('limit') is None
+ assert info_module_mock.get_param_value('offset') is None
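Several of the new info tests pass `filters` as a list of dictionaries with `filter_key`, `filter_operator` and `filter_value`, while `sort`, `limit` and `offset` are validated separately. Purely as an illustration of that filter shape (the real filtering logic lives in the info module and is not part of this diff), an equal-operator filter over mocked responses could look like:

    # Hypothetical helper, shown only to illustrate the filter dictionaries
    # used in the tests above; not the module's actual implementation.
    def apply_equal_filters(items, filters):
        for flt in filters:
            if flt['filter_operator'] == 'equal':
                items = [item for item in items
                         if item.get(flt['filter_key']) == flt['filter_value']]
        return items

    pools = [{'id': 'test_pool_id_1'}, {'id': 'test_pool_id_2'}]
    filters = [{'filter_key': 'id', 'filter_operator': 'equal',
                'filter_value': 'test_pool_id_1'}]
    assert apply_equal_filters(pools, filters) == [{'id': 'test_pool_id_1'}]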
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py
index ced9fc7f7..fa1dfe641 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_protection_domain.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -8,229 +8,327 @@ from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import pytest
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
from mock.mock import MagicMock
-from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_protection_domain_api import MockProtectionDomainApi
-from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
- import MockSDKResponse
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_protection_domain_api \
+ import MockProtectionDomainApi
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
import MockApiException
-from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
- import utils
-
-utils.get_logger = MagicMock()
-utils.get_powerflex_gateway_host_connection = MagicMock()
-utils.PowerFlexClient = MagicMock()
-from ansible.module_utils import basic
-basic.AnsibleModule = MagicMock()
-from ansible_collections.dellemc.powerflex.plugins.modules.protection_domain import PowerFlexProtectionDomain
-
-
-class TestPowerflexProtectionDomain():
-
- get_module_args = {
- 'hostname': '**.***.**.***',
- 'protection_domain_id': '7bd6457000000000',
- 'protection_domain_name': None,
- 'protection_domain_new_name': None,
- 'is_active': True,
- 'network_limits': {
- 'rebuild_limit': 10240,
- 'rebalance_limit': 10240,
- 'vtree_migration_limit': 10240,
- 'overall_limit': 20480,
- 'bandwidth_unit': 'KBps',
- },
- 'rf_cache_limits': {
- 'is_enabled': None,
- 'page_size': 4,
- 'max_io_limit': 16,
- 'pass_through_mode': 'None'
- },
- 'state': 'present'
- }
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base \
+ import PowerFlexUnitBase
+from ansible_collections.dellemc.powerflex.plugins.modules.protection_domain \
+ import PowerFlexProtectionDomain
+
+
+class TestPowerflexProtectionDomain(PowerFlexUnitBase):
+
+ get_module_args = MockProtectionDomainApi.PD_COMMON_ARGS
@pytest.fixture
- def protection_domain_module_mock(self, mocker):
- mocker.patch(MockProtectionDomainApi.MODULE_UTILS_PATH + '.PowerFlexClient', new=MockApiException)
- protection_domain_module_mock = PowerFlexProtectionDomain()
- return protection_domain_module_mock
-
- def test_get_protection_domain_response(self, protection_domain_module_mock):
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.perform_module_operation()
- protection_domain_module_mock.powerflex_conn.protection_domain.get.assert_called()
-
- def test_create_protection_domain(self, protection_domain_module_mock):
- self.get_module_args.update({
- "protection_domain_name": "test_domain",
- "state": "present"
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.get_protection_domain = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain'][0]
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock(return_values=None)
- protection_domain_module_mock.perform_module_operation()
- assert (self.get_module_args['protection_domain_name'] ==
- protection_domain_module_mock.module.exit_json.call_args[1]["protection_domain_details"]['name'])
- assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True
-
- def test_modify_protection_domain(self, protection_domain_module_mock):
- self.get_module_args.update({
- 'network_limits': {
- 'rebuild_limit': 10,
- 'rebalance_limit': 10,
- 'vtree_migration_limit': 11,
- 'overall_limit': 21,
- 'bandwidth_unit': 'GBps',
- }
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- sp_resp = MockSDKResponse(MockProtectionDomainApi.STORAGE_POOL)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.get_storage_pools = MagicMock(
- return_value=sp_resp.__dict__['data']['storagepool']
- )
- protection_domain_module_mock.perform_module_operation()
- protection_domain_module_mock.powerflex_conn.protection_domain.network_limits.assert_called()
-
- def test_rename_protection_domain(self, protection_domain_module_mock):
- self.get_module_args.update({
- 'protection_domain_new_name': 'new_test_domain'
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.perform_module_operation()
- protection_domain_module_mock.powerflex_conn.protection_domain.rename.assert_called()
-
- def test_inactivate_protection_domain(self, protection_domain_module_mock):
- self.get_module_args.update({
- 'is_active': False
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.perform_module_operation()
- protection_domain_module_mock.powerflex_conn.protection_domain. \
- inactivate.assert_called()
-
- def test_activate_protection_domain(self, protection_domain_module_mock):
- self.get_module_args.update({
- 'is_active': True
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.activate = MagicMock(return_value=None)
- protection_domain_module_mock.perform_module_operation()
- assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True
-
- def test_delete_protection_domain(self, protection_domain_module_mock):
- self.get_module_args.update({
- 'protection_domain_name': 'new_test_domain',
- 'state': 'absent'
- })
- protection_domain_module_mock.module.params = self.get_module_args
- protection_domain_module_mock.get_protection_domain = MagicMock(return_values=None)
- protection_domain_module_mock.perform_module_operation()
- assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True
-
- def test_delete_protection_domain_throws_exception(self, protection_domain_module_mock):
- self.get_module_args.update({
- 'protection_domain_id': '7bd6457000000000',
- 'state': 'absent'
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.delete = MagicMock(
- side_effect=utils.PowerFlexClient)
- protection_domain_module_mock.perform_module_operation()
- assert MockProtectionDomainApi.delete_pd_failed_msg(self.get_module_args['protection_domain_id']) in \
- protection_domain_module_mock.module.fail_json.call_args[1]['msg']
-
- def test_get_with_404_exception(self, protection_domain_module_mock):
- MockProtectionDomainApi.status = 404
- self.get_module_args.update({
- "protection_domain_name": "test_domain1"
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock(
- side_effect=utils.PowerFlexClient)
- protection_domain_module_mock.perform_module_operation()
- assert protection_domain_module_mock.module.exit_json.call_args[1]['changed'] is True
-
- def test_modify_protection_domain_throws_exception(self, protection_domain_module_mock):
- self.get_module_args.update({
- "protection_domain_id": "7bd6457000000000",
- 'rf_cache_limits': {
- 'is_enabled': True,
- 'page_size': 64,
- 'max_io_limit': 128,
- 'pass_through_mode': 'invalid_Read'
- }
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.set_rfcache_enabled = MagicMock(
- side_effect=utils.PowerFlexClient)
- protection_domain_module_mock.perform_module_operation()
- assert MockProtectionDomainApi.modify_pd_with_failed_msg(self.get_module_args['protection_domain_id']) in \
- protection_domain_module_mock.module.fail_json.call_args[1]['msg']
-
- def test_rename_protection_domain_invalid_value(self, protection_domain_module_mock):
- self.get_module_args.update({
- "protection_domain_name": "test_domain",
- "protection_domain_new_name": " test domain",
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.rename = MagicMock(
- side_effect=utils.PowerFlexClient)
- protection_domain_module_mock.perform_module_operation()
- assert MockProtectionDomainApi.rename_pd_failed_msg(self.get_module_args['protection_domain_id']) in \
- protection_domain_module_mock.module.fail_json.call_args[1]['msg']
-
- def test_create_protection_domain_invalid_param(self, protection_domain_module_mock):
- self.get_module_args.update({
- "protection_domain_name": "test_domain1",
- "protection_domain_new_name": "new_domain",
- "state": "present"
- })
- protection_domain_module_mock.module.params = self.get_module_args
- pd_resp = MockSDKResponse(MockProtectionDomainApi.PROTECTION_DOMAIN)
- protection_domain_module_mock.powerflex_conn.protection_domain.get = MagicMock(
- return_value=pd_resp.__dict__['data']['protectiondomain']
- )
- protection_domain_module_mock.powerflex_conn.protection_domain.create = MagicMock(
- side_effect=utils.PowerFlexClient)
- protection_domain_module_mock.perform_module_operation()
- assert MockProtectionDomainApi.version_pd_failed_msg() in \
- protection_domain_module_mock.module.fail_json.call_args[1]['msg']
+ def module_object(self):
+ return PowerFlexProtectionDomain
+
+ @pytest.mark.parametrize("params", [
+ {'protection_domain_id': MockProtectionDomainApi.PD_ID},
+ {"protection_domain_name": MockProtectionDomainApi.PD_NAME},
+ ])
+ def test_get_protection_domain_response(self, powerflex_module_mock, params):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_id': params.get('protection_domain_id', None),
+ 'protection_domain_name': params.get('protection_domain_name', None)
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN
+ )
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.get.assert_called()
+
+ def test_get_pd_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_id': MockProtectionDomainApi.PD_ID,
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockApiException())
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('get_pd_failed_msg'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_validate_input_empty_params(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': ''
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('empty_pd_msg'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_validate_network_limits_params(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'network_limits': {'overall_limit': -199,
+ 'bandwidth_unit': 'MBps'}
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('overall_limit_msg'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_pd_new_name_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'protection_domain_new_name': MockProtectionDomainApi.PD_NAME
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=[]
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('new_name_in_create'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+    def test_create_pd(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.protection_domain.create = MagicMock(
+ return_value=None
+ )
+ resp = powerflex_module_mock.create_protection_domain("protection_domain_name")
+ assert resp is True
+ powerflex_module_mock.powerflex_conn.protection_domain.create.assert_called()
+
+ def test_rename_pd(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'protection_domain_new_name': MockProtectionDomainApi.PD_NEW_NAME,
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.rename.assert_called()
+
+    def test_rename_pd_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'protection_domain_new_name': MockProtectionDomainApi.PD_NEW_NAME,
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.rename = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('rename_pd_exception'),
+ powerflex_module_mock, invoke_perform_module=True
+ )
+
+ def test_inactivate_pd(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'is_active': False
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.inactivate.assert_called()
+
+ def test_activate_pd(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_id': MockProtectionDomainApi.PD_ID,
+ 'is_active': True
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN_1)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.activate.assert_called()
+
+ def test_create_pd_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'is_active': False
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.protection_domain.create = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('create_pd_exception'),
+ powerflex_module_mock, invoke_perform_module=True
+ )
+
+ def test_modify_network_limits(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'network_limits': {'overall_limit': 15,
+ 'bandwidth_unit': 'GBps',
+ 'rebalance_limit': 10,
+ 'rebuild_limit': 10,
+ 'vtree_migration_limit': 10}
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.network_limits.assert_called()
+
+ def test_modify_network_limits_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_id': MockProtectionDomainApi.PD_ID,
+ 'network_limits': {'overall_limit': 15,
+ 'bandwidth_unit': 'GBps',
+ 'rebalance_limit': 10,
+ 'rebuild_limit': 10,
+ 'vtree_migration_limit': 10}
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.network_limits = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('modify_network_limits_exception'),
+ powerflex_module_mock, invoke_perform_module=True
+ )
+
+ def test_enable_rf_cache(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'rf_cache_limits': {'is_enabled': True,
+ 'page_size': 10,
+ 'max_io_limit': 10,
+ 'pass_through_mode': 'Read'}
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.set_rfcache_enabled.assert_called()
+ powerflex_module_mock.powerflex_conn.protection_domain.rfcache_parameters.assert_called()
+
+ def test_modify_rf_cache_params_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'rf_cache_limits': {'is_enabled': True, 'page_size': 10,
+ 'max_io_limit': 10, 'pass_through_mode': 'Read'}
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.set_rfcache_enabled = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('rf_cache_limits_exception'),
+ powerflex_module_mock, invoke_perform_module=True
+ )
+
+ def test_delete_pd(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME,
+ 'state': 'absent'
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.delete.assert_called()
+
+ def test_delete_pd_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_id': MockProtectionDomainApi.PD_ID,
+ 'state': 'absent'
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.delete = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('delete_pd_exception'),
+ powerflex_module_mock, invoke_perform_module=True
+ )
+
+ def test_get_sp(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': MockProtectionDomainApi.PD_NAME
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get_storage_pools = MagicMock(
+ return_value=MockProtectionDomainApi.STORAGE_POOL)
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.protection_domain.get_storage_pools.assert_called()
+
+ def test_get_sp_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_id': MockProtectionDomainApi.PD_ID
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockProtectionDomainApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get_storage_pools = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockProtectionDomainApi.get_failed_msgs('get_sp_exception'),
+ powerflex_module_mock, invoke_perform_module=True)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py
index 334de8942..e1074f282 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_consistency_group.py
@@ -5,7 +5,6 @@
"""Unit Tests for replication consistency group module on PowerFlex"""
from __future__ import (absolute_import, division, print_function)
-from unittest.mock import Mock
__metaclass__ = type
@@ -170,7 +169,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
def test_pause_rcg(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "pause": True,
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'pause',
"pause_mode": "StopDataTransfer", "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
@@ -180,7 +179,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.pause.assert_called()
def test_pause_rcg_throws_exception(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "pause": True,
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'pause',
"pause_mode": "StopDataTransfer", "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
@@ -193,7 +192,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
def test_resume_rcg(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "pause": False, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'resume', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details(pause_mode="StopDataTransfer"))
@@ -201,7 +200,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.resume.assert_called()
def test_resume_rcg_throws_exception(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "pause": False, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'resume', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details(pause_mode="StopDataTransfer"))
@@ -213,7 +212,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
def test_freeze_rcg(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "freeze": True, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'freeze', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
@@ -221,7 +220,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.freeze.assert_called()
def test_freeze_rcg_throws_exception(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "freeze": True, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'freeze', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
@@ -233,7 +232,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
def test_unfreeze_rcg(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "freeze": False, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'unfreeze', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details(freeze_state="Frozen")
@@ -242,7 +241,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.unfreeze.assert_called()
def test_unfreeze_rcg_throws_exception(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "freeze": False, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'unfreeze', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details(freeze_state="Frozen"))
@@ -311,7 +310,7 @@ class TestPowerflexReplicationConsistencyGroup():
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
def test_pause_rcg_without_pause_mode(self, replication_consistency_group_module_mock):
- self.get_module_args.update({"rcg_name": "test_rcg", "pause": True, "state": "present"})
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'pause', "state": "present"})
replication_consistency_group_module_mock.module.params = self.get_module_args
replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
@@ -340,5 +339,109 @@ class TestPowerflexReplicationConsistencyGroup():
return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
replication_consistency_group_module_mock.powerflex_conn.protection_domain.get = MagicMock(return_value=[{"name": "pd_id"}])
replication_consistency_group_module_mock.perform_module_operation()
- assert "Specify pause as True to pause replication consistency group" in \
+ assert "Specify rcg_state as 'pause' to pause replication consistency group" in \
+ replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_failover_rcg(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "failover", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
+ replication_consistency_group_module_mock.perform_module_operation()
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.failover.assert_called()
+
+ def test_failover_rcg_throws_exception(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "failover", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.failover = \
+ MagicMock(side_effect=MockApiException)
+ replication_consistency_group_module_mock.perform_module_operation()
+ assert "Failover replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
+ + MockReplicationConsistencyGroupApi.FAIL_MSG in \
+ replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_restore_rcg(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "restore", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ rcg_details = MockReplicationConsistencyGroupApi.get_rcg_details()
+ rcg_details[0]['failoverType'] = 'Failover'
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(return_value=rcg_details)
+ replication_consistency_group_module_mock.perform_module_operation()
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.restore.assert_called()
+
+ def test_restore_rcg_throws_exception(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "restore", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ rcg_details = MockReplicationConsistencyGroupApi.get_rcg_details()
+ rcg_details[0]['failoverType'] = 'Failover'
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(return_value=rcg_details)
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.restore = \
+ MagicMock(side_effect=MockApiException)
+ replication_consistency_group_module_mock.perform_module_operation()
+ assert "Restore replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
+ + MockReplicationConsistencyGroupApi.FAIL_MSG in \
+ replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_reverse_rcg(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "reverse", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ rcg_details = MockReplicationConsistencyGroupApi.get_rcg_details()
+ rcg_details[0]['failoverType'] = 'Failover'
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(return_value=rcg_details)
+ replication_consistency_group_module_mock.perform_module_operation()
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.reverse.assert_called()
+
+ def test_reverse_rcg_throws_exception(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "reverse", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ rcg_details = MockReplicationConsistencyGroupApi.get_rcg_details()
+ rcg_details[0]['failoverType'] = 'Failover'
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(return_value=rcg_details)
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.reverse = \
+ MagicMock(side_effect=MockApiException)
+ replication_consistency_group_module_mock.perform_module_operation()
+ assert "Reverse replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
+ + MockReplicationConsistencyGroupApi.FAIL_MSG in \
+ replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_switchover_rcg(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "switchover", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
+ replication_consistency_group_module_mock.perform_module_operation()
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.switchover.assert_called()
+
+ def test_switchover_rcg_throws_exception(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": "switchover", "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.switchover = \
+ MagicMock(side_effect=MockApiException)
+ replication_consistency_group_module_mock.perform_module_operation()
+ assert "Switchover replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
+ + MockReplicationConsistencyGroupApi.FAIL_MSG in \
+ replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_sync_rcg(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'sync', "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
+ replication_consistency_group_module_mock.perform_module_operation()
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.sync.assert_called()
+
+ def test_sync_rcg_throws_exception(self, replication_consistency_group_module_mock):
+ self.get_module_args.update({"rcg_name": "test_rcg", "rcg_state": 'sync', "state": "present"})
+ replication_consistency_group_module_mock.module.params = self.get_module_args
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.get = MagicMock(
+ return_value=MockReplicationConsistencyGroupApi.get_rcg_details())
+ replication_consistency_group_module_mock.powerflex_conn.replication_consistency_group.sync = \
+ MagicMock(side_effect=MockApiException)
+ replication_consistency_group_module_mock.perform_module_operation()
+ assert "Synchronization of replication consistency group " + MockReplicationConsistencyGroupApi.RCG_ID \
+ + MockReplicationConsistencyGroupApi.FAIL_MSG in \
replication_consistency_group_module_mock.module.fail_json.call_args[1]['msg']
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_pair.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_pair.py
index 81787de8f..e3c9cf18d 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_pair.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_replication_pair.py
@@ -1,11 +1,10 @@
-# Copyright: (c) 2023, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
"""Unit Tests for replication pair module on PowerFlex"""
from __future__ import (absolute_import, division, print_function)
-from unittest.mock import Mock
__metaclass__ = type
@@ -16,6 +15,8 @@ from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_
import MockApiException
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
import utils
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fail_json \
+ import FailJsonException, fail_json
utils.get_logger = MagicMock()
utils.get_powerflex_gateway_host_connection = MagicMock()
@@ -26,7 +27,7 @@ basic.AnsibleModule = MagicMock()
from ansible_collections.dellemc.powerflex.plugins.modules.replication_pair import PowerFlexReplicationPair
-class TestPowerflexReplicationPair():
+class TestPowerflexReplicationPair:
get_module_args = MockReplicationPairApi.REPLICATION_PAIR_COMMON_ARGS
@@ -37,6 +38,19 @@ class TestPowerflexReplicationPair():
replication_pair_module_mock.module.check_mode = False
return replication_pair_module_mock
+ @pytest.fixture
+ def replication_pair_mock(self):
+ replication_pair_mock = PowerFlexReplicationPair()
+ replication_pair_mock.module.check_mode = False
+ replication_pair_mock.module.fail_json = fail_json
+ return replication_pair_mock
+
+ def capture_fail_json_call(self, error_msg, device_module_mock):
+ try:
+ device_module_mock.perform_module_operation()
+ except FailJsonException as fj_object:
+ assert error_msg in fj_object.message
+
def test_get_pair_details(self, replication_pair_module_mock):
self.get_module_args.update({
"pair_name": "test_pair",
@@ -235,3 +249,115 @@ class TestPowerflexReplicationPair():
replication_pair_module_mock.perform_module_operation()
assert "Failed to get the replication pairs for replication consistency group" in \
replication_pair_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_rcg_application_pairs(self, replication_pair_module_mock):
+ self.get_module_args.update({
+ "rcg_name": "test_rcg",
+ "pairs": [{"source_volume_name": "src_vol", "target_volume_name": "dest_vol", "source_volume_id": None,
+ "target_volume_id": None, "copy_type": "OnlineCopy", "name": "test_pair"}],
+ "state": "present"
+ })
+ replication_pair_module_mock.module.params = self.get_module_args
+ replication_pair_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=MockReplicationPairApi.get_volume_details()
+ )
+ replication_pair_module_mock.get_rcg = MagicMock(return_value={"id": 123, "name": "test_rcg"})
+ replication_pair_module_mock.powerflex_conn.replication_consistency_group.get_replication_pairs = MagicMock(
+ return_value=MockReplicationPairApi.get_pair_details())
+ replication_pair_module_mock.perform_module_operation()
+ replication_pair_module_mock.powerflex_conn.replication_pair.add.assert_called()
+
+ def test_get_rcg_name(self, replication_pair_mock):
+ self.get_module_args.update({
+ "rcg_name": "test_rcg",
+ "pairs": [{"source_volume_name": "src_vol", "target_volume_name": "dest_vol", "source_volume_id": None,
+ "target_volume_id": None, "copy_type": "OnlineCopy", "name": "test_pair"}],
+ "state": "present"
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ replication_pair_mock.powerflex_conn.replication_consistency_group = MagicMock(
+ return_value=[{"name": "test_rcg", "id": 12}]
+ )
+ replication_pair_mock.powerflex_conn.replication_consistency_group.get_replication_pairs = MagicMock(
+ return_value=MockReplicationPairApi.get_pair_details())
+ replication_pair_mock.create_replication_pairs = MagicMock(return_value=True)
+ replication_pair_mock.get_rcg_replication_pairs = MagicMock(return_value={})
+ replication_pair_mock.get_replication_pair = MagicMock(return_value=None)
+ replication_pair_mock.validate_pause_or_resume = MagicMock(return_value=True)
+ replication_pair_mock.perform_module_operation()
+ assert replication_pair_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_get_rcg_id(self, replication_pair_mock):
+ self.get_module_args.update({
+ "rcg_id": 12,
+ "pairs": [{"source_volume_name": "src_vol", "target_volume_name": "dest_vol", "source_volume_id": None,
+ "target_volume_id": None, "copy_type": "OnlineCopy", "name": "test_pair"}],
+ "state": "present"
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ replication_pair_mock.powerflex_conn.replication_consistency_group = MagicMock(
+ return_value=[{"name": "test_rcg", "id": 12}]
+ )
+ replication_pair_mock.powerflex_conn.replication_consistency_group.get_replication_pairs = MagicMock(
+ return_value=MockReplicationPairApi.get_pair_details())
+ replication_pair_mock.create_replication_pairs = MagicMock(return_value=True)
+ replication_pair_mock.get_rcg_replication_pairs = MagicMock(return_value={})
+ replication_pair_mock.get_replication_pair = MagicMock(return_value=None)
+ replication_pair_mock.validate_pause_or_resume = MagicMock(return_value=True)
+ replication_pair_mock.perform_module_operation()
+ assert replication_pair_mock.module.exit_json.call_args[1]['changed'] is True
+
+ def test_get_rcg_exception(self, replication_pair_mock):
+ self.get_module_args.update({
+ "rcg_id": 12,
+ "pairs": [{"source_volume_name": "src_vol", "target_volume_name": "dest_vol", "source_volume_id": None,
+ "target_volume_id": None, "copy_type": "OnlineCopy", "name": "test_pair"}],
+ "state": "present"
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ replication_pair_mock.powerflex_conn.replication_consistency_group.get = MagicMock(side_effect=MockApiException)
+ replication_pair_mock.create_replication_pairs = MagicMock(return_value=None)
+ self.capture_fail_json_call(MockReplicationPairApi.get_error_message('get_rcg_exception'),
+ replication_pair_mock)
+
+ def test_input_validation(self, replication_pair_mock):
+ self.get_module_args.update({
+ "rcg_id": None, "rcg_name": None,
+ "pairs": [{"source_volume_name": "src_vol", "target_volume_name": "dest_vol", "source_volume_id": None,
+ "target_volume_id": None, "copy_type": "OnlineCopy", "name": "test_pair"}],
+ "state": "present"
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ replication_pair_mock.validate_pairs = MagicMock(return_value=None)
+ self.capture_fail_json_call(MockReplicationPairApi.get_error_message('get_rcg_id_name_error'),
+ replication_pair_mock)
+ self.get_module_args.update({
+ "rcg_name": "test_rcg", "pairs": None, "state": "present", "pause": False,
+ "pair_id": None, "pair_name": None
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockReplicationPairApi.get_error_message('get_pause_error'),
+ replication_pair_mock)
+ self.get_module_args.update({
+ "rcg_name": "test_rcg", "pairs": None, "state": "present", "pause": False,
+ "pair_id": None, "pair_name": None
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ replication_pair_mock.validate_input = MagicMock(return_value=None)
+ self.capture_fail_json_call(MockReplicationPairApi.get_error_message('get_pause_or_resume_error'),
+ replication_pair_mock)
+
+ def test_get_volume_exception(self, replication_pair_mock):
+ self.get_module_args.update({
+ "rcg_name": "test_rcg", "pairs": None, "state": "present", "pause": False,
+ "pair_id": None, "pair_name": "test_pair"
+ })
+ replication_pair_mock.module.params = self.get_module_args
+ replication_pair_mock.validate_input = MagicMock(return_value=None)
+ replication_pair_mock.powerflex_conn.replication_consistency_group.get_replication_pairs = MagicMock(
+ return_value=MockReplicationPairApi.get_pair_details())
+ replication_pair_mock.powerflex_conn.volume.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockReplicationPairApi.get_error_message('get_volume_exception'),
+ replication_pair_mock)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sdc.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sdc.py
new file mode 100644
index 000000000..dbf9ac1e0
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sdc.py
@@ -0,0 +1,192 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for SDC module on PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
+from mock.mock import MagicMock
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdc_api import MockSdcApi
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fail_json \
+ import FailJsonException, fail_json
+
+utils.get_logger = MagicMock()
+utils.get_powerflex_gateway_host_connection = MagicMock()
+utils.PowerFlexClient = MagicMock()
+
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.powerflex.plugins.modules.sdc import PowerFlexSdc
+
+
+class TestPowerflexSdc:
+
+ get_module_args = MockSdcApi.COMMON_ARGS
+
+ @pytest.fixture
+ def sdc_module_mock(self, mocker):
+ mocker.patch(
+ MockSdcApi.MODULE_UTILS_PATH + '.PowerFlexClient',
+ new=MockApiException)
+ sdc_module_mock = PowerFlexSdc()
+ sdc_module_mock.module.check_mode = False
+ sdc_module_mock.module.fail_json = fail_json
+ return sdc_module_mock
+
+ def capture_fail_json_call(self, error_msg, sdc_module_mock):
+ try:
+ sdc_module_mock.perform_module_operation()
+ except FailJsonException as fj_object:
+ assert error_msg in fj_object.message
+
+ def test_get_sdc_details(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.perform_module_operation()
+ sdc_module_mock.powerflex_conn.sdc.get.assert_called()
+
+ def test_get_sdc_details_empty_sdc_id_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_id": " ",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'get_sdc_details_empty_sdc_id_exception'), sdc_module_mock)
+
+ def test_get_sdc_details_with_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_id": MockSdcApi.SDC_ID,
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'get_sdc_details_with_exception'), sdc_module_mock)
+
+ def test_get_sdc_details_mapped_volumes_with_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_id": MockSdcApi.SDC_ID,
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.get_mapped_volumes = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'get_sdc_details_mapped_volumes_with_exception'), sdc_module_mock)
+
+ def test_modify_sdc(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "performance_profile": "Compact",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.set_performance_profile = MagicMock(return_value=True)
+ sdc_module_mock.perform_module_operation()
+ sdc_module_mock.powerflex_conn.sdc.set_performance_profile.assert_called()
+
+ def test_modify_sdc_throws_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "performance_profile": "Compact",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.set_performance_profile = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'modify_sdc_throws_exception'), sdc_module_mock)
+
+ def test_rename_sdc(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "sdc_new_name": "test_sdc_renamed",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.rename = MagicMock(return_value=True)
+ sdc_module_mock.perform_module_operation()
+ sdc_module_mock.powerflex_conn.sdc.rename.assert_called()
+
+ def test_rename_sdc_empty_new_name_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "sdc_new_name": " ",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'rename_sdc_empty_new_name_exception'), sdc_module_mock)
+
+ def test_rename_sdc_throws_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "sdc_new_name": "test_sdc_renamed",
+ "state": "present"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.rename = MagicMock(side_effect=MockApiException)
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'rename_sdc_throws_exception'), sdc_module_mock)
+
+ def test_remove_sdc(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_name": "test_sdc",
+ "state": "absent"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.delete = MagicMock(return_value=True)
+ sdc_module_mock.perform_module_operation()
+ sdc_module_mock.powerflex_conn.sdc.delete.assert_called()
+
+ def test_remove_sdc_throws_exception(self, sdc_module_mock):
+ self.get_module_args.update({
+ "sdc_ip": "1.1.1.1",
+ "state": "absent"
+ })
+ sdc_module_mock.module.params = self.get_module_args
+ sdc_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockSdcApi.get_sdc_details()
+ )
+ sdc_module_mock.powerflex_conn.sdc.delete = MagicMock(side_effect=MockApiException)
+ self.capture_fail_json_call(MockSdcApi.get_sdc_exception_response(
+ 'remove_sdc_throws_exception'), sdc_module_mock)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sds.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sds.py
new file mode 100644
index 000000000..30e9a589f
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_sds.py
@@ -0,0 +1,630 @@
+# Copyright: (c) 2024, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for SDS module on PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
+from mock.mock import MagicMock
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sds_api import MockSDSApi
+from ansible_collections.dellemc.powerflex.plugins.modules.sds import \
+ SDSHandler
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKResponse
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base \
+ import PowerFlexUnitBase
+
+from ansible_collections.dellemc.powerflex.plugins.modules.sds import PowerFlexSDS
+
+
+class TestPowerflexSDS(PowerFlexUnitBase):
+
+ get_module_args = MockSDSApi.SDS_COMMON_ARGS
+ ip1 = "10.47.xxx.xxx"
+ ip2 = "10.46.xxx.xxx"
+
+ @pytest.fixture
+ def module_object(self):
+ return PowerFlexSDS
+
+ def test_get_sds_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'sds_id': '8f3bb0cc00000002',
+ 'state': 'present'
+ })
+ powerflex_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST)
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.get.assert_called()
+
+ def test_get_sds_name_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'state': "present"
+ })
+ powerflex_module_mock.powerflex_conn.sds.get = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST)
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.get.assert_called()
+
+ def test_get_sds_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'state': "present"
+ })
+ powerflex_module_mock.powerflex_conn.sds.get = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'get_sds_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'performance_profile': "HighPerformance",
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=None)
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.create.assert_called()
+
+ def test_create_sds_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'performance_profile': "HighPerformance",
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ powerflex_module_mock.get_fault_set = MagicMock(
+ return_value=MockSDSApi.FAULT_SET_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.sds.create = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_rename_sds_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_new_name": "node0_new",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.rename.assert_called()
+
+ def test_modify_rfcache_enabled_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "rfcache_enabled": False,
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.set_rfcache_enabled.assert_called()
+
+ def test_modify_rmcache_enabled_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "rmcache_enabled": False,
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.set_rmcache_enabled.assert_called()
+
+ def test_modify_rmcache_size_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "rmcache_size": 256,
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.set_rmcache_size.assert_called()
+
+ def test_rmcache_size_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': False,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'rmcache_size_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_wo_sds_name_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'sds_name': None,
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_wo_sds_name'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_wo_pd_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=None)
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_wo_pd'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_wo_sds_ip_list_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list': [],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_wo_sds_ip_list'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_incorrect_sds_ip_state_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'absent-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_incorrect_sds_ip_state'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_sds_id_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'sds_id': "sds_id_1",
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_sds_id'), powerflex_module_mock, SDSHandler)
+
+ def test_create_sds_sds_new_name_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'sds_new_name': "sds_new_name",
+ 'protection_domain_name': 'test_domain',
+ 'fault_set_name': 'fault_set_name',
+ 'sds_ip_list':
+ [
+ {
+ 'ip': self.ip1,
+ 'role': "all"
+ }
+ ],
+ 'sds_ip_state': 'present-in-sds',
+ 'rmcache_enabled': True,
+ 'rmcache_size': 128,
+ 'state': "present"
+ })
+ pd_resp = MockSDKResponse(MockSDSApi.PROTECTION_DOMAIN)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=pd_resp.__dict__['data']['protectiondomain'])
+ fs_resp = MockSDSApi.FAULT_SET_GET_LIST
+ powerflex_module_mock.powerflex_conn.fault_set.get = MagicMock(
+ return_value=fs_resp)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'create_sds_sds_new_name'), powerflex_module_mock, SDSHandler)
+
+ def test_modify_performance_profile_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "performance_profile": "Compact",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.set_performance_parameters.assert_called()
+
+ def test_rename_sds_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_new_name": "node0_new",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.sds.rename = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'rename_sds_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_rename_sds_empty_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_new_name": "",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'rename_sds_empty_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_update_role_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": self.ip2,
+ "role": "all"
+ }
+ ],
+ "sds_ip_state": "present-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.set_ip_role.assert_called()
+
+ def test_update_role_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": self.ip2,
+ "role": "all"
+ }
+ ],
+ "sds_ip_state": "present-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.sds.set_ip_role = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'set_ip_role_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_add_ip_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": "10.xx.xx.xx",
+ "role": "all"
+ }
+ ],
+ "sds_ip_state": "present-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.add_ip.assert_called()
+
+ def test_add_ip_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": "10.xx.xx.xx",
+ "role": "all"
+ }
+ ],
+ "sds_ip_state": "present-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.sds.add_ip = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'add_ip_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_remove_ip_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": self.ip2,
+ "role": "sdcOnly"
+ }
+ ],
+ "sds_ip_state": "absent-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.remove_ip.assert_called()
+
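+    # Idempotency check: removing an IP that the SDS does not have should leave the
+    # configuration unchanged, so only the get() lookup is expected here.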
+ def test_remove_ip_idempotent(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": "10.45.xxx.xxx",
+ "role": "sdcOnly"
+ }
+ ],
+ "sds_ip_state": "absent-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.get.assert_called()
+
+ def test_remove_ip_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "sds_ip_list":
+ [
+ {
+ "ip": self.ip2,
+ "role": "sdcOnly"
+ }
+ ],
+ "sds_ip_state": "absent-in-sds",
+ "state": "present"
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.sds.remove_ip = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'remove_ip_exception'), powerflex_module_mock, SDSHandler)
+
+ def test_delete_sds_response(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'state': 'absent'
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ SDSHandler().handle(
+ powerflex_module_mock, powerflex_module_mock.module.params)
+ powerflex_module_mock.powerflex_conn.sds.delete.assert_called()
+
+    def test_delete_sds_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ 'state': 'absent'
+ })
+ powerflex_module_mock.get_sds_details = MagicMock(
+ return_value=MockSDSApi.SDS_GET_LIST[0])
+ powerflex_module_mock.powerflex_conn.sds.delete = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockSDSApi.get_sds_exception_response(
+ 'delete_sds_exception'), powerflex_module_mock, SDSHandler)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_snapshot_policy.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_snapshot_policy.py
new file mode 100644
index 000000000..275b7b007
--- /dev/null
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_snapshot_policy.py
@@ -0,0 +1,502 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Unit Tests for snapshot policy module on PowerFlex"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_snapshot_policy_api import MockSnapshotPolicyApi
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
+ import MockApiException
+from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
+ import utils
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_fail_json \
+ import FailJsonException, fail_json
+
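+# Patch the logging and gateway-connection helpers up front so the module under test can be
+# imported without a live PowerFlex endpoint.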
+utils.get_logger = MagicMock()
+utils.get_powerflex_gateway_host_connection = MagicMock()
+utils.PowerFlexClient = MagicMock()
+
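+# AnsibleModule itself is mocked so that instantiating the module does not parse real
+# command-line arguments.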
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.powerflex.plugins.modules.snapshot_policy import PowerFlexSnapshotPolicy, SnapshotPolicyHandler
+
+
+class TestPowerflexSnapshotPolicy():
+
+ get_module_args = MockSnapshotPolicyApi.SNAPSHOT_POLICY_COMMON_ARGS
+
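+    # Fixture: a PowerFlexSnapshotPolicy instance with check_mode disabled and fail_json
+    # patched to raise FailJsonException instead of exiting.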
+ @pytest.fixture
+ def snapshot_policy_module_mock(self, mocker):
+ snapshot_policy_module_mock = PowerFlexSnapshotPolicy()
+ snapshot_policy_module_mock.module.check_mode = False
+ snapshot_policy_module_mock.module.fail_json = fail_json
+ return snapshot_policy_module_mock
+
+ def capture_fail_json_call(self, error_msg, snapshot_policy_module_mock):
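+        # Drive the handler and compare the captured fail_json message against the expected
+        # error; note the assertion only runs if FailJsonException is actually raised.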
+ try:
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ except FailJsonException as fj_object:
+ assert error_msg == fj_object.message
+
+ def test_get_snapshot_policy_detail_using_name(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get.assert_called()
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics.assert_called()
+
+ def test_get_snapshot_policy_detail_using_id(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_id": "testing",
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get.assert_called()
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics.assert_called()
+
+ def test_get_snapshot_policy_details_with_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('get_snapshot_policy_details_exception'),
+ snapshot_policy_module_mock)
+
+ def test_create_snapshot_policy_using_name(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "access_mode": "ReadOnly",
+ "secure_snapshots": True,
+ "auto_snapshot_creation_cadence": {
+ "time": 1,
+ "unit": "Hour"},
+ "num_of_retained_snapshots_per_level": [20],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=None
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.create.assert_called()
+
+ def test_create_snapshot_policy_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "access_mode": "ReadOnly",
+ "secure_snapshots": True,
+ "auto_snapshot_creation_cadence": {
+ "time": 1,
+ "unit": "Hour"},
+ "num_of_retained_snapshots_per_level": [20],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=None
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.create = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('create_exception'),
+ snapshot_policy_module_mock)
+
+ def test_create_snapshot_policy_with_id_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_id": "testing",
+ "access_mode": "ReadOnly",
+ "secure_snapshots": True,
+ "auto_snapshot_creation_cadence": {
+ "time": 1,
+ "unit": "Hour"},
+ "num_of_retained_snapshots_per_level": [20],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=None
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('create_id_exception'),
+ snapshot_policy_module_mock)
+
+ def test_delete_snapshot_policy(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "state": "absent"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.delete.assert_called()
+
+ def test_delete_snapshot_policy_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "state": "absent"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.delete = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('delete_exception'),
+ snapshot_policy_module_mock)
+
+ def test_modify_snapshot_policy(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "auto_snapshot_creation_cadence": {
+ "time": 20,
+ "unit": "Minute"},
+ "num_of_retained_snapshots_per_level": [30],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.modify.assert_called()
+
+ def test_modify_snapshot_policy_wo_snapshot_rule(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "num_of_retained_snapshots_per_level": [30],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.modify.assert_called()
+
+ def test_modify_snapshot_policy_wo_retention_rule(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "auto_snapshot_creation_cadence": {
+ "time": 20,
+ "unit": "Minute"},
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.modify.assert_called()
+
+ def test_modify_snapshot_policy_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "auto_snapshot_creation_cadence": {
+ "time": 20,
+ "unit": "Minute"},
+ "num_of_retained_snapshots_per_level": [30],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.modify = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('modify_exception'),
+ snapshot_policy_module_mock)
+
+ def test_rename_snapshot_policy(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "new_name": "testing_new",
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.rename.assert_called()
+
+ def test_rename_snapshot_policy_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "new_name": "testing_new",
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.rename = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('modify_exception'),
+ snapshot_policy_module_mock)
+
+ def test_add_source_volume(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "source_volume": [{
+ "name": "source_volume_name",
+ "id": None,
+ "auto_snap_removal_action": None,
+ "detach_locked_auto_snapshots": None,
+ "state": "present"}],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.VOLUME_GET_LIST
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.add_source_volume.assert_called()
+
+ def test_add_non_existing_volume_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "source_volume": [{
+ "name": "non_existing_source_volume_name",
+ "id": None,
+ "auto_snap_removal_action": None,
+ "detach_locked_auto_snapshots": None}],
+ "source_volume_state": "present",
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=[]
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('add_non_existing_source_volume'),
+ snapshot_policy_module_mock)
+
+ def test_add_source_volume_wo_id_or_name_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "source_volume": [{
+ "name": None,
+ "id": None,
+ "auto_snap_removal_action": None,
+ "detach_locked_auto_snapshots": None,
+ "state": "present"}],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('add_source_volume_wo_vol'),
+ snapshot_policy_module_mock)
+
+    def test_add_source_volume_with_id_and_name_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "source_volume": [{
+ "name": "source_volume_name",
+ "id": "source_volume_id",
+ "auto_snap_removal_action": None,
+ "detach_locked_auto_snapshots": None,
+ "state": "present"}],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('add_source_volume_vol_id_name'),
+ snapshot_policy_module_mock)
+
+ def test_add_source_volume_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing",
+ "source_volume": [{
+ "name": "source_volume_name",
+ "id": None,
+ "auto_snap_removal_action": None,
+ "detach_locked_auto_snapshots": None,
+ "state": "present"}],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.VOLUME_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.add_source_volume = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('source_volume_exception'),
+ snapshot_policy_module_mock)
+
+ def test_remove_source_volume(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing_2",
+ "source_volume": [{
+ "id": "source_volume_id_2",
+ "name": None,
+ "auto_snap_removal_action": 'Remove',
+ "detach_locked_auto_snapshots": None,
+ "state": "absent"}],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_2_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.VOLUME_2_GET_LIST
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.remove_source_volume.assert_called()
+
+ def test_pause_snapshot_policy(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing_2",
+ "pause": True,
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.pause.assert_called()
+
+ def test_resume_snapshot_policy(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing_2",
+ "pause": False,
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_2_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ SnapshotPolicyHandler().handle(snapshot_policy_module_mock, snapshot_policy_module_mock.module.params)
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.resume.assert_called()
+
+ def test_pause_snapshot_policy_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing_2",
+ "pause": True,
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_2_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+        snapshot_policy_module_mock.powerflex_conn.snapshot_policy.pause = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('pause_exception'),
+ snapshot_policy_module_mock)
+
+ def test_remove_source_volume_exception(self, snapshot_policy_module_mock):
+ self.get_module_args.update({
+ "snapshot_policy_name": "testing_2",
+ "source_volume": [{
+ "id": "source_volume_id_2",
+ "name": None,
+ "auto_snap_removal_action": 'Remove',
+ "detach_locked_auto_snapshots": None,
+ "state": "absent"}],
+ "state": "present"
+ })
+ snapshot_policy_module_mock.module.params = self.get_module_args
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_2_GET_LIST
+ )
+ snapshot_policy_module_mock.powerflex_conn.snapshot_policy.get_statistics = MagicMock(
+ return_value=MockSnapshotPolicyApi.SNAPSHOT_POLICY_STATISTICS
+ )
+ snapshot_policy_module_mock.powerflex_conn.volume.get = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(MockSnapshotPolicyApi.get_snapshot_policy_exception_response('get_vol_details_exception'),
+ snapshot_policy_module_mock)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py
index a2c463f66..6780ed7ad 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_storagepool.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -10,13 +10,15 @@ __metaclass__ = type
import pytest
from mock.mock import MagicMock
+# pylint: disable=unused-import
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries import initial_mock
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_storagepool_api import MockStoragePoolApi
-from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
- import MockSDKResponse
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
import MockApiException
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
import utils
+from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.libraries.powerflex_unit_base \
+ import PowerFlexUnitBase
utils.get_logger = MagicMock()
utils.get_powerflex_gateway_host_connection = MagicMock()
@@ -27,46 +29,355 @@ basic.AnsibleModule = MagicMock()
from ansible_collections.dellemc.powerflex.plugins.modules.storagepool import PowerFlexStoragePool
-class TestPowerflexStoragePool():
+class TestPowerflexStoragePool(PowerFlexUnitBase):
get_module_args = MockStoragePoolApi.STORAGE_POOL_COMMON_ARGS
@pytest.fixture
- def storagepool_module_mock(self, mocker):
- storagepool_module_mock = PowerFlexStoragePool()
- storagepool_module_mock.module.check_mode = False
- return storagepool_module_mock
+ def module_object(self):
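+        # PowerFlexUnitBase is assumed to build the shared powerflex_module_mock fixture from
+        # the class returned by this fixture.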
+ return PowerFlexStoragePool
- def test_get_storagepool_details(self, storagepool_module_mock):
+ def test_get_storagepool_details(self, powerflex_module_mock):
self.get_module_args.update({
"storage_pool_name": "test_pool",
"state": "present"
})
- storagepool_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.module.params = self.get_module_args
storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
- storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
return_value=storagepool_resp
)
storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
- storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
return_value=storagepool_statistics_resp
)
- storagepool_module_mock.perform_module_operation()
- storagepool_module_mock.powerflex_conn.storage_pool.get.assert_called()
- storagepool_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called()
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.storage_pool.get.assert_called()
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics.assert_called()
- def test_get_storagepool_details_with_exception(self, storagepool_module_mock):
+ def test_get_storagepool_details_multi(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_MULTI_LIST
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storagepool_resp
+ )
+ storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=storagepool_statistics_resp
+ )
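+        # invoke_perform_module=True asks the shared helper (assumed to be provided by
+        # PowerFlexUnitBase) to drive perform_module_operation() instead of a resource handler.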
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('get_multi_details'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_get_storagepool_details_with_exception(self, powerflex_module_mock):
self.get_module_args.update({
"storage_pool_name": "test_pool"
})
- storagepool_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.module.params = self.get_module_args
storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
- storagepool_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
return_value=storagepool_resp
)
- storagepool_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
side_effect=MockApiException
)
- storagepool_module_mock.create_storage_pool = MagicMock(return_value=None)
- storagepool_module_mock.perform_module_operation()
- assert MockStoragePoolApi.get_exception_response('get_details') in storagepool_module_mock.module.fail_json.call_args[1]['msg']
+ powerflex_module_mock.create_storage_pool = MagicMock(return_value=None)
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('get_details'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ @pytest.mark.parametrize("params", [
+ {"pd_id": "4eeb304600000000"},
+ {"pd_name": "test"},
+ ])
+ def test_get_protection_domain(self, powerflex_module_mock, params):
+ pd_id = params.get("pd_id", None)
+ pd_name = params.get("pd_name", None)
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS
+ )
+ pd_details = powerflex_module_mock.get_protection_domain(pd_name, pd_id)
+ assert MockStoragePoolApi.PROTECTION_DETAILS[0] == pd_details
+
+ def test_get_protection_domain_exception(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "storage_pool_name": "test_pool",
+ "protection_domain_id": "4eeb304600000001",
+ "state": "present"
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ side_effect=MockApiException)
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('get_pd_exception'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_get_protection_domain_non_exist(self, powerflex_module_mock):
+ self.set_module_params(
+ powerflex_module_mock,
+ self.get_module_args,
+ {
+ "storage_pool_name": "test_pool",
+ "protection_domain_id": "4eeb304600000001",
+ "state": "present"
+ })
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=None)
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('get_pd_non_exist'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_get_storagepool_details_with_invalid_pd_id(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "protection_domain_id": "4eeb304600000001"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storagepool_resp
+ )
+ storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=storagepool_statistics_resp
+ )
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('invalid_pd_id'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_storagepool_response(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "protection_domain_name": "test_pd_name",
+ "media_type": "HDD",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1)
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.create = MagicMock(
+ return_value=None
+ )
+ resp = powerflex_module_mock.create_storage_pool(pool_name="test_pool",
+ pd_id=MockStoragePoolApi.PROTECTION_DETAILS_1[0]['id'],
+ media_type="HDD")
+ assert resp is True
+ powerflex_module_mock.powerflex_conn.storage_pool.create.assert_called()
+
+ def test_create_storagepool_only_pool_id(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_id": "test_pool_id",
+ "protection_domain_name": "test_pd_name",
+ "media_type": "HDD",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1)
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=[]
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('create_pool_id'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_storagepool_new_name(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "storage_pool_new_name": "pool_new_name",
+ "protection_domain_name": "test_pd_name",
+ "media_type": "HDD",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1)
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=[]
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('create_pool_new_name'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_storagepool_empty_name(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": " ",
+ "protection_domain_name": "test_pd_name",
+ "media_type": "HDD",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1)
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('create_pool_name_empty'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_storagepool_wo_pd(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "media_type": "HDD",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=None)
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=[]
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('create_wo_pd'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_storagepool_transitional_exception(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "protection_domain_name": "test_pd_name",
+ "media_type": "TRANSITIONAL",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1)
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.create = MagicMock(
+ return_value=None
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('create_transitional'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_create_storagepool_exception(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "protection_domain_name": "test_pd_name",
+ "media_type": "HDD",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ powerflex_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockStoragePoolApi.PROTECTION_DETAILS_1)
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=[]
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.create = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('create_storage_pool'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_modify_storagepool_details(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "storage_pool_new_name": "new_ansible_pool",
+ "use_rfcache": True,
+ "use_rmcache": True,
+ "media_type": "TRANSITIONAL",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storagepool_resp
+ )
+ storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=storagepool_statistics_resp
+ )
+ powerflex_module_mock.perform_module_operation()
+ powerflex_module_mock.powerflex_conn.storage_pool.rename.assert_called()
+ powerflex_module_mock.powerflex_conn.storage_pool.set_use_rmcache.assert_called()
+ powerflex_module_mock.powerflex_conn.storage_pool.set_use_rfcache.assert_called()
+ powerflex_module_mock.powerflex_conn.storage_pool.set_media_type.assert_called()
+
+ def test_rename_storagepool_exception(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "storage_pool_new_name": "new_ansible_pool",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storagepool_resp
+ )
+ storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=storagepool_statistics_resp
+ )
+ powerflex_module_mock.powerflex_conn.storage_pool.rename = MagicMock(
+ side_effect=MockApiException
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('rename_storage_pool'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_rename_storagepool_empty_exception(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "storage_pool_new_name": " ",
+ "state": "present"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storagepool_resp
+ )
+ storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=storagepool_statistics_resp
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('rename_storage_pool_empty'),
+ powerflex_module_mock, invoke_perform_module=True)
+
+ def test_delete_storagepool_exception(self, powerflex_module_mock):
+ self.get_module_args.update({
+ "storage_pool_name": "test_pool",
+ "state": "absent"
+ })
+ powerflex_module_mock.module.params = self.get_module_args
+ storagepool_resp = MockStoragePoolApi.STORAGE_POOL_GET_LIST
+ powerflex_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ return_value=storagepool_resp
+ )
+ storagepool_statistics_resp = MockStoragePoolApi.STORAGE_POOL_STATISTICS
+ powerflex_module_mock.powerflex_conn.storage_pool.get_statistics = MagicMock(
+ return_value=storagepool_statistics_resp
+ )
+ self.capture_fail_json_call(
+ MockStoragePoolApi.get_exception_response('delete_storage_pool'),
+ powerflex_module_mock, invoke_perform_module=True)
diff --git a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py
index 53cdcfc0d..e36c13695 100644
--- a/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py
+++ b/ansible_collections/dellemc/powerflex/tests/unit/plugins/modules/test_volume.py
@@ -1,4 +1,4 @@
-# Copyright: (c) 2022, Dell Technologies
+# Copyright: (c) 2024, Dell Technologies
# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
@@ -11,8 +11,6 @@ __metaclass__ = type
import pytest
from mock.mock import MagicMock
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_volume_api import MockVolumeApi
-from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_sdk_response \
- import MockSDKResponse
from ansible_collections.dellemc.powerflex.tests.unit.plugins.module_utils.mock_api_exception \
import MockApiException
from ansible_collections.dellemc.powerflex.plugins.module_utils.storage.dell \
@@ -78,4 +76,662 @@ class TestPowerflexVolume():
)
volume_module_mock.create_volume = MagicMock(return_value=None)
volume_module_mock.perform_module_operation()
- assert MockVolumeApi.get_exception_response('get_details') in volume_module_mock.module.fail_json.call_args[1]['msg']
+ assert MockVolumeApi.get_exception_response(
+ 'get_details') in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ @pytest.mark.parametrize("params", [
+ {"pd_id": "123"},
+ {"pd_name": "test"},
+ ])
+ def test_get_protection_domain(self, volume_module_mock, params):
+ pd_id = params.get("pd_id", None)
+ pd_name = params.get("pd_name", None)
+ volume_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=MockVolumeApi.PROTECTION_DETAILS
+ )
+ pd_details = volume_module_mock.get_protection_domain(pd_name, pd_id)
+ assert MockVolumeApi.PROTECTION_DETAILS[0] == pd_details
+
+    def test_get_protection_domain_exception(self, volume_module_mock):
+ pd_id = "pd_id"
+ pd_name = "pd_name"
+ volume_module_mock.powerflex_conn.protection_domain.get = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.get_protection_domain(pd_name, pd_id)
+ assert MockVolumeApi.get_exception_response(
+ 'get_pd_exception') in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ @pytest.mark.parametrize("params", [
+ {"pol_id": "123"},
+ {"pol_name": "test"},
+ ])
+ def test_get_snapshot_policy(self, volume_module_mock, params):
+ pol_id = params.get("pol_id", None)
+ pol_name = params.get("pol_name", None)
+ volume_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=MockVolumeApi.PROTECTION_DETAILS
+ )
+ snap_pol_details = volume_module_mock.get_snapshot_policy(
+ pol_id, pol_name)
+ assert MockVolumeApi.PROTECTION_DETAILS[0] == snap_pol_details
+
+ def test_get_snapshot_policy_exception(self, volume_module_mock):
+ pol_id = "pol_id"
+ pol_name = "pol_name"
+ volume_module_mock.powerflex_conn.snapshot_policy.get = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.get_snapshot_policy(pol_id, pol_name)
+ assert MockVolumeApi.get_exception_response(
+ 'get_sp_exception') in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ @pytest.mark.parametrize("params", [
+ {"pol_id": "123", "prot_id": "123"},
+ ])
+ def test_get_storage_pool(self, volume_module_mock, params):
+ pol_id = params.get("pol_id", None)
+ pol_name = params.get("pol_name", None)
+ prot_id = params.get("prot_id", None)
+
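+        # side_effect helper: return the single-entry protection-domain list when the call
+        # filters by protectionDomainId, otherwise return the multi-entry list.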
+ def mock_get(filter_fields):
+ if filter_fields.get("protectionDomainId", None):
+ return MockVolumeApi.PROTECTION_DETAILS
+ else:
+ return MockVolumeApi.PROTECTION_DETAILS_MULTI
+
+ volume_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ side_effect=mock_get
+ )
+ sp_details = volume_module_mock.get_storage_pool(
+ pol_id, pol_name, prot_id)
+ assert MockVolumeApi.PROTECTION_DETAILS[0] == sp_details
+
+ @pytest.mark.parametrize("params", [
+ {"pol_id": "123", "assert_msg": "get_spool_error1"},
+ {"pol_name": "123", "prot_id": "123", "assert_msg": "get_spool_error2"},
+ ])
+ def test_get_storage_pool_error(self, volume_module_mock, params):
+ pol_id = params.get("pol_id", None)
+ pol_name = params.get("pol_name", None)
+ prot_id = params.get("prot_id", None)
+
+ def mock_get(filter_fields):
+ if filter_fields.get("protectionDomainId", None):
+ return None
+ else:
+ return MockVolumeApi.PROTECTION_DETAILS_MULTI
+
+ volume_module_mock.powerflex_conn.storage_pool.get = MagicMock(
+ side_effect=mock_get
+ )
+ volume_module_mock.get_storage_pool(pol_id, pol_name, prot_id)
+ assert MockVolumeApi.get_exception_response(params.get(
+ "assert_msg")) in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ @pytest.mark.parametrize("params", [
+ {"vol_name": "123", "assert_msg": "get_spool_error1"}
+ ])
+ def test_get_volume(self, volume_module_mock, params):
+ vol_name = params.get("vol_name", None)
+ vol_id = params.get("vol_id", None)
+ volume_module_mock.powerflex_conn.volume.get = MagicMock(
+ return_value=MockVolumeApi.VOLUME_GET_LIST
+ )
+ volume_module_mock.get_snapshot_policy = MagicMock(
+ return_value={"name": "snapshotPolicyName"}
+ )
+ volume_details = volume_module_mock.get_volume(vol_name, vol_id)
+ assert volume_details["snapshotPolicyId"] == MockVolumeApi.VOLUME_GET_LIST[0]["snapshotPolicyId"]
+ assert volume_details["snapshotPolicyName"] == MockVolumeApi.VOLUME_GET_LIST[0]["snapshotPolicyName"]
+
+    def test_get_volume_exception(self, volume_module_mock):
+ volume_module_mock.powerflex_conn.volume.get = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.get_snapshot_policy = MagicMock(
+ return_value={"name": "snapshotPolicyName"}
+ )
+ volume_module_mock.get_volume("test_id_1", "test_id_1")
+ assert MockVolumeApi.get_exception_response(
+ "get_details") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
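+    # SDC lookup may be driven by sdc_id, sdc_ip or sdc_name; an empty
+    # result from the API is reported as a failure.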
+ @pytest.mark.parametrize("params", [
+ {"sdc_id": "sdc_id"},
+ {"sdc_ip": "sdc_ip"},
+ ])
+ def test_get_sdc_id(self, volume_module_mock, params):
+ sdc_name = params.get("sdc_name", None)
+ sdc_ip = params.get("sdc_ip", None)
+ sdc_id = params.get("sdc_id", None)
+ volume_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=MockVolumeApi.SDC_RESPONSE
+ )
+ sdc_details = volume_module_mock.get_sdc_id(sdc_name, sdc_ip, sdc_id)
+ assert MockVolumeApi.SDC_RESPONSE[0]['id'] == sdc_details
+
+ def test_get_sdc_id_error(self, volume_module_mock):
+ sdc_name = "sdc_name"
+ sdc_ip = "sdc_ip"
+ sdc_id = "sdc_id"
+ volume_module_mock.powerflex_conn.sdc.get = MagicMock(
+ return_value=[]
+ )
+ volume_module_mock.get_sdc_id(sdc_name, sdc_ip, sdc_id)
+ assert MockVolumeApi.get_exception_response(
+ "get_sds") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
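+    # create_volume input validation: an empty name, an invalid compression
+    # type and a missing size each fail with their own error message.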
+ def test_create_volume_error_vol_name(self, volume_module_mock):
+ volume_module_mock.create_volume("", "pool_id", 1024)
+ assert MockVolumeApi.get_exception_response(
+ "create_vol_name") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_create_volume_error_comp_type(self, volume_module_mock):
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_STORAGE_POOL
+ )
+ volume_module_mock.create_volume(
+ "vol_name", "pool_id", 1024, comp_type="comp_type")
+ assert MockVolumeApi.get_exception_response(
+ "create_vol_ctype") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_create_volume_error_size(self, volume_module_mock):
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_STORAGE_POOL
+ )
+ volume_module_mock.create_volume("vol_name", "pool_id", None)
+ assert MockVolumeApi.get_exception_response(
+ "create_vol_size") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_create_volume_exception(self, volume_module_mock):
+ volume_module_mock.powerflex_conn.volume.create = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.create_volume("vol_name", None, 1024)
+ assert MockVolumeApi.get_exception_response(
+ "create_vol_exc") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_create_volume(self, volume_module_mock):
+ volume_module_mock.powerflex_conn.volume.create = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.create_volume("vol_name", None, 1024)
+ assert ret is True
+
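+    # modify_access_mode and modify_limits report a change (True) only when
+    # the SDC entry actually carries a value to apply.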
+ def test_modify_access_mode_true(self, volume_module_mock):
+ access_mode_list = [{"accessMode": "READ_ONLY", "sdc_id": "sdc_id"}]
+ volume_module_mock.powerflex_conn.volume.set_access_mode_for_sdc = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.modify_access_mode(
+ "vol_name", access_mode_list)
+ assert ret is True
+
+ def test_modify_access_mode_false(self, volume_module_mock):
+ access_mode_list = [{"accessMode": None, "sdc_id": "sdc_id"}]
+ volume_module_mock.powerflex_conn.volume.set_access_mode_for_sdc = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.modify_access_mode(
+ "vol_name", access_mode_list)
+ assert ret is False
+
+ def test_modify_access_mode_exception(self, volume_module_mock):
+ access_mode_list = [{"accessMode": "READ_ONLY", "sdc_id": "sdc_id"}]
+ volume_module_mock.powerflex_conn.volume.set_access_mode_for_sdc = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.modify_access_mode("vol_name", access_mode_list)
+ assert MockVolumeApi.get_exception_response(
+ "modify_access") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_modify_limits_true(self, volume_module_mock):
+ payload = {"sdc_id": "sdc_id",
+ "bandwidth_limit": 1024, "iops_limit": 1024}
+ volume_module_mock.powerflex_conn.volume.set_mapped_sdc_limits = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.modify_limits(payload)
+ assert ret is True
+
+ def test_modify_limits_false(self, volume_module_mock):
+ payload = {"sdc_id": "sdc_id",
+ "bandwidth_limit": None, "iops_limit": None}
+ volume_module_mock.powerflex_conn.volume.set_mapped_sdc_limits = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.modify_limits(payload)
+ assert ret is False
+
+ def test_modify_limits_exception(self, volume_module_mock):
+ payload = {"sdc_id": "sdc_id",
+ "bandwidth_limit": 1024, "iops_limit": 1024}
+ volume_module_mock.powerflex_conn.volume.set_mapped_sdc_limits = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.modify_limits(payload)
+ assert MockVolumeApi.get_exception_response(
+ "modify_limits") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_delete_volume_true(self, volume_module_mock):
+ volume_module_mock.powerflex_conn.volume.delete = MagicMock(
+ side_effect=None
+ )
+ ret = volume_module_mock.delete_volume("vol_id", "remove_mode")
+ assert ret is True
+
+ def test_delete_volume_exception(self, volume_module_mock):
+ volume_module_mock.powerflex_conn.volume.delete = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.delete_volume("vol_id", "remove_mode")
+ assert MockVolumeApi.get_exception_response(
+ "delete_volume") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_unmap_volume_from_sdc_true(self, volume_module_mock):
+ volume = {"mappedSdcInfo": [{"sdcId": "sdc_id"}], "id": "vol_id"}
+ sdc = [{"sdc_name": "sdc_name"}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id"
+ )
+ volume_module_mock.powerflex_conn.volume.remove_mapped_sdc = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.unmap_volume_from_sdc(volume, sdc)
+ assert ret is True
+
+ def test_unmap_volume_from_sdc_false(self, volume_module_mock):
+ volume = {"mappedSdcInfo": [{"sdcId": "sdc_id"}], "id": "vol_id"}
+ sdc = [{"sdc_ip": "sdc_ip"}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id1"
+ )
+ volume_module_mock.powerflex_conn.volume.remove_mapped_sdc = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.unmap_volume_from_sdc(volume, sdc)
+ assert ret is False
+
+ def test_unmap_volume_from_sdc_exception(self, volume_module_mock):
+ volume = {"mappedSdcInfo": [{"sdcId": "sdc_id"}], "id": "vol_id"}
+ sdc = [{"sdc_id": "sdc_id"}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id"
+ )
+ volume_module_mock.powerflex_conn.volume.remove_mapped_sdc = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.unmap_volume_from_sdc(volume, sdc)
+ assert MockVolumeApi.get_exception_response(
+ "unmap") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
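+    # map_volume_to_sdc is covered for SDCs identified by name, ip and id:
+    # an already mapped SDC yields changed=False plus the lists of pending
+    # access-mode/limit updates, while an unmapped SDC gets newly mapped.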
+ def test_map_volume_to_sdc_name(self, volume_module_mock):
+ volume = {
+ "mappedSdcInfo": [
+ {"sdcId": "sdc_id", "accessMode": "READ_WRITE",
+ "limitIops": 1024, "limitBwInMbps": 1024}],
+ "id": "vol_id",
+ }
+ sdc = [{"sdc_name": "sdc_name", "access_mode": "READ_ONLY",
+ "iops_limit": 2048, "bandwidth_limit": 2048}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id"
+ )
+ ret, sdc_modify_list1, sdc_modify_list2 = volume_module_mock.map_volume_to_sdc(
+ volume, sdc)
+ assert ret is False
+ assert sdc_modify_list1[0]["sdc_id"] == "sdc_id"
+ assert sdc_modify_list2[0]["sdc_id"] == "sdc_id"
+
+ def test_map_volume_to_sdc_ip(self, volume_module_mock):
+ volume = {
+ "mappedSdcInfo": [
+ {"sdcId": "sdc_id", "accessMode": "READ_WRITE",
+ "limitIops": 1024, "limitBwInMbps": 1024}],
+ "id": "vol_id",
+ }
+ sdc = [{"sdc_ip": "sdc_ip", "access_mode": "READ_ONLY",
+ "iops_limit": 2048, "bandwidth_limit": 2048}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id1"
+ )
+ volume_module_mock.powerflex_conn.volume.add_mapped_sdc = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.volume.set_mapped_sdc_limits = MagicMock(
+ return_value=None
+ )
+ ret, sdc_modify_list1, sdc_modify_list2 = volume_module_mock.map_volume_to_sdc(
+ volume, sdc)
+ assert ret is True
+ assert sdc_modify_list1 == []
+ assert sdc_modify_list2 == []
+
+ def test_map_volume_to_sdc_id(self, volume_module_mock):
+ volume = {
+ "mappedSdcInfo": [
+ {"sdcId": "sdc_id", "accessMode": "READ_WRITE",
+ "limitIops": 1024, "limitBwInMbps": 1024}],
+ "id": "vol_id",
+ }
+ sdc = [{"sdc_id": "sdc_id", "access_mode": "READ_ONLY"}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id1"
+ )
+ volume_module_mock.powerflex_conn.volume.add_mapped_sdc = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.volume.set_mapped_sdc_limits = MagicMock(
+ return_value=None
+ )
+ ret, sdc_modify_list1, sdc_modify_list2 = volume_module_mock.map_volume_to_sdc(
+ volume, sdc)
+ assert ret is True
+ assert sdc_modify_list1 == []
+ assert sdc_modify_list2 == []
+
+ def test_map_volume_to_sdc_exception(self, volume_module_mock):
+ volume = {
+ "mappedSdcInfo": [
+ {"sdcId": "sdc_id", "accessMode": "READ_WRITE",
+ "limitIops": 1024, "limitBwInMbps": 1024}],
+ "id": "vol_id",
+ "name": "name"
+ }
+ sdc = [{"sdc_id": "sdc_id", "access_mode": "READ_ONLY",
+ "iops_limit": 2048, "bandwidth_limit": 2048}]
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id1"
+ )
+ volume_module_mock.powerflex_conn.volume.add_mapped_sdc = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.volume.set_mapped_sdc_limits = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.map_volume_to_sdc(volume, sdc)
+ assert MockVolumeApi.get_exception_response(
+ "map_vol_exception") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
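+    # Each parameter set below drives validate_parameters into a different
+    # error path, selected through the assert_msg key.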
+ @pytest.mark.parametrize('params', [
+ {"sdc": [{"sdc_id": "sdc_id", "sdc_name": "sdc_name", "sdc_ip": "sdc_ip"}],
+ "assert_msg": "val_params_err1"},
+ {"cap_unit": "GB", "size": None, "assert_msg": "val_params_err2"},
+ {"asrt": "asrt", "assert_msg": "val_params_err3"},
+ {"state": "present", "del_snaps": "del_snaps",
+ "assert_msg": "val_params_err4"},
+ ])
+ def test_validate_parameters(self, volume_module_mock, params):
+ self.get_module_args.update({
+ "sdc": params.get("sdc", None),
+ "cap_unit": params.get("cap_unit", None),
+ "size": params.get("size", None),
+ })
+ volume_module_mock.module.params = self.get_module_args
+ asrt = params.get("asrt", None)
+ pol_id = params.get("pol_id", None)
+ pol_name = params.get("pol_name", None)
+ del_snaps = params.get("del_snaps", None)
+ state = params.get("state", None)
+ assert_msg = params.get("assert_msg", None)
+ volume_module_mock.validate_parameters(
+ asrt, pol_id, pol_name, del_snaps, state)
+ assert MockVolumeApi.get_exception_response(
+ assert_msg) in volume_module_mock.module.fail_json.call_args[1]['msg']
+
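+    # modify_volume is run once with every supported change (rename, resize,
+    # rmcache, compression and snapshot-policy switch) and once with only a
+    # snapshot-policy change; all backend calls are mocked to succeed.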
+ @pytest.mark.parametrize('params', [
+ {"modify_dict": {
+ "auto_snap_remove_type": "remove",
+ "snap_pol_id": "vol_id",
+ "new_name": "new_name",
+ "new_size": "new_size",
+ "use_rmcache": "use_rmcache",
+ "comp_type": "comp_type"
+ }, "vol_id": "vol_id"},
+ {"modify_dict": {
+ "snap_pol_id": "vol_id"
+ }, "vol_id": "vol_id"},
+ ])
+ def test_modify_volume(self, volume_module_mock, params):
+ vol_id = params.get("vol_id", None)
+ modify_dict = params.get("modify_dict", None)
+ volume_module_mock.get_sdc_id = MagicMock(
+ return_value="sdc_id1"
+ )
+ volume_module_mock.powerflex_conn.snapshot_policy.remove_source_volume = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.snapshot_policy.add_source_volume = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.snapshot_policy.rename = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.snapshot_policy.extend = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.snapshot_policy.set_use_rmcache = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.powerflex_conn.snapshot_policy.set_compression_method = MagicMock(
+ return_value=None
+ )
+ ret = volume_module_mock.modify_volume(vol_id, modify_dict)
+ assert ret is True
+
+    def test_modify_volume_exception(self, volume_module_mock):
+ vol_id = "vol_id"
+ modify_dict = {"snap_pol_id": "vol_id"}
+ volume_module_mock.powerflex_conn.snapshot_policy.add_source_volume = MagicMock(
+ side_effect=MockApiException
+ )
+ volume_module_mock.modify_volume(vol_id, modify_dict)
+ assert MockVolumeApi.get_exception_response(
+ "modify_volume_exp") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
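+    # to_modify builds the dict of pending changes by comparing the requested
+    # values against the current volume details.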
+ def test_to_modify(self, volume_module_mock):
+ vol_details = {
+ "storagePoolId": "sdc_id",
+ "compressionMethod": "tar",
+ "useRmcache": True,
+ "sizeInKb": 2048,
+ "name": "name",
+ "snplIdOfSourceVolume": "snplIdOfSourceVolume"
+ }
+ new_size = 1024
+ use_rmcache = False
+ comp_type = "zip"
+ new_name = "new_name"
+ snap_pol_id = ""
+ asrt = "asrt"
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_STORAGE_POOL_FINE
+ )
+ modify_dict = volume_module_mock.to_modify(vol_details, new_size, use_rmcache, comp_type,
+ new_name, snap_pol_id,
+ asrt)
+ assert modify_dict["snap_pol_id"] == "snplIdOfSourceVolume"
+
+ def test_to_modify_comp_type_error(self, volume_module_mock):
+ vol_details = {
+ "storagePoolId": "sdc_id",
+ "compressionMethod": "tar",
+ "useRmcache": True,
+ "sizeInKb": 2048,
+ "name": "name",
+ "snplIdOfSourceVolume": None
+ }
+ new_size = 1024
+ use_rmcache = False
+ comp_type = "zip"
+ new_name = "new_name"
+ snap_pol_id = "snap_pol_id"
+ asrt = None
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_STORAGE_POOL
+ )
+ volume_module_mock.to_modify(vol_details, new_size, use_rmcache, comp_type,
+ new_name, snap_pol_id,
+ asrt)
+ assert MockVolumeApi.get_exception_response(
+ "create_vol_ctype") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_to_modify_new_name_error(self, volume_module_mock):
+ vol_details = {
+ "storagePoolId": "sdc_id",
+ "compressionMethod": "tar",
+ "useRmcache": True,
+ "sizeInKb": 2048,
+ "name": "name",
+ "snplIdOfSourceVolume": "snplIdOfSourceVolume"
+ }
+ new_size = None
+ use_rmcache = None
+ comp_type = None
+ new_name = ""
+ snap_pol_id = "snap_pol_id"
+ asrt = None
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_STORAGE_POOL_FINE
+ )
+ volume_module_mock.to_modify(vol_details, new_size, use_rmcache, comp_type,
+ new_name, snap_pol_id,
+ asrt)
+ assert MockVolumeApi.get_exception_response(
+ "create_vol_name") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_to_modify_remove_error(self, volume_module_mock):
+ vol_details = {
+ "storagePoolId": "sdc_id",
+ "compressionMethod": "tar",
+ "useRmcache": True,
+ "sizeInKb": 2048,
+ "name": "name",
+ "snplIdOfSourceVolume": "snplIdOfSourceVolume"
+ }
+ new_size = None
+ use_rmcache = None
+ comp_type = None
+ new_name = None
+ snap_pol_id = "snap_pol_id"
+ asrt = "asrt"
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_STORAGE_POOL_FINE
+ )
+ volume_module_mock.to_modify(vol_details, new_size, use_rmcache, comp_type,
+ new_name, snap_pol_id,
+ asrt)
+ assert MockVolumeApi.get_exception_response(
+ "to_modify_err1") in volume_module_mock.module.fail_json.call_args[1]['msg']
+
+ @pytest.mark.parametrize('params', [
+ {"snap_pol_id": "snap_pol_id", "assert_msg": "snap_pol_id_err"},
+ {"snap_pol_name": "snap_pol_id", "assert_msg": "snap_pol_name_err"},
+ {"pd_id": "pd_id", "assert_msg": "pd_id_err"},
+ {"pool_id": "pool_id", "assert_msg": "pool_id_err"},
+ {"pd_name": "pd_name", "assert_msg": "pd_name_err"},
+ {"pool_name": "pool_name", "assert_msg": "pool_name_err"}
+ ])
+ def test_verify_params(self, volume_module_mock, params):
+ vol_details = {
+ "snapshotPolicyId": "snapshotPolicyId",
+ "snapshotPolicyName": "snapshotPolicyName",
+ "protectionDomainId": "protectionDomainId",
+ "storagePoolId": "storagePoolId",
+ "protectionDomainName": "protectionDomainName",
+ "storagePoolName": "storagePoolName",
+ }
+ snap_pol_name = params.get("snap_pol_name", None)
+ snap_pol_id = params.get("snap_pol_id", None)
+ pd_name = params.get("pd_name", None)
+ pd_id = params.get("pd_id", None)
+ pool_name = params.get("pool_name", None)
+ pool_id = params.get("pool_id", None)
+ assert_msg = params.get("assert_msg", None)
+ volume_module_mock.verify_params(vol_details, snap_pol_name, snap_pol_id, pd_name,
+ pd_id, pool_name, pool_id)
+ assert MockVolumeApi.get_exception_response(
+ assert_msg) in volume_module_mock.module.fail_json.call_args[1]['msg']
+
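+    # End-to-end delete path: every helper is mocked, so only the top-level
+    # flow of perform_module_operation() is verified here.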
+ def test_perform_module_operation_delete(self, volume_module_mock):
+ self.get_module_args.update({
+ "compression_type": "tar",
+ "vol_type": "vol_type",
+ "auto_snap_remove_type": "asrt",
+ "size": 20,
+ "protection_domain_name": "protection_domain_name",
+ "storage_pool_name": "storage_pool_name",
+ "snapshot_policy_name": "snapshot_policy_name",
+ "vol_name": "vol_name",
+ "state": "absent",
+ "delete_snapshots": True
+ })
+ volume_module_mock.module.params = self.get_module_args
+ volume_module_mock.validate_parameters = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.get_protection_domain = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.get_snapshot_policy = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.get_volume = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.delete_volume = MagicMock(
+ return_value=True
+ )
+ volume_module_mock.verify_params = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.perform_module_operation()
+ assert volume_module_mock.module.exit_json.call_args[1]['changed'] is True
+        assert volume_module_mock.module.exit_json.call_args[1]['volume_details'] == {}
+
+ def test_perform_module_operation_create_fail(self, volume_module_mock):
+ self.get_module_args.update({
+ "compression_type": "tar",
+ "vol_type": "vol_type",
+ "auto_snap_remove_type": "asrt",
+ "size": 1,
+ "protection_domain_name": "protection_domain_name",
+ "storage_pool_name": "storage_pool_name",
+ "snapshot_policy_name": "",
+ "snapshot_policy_id": "",
+ "vol_name": "vol_name",
+ "state": "present",
+ "delete_snapshots": True,
+ "cap_unit": "TB",
+ "vol_id": "vol_id",
+ "vol_new_name": "vol_new_name",
+ })
+ volume_module_mock.module.params = self.get_module_args
+ volume_module_mock.validate_parameters = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.get_protection_domain = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.get_storage_pool = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.get_snapshot_policy = MagicMock(
+ return_value=MockVolumeApi.GET_ID
+ )
+ volume_module_mock.get_volume = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.verify_params = MagicMock(
+ return_value=None
+ )
+ volume_module_mock.create_volume = MagicMock(
+ return_value=False
+ )
+ volume_module_mock.perform_module_operation()
+ assert MockVolumeApi.get_exception_response(
+ "perform_error1") in volume_module_mock.module.fail_json.call_args[1]['msg']
diff --git a/ansible_collections/dellemc/powerflex/tests/requirements.txt b/ansible_collections/dellemc/powerflex/tests/unit/requirements.txt
index 3541acd15..3541acd15 100644
--- a/ansible_collections/dellemc/powerflex/tests/requirements.txt
+++ b/ansible_collections/dellemc/powerflex/tests/unit/requirements.txt
diff --git a/ansible_collections/dellemc/unity/.ansible-lint b/ansible_collections/dellemc/unity/.ansible-lint
new file mode 100644
index 000000000..4a9971a19
--- /dev/null
+++ b/ansible_collections/dellemc/unity/.ansible-lint
@@ -0,0 +1,4 @@
+exclude_paths:
+ - changelogs/
+ - .github/
+ - meta/
diff --git a/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml b/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml
index d9fb5cf04..58d3ea030 100644
--- a/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml
+++ b/ansible_collections/dellemc/unity/.github/workflows/ansible-test.yml
@@ -15,7 +15,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- ansible-version: [stable-2.12]
+ ansible-version: [stable-2.13]
steps:
- name: Check out code
uses: actions/checkout@v2
@@ -50,12 +50,10 @@ jobs:
fail-fast: false
matrix:
python-version: ["3.9", "3.10", "3.11"]
- ansible-version: [stable-2.12, stable-2.13, stable-2.14]
+ ansible-version: [stable-2.13, stable-2.14, stable-2.15]
exclude:
# Python 3.11 is supported only from ansible-core 2.14 onwards
- python-version: "3.11"
- ansible-version: stable-2.12
- - python-version: "3.11"
ansible-version: stable-2.13
steps:
@@ -93,7 +91,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- ansible-version: [stable-2.12, stable-2.13, stable-2.14]
+ ansible-version: [stable-2.13, stable-2.14, stable-2.15]
steps:
- name: Set up Python 3.9
@@ -121,3 +119,44 @@ jobs:
- name: Run sanity tests
run: ansible-test sanity --docker -v --color
working-directory: /home/runner/.ansible/collections/ansible_collections/dellemc/unity
+
+ lint:
+ name: Ansible lint
+ runs-on: ubuntu-latest
+ needs: [build]
+ strategy:
+ fail-fast: false
+ matrix:
+ python-version: ["3.9", "3.10", "3.11"]
+ ansible-version: [stable-2.13, stable-2.14, stable-2.15]
+
+ steps:
+ # Important: This sets up your GITHUB_WORKSPACE environment variable
+ - name: Checkout the source code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0 # needed for progressive mode to work
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install ansible (${{ matrix.ansible-version }}) version
+ run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible-version }}.tar.gz --disable-pip-version-check
+
+ - name: Install ansible lint
+ run: pip install ansible-lint --disable-pip-version-check
+
+ - name: Download migrated collection artifacts
+ uses: actions/download-artifact@v1
+ with:
+ name: collection
+ path: .cache/collection-tarballs
+
+ - name: Install collection build
+ run: ansible-galaxy collection install .cache/collection-tarballs/*.tar.gz
+
+ - name: Run Ansible lint
+ run: ansible-lint --show-relpath
+ working-directory: /home/runner/work/ansible-unity/ansible-unity
diff --git a/ansible_collections/dellemc/unity/CHANGELOG.rst b/ansible_collections/dellemc/unity/CHANGELOG.rst
index 55eb73ca8..cf231b653 100644
--- a/ansible_collections/dellemc/unity/CHANGELOG.rst
+++ b/ansible_collections/dellemc/unity/CHANGELOG.rst
@@ -5,15 +5,38 @@ Dellemc.Unity Change Log
.. contents:: Topics
+v1.7.1
+======
+
+Minor Changes
+-------------
+
+- Patch update to fix import errors in utils file.
+
+v1.7.0
+======
+
+Minor Changes
+-------------
+
+- Added replication session module to get details, pause, resume, sync, failover, failback and delete replication sessions.
+- Added support for Unity XT SeaHawk 5.3.
+- Documentation updates for boolean values based on ansible community guidelines.
+
+New Modules
+-----------
+
+- dellemc.unity.replication_session - Manage replication session on the Unity storage system
+
v1.6.0
======
Minor Changes
-------------
+- Add synchronous replication support for filesystem.
- Support addition of host from the Host List to NFS Export in nfs module.
- Support enable/disable advanced dedup in volume module.
-- Add synchronous replication support for filesystem.
v1.5.0
======
diff --git a/ansible_collections/dellemc/unity/FILES.json b/ansible_collections/dellemc/unity/FILES.json
index 74f5f6106..24ae1cf93 100644
--- a/ansible_collections/dellemc/unity/FILES.json
+++ b/ansible_collections/dellemc/unity/FILES.json
@@ -8,6 +8,13 @@
"format": 1
},
{
+ "name": ".ansible-lint",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f15456e7e2d28634ccb97ea4f73cd6165ce5b6a974a088330b05f5d730b7f674",
+ "format": 1
+ },
+ {
"name": ".github",
"ftype": "dir",
"chksum_type": null,
@@ -81,14 +88,14 @@
"name": ".github/workflows/ansible-test.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "36061bc14911cf2e9f7a4bd7c15771f40cc19c6153de788a79b02fe39d776684",
+ "chksum_sha256": "d648c6b2038a891200af1f6ae981928a37427a14b230e0b7b6ba030cae29a37a",
"format": 1
},
{
"name": "CHANGELOG.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1de2b419f19d4fc6298d49bcc811ead97876e67da345da4569ae7d68c5ec2e72",
+ "chksum_sha256": "0f7304d22c291fa4120f7f01ce4db2d000c00d422fd5fb2a4b2cc771e49c43f6",
"format": 1
},
{
@@ -109,7 +116,7 @@
"name": "README.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a787189adc44f92c54d358fc05673e8a96815159f8b8296e5eed187171089bc5",
+ "chksum_sha256": "e9dda8bf38a7da066fc2fbfff79d3945479c31549df311b3d18229fb2e3634ed",
"format": 1
},
{
@@ -123,14 +130,14 @@
"name": "changelogs/.plugin-cache.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1bf36094cdc0a4661da6b69615073ece39cece910d58dbdd889c4e99070fbbd0",
+ "chksum_sha256": "4869399d305f2a50f7f0abe8a42823ecd1ca153957ed55d8b913bfda4b9dbfae",
"format": 1
},
{
"name": "changelogs/changelog.yaml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b7fd9dd55038169d69b139a961a0501c0805bc1a86a046750c76f4a94b366c19",
+ "chksum_sha256": "0c48d1c2f86f067385dfee3d1c7e4355b42325860936dd4395ecde150b7d894d",
"format": 1
},
{
@@ -179,21 +186,21 @@
"name": "docs/CONTRIBUTING.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "598e9bdfdfb5fcbcba74e866d2680c80e75e8c3e9567c4706df9de36660b1490",
+ "chksum_sha256": "4cf604f9b0b86445fa475876590076d016fa422d2b062e4242e6c4060e3b6738",
"format": 1
},
{
"name": "docs/INSTALLATION.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "a6fb197119f7cb5a83d6079ea4abe69c8c44a29f1686909ed08cc0bf05b67f2d",
+ "chksum_sha256": "eb198be51142a91a0196541ff636aabbb68f17e34de329213a41c83ad3059923",
"format": 1
},
{
"name": "docs/ISSUE_TRIAGE.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "846a696c85036bd2e65dc8517932ec8dbf88c305726a230fdcc4a75e989a2d53",
+ "chksum_sha256": "c5a6b4bd9e4875696f16454844a45f7b2cf95102f1960c1a25cf5e2dafff3e14",
"format": 1
},
{
@@ -207,21 +214,21 @@
"name": "docs/MAINTAINER_GUIDE.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "e1e926f5ecbcb3c016e8394fd386a22d0a9235cd6e049b84a9dce42990c60fa8",
+ "chksum_sha256": "9c3558b79f0913255880f5d2066b98dd2ca5e1e51bce28ccb3bf6cac390a23d7",
"format": 1
},
{
"name": "docs/Release Notes.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "25fc0f22b54c1ecc5fb94cf6f0546600aa444416f79a57c8d367adbd83c1e565",
+ "chksum_sha256": "3d01761e2b3a2260eeb24e776c773a89a37389156e7e4d43b9c77d24d0506afa",
"format": 1
},
{
"name": "docs/SECURITY.md",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b80e365d12066e0f6c0cf7dce905b1530fc827b4c8111f6533910982ab196174",
+ "chksum_sha256": "11f2d0c94f6b4e19e25a683d25d7dda948f9c4c05bd23fc5e40eeaf23f84cf00",
"format": 1
},
{
@@ -242,119 +249,126 @@
"name": "docs/modules/cifsserver.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ade3ef8ba26c63eeff0dd582a8f9b8a2616d96cb7066bc84410e6ed4c0c02fc5",
+ "chksum_sha256": "accb0fec62fb6b7e828d5c86fb8b31f386409b85155c4071d5f059f1e451b270",
"format": 1
},
{
"name": "docs/modules/consistencygroup.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4a0ac0e3028289436bc9dcc07e14821fab118552e56e28558541346628b73fab",
+ "chksum_sha256": "59b142a37b5afd529f65101b06f20e0ec06617a98939867cbb5dae4d18db9f1d",
"format": 1
},
{
"name": "docs/modules/filesystem.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f93698d8d35931abff7bb6262ac10e3603aa300878248001d3566d1ec39c0ee2",
+ "chksum_sha256": "5d51a857e4b4a0fe2eae41f598751a7a53e6e7bea2c7543d415c4f0797b42e00",
"format": 1
},
{
"name": "docs/modules/filesystem_snapshot.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0974b3beba09ff4a44a6977be12764377213f960a0a0c08e2be3fc4a025b4583",
+ "chksum_sha256": "da7a239d93ee37c1c897029df2d5e984f1c261bdba9027aa518a59c7bf369f6b",
"format": 1
},
{
"name": "docs/modules/host.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b78755a1bdbec795874457f3ee5e63a49d60fb21653617fb58fbefe6f8144282",
+ "chksum_sha256": "d87b955b1e69506eef89d51a5fd7af09ebb1ce02f96a153bf3dd79c0c60d79b5",
"format": 1
},
{
"name": "docs/modules/info.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "19af0ad15ef490c6520d3001657e49b31c386e745d7cf61ac88898bf363ea50d",
+ "chksum_sha256": "e6da035d8419e140759ff270af8e190a3c832710e8e6499948fc23923511c407",
"format": 1
},
{
"name": "docs/modules/interface.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "14bec74112975514f622216f2b2f272197b96f0cb6f99e41ade9491af5bda29e",
+ "chksum_sha256": "15cd4e252987b1cc2a0e887eaf2a6e30c23d0a3c1c1fe779446ad8e0ea8d575d",
"format": 1
},
{
"name": "docs/modules/nasserver.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "0433167e81af69381b7bb7e5350d1414ba6a86de1b66ef82b964d180a9d17229",
+ "chksum_sha256": "938cda5e8fb9200084ad3fb7fc64b60b0e9734103e83dadc3af44af697c8bb86",
"format": 1
},
{
"name": "docs/modules/nfs.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5a1471c26c1b8144f2c159069276b4a7e0d2eb34ab5fb82def30ac9a0c224ef9",
+ "chksum_sha256": "78f10ffebfbf31ccd595bc185ea66b709964edad32503820e11af3d92796503d",
"format": 1
},
{
"name": "docs/modules/nfsserver.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "95005625b57eeb149e047a58cc5f8cd395d48bdf4ac533ab37af7c92ef6c0b01",
+ "chksum_sha256": "83c13d27190e3ff69dd2e9e8e028d0aaee59df327e61c30f4ed65b96d45872e3",
+ "format": 1
+ },
+ {
+ "name": "docs/modules/replication_session.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e36e6472bca3364b80e33e912db31f3cc68da02b978904042273382cdb87ea14",
"format": 1
},
{
"name": "docs/modules/smbshare.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8ae4d6ac050fe829100c4844d6663de2c60f68bd16a18942466c4fa879aaffb5",
+ "chksum_sha256": "779f2cb17655c0bd6b2be3b3612e18d8cb807d3176ec9f07791b94cfe370a9b4",
"format": 1
},
{
"name": "docs/modules/snapshot.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "f92f063ad273c68ddc443dc638d4d9bbef928796f8be56ff8ba3257edf222fee",
+ "chksum_sha256": "66e4b15d3bb4d331318d985436989cf2337883cecd55ab752890ab6d56e14143",
"format": 1
},
{
"name": "docs/modules/snapshotschedule.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bef45c4ee0716258b1afd49736cd566ee6f0f7332e533c2d8e438ede0a70fd55",
+ "chksum_sha256": "bc752ed982fdc709585a8edfaaec05c4061e630414a542b0e5574bbcc61fac4a",
"format": 1
},
{
"name": "docs/modules/storagepool.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "eb19a36818f64d71bf2ba8836568d36dce82b2d4a112f7d516e4b7a9d7e46b18",
+ "chksum_sha256": "1eba8fd13dc00e5f3a8a49411058ac33165c4fb8a3e321b12a0a8e014a3fd3a8",
"format": 1
},
{
"name": "docs/modules/tree_quota.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "68e0ed5f7d7fd816a58711b36faef61aed44c23b67624d0c669e04a058329cf2",
+ "chksum_sha256": "f48926868635b9727ecc8dda8dd50da310f74cc248cc26299b5662564bfc3f7d",
"format": 1
},
{
"name": "docs/modules/user_quota.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1e973112cf06e41e27c465d0e375045e6f0bfc6954a5b12325ff189c92885b9b",
+ "chksum_sha256": "7fe15a1a03c4d750d6197c665ebf3fe8ebf48ff1e85294b91404968bcf3bded9",
"format": 1
},
{
"name": "docs/modules/volume.rst",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "206b027c28b5a0e7c4e40d6402ca45e0c6cc67c8623821c3077bf3b00a54dd13",
+ "chksum_sha256": "ab54ce3a682cb69c1466f1fb19e2e61f324d6675bbf8d2f2eaa771f744158fbc",
"format": 1
},
{
@@ -375,7 +389,147 @@
"name": "meta/runtime.yml",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "1ad0fa1a5cceaac69ea46df66d57fe6f290544c8efa6fabd2a2982296e428536",
+ "chksum_sha256": "0d8f17122fc4d22811162d2eb588ef6ffdc292b62b1df6beea44e5f5fedad1d6",
+ "format": 1
+ },
+ {
+ "name": "playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/cifsserver.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3809d62736eaf513a337ee8c371485d67798a2c4eb2398e6621d6b4b07316092",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/consistencygroup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8ff33fa32b01c77624dd935df4a32294d1760a0bd01cba923db39e0b45f69b0",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/filesystem.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2fd065d78705cefe3e4cce4b329c2ba318692121888f127166b25c19c9c39f5f",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/filesystem_snapshot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a47f477b7afda91e280a98002e37a111a34a6d91b72fc94db1e9a377dcb223de",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/host.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e87d766849fea821f0064fef19e1a76c42ac1cb2eea1bd50bc2654fe0ccd28c",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d4fa965c1079707a27dea201226ea1800285476f2588bbbc6b557c1cd8f6eb0",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/interface.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b8447c7c2f1c6f51d9ed08ab2355e1077c92adf7de4b66560f77bb7c0b131553",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/nasserver.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00295ef54f3655da2e574a4c459f87a118e996604a663f707df559221630252f",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/nfs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd72bea470d6420b86418e6929a94f2eff3f8c73a028366117796db735446918",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/nfsserver.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e24eff50673e48210528f515d975ceaa34dffe9ad672e22ace52736c149c704",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/replication_session.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ebaebacea07bbc0d7dd30df303b7b72037b64f5a83397e0a34b0911f42dc1cff",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/smbshare.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "39553eb9b04df2a67b33c4b33a6352aedab15d998b3cb5fc598e65014d47b3ea",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/snapshot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f7eedc620a64747d90d9db4823561c992c5f2bb8a398e1b818bd0cac44cf0f0",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/snapshotschedule.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6fa79b8d9d454b3443a4e98b265ed62ac18b56c4b1c491a73d016fe57061da64",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/storagepool.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1739a3232e6aaab563dd17bd96c3e8c3b38c495b0c5d6ca010247c282b9e9d18",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/tree_quota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3dfe0414f050dfc19b12749b93c02283b43db31a6095c8937d4528a90c34e4b",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/user_quota.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3ba8ee6e4b62f7d5946aa2f37b8c433aa6e4f8d35a1d868f42f0d2391286eb0f",
+ "format": 1
+ },
+ {
+ "name": "playbooks/modules/volume.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a1a67ee35bf654a58d3a554e4d714743faa1bafd6f5a8e8a912c086827f59718",
"format": 1
},
{
@@ -396,7 +550,7 @@
"name": "plugins/doc_fragments/unity.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "59baa9afd0063cf6ccc64d0be2f93bcdac944d2d53d9a9808654d5715173eab2",
+ "chksum_sha256": "cf04d65ed0cf78f3979951670bace9f5930ede590cafc562bc7db97ce16238d9",
"format": 1
},
{
@@ -438,7 +592,7 @@
"name": "plugins/module_utils/storage/dell/utils.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7fec20103cba14b247e99885a64d5f447876743d1a2c1aabfa41344fa3b5811a",
+ "chksum_sha256": "b85cdbfe3e6b2212fb53f5e1586853164ece1bb2124fa31875180c0b997e24db",
"format": 1
},
{
@@ -452,126 +606,133 @@
"name": "plugins/modules/cifsserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ad663a0af0d1f0dc86ea15772d49cb79813e5970abeadc5fa88ff0fbb798f1c9",
+ "chksum_sha256": "725164681ca1d8e611b4c51a45881c42f9fb4255e07146d24db04c40f6aa4f2e",
"format": 1
},
{
"name": "plugins/modules/consistencygroup.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "84a261b6260c02d87c16108a949a7a9861c837d8b90630059d159418986a2167",
+ "chksum_sha256": "4db39611403cf3c3acd0290d26f7de37bf680a50d51b86106b1658060f9e3af3",
"format": 1
},
{
"name": "plugins/modules/filesystem.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea31747f729d9e933d6f10ceee77c69a8fe924a9b8b55ea52eabd65a4c48e69c",
+ "chksum_sha256": "c3f3ff2fd8bb07a600a25cecaf00caa533dd8d242903cdd24dc8c25381953d63",
"format": 1
},
{
"name": "plugins/modules/filesystem_snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6d01454f787b460865970a4a0607590874a8ac974b03e10fc4a336ae9ed97522",
+ "chksum_sha256": "c9c6eb9dbf17604409652740babf1bac714c487d56f78dae4fd5dbab88037cb2",
"format": 1
},
{
"name": "plugins/modules/host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "c84b7702c1aa417739ac3a3e6e5102ee5a0489b71f481bd5b33d80d73ed01ba0",
+ "chksum_sha256": "94913be39ce75c16165eeb84e46ab8396322d14205c21d8046d53732fa1921e4",
"format": 1
},
{
"name": "plugins/modules/info.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "4f19b0105b22885b546964e9f8316c837cc0ddb0a91d94a07f84317a9358eedd",
+ "chksum_sha256": "9653d4b095f23e5b1dfd53c5485611457bffe6807643f898635d4bc41c74630e",
"format": 1
},
{
"name": "plugins/modules/interface.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "492628928153228d7934856ec4861b169540aa2cba74c89d493705ce243b3661",
+ "chksum_sha256": "ed2d921df52b3a54e41f5279240402f290c9f4d5a7c36c4ccb442fb0b7bc0f02",
"format": 1
},
{
"name": "plugins/modules/nasserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "fe7701ebc60a48151cc72b7463cf8bf3c73c31adb1dded7e487ab49054f95112",
+ "chksum_sha256": "c9909cc77062b9e43e54f2b62b3407412cc68915b5e7bc8f0b3726bec091b381",
"format": 1
},
{
"name": "plugins/modules/nfs.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "93f0a525a5f4a4da9e3d51526e97539a7d2929baf68d4f3b048ec1ea63b79528",
+ "chksum_sha256": "72d5eb3a6bed5969eb5e656bdf1965bce77a790c7e0f6909019ab493dd7cb08e",
"format": 1
},
{
"name": "plugins/modules/nfsserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "40554df77af25ca89f7cc4757fd45d8493a269d136e1818f5c2fc3584372de1b",
+ "chksum_sha256": "4629fa9ca28f77bd3de962fe5ee226a814153bdce75d3d8c6089210217a4c3e2",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/replication_session.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a558a66364e9bfbd0bca35999951ef0eacc3e629ba6139f00612a9522460d1f",
"format": 1
},
{
"name": "plugins/modules/smbshare.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7b40e8869df87faefa535902c72a32c68f650ae77f5a0ad9941d12e2c97dbbb1",
+ "chksum_sha256": "6812eafdad4de20ea4105921fc3f06c9e1082df13e71035100bfeb374e4a7067",
"format": 1
},
{
"name": "plugins/modules/snapshot.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8029c7c788f29e0079f78b3c5ded6194aab6fa32e8e63491c43415a543cfecc5",
+ "chksum_sha256": "05f8a7b43e33347e1a71283c87f1af24a696836b52ffb271e96c601ca74d6ba4",
"format": 1
},
{
"name": "plugins/modules/snapshotschedule.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "046673dba8971fc40b4ccdce39de288b6e9ba7f1a18963f3cd486ab0d24e9dd6",
+ "chksum_sha256": "6214c681ce55b24a89c8307e55d56f770665227d40929e918d906c20570a0c2d",
"format": 1
},
{
"name": "plugins/modules/storagepool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "59696ca229b897b7f546c4e332143d4591aaf4eea3df2d3e808cbff0204f1d7c",
+ "chksum_sha256": "da90ecfe49e95add45dd9b936905d5d8e3076ad3aab4416ec9823583ad1c4cd3",
"format": 1
},
{
"name": "plugins/modules/tree_quota.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ada935019e3b1e4fc61cae3c4134fbf1481058fb8245be836000850ad56e7009",
+ "chksum_sha256": "494320b0e7cc55515bb85d9a39e20f4c01a8dfbafae9b3855e46ec3a3c98898b",
"format": 1
},
{
"name": "plugins/modules/user_quota.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "228c82f62dca70b8c4ab613c6c935ed022a44c014b2d3752e9cd2856e876707b",
+ "chksum_sha256": "bd9b8bc4f0b76cea3e13d0ccf7ec7ac1f41ab3d73609d732c07720aac1df99b1",
"format": 1
},
{
"name": "plugins/modules/volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "8d4064bc0a20e43ba0c45110563315f481220d485af3a2289fc8a4786f8b3814",
+ "chksum_sha256": "d93a6b6a055cbe33647c1386b2e9efdc86465c286a5a79b02e0370497a8b4b2b",
"format": 1
},
{
"name": "requirements.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "ea96c62d419724d448e0ed0b2099f5242e6a9cc26abca64844f1ed99c082e844",
+ "chksum_sha256": "5d35d3ccd9770146a1eedb946a9207317ae60ff9f09f074fcaacbe2750e083d4",
"format": 1
},
{
@@ -603,24 +764,24 @@
"format": 1
},
{
- "name": "tests/sanity/ignore-2.12.txt",
+ "name": "tests/sanity/ignore-2.13.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "2e85c8aca7d338809e87300cf372956e1b4850ece6bc475f6fa4c66ca405812a",
+ "chksum_sha256": "8286d2f238aa5a2835bdd8a9ff38663a0e70b416a2b4a2971a54d75d76a349e7",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.13.txt",
+ "name": "tests/sanity/ignore-2.14.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bb03c71bf8838c2b75394a5100397d843b8741a8d3814446f928c7ddaa265ffd",
+ "chksum_sha256": "8286d2f238aa5a2835bdd8a9ff38663a0e70b416a2b4a2971a54d75d76a349e7",
"format": 1
},
{
- "name": "tests/sanity/ignore-2.14.txt",
+ "name": "tests/sanity/ignore-2.15.txt",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "bb03c71bf8838c2b75394a5100397d843b8741a8d3814446f928c7ddaa265ffd",
+ "chksum_sha256": "8286d2f238aa5a2835bdd8a9ff38663a0e70b416a2b4a2971a54d75d76a349e7",
"format": 1
},
{
@@ -655,7 +816,7 @@
"name": "tests/unit/plugins/module_utils/mock_cifsserver_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "b864ceef5c5db21f3282a76094c8d9226186dadebf9851432fff4fb57e59cfad",
+ "chksum_sha256": "4b0f47afe54a544f0aa9cabed74a5511c1e798ae12daee78ddd85e1bbf76d456",
"format": 1
},
{
@@ -680,10 +841,17 @@
"format": 1
},
{
+ "name": "tests/unit/plugins/module_utils/mock_info_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d8237dc997fb5915a7aad313a11010f651d4d46ff53a6b0edca3811d0f71ca5b",
+ "format": 1
+ },
+ {
"name": "tests/unit/plugins/module_utils/mock_interface_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "6c8de82b6b7311a2ec191fc510d0bc06cde5e0627f74e83711de69c629e826fc",
+ "chksum_sha256": "0195681753eabd889203311a381397c9bbf483438b9a1dcb520f20b8c11d3d22",
"format": 1
},
{
@@ -704,7 +872,14 @@
"name": "tests/unit/plugins/module_utils/mock_nfsserver_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "03b34a46b5c55696c4741dbd3749b1a654cf401e29e79443908145ef87ff5994",
+ "chksum_sha256": "90bf4f60e96b65e52328c269520c444094c897a4c58f5415045ee5d403f8b100",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/mock_replication_session_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f11b929be0bde059e7c8cb26d389c978fbe296a4d456a9a5427988aab3fb392",
"format": 1
},
{
@@ -725,7 +900,7 @@
"name": "tests/unit/plugins/module_utils/mock_volume_api.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "80d7788c208356919f39c49c924745263eb96806ca95ce60a1030822455ed48e",
+ "chksum_sha256": "c033edbccc18e2c37683259706cbbdd90565e252bd63520ac87987a0ada87924",
"format": 1
},
{
@@ -739,7 +914,7 @@
"name": "tests/unit/plugins/modules/test_cifsserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "7031be0fee19af368f61e6f07404c941ec64139f066514eeca48a5f3c9224749",
+ "chksum_sha256": "8db360d0a119fbd207a12efb73c170bcaa4a2b680484c927759dc5fb84ca5db7",
"format": 1
},
{
@@ -760,7 +935,14 @@
"name": "tests/unit/plugins/modules/test_host.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "cbc6464fdcbc66df43da1ef26cbf64d86b77406f28ded032dc7c7c4da6034cd0",
+ "chksum_sha256": "e8432a8d9d6b6b55ded117818178ed7f7d5fc5c99dfadcdb524fb650c1a68244",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e434e614ac38382a05373df79f20ad2343be0167e5cb826f7ac6b374e76100",
"format": 1
},
{
@@ -788,28 +970,28 @@
"name": "tests/unit/plugins/modules/test_nfsserver.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "72726040ab5aff9e023872018e3abd192dba24acc41be7137d3f98ae7712c444",
+ "chksum_sha256": "0050135f054b1c8ef45f8bf5fda2627bf7f59ff898934d59db2f513c15ca1a0e",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_storagepool.py",
+ "name": "tests/unit/plugins/modules/test_replication_session.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "63dafda1a4e630b23a637affabacfefd5c14a3231a0fae02d3886b0bf7656525",
+ "chksum_sha256": "882ae8076ac1acabf1bb187da4a2fc2ef70155b23fd733e4e9f1f4530ad4b7a9",
"format": 1
},
{
- "name": "tests/unit/plugins/modules/test_volume.py",
+ "name": "tests/unit/plugins/modules/test_storagepool.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5bc48d2969cfaa5670ab538ba51cef532e1c3177004e2a2d6dbbd2cd7b4e7714",
+ "chksum_sha256": "63dafda1a4e630b23a637affabacfefd5c14a3231a0fae02d3886b0bf7656525",
"format": 1
},
{
- "name": "ansible.cfg",
+ "name": "tests/unit/plugins/modules/test_volume.py",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "5360ab997ea2c7ed8a6efc7e8324e7b6ec7479af057fe15ff23fe885f05b58b2",
+ "chksum_sha256": "5bc48d2969cfaa5670ab538ba51cef532e1c3177004e2a2d6dbbd2cd7b4e7714",
"format": 1
}
],
diff --git a/ansible_collections/dellemc/unity/MANIFEST.json b/ansible_collections/dellemc/unity/MANIFEST.json
index bff7c8f2f..fbd2511d5 100644
--- a/ansible_collections/dellemc/unity/MANIFEST.json
+++ b/ansible_collections/dellemc/unity/MANIFEST.json
@@ -2,7 +2,7 @@
"collection_info": {
"namespace": "dellemc",
"name": "unity",
- "version": "1.6.0",
+ "version": "1.7.1",
"authors": [
"Akash Shendge <ansible.team@dell.com>",
"Ambuj Dubey <ansible.team@dell.com>",
@@ -25,16 +25,16 @@
],
"license_file": null,
"dependencies": {},
- "repository": "https://github.com/dell/ansible-unity/tree/1.6.0",
- "documentation": "https://github.com/dell/ansible-unity/tree/1.6.0/docs",
- "homepage": "https://github.com/dell/ansible-unity/tree/1.6.0",
+ "repository": "https://github.com/dell/ansible-unity/tree/1.7.1",
+ "documentation": "https://github.com/dell/ansible-unity/tree/1.7.1/docs",
+ "homepage": "https://github.com/dell/ansible-unity/tree/1.7.1",
"issues": "https://www.dell.com/community/Automation/bd-p/Automation"
},
"file_manifest_file": {
"name": "FILES.json",
"ftype": "file",
"chksum_type": "sha256",
- "chksum_sha256": "de87855f156b345c7d278a09b66679818c6ddcf5544a868ec8bd68dc4c2d5162",
+ "chksum_sha256": "3a6859700a30b9a90ae32f32e2e99b09a7289bf793832b1657a943ecdb8604d8",
"format": 1
},
"format": 1
diff --git a/ansible_collections/dellemc/unity/README.md b/ansible_collections/dellemc/unity/README.md
index 58dbb9539..a50754721 100644
--- a/ansible_collections/dellemc/unity/README.md
+++ b/ansible_collections/dellemc/unity/README.md
@@ -2,63 +2,64 @@
The Ansible Modules for Dell Technologies (Dell) Unity allow Data Center and IT administrators to use RedHat Ansible to automate and orchestrate the configuration and management of Dell Unity arrays.
-The capabilities of the Ansible modules are managing consistency groups, filesystem, filesystem snapshots, CIFS server, NAS server, NFS server, NFS export, SMB share, interface, hosts, snapshots, snapshot schedules, storage pools, user quotas, quota trees and volumes. Capabilities also include gathering facts from the array. The options available for each are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request.
+The capabilities of the Ansible modules are managing consistency groups, filesystem, filesystem snapshots, CIFS server, NAS server, NFS server, NFS export, SMB share, interface, hosts, snapshots, snapshot schedules, storage pools, user quotas, quota trees, replication sessions and volumes. Capabilities also include gathering facts from the array. The options available for each are list, show, create, modify and delete. These tasks can be executed by running simple playbooks written in yaml syntax. The modules are written so that all the operations are idempotent, so making multiple identical requests has the same effect as making a single request.
## Table of contents
-* [Code of conduct](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CODE_OF_CONDUCT.md)
-* [Maintainer guide](https://github.com/dell/ansible-unity/blob/1.6.0/docs/MAINTAINER_GUIDE.md)
-* [Committer guide](https://github.com/dell/ansible-unity/blob/1.6.0/docs/COMMITTER_GUIDE.md)
-* [Contributing guide](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CONTRIBUTING.md)
-* [Branching strategy](https://github.com/dell/ansible-unity/blob/1.6.0/docs/BRANCHING.md)
-* [List of adopters](https://github.com/dell/ansible-unity/blob/1.6.0/docs/ADOPTERS.md)
-* [Maintainers](https://github.com/dell/ansible-unity/blob/1.6.0/docs/MAINTAINERS.md)
-* [Support](https://github.com/dell/ansible-unity/blob/1.6.0/docs/SUPPORT.md)
+* [Code of conduct](https://github.com/dell/ansible-unity/blob/1.7.1/docs/CODE_OF_CONDUCT.md)
+* [Maintainer guide](https://github.com/dell/ansible-unity/blob/1.7.1/docs/MAINTAINER_GUIDE.md)
+* [Committer guide](https://github.com/dell/ansible-unity/blob/1.7.1/docs/COMMITTER_GUIDE.md)
+* [Contributing guide](https://github.com/dell/ansible-unity/blob/1.7.1/docs/CONTRIBUTING.md)
+* [Branching strategy](https://github.com/dell/ansible-unity/blob/1.7.1/docs/BRANCHING.md)
+* [List of adopters](https://github.com/dell/ansible-unity/blob/1.7.1/docs/ADOPTERS.md)
+* [Maintainers](https://github.com/dell/ansible-unity/blob/1.7.1/docs/MAINTAINERS.md)
+* [Support](https://github.com/dell/ansible-unity/blob/1.7.1/docs/SUPPORT.md)
* [License](#license)
-* [Security](https://github.com/dell/ansible-unity/blob/1.6.0/docs/SECURITY.md)
+* [Security](https://github.com/dell/ansible-unity/blob/1.7.1/docs/SECURITY.md)
* [Prerequisites](#prerequisites)
* [List of Ansible modules for Dell Unity](#list-of-ansible-modules-for-dell-unity)
* [Installation and execution of Ansible modules for Dell Unity](#installation-and-execution-of-ansible-modules-for-dell-unity)
* [Releasing, Maintenance and Deprecation](#releasing-maintenance-and-deprecation)
## License
-The Ansible collection for Unity is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-unity/blob/1.6.0/LICENSE) for the full terms. Ansible modules and module utilities that are part of the Ansible collection for Unity are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-unity/blob/1.6.0/MODULE-LICENSE) for the full terms.
+The Ansible collection for Unity is released and licensed under the GPL-3.0 license. See [LICENSE](https://github.com/dell/ansible-unity/blob/1.7.1/LICENSE) for the full terms. Ansible modules and module utilities that are part of the Ansible collection for Unity are released and licensed under the Apache 2.0 license. See [MODULE-LICENSE](https://github.com/dell/ansible-unity/blob/1.7.1/MODULE-LICENSE) for the full terms.
## Supported Platforms
- * Dell Unity Arrays version 5.1, 5.2
+ * Dell Unity Arrays version 5.1, 5.2, 5.3
## Prerequisites
This table provides information about the software prerequisites for the Ansible Modules for Dell Unity.
| **Ansible Modules** | **Python version** | **Storops - Python SDK version** | **Ansible** |
|---------------------|--------------------|----------------------------------|-------------|
-| v1.6.0 | 3.9 <br> 3.10 <br> 3.11 | 1.2.11 | 2.12 <br> 2.13 <br> 2.14|
+| v1.7.1 | 3.9 <br> 3.10 <br> 3.11 | 1.2.11 | 2.13 <br> 2.14 <br> 2.15|
## Idempotency
The modules are written in such a way that all requests are idempotent and hence fault-tolerant. It essentially means that the result of a successfully performed request is independent of the number of times it is executed.
## List of Ansible Modules for Dell Unity
- * [Consistency group module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/consistencygroup.rst)
- * [Filesystem module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/filesystem.rst)
- * [Filesystem snapshot module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/filesystem_snapshot.rst)
- * [Info module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/info.rst)
- * [Host module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/host.rst)
- * [CIFS server module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/cifsserver.rst)
- * [NAS server module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/nasserver.rst)
- * [NFS server module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/nfsserver.rst)
- * [NFS export module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/nfs.rst)
- * [SMB share module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/smbshare.rst)
- * [Interface module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/interface.rst)
- * [Snapshot module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/snapshot.rst)
- * [Snapshot schedule module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/snapshotschedule.rst)
- * [Storage pool module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/storagepool.rst)
- * [User quota module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/user_quota.rste)
- * [Quota tree module ](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/tree_quota.rst)
- * [Volume module](https://github.com/dell/ansible-unity/blob/1.6.0/docs/modules/volume.rst)
+ * [Consistency group module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/consistencygroup.rst)
+ * [Filesystem module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/filesystem.rst)
+ * [Filesystem snapshot module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/filesystem_snapshot.rst)
+ * [Info module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/info.rst)
+ * [Host module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/host.rst)
+ * [CIFS server module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/cifsserver.rst)
+ * [NAS server module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/nasserver.rst)
+ * [NFS server module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/nfsserver.rst)
+ * [NFS export module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/nfs.rst)
+ * [SMB share module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/smbshare.rst)
+ * [Interface module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/interface.rst)
+ * [Snapshot module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/snapshot.rst)
+ * [Snapshot schedule module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/snapshotschedule.rst)
+ * [Storage pool module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/storagepool.rst)
+ * [User quota module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/user_quota.rst)
+ * [Quota tree module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/tree_quota.rst)
+ * [Volume module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/volume.rst)
+ * [Replication session module](https://github.com/dell/ansible-unity/blob/1.7.1/docs/modules/replication_session.rst)
## Installation and execution of Ansible modules for Dell Unity
-The installation and execution steps of Ansible modules for Dell Unity can be found [here](https://github.com/dell/ansible-unity/blob/1.6.0/docs/INSTALLATION.md).
+The installation and execution steps of Ansible modules for Dell Unity can be found [here](https://github.com/dell/ansible-unity/blob/1.7.1/docs/INSTALLATION.md).
## Releasing, Maintenance and Deprecation
@@ -66,6 +67,6 @@ Ansible Modules for Dell Technologies Unity follows [Semantic Versioning](https
New versions will be released regularly if significant changes (bug fixes or new features) are made to the collection.
-Released code versions are located on "release" branches with names of the form "release-x.y.z" where x.y.z corresponds to the version number. More information on branching strategy followed can be found [here](https://github.com/dell/ansible-unity/blob/1.6.0/docs/BRANCHING.md).
+Released code versions are located on "release" branches with names of the form "release-x.y.z", where x.y.z corresponds to the version number. More information on the branching strategy can be found [here](https://github.com/dell/ansible-unity/blob/1.7.1/docs/BRANCHING.md).
Ansible Modules for Dell Technologies Unity deprecation cycle is aligned with that of [Ansible](https://docs.ansible.com/ansible/latest/dev_guide/module_lifecycle.html).
diff --git a/ansible_collections/dellemc/unity/ansible.cfg b/ansible_collections/dellemc/unity/ansible.cfg
deleted file mode 100644
index c10d1da22..000000000
--- a/ansible_collections/dellemc/unity/ansible.cfg
+++ /dev/null
@@ -1,484 +0,0 @@
-# config file for ansible -- https://ansible.com/
-# ===============================================
-
-# nearly all parameters can be overridden in ansible-playbook
-# or with command line flags. ansible will read ANSIBLE_CONFIG,
-# ansible.cfg in the current working directory, .ansible.cfg in
-# the home directory or /etc/ansible/ansible.cfg, whichever it
-# finds first
-
-[defaults]
-
-# some basic default values...
-
-#inventory = /etc/ansible/hosts
-#library = /usr/share/my_modules/
-#module_utils = /usr/share/my_module_utils/
-#remote_tmp = ~/.ansible/tmp
-#local_tmp = ~/.ansible/tmp
-#plugin_filters_cfg = /etc/ansible/plugin_filters.yml
-#forks = 5
-#poll_interval = 15
-#sudo_user = root
-#ask_sudo_pass = True
-#ask_pass = True
-#transport = smart
-#remote_port = 22
-#module_lang = C
-#module_set_locale = False
-
-# plays will gather facts by default, which contain information about
-# the remote system.
-#
-# smart - gather by default, but don't regather if already gathered
-# implicit - gather by default, turn off with gather_facts: False
-# explicit - do not gather by default, must say gather_facts: True
-#gathering = implicit
-
-# This only affects the gathering done by a play's gather_facts directive,
-# by default gathering retrieves all facts subsets
-# all - gather all subsets
-# network - gather min and network facts
-# hardware - gather hardware facts (longest facts to retrieve)
-# virtual - gather min and virtual facts
-# facter - import facts from facter
-# ohai - import facts from ohai
-# You can combine them using comma (ex: network,virtual)
-# You can negate them using ! (ex: !hardware,!facter,!ohai)
-# A minimal set of facts is always gathered.
-#gather_subset = all
-
-# some hardware related facts are collected
-# with a maximum timeout of 10 seconds. This
-# option lets you increase or decrease that
-# timeout to something more suitable for the
-# environment.
-# gather_timeout = 10
-
-# additional paths to search for roles in, colon separated
-#roles_path = /etc/ansible/roles
-
-# uncomment this to disable SSH key host checking
-#host_key_checking = False
-
-# change the default callback, you can only have one 'stdout' type enabled at a time.
-#stdout_callback = skippy
-
-
-## Ansible ships with some plugins that require whitelisting,
-## this is done to avoid running all of a type by default.
-## These setting lists those that you want enabled for your system.
-## Custom plugins should not need this unless plugin author specifies it.
-
-# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
-#callback_whitelist = timer, mail
-
-# Determine whether includes in tasks and handlers are "static" by
-# default. As of 2.0, includes are dynamic by default. Setting these
-# values to True will make includes behave more like they did in the
-# 1.x versions.
-#task_includes_static = False
-#handler_includes_static = False
-
-# Controls if a missing handler for a notification event is an error or a warning
-#error_on_missing_handler = True
-
-# change this for alternative sudo implementations
-#sudo_exe = sudo
-
-# What flags to pass to sudo
-# WARNING: leaving out the defaults might create unexpected behaviours
-#sudo_flags = -H -S -n
-
-# SSH timeout
-#timeout = 10
-
-# default user to use for playbooks if user is not specified
-# (/usr/bin/ansible will use current user as default)
-#remote_user = root
-
-# logging is off by default unless this path is defined
-# if so defined, consider logrotate
-#log_path = /var/log/ansible.log
-
-# default module name for /usr/bin/ansible
-#module_name = command
-
-# use this shell for commands executed under sudo
-# you may need to change this to bin/bash in rare instances
-# if sudo is constrained
-#executable = /bin/sh
-
-# if inventory variables overlap, does the higher precedence one win
-# or are hash values merged together? The default is 'replace' but
-# this can also be set to 'merge'.
-#hash_behaviour = replace
-
-# by default, variables from roles will be visible in the global variable
-# scope. To prevent this, the following option can be enabled, and only
-# tasks and handlers within the role will see the variables there
-#private_role_vars = yes
-
-# list any Jinja2 extensions to enable here:
-#jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
-
-# if set, always use this private key file for authentication, same as
-# if passing --private-key to ansible or ansible-playbook
-#private_key_file = /path/to/file
-
-# If set, configures the path to the Vault password file as an alternative to
-# specifying --vault-password-file on the command line.
-#vault_password_file = /path/to/vault_password_file
-
-# format of string {{ ansible_managed }} available within Jinja2
-# templates indicates to users editing templates files will be replaced.
-# replacing {file}, {host} and {uid} and strftime codes with proper values.
-#ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
-# {file}, {host}, {uid}, and the timestamp can all interfere with idempotence
-# in some situations so the default is a static string:
-#ansible_managed = Ansible managed
-
-# by default, ansible-playbook will display "Skipping [host]" if it determines a task
-# should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
-# messages. NOTE: the task header will still be shown regardless of whether or not the
-# task is skipped.
-#display_skipped_hosts = True
-
-# by default, if a task in a playbook does not include a name: field then
-# ansible-playbook will construct a header that includes the task's action but
-# not the task's args. This is a security feature because ansible cannot know
-# if the *module* considers an argument to be no_log at the time that the
-# header is printed. If your environment doesn't have a problem securing
-# stdout from ansible-playbook (or you have manually specified no_log in your
-# playbook on all of the tasks where you have secret information) then you can
-# safely set this to True to get more informative messages.
-#display_args_to_stdout = False
-
-# by default (as of 1.3), Ansible will raise errors when attempting to dereference
-# Jinja2 variables that are not set in templates or action lines. Uncomment this line
-# to revert the behavior to pre-1.3.
-#error_on_undefined_vars = False
-
-# by default (as of 1.6), Ansible may display warnings based on the configuration of the
-# system running ansible itself. This may include warnings about 3rd party packages or
-# other conditions that should be resolved if possible.
-# to disable these warnings, set the following value to False:
-#system_warnings = True
-
-# by default (as of 1.4), Ansible may display deprecation warnings for language
-# features that should no longer be used and will be removed in future versions.
-# to disable these warnings, set the following value to False:
-#deprecation_warnings = True
-
-# (as of 1.8), Ansible can optionally warn when usage of the shell and
-# command module appear to be simplified by using a default Ansible module
-# instead. These warnings can be silenced by adjusting the following
-# setting or adding warn=yes or warn=no to the end of the command line
-# parameter string. This will for example suggest using the git module
-# instead of shelling out to the git command.
-# command_warnings = False
-
-
-# set plugin path directories here, separate with colons
-#action_plugins = /usr/share/ansible/plugins/action
-#cache_plugins = /usr/share/ansible/plugins/cache
-#callback_plugins = /usr/share/ansible/plugins/callback
-#connection_plugins = /usr/share/ansible/plugins/connection
-#lookup_plugins = /usr/share/ansible/plugins/lookup
-#inventory_plugins = /usr/share/ansible/plugins/inventory
-#vars_plugins = /usr/share/ansible/plugins/vars
-#filter_plugins = /usr/share/ansible/plugins/filter
-#test_plugins = /usr/share/ansible/plugins/test
-#terminal_plugins = /usr/share/ansible/plugins/terminal
-#strategy_plugins = /usr/share/ansible/plugins/strategy
-
-
-# by default, ansible will use the 'linear' strategy but you may want to try
-# another one
-#strategy = free
-
-# by default callbacks are not loaded for /bin/ansible, enable this if you
-# want, for example, a notification or logging callback to also apply to
-# /bin/ansible runs
-#bin_ansible_callbacks = False
-
-
-# don't like cows? that's unfortunate.
-# set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
-#nocows = 1
-
-# set which cowsay stencil you'd like to use by default. When set to 'random',
-# a random stencil will be selected for each task. The selection will be filtered
-# against the `cow_whitelist` option below.
-#cow_selection = default
-#cow_selection = random
-
-# when using the 'random' option for cowsay, stencils will be restricted to this list.
-# it should be formatted as a comma-separated list with no spaces between names.
-# NOTE: line continuations here are for formatting purposes only, as the INI parser
-# in python does not support them.
-#cow_whitelist=bud-frogs,bunny,cheese,daemon,default,dragon,elephant-in-snake,elephant,eyes,\
-# hellokitty,kitty,luke-koala,meow,milk,moofasa,moose,ren,sheep,small,stegosaurus,\
-# stimpy,supermilker,three-eyes,turkey,turtle,tux,udder,vader-koala,vader,www
-
-# don't like colors either?
-# set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
-#nocolor = 1
-
-# if set to a persistent type (not 'memory', for example 'redis') fact values
-# from previous runs in Ansible will be stored. This may be useful when
-# wanting to use, for example, IP information from one group of servers
-# without having to talk to them in the same playbook run to get their
-# current IP information.
-#fact_caching = memory
-
-
-# retry files
-# When a playbook fails by default a .retry file will be created in ~/
-# You can disable this feature by setting retry_files_enabled to False
-# and you can change the location of the files by setting retry_files_save_path
-
-#retry_files_enabled = False
-#retry_files_save_path = ~/.ansible-retry
-
-# squash actions
-# Ansible can optimise actions that call modules with list parameters
-# when looping. Instead of calling the module once per with_ item, the
-# module is called once with all items at once. Currently this only works
-# under limited circumstances, and only with parameters named 'name'.
-#squash_actions = apk,apt,dnf,homebrew,pacman,pkgng,yum,zypper
-
-# prevents logging of task data, off by default
-#no_log = False
-
-# prevents logging of tasks, but only on the targets, data is still logged on the master/controller
-#no_target_syslog = False
-
-# controls whether Ansible will raise an error or warning if a task has no
-# choice but to create world readable temporary files to execute a module on
-# the remote machine. This option is False by default for security. Users may
-# turn this on to have behaviour more like Ansible prior to 2.1.x. See
-# https://docs.ansible.com/ansible/become.html#becoming-an-unprivileged-user
-# for more secure ways to fix this than enabling this option.
-#allow_world_readable_tmpfiles = False
-
-# controls the compression level of variables sent to
-# worker processes. At the default of 0, no compression
-# is used. This value must be an integer from 0 to 9.
-#var_compression_level = 9
-
-# controls what compression method is used for new-style ansible modules when
-# they are sent to the remote system. The compression types depend on having
-# support compiled into both the controller's python and the client's python.
-# The names should match with the python Zipfile compression types:
-# * ZIP_STORED (no compression. available everywhere)
-# * ZIP_DEFLATED (uses zlib, the default)
-# These values may be set per host via the ansible_module_compression inventory
-# variable
-#module_compression = 'ZIP_DEFLATED'
-
-# This controls the cutoff point (in bytes) on --diff for files
-# set to 0 for unlimited (RAM may suffer!).
-#max_diff_size = 1048576
-
-# This controls how ansible handles multiple --tags and --skip-tags arguments
-# on the CLI. If this is True then multiple arguments are merged together. If
-# it is False, then the last specified argument is used and the others are ignored.
-# This option will be removed in 2.8.
-#merge_multiple_cli_flags = True
-
-# Controls showing custom stats at the end, off by default
-#show_custom_stats = True
-
-# Controls which files to ignore when using a directory as inventory with
-# possibly multiple sources (both static and dynamic)
-#inventory_ignore_extensions = ~, .orig, .bak, .ini, .cfg, .retry, .pyc, .pyo
-
-# This family of modules use an alternative execution path optimized for network appliances
-# only update this setting if you know how this works, otherwise it can break module execution
-#network_group_modules=eos, nxos, ios, iosxr, junos, vyos
-
-# When enabled, this option allows lookups (via variables like {{lookup('foo')}} or when used as
-# a loop with `with_foo`) to return data that is not marked "unsafe". This means the data may contain
-# jinja2 templating language which will be run through the templating engine.
-# ENABLING THIS COULD BE A SECURITY RISK
-#allow_unsafe_lookups = False
-
-# set default errors for all plays
-#any_errors_fatal = False
-
-[inventory]
-# enable inventory plugins, default: 'host_list', 'script', 'yaml', 'ini'
-#enable_plugins = host_list, virtualbox, yaml, constructed
-
-# ignore these extensions when parsing a directory as inventory source
-#ignore_extensions = .pyc, .pyo, .swp, .bak, ~, .rpm, .md, .txt, ~, .orig, .ini, .cfg, .retry
-
-# ignore files matching these patterns when parsing a directory as inventory source
-#ignore_patterns=
-
-# If 'true' unparsed inventory sources become fatal errors, they are warnings otherwise.
-#unparsed_is_failed=False
-
-[privilege_escalation]
-#become=True
-#become_method=sudo
-#become_user=root
-#become_ask_pass=False
-
-[paramiko_connection]
-
-# uncomment this line to cause the paramiko connection plugin to not record new host
-# keys encountered. Increases performance on new host additions. Setting works independently of the
-# host key checking setting above.
-#record_host_keys=False
-
-# by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
-# line to disable this behaviour.
-#pty=False
-
-# paramiko will default to looking for SSH keys initially when trying to
-# authenticate to remote devices. This is a problem for some network devices
-# that close the connection after a key failure. Uncomment this line to
-# disable the Paramiko look for keys function
-#look_for_keys = False
-
-# When using persistent connections with Paramiko, the connection runs in a
-# background process. If the host doesn't already have a valid SSH key, by
-# default Ansible will prompt to add the host key. This will cause connections
-# running in background processes to fail. Uncomment this line to have
-# Paramiko automatically add host keys.
-#host_key_auto_add = True
-
-[ssh_connection]
-
-# ssh arguments to use
-# Leaving off ControlPersist will result in poor performance, so use
-# paramiko on older platforms rather than removing it, -C controls compression use
-#ssh_args = -C -o ControlMaster=auto -o ControlPersist=60s
-
-# The base directory for the ControlPath sockets.
-# This is the "%(directory)s" in the control_path option
-#
-# Example:
-# control_path_dir = /tmp/.ansible/cp
-#control_path_dir = ~/.ansible/cp
-
-# The path to use for the ControlPath sockets. This defaults to a hashed string of the hostname,
-# port and username (empty string in the config). The hash mitigates a common problem users
-# found with long hostames and the conventional %(directory)s/ansible-ssh-%%h-%%p-%%r format.
-# In those cases, a "too long for Unix domain socket" ssh error would occur.
-#
-# Example:
-# control_path = %(directory)s/%%h-%%r
-#control_path =
-
-# Enabling pipelining reduces the number of SSH operations required to
-# execute a module on the remote server. This can result in a significant
-# performance improvement when enabled, however when using "sudo:" you must
-# first disable 'requiretty' in /etc/sudoers
-#
-# By default, this option is disabled to preserve compatibility with
-# sudoers configurations that have requiretty (the default on many distros).
-#
-#pipelining = False
-
-# Control the mechanism for transferring files (old)
-# * smart = try sftp and then try scp [default]
-# * True = use scp only
-# * False = use sftp only
-#scp_if_ssh = smart
-
-# Control the mechanism for transferring files (new)
-# If set, this will override the scp_if_ssh option
-# * sftp = use sftp to transfer files
-# * scp = use scp to transfer files
-# * piped = use 'dd' over SSH to transfer files
-# * smart = try sftp, scp, and piped, in that order [default]
-#transfer_method = smart
-
-# if False, sftp will not use batch mode to transfer files. This may cause some
-# types of file transfer failures impossible to catch however, and should
-# only be disabled if your sftp version has problems with batch mode
-#sftp_batch_mode = False
-
-# The -tt argument is passed to ssh when pipelining is not enabled because sudo
-# requires a tty by default.
-#use_tty = True
-
-[persistent_connection]
-
-# Configures the persistent connection timeout value in seconds. This value is
-# how long the persistent connection will remain idle before it is destroyed.
-# If the connection doesn't receive a request before the timeout value
-# expires, the connection is shutdown. The default value is 30 seconds.
-#connect_timeout = 30
-
-# Configures the persistent connection retry timeout. This value configures the
-# the retry timeout that ansible-connection will wait to connect
-# to the local domain socket. This value must be larger than the
-# ssh timeout (timeout) and less than persistent connection idle timeout (connect_timeout).
-# The default value is 15 seconds.
-#connect_retry_timeout = 15
-
-# The command timeout value defines the amount of time to wait for a command
-# or RPC call before timing out. The value for the command timeout must
-# be less than the value of the persistent connection idle timeout (connect_timeout)
-# The default value is 10 second.
-#command_timeout = 10
-
-[accelerate]
-#accelerate_port = 5099
-#accelerate_timeout = 30
-#accelerate_connect_timeout = 5.0
-
-# The daemon timeout is measured in minutes. This time is measured
-# from the last activity to the accelerate daemon.
-#accelerate_daemon_timeout = 30
-
-# If set to yes, accelerate_multi_key will allow multiple
-# private keys to be uploaded to it, though each user must
-# have access to the system via SSH to add a new key. The default
-# is "no".
-#accelerate_multi_key = yes
-
-[selinux]
-# file systems that require special treatment when dealing with security context
-# the default behaviour that copies the existing context or uses the user default
-# needs to be changed to use the file system dependent context.
-#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p
-
-# Set this to yes to allow libvirt_lxc connections to work without SELinux.
-#libvirt_lxc_noseclabel = yes
-
-[colors]
-#highlight = white
-#verbose = blue
-#warn = bright purple
-#error = red
-#debug = dark gray
-#deprecate = purple
-#skip = cyan
-#unreachable = red
-#ok = green
-#changed = yellow
-#diff_add = green
-#diff_remove = red
-#diff_lines = cyan
-
-
-[diff]
-# Always print diff when running ( same as always running with -D/--diff )
-# always = no
-
-# Set how many context lines to show in diff
-# context = 3
-
-[galaxy]
-server_list = automation_hub
-
-[galaxy_server.automation_hub]
-url=https://cloud.redhat.com/api/automation-hub/
-auth_url=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token/
-
-token=eyJhbGciOiJIUzI1NiIsInR5cCIgOiAiSldUIiwia2lkIiA6ICJhZDUyMjdhMy1iY2ZkLTRjZjAtYTdiNi0zOTk4MzVhMDg1NjYifQ.eyJpYXQiOjE2NzkzMDkyMTcsImp0aSI6IjJmZTdjZjA1LTAxZDQtNDMwMi1iMWNlLTgzNjlhNWJmNjViMyIsImlzcyI6Imh0dHBzOi8vc3NvLnJlZGhhdC5jb20vYXV0aC9yZWFsbXMvcmVkaGF0LWV4dGVybmFsIiwiYXVkIjoiaHR0cHM6Ly9zc28ucmVkaGF0LmNvbS9hdXRoL3JlYWxtcy9yZWRoYXQtZXh0ZXJuYWwiLCJzdWIiOiJmOjUyOGQ3NmZmLWY3MDgtNDNlZC04Y2Q1LWZlMTZmNGZlMGNlNjpqZW5uaWZlcl9qb2huIiwidHlwIjoiT2ZmbGluZSIsImF6cCI6ImNsb3VkLXNlcnZpY2VzIiwibm9uY2UiOiJmZTY2MGYxMS1kODFjLTQ2YWItYTkzNS1hZTAxZmY2MjA2OTciLCJzZXNzaW9uX3N0YXRlIjoiMzI3ZDlhNjgtZTkxMi00N2NiLWI3NDctNWE5YmQzZTJlZjlmIiwic2NvcGUiOiJvcGVuaWQgYXBpLmlhbS5zZXJ2aWNlX2FjY291bnRzIGFwaS5pYW0ub3JnYW5pemF0aW9uIG9mZmxpbmVfYWNjZXNzIiwic2lkIjoiMzI3ZDlhNjgtZTkxMi00N2NiLWI3NDctNWE5YmQzZTJlZjlmIn0.iGbseoF6AXetWNa0sFsfzbmzvizwaBcY0rd14YFJqcU \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml b/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml
index 40d737b5f..5e4c94e39 100644
--- a/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml
+++ b/ansible_collections/dellemc/unity/changelogs/.plugin-cache.yaml
@@ -6,6 +6,7 @@ plugins:
callback: {}
cliconf: {}
connection: {}
+ filter: {}
httpapi: {}
inventory: {}
lookup: {}
@@ -60,6 +61,11 @@ plugins:
name: nfsserver
namespace: ''
version_added: 1.4.0
+ replication_session:
+ description: Manage replication session on Unity storage system
+ name: replication_session
+ namespace: ''
+ version_added: 1.7.0
smbshare:
description: Manage SMB shares on Unity storage system
name: smbshare
@@ -98,5 +104,6 @@ plugins:
netconf: {}
shell: {}
strategy: {}
+ test: {}
vars: {}
-version: 1.6.0
+version: 1.7.1
diff --git a/ansible_collections/dellemc/unity/changelogs/changelog.yaml b/ansible_collections/dellemc/unity/changelogs/changelog.yaml
index ee13691d9..6ab226ac6 100644
--- a/ansible_collections/dellemc/unity/changelogs/changelog.yaml
+++ b/ansible_collections/dellemc/unity/changelogs/changelog.yaml
@@ -153,7 +153,24 @@ releases:
1.6.0:
changes:
minor_changes:
+ - Add synchronous replication support for filesystem.
- Support addition of host from the Host List to NFS Export in nfs module.
- Support enable/disable advanced dedup in volume module.
- - Add synchronous replication support for filesystem.
release_date: '2023-03-31'
+ 1.7.0:
+ changes:
+ minor_changes:
+ - Added replication session module to get details, pause, resume, sync, failover,
+ failback and delete replication sessions.
+ - Added support for Unity XT SeaHawk 5.3
+ - Documentation updates for boolean values based on ansible community guidelines.
+ modules:
+ - description: Manage replication session on the Unity storage system
+ name: replication_session
+ namespace: ''
+ release_date: '2023-06-30'
+ 1.7.1:
+ changes:
+ minor_changes:
+ - Patch update to fix import errors in utils file.
+ release_date: '2023-07-31'
diff --git a/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md b/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md
index 1cf25a511..f26c1cd08 100644
--- a/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md
+++ b/ansible_collections/dellemc/unity/docs/CONTRIBUTING.md
@@ -10,7 +10,7 @@ You may obtain a copy of the License at
# How to contribute
-Become one of the contributors to this project! We thrive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CODE_OF_CONDUCT.md).
+Become one of the contributors to this project! We strive to build a welcoming and open community for anyone who wants to use the project or contribute to it. There are just a few small guidelines you need to follow. To help us create a safe and positive community experience for all, we require all participants to adhere to the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.7.1/docs/CODE_OF_CONDUCT.md).
## Table of contents
@@ -76,7 +76,7 @@ Triage helps ensure that issues resolve quickly by:
If you don't have the knowledge or time to code, consider helping with _issue triage_. The Ansible modules for Dell Unity community will thank you for saving them time by spending some of yours.
-Read more about the ways you can [Triage issues](https://github.com/dell/ansible-unity/blob/1.6.0/docs/ISSUE_TRIAGE.md).
+Read more about the ways you can [Triage issues](https://github.com/dell/ansible-unity/blob/1.7.1/docs/ISSUE_TRIAGE.md).
## Your first contribution
@@ -89,7 +89,7 @@ When you're ready to contribute, it's time to create a pull request.
## Branching
-* [Branching Strategy for Ansible modules for Dell Unity](https://github.com/dell/ansible-unity/blob/1.6.0/docs/BRANCHING.md)
+* [Branching Strategy for Ansible modules for Dell Unity](https://github.com/dell/ansible-unity/blob/1.7.1/docs/BRANCHING.md)
## Signing your commits
@@ -144,7 +144,7 @@ Make sure that the title for your pull request uses the same format as the subje
### Quality gates for pull requests
-GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that get checked into the repository. If any of the quality gates fail, it is expected that the contributor will look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out the maintainers of the project for [support](https://github.com/dell/ansible-unity/blob/1.6.0/docs/SUPPORT.md).
+GitHub Actions are used to enforce quality gates when a pull request is created or when any commit is made to the pull request. These GitHub Actions enforce our minimum code quality requirement for any code that gets checked into the repository. If any of the quality gates fail, the contributor is expected to look into the check log, understand the problem and resolve the issue. If help is needed, please feel free to reach out to the maintainers of the project for [support](https://github.com/dell/ansible-unity/blob/1.7.1/docs/SUPPORT.md).
#### Code sanitization
diff --git a/ansible_collections/dellemc/unity/docs/INSTALLATION.md b/ansible_collections/dellemc/unity/docs/INSTALLATION.md
index 01f2856b0..e361588f1 100644
--- a/ansible_collections/dellemc/unity/docs/INSTALLATION.md
+++ b/ansible_collections/dellemc/unity/docs/INSTALLATION.md
@@ -35,7 +35,7 @@ You may obtain a copy of the License at
 * Download the latest tar build from any of the available distribution channels, [Ansible Galaxy](https://galaxy.ansible.com/dellemc/unity) or [Automation Hub](https://console.redhat.com/ansible/automation-hub/repo/published/dellemc/unity), and use this command to install the collection anywhere on your system:
- ansible-galaxy collection install dellemc-unity-1.6.0.tar.gz -p <install_path>
+ ansible-galaxy collection install dellemc-unity-1.7.1.tar.gz -p <install_path>
* Set the environment variable:
@@ -62,7 +62,7 @@ You may obtain a copy of the License at
## Ansible modules execution
-The Ansible server must be configured with Python library for Unity to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-unity/blob/1.6.0/docs/) provide information on different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on various parameters which needs to be configured before running the modules.
+The Ansible server must be configured with the Python library for Unity to run the Ansible playbooks. The [Documents](https://github.com/dell/ansible-unity/blob/1.7.1/docs/) provide information on the different Ansible modules along with their functions and syntax. The parameters table in the Product Guide provides information on the various parameters which need to be configured before running the modules.
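
Installation can also be driven from Ansible Galaxy with a requirements file rather than a downloaded tarball. The sketch below is illustrative: the file name requirements.yml is only a convention, and the pinned version simply mirrors the 1.7.1 release referenced in this commit; it is installed with the standard `ansible-galaxy collection install -r` command.

    ---
    # requirements.yml (illustrative)
    collections:
      - name: dellemc.unity
        version: 1.7.1   # mirrors the release referenced in this commit
    # Install with:
    #   ansible-galaxy collection install -r requirements.yml -p <install_path>

Pinning the collection version alongside the playbooks keeps environments reproducible without relying on a manually downloaded tarball.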
## SSL certificate validation
diff --git a/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md b/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md
index d3e443494..2e25b256a 100644
--- a/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md
+++ b/ansible_collections/dellemc/unity/docs/ISSUE_TRIAGE.md
@@ -43,7 +43,7 @@ Should explain what happened, what was expected and how to reproduce it together
- Ansible Version: [e.g. 2.14]
- Python Version [e.g. 3.10]
- - Ansible modules for Dell Unity Version: [e.g. 1.6.0]
+ - Ansible modules for Dell Unity Version: [e.g. 1.7.1]
- Unity SDK version: [e.g. Unity 1.2.11]
- Any other additional information...
diff --git a/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md b/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md
index 78d13dd1d..a46a3d37d 100644
--- a/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md
+++ b/ansible_collections/dellemc/unity/docs/MAINTAINER_GUIDE.md
@@ -27,7 +27,7 @@ If a candidate is approved, a Maintainer contacts the candidate to invite them t
## Maintainer policies
* Lead by example
-* Follow the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-unity/blob/1.6.0/docs/COMMITTER_GUIDE.md) guides
+* Follow the [Code of Conduct](https://github.com/dell/ansible-unity/blob/1.7.1/docs/CODE_OF_CONDUCT.md) and the guidelines in the [Contributing](https://github.com/dell/ansible-unity/blob/1.7.1/docs/CONTRIBUTING.md) and [Committer](https://github.com/dell/ansible-unity/blob/1.7.1/docs/COMMITTER_GUIDE.md) guides
* Promote a friendly and collaborative environment within our community
* Be actively engaged in discussions, answering questions, updating defects, and reviewing pull requests
* Criticize code, not people. Ideally, tell the contributor a better way to do what they need.
diff --git a/ansible_collections/dellemc/unity/docs/Release Notes.md b/ansible_collections/dellemc/unity/docs/Release Notes.md
index 47d3fa3a5..4243667c1 100644
--- a/ansible_collections/dellemc/unity/docs/Release Notes.md
+++ b/ansible_collections/dellemc/unity/docs/Release Notes.md
@@ -1,6 +1,6 @@
**Ansible Modules for Dell Technologies Unity**
=========================================
-### Release Notes 1.6.0
+### Release Notes 1.7.1
> © 2022 Dell Inc. or its subsidiaries. All rights reserved. Dell
> and other trademarks are trademarks of Dell Inc. or its
@@ -28,7 +28,7 @@ Table 1. Revision history
| Revision | Date | Description |
|----------|----------------|---------------------------------------------------------|
-| 01 | March 2023 | Current release of Ansible Modules for Dell Unity 1.6.0 |
+| 01 | July 2023 | Current release of Ansible Modules for Dell Unity 1.7.1 |
Product Description
-------------------
@@ -71,8 +71,7 @@ for Unity GitHub](https://github.com/dell/ansible-unity/) page.
Documentation
-------------
-The documentation is available on [Ansible Modules for Unity GitHub](https://github.com/dell/ansible-unity/tree/1.6.0/docs)
+The documentation is available on [Ansible Modules for Unity GitHub](https://github.com/dell/ansible-unity/tree/1.7.1/docs)
page. It includes the following:
- README
- Release Notes (this document)
-- Product Guide
diff --git a/ansible_collections/dellemc/unity/docs/SECURITY.md b/ansible_collections/dellemc/unity/docs/SECURITY.md
index 16e1acf79..f77239eac 100644
--- a/ansible_collections/dellemc/unity/docs/SECURITY.md
+++ b/ansible_collections/dellemc/unity/docs/SECURITY.md
@@ -12,7 +12,7 @@ You may obtain a copy of the License at
The Ansible modules for Dell Unity repository is inspected for security vulnerabilities via Black Duck scans and static code analysis.
-In addition to this, there are various security checks that get executed against a branch when a pull request is created/updated. Please refer to [pull request](https://github.com/dell/ansible-unity/blob/1.6.0/docs/CONTRIBUTING.md#Pull-requests) for more information.
+In addition, various security checks are executed against a branch when a pull request is created or updated. Please refer to [pull request](https://github.com/dell/ansible-unity/blob/1.7.1/docs/CONTRIBUTING.md#Pull-requests) for more information.
## Reporting a vulnerability
diff --git a/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst b/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst
index 71b7527f2..2c3c9286e 100644
--- a/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/cifsserver.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst b/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst
index ac5727cfd..4f6060c9f 100644
--- a/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/consistencygroup.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/filesystem.rst b/ansible_collections/dellemc/unity/docs/modules/filesystem.rst
index 81881dfbb..05ff4ebb9 100644
--- a/ansible_collections/dellemc/unity/docs/modules/filesystem.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/filesystem.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst b/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst
index c75f81611..02fa2a6de 100644
--- a/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/filesystem_snapshot.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/host.rst b/ansible_collections/dellemc/unity/docs/modules/host.rst
index b0afe55b9..d2e48dd18 100644
--- a/ansible_collections/dellemc/unity/docs/modules/host.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/host.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/info.rst b/ansible_collections/dellemc/unity/docs/modules/info.rst
index 7b1ef111c..6b2a9d8ad 100644
--- a/ansible_collections/dellemc/unity/docs/modules/info.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/info.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/interface.rst b/ansible_collections/dellemc/unity/docs/modules/interface.rst
index aad1c02e8..cc33254f5 100644
--- a/ansible_collections/dellemc/unity/docs/modules/interface.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/interface.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/nasserver.rst b/ansible_collections/dellemc/unity/docs/modules/nasserver.rst
index 284f37326..97d298b9c 100644
--- a/ansible_collections/dellemc/unity/docs/modules/nasserver.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/nasserver.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/nfs.rst b/ansible_collections/dellemc/unity/docs/modules/nfs.rst
index cce2058f5..c64c8983a 100644
--- a/ansible_collections/dellemc/unity/docs/modules/nfs.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/nfs.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst b/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst
index 0836bb63c..15d786d58 100644
--- a/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/nfsserver.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/replication_session.rst b/ansible_collections/dellemc/unity/docs/modules/replication_session.rst
new file mode 100644
index 000000000..b401c403d
--- /dev/null
+++ b/ansible_collections/dellemc/unity/docs/modules/replication_session.rst
@@ -0,0 +1,294 @@
+.. _replication_session_module:
+
+
+replication_session -- Manage replication session on Unity storage system
+=========================================================================
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Synopsis
+--------
+
+Managing replication sessions on a Unity storage system includes getting details, pausing, resuming, syncing, failing over, failing back and deleting replication sessions.
+
+
+
+Requirements
+------------
+The below requirements are needed on the host that executes this module.
+
+- A Dell Unity Storage device version 5.1 or later.
+- Ansible-core 2.13 or later.
+- Python 3.9, 3.10 or 3.11.
+- Storops Python SDK 1.2.11.
+
+
+
+Parameters
+----------
+
+ session_id (optional, str, None)
+ ID of replication session.
+
+
+ session_name (optional, str, None)
+ Name of replication session.
+
+
+ pause (optional, bool, None)
+ Pause or resume replication session.
+
+
+ sync (optional, bool, None)
+ Sync a replication session.
+
+
+ failover_with_sync (optional, bool, None)
+ If ``true``, sync the source and destination resources before failing over the asynchronous replication session, or keep them in sync after failing over the synchronous replication session.
+
+ If ``false``, fail over the replication session.
+
+
+ failback (optional, bool, None)
+ Failback a replication session.
+
+
+ force_full_copy (optional, bool, None)
+ Indicates whether to sync back all data from the destination SP to the source SP during the failback session. Needed during a resume operation when the replication session goes out of sync due to a fault.
+
+
+ force (optional, bool, None)
+ Skip pre-checks on file system(s) replication sessions of a NAS server when a replication failover is issued from the source NAS server.
+
+
+ state (optional, str, present)
+ State variable to determine whether replication session will exist or not.
+
+
+ unispherehost (True, str, None)
+ IP or FQDN of the Unity management server.
+
+
+ username (True, str, None)
+ The username of the Unity management server.
+
+
+ password (True, str, None)
+ The password of the Unity management server.
+
+
+ validate_certs (optional, bool, True)
+ Boolean variable to specify whether or not to validate SSL certificate.
+
+ ``true`` - Indicates that the SSL certificate should be verified.
+
+ ``false`` - Indicates that the SSL certificate should not be verified.
+
+
+ port (optional, int, 443)
+ Port number through which communication happens with Unity management server.
+
+
+
+
+
+Notes
+-----
+
+.. note::
+ - The *check_mode* is supported.
+ - The modules in this collection, named 'dellemc.unity', are built to support the Dell Unity storage platform.
+
+
+
+
+Examples
+--------
+
+.. code-block:: yaml+jinja
+
+
+ - name: Get replication session details
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+
+ - name: Get replication session details based on session_id
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_id: "103079215114_APM00213404195_0000_103079215274_APM00213404194_0000"
+
+ - name: Pause a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ pause: true
+
+ - name: Resume a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ pause: false
+ force_full_copy: true
+
+ - name: Sync a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ sync: true
+
+ - name: Failover with sync a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ failover_with_sync: true
+ force: true
+
+ - name: Failover a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ failover_with_sync: false
+
+ - name: Failback a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ failback: true
+ force_full_copy: true
+
+ - name: Delete a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ state: "absent"
+
+
+
+Return Values
+-------------
+
+changed (always, bool, True)
+ Whether or not the resource has changed.
+
+
+replication_session_details (When replication session exists., dict, {'current_transfer_est_remain_time': 0, 'daily_snap_replication_policy': None, 'dst_resource_id': 'nas_8', 'dst_spa_interface': {'UnityRemoteInterface': {'hash': 8771253398547, 'id': 'APM00213404195:if_181'}}, 'dst_spb_interface': {'UnityRemoteInterface': {'hash': 8771253424144, 'id': 'APM00213404195:if_180'}}, 'dst_status': 'ReplicationSessionStatusEnum.OK', 'existed': True, 'hash': 8771259012271, 'health': {'UnityHealth': {'hash': 8771253424168}}, 'hourly_snap_replication_policy': None, 'id': '103079215114_APM00213404195_0000_103079215274_APM00213404194_0000', 'last_sync_time': '2023-04-18 10:35:25+00:00', 'local_role': 'ReplicationSessionReplicationRoleEnum.DESTINATION', 'max_time_out_of_sync': 0, 'members': None, 'name': 'rep_sess_nas', 'network_status': 'ReplicationSessionNetworkStatusEnum.OK', 'remote_system': {'UnityRemoteSystem': {'hash': 8771253380142}}, 'replication_resource_type': 'ReplicationEndpointResourceTypeEnum.NASSERVER', 'src_resource_id': 'nas_213', 'src_spa_interface': {'UnityRemoteInterface': {'hash': 8771253475010, 'id': 'APM00213404194:if_195'}}, 'src_spb_interface': {'UnityRemoteInterface': {'hash': 8771253374169, 'id': 'APM00213404194:if_194'}}, 'src_status': 'ReplicationSessionStatusEnum.OK', 'status': 'ReplicationOpStatusEnum.ACTIVE', 'sync_progress': 0, 'sync_state': 'ReplicationSessionSyncStateEnum.IN_SYNC'})
+ Details of the replication session.
+
+
+ id (, str, )
+ Unique identifier of the replicationSession instance.
+
+
+ name (, str, )
+ User-specified replication session name.
+
+
+ replicationResourceType (, str, )
+ Replication resource type of replication session endpoints.
+
+
+ status (, str, )
+ Replication status of the replication session.
+
+
+ remoteSystem (, dict, )
+ Specifies the remote system to use as the destination for the replication session.
+
+
+ UnityRemoteSystem (, dict, )
+ Information about remote storage system.
+
+
+ id (, str, )
+ Unique identifier of the remote system instance.
+
+
+ serialNumber (, str, )
+ Serial number of the remote system.
+
+
+
+
+ maxTimeOutOfSync (, int, )
+ Maximum time to wait before the system syncs the source and destination resources.
+
+
+ srcStatus (, str, )
+ Status of the source end of the session.
+
+
+ networkStatus (, str, )
+ Status of the network connection used by the replication session.
+
+
+ dstStatus (, str, )
+ Status of the destination end of the replication session.
+
+
+ lastSyncTime (, str, )
+ Date and time of the last replication synchronization.
+
+
+ syncState (, str, )
+ Synchronization state between source and destination resource of the replication session.
+
+
+ syncProgress (, int, )
+ Synchronization completion percentage between source and destination resources of the replication session.
+
+
+ dstResourceId (, str, )
+ Identifier of the destination resource.
+
+
+ currentTransferEstRemainTime (, int, )
+ Estimated time left for the replication synchronization to complete.
+
+
+
+
+
+
+Status
+------
+
+
+
+
+
+Authors
+~~~~~~~
+
+- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
+
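
The Notes in the new replication_session document state that check mode is supported, but the bundled examples do not exercise it. The tasks below are an illustrative sketch only: the connection variables are placeholders, the session name is reused from the examples above, and `check_mode`, `register` and `when` are core Ansible task keywords.

    - name: Preview pausing a replication session (no change is made)
      dellemc.unity.replication_session:
        unispherehost: "{{ unispherehost }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: "{{ validate_certs }}"
        session_name: "fs_replication"
        pause: true
      check_mode: true
      register: pause_preview

    - name: Pause the session only if the preview predicted a change
      dellemc.unity.replication_session:
        unispherehost: "{{ unispherehost }}"
        username: "{{ username }}"
        password: "{{ password }}"
        validate_certs: "{{ validate_certs }}"
        session_name: "fs_replication"
        pause: true
      when: pause_preview.changed

Because the module supports check mode, the first task reports whether a change would occur without touching the session; the second task then applies the pause only when needed.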
diff --git a/ansible_collections/dellemc/unity/docs/modules/smbshare.rst b/ansible_collections/dellemc/unity/docs/modules/smbshare.rst
index 697bda3ff..027fcee25 100644
--- a/ansible_collections/dellemc/unity/docs/modules/smbshare.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/smbshare.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/snapshot.rst b/ansible_collections/dellemc/unity/docs/modules/snapshot.rst
index 46b2aa997..5ef582114 100644
--- a/ansible_collections/dellemc/unity/docs/modules/snapshot.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/snapshot.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst b/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst
index 4e9a37de2..2ee511e8c 100644
--- a/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/snapshotschedule.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/storagepool.rst b/ansible_collections/dellemc/unity/docs/modules/storagepool.rst
index 764f2a812..48875898f 100644
--- a/ansible_collections/dellemc/unity/docs/modules/storagepool.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/storagepool.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst b/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst
index 285ab9d79..68afe348f 100644
--- a/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/tree_quota.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/user_quota.rst b/ansible_collections/dellemc/unity/docs/modules/user_quota.rst
index 7d0bbb808..1f7be59b2 100644
--- a/ansible_collections/dellemc/unity/docs/modules/user_quota.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/user_quota.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/docs/modules/volume.rst b/ansible_collections/dellemc/unity/docs/modules/volume.rst
index ed4c5f202..bcb767f65 100644
--- a/ansible_collections/dellemc/unity/docs/modules/volume.rst
+++ b/ansible_collections/dellemc/unity/docs/modules/volume.rst
@@ -21,7 +21,7 @@ Requirements
The below requirements are needed on the host that executes this module.
- A Dell Unity Storage device version 5.1 or later.
-- Ansible-core 2.12 or later.
+- Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
diff --git a/ansible_collections/dellemc/unity/meta/runtime.yml b/ansible_collections/dellemc/unity/meta/runtime.yml
index 31f912445..82e44c7f7 100644
--- a/ansible_collections/dellemc/unity/meta/runtime.yml
+++ b/ansible_collections/dellemc/unity/meta/runtime.yml
@@ -1,5 +1,5 @@
---
-requires_ansible: ">=2.12"
+requires_ansible: ">=2.13"
plugin_routing:
modules:
dellemc_unity_info:
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/cifsserver.yml b/ansible_collections/dellemc/unity/playbooks/modules/cifsserver.yml
new file mode 100644
index 000000000..ee2f980f8
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/cifsserver.yml
@@ -0,0 +1,151 @@
+---
+- name: CIFS Server Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '***'
+ domain_username: 'domain_user'
+ domain_password: '**'
+
+ tasks:
+ - name: Create standalone CIFS server with check mode
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ netbios_name: "ansible_cifs"
+ workgroup: "ansible"
+ local_password: "**"
+ nas_server_name: "Ansible_server_1"
+ state: "present"
+ check_mode: true
+
+ - name: Create standalone CIFS server
+ register: result
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ netbios_name: "ansible_cifs"
+ workgroup: "ansible"
+ local_password: "**"
+ nas_server_name: "Ansible_server_1"
+ state: "present"
+
+ - name: Create standalone CIFS server - Idempotency
+ register: result
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ netbios_name: "ansible_cifs"
+ workgroup: "ansible"
+ local_password: "**"
+ nas_server_name: "Ansible_server_1"
+ state: "present"
+
+ - name: Get CIFS server details
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cifs_server_id: "{{ result.cifs_server_details.id }}"
+ state: "present"
+
+ - name: Get CIFS server details using NAS server name
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "Ansible_server_1"
+ state: "present"
+
+ - name: Get CIFS server details using NAS server ID
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_id: "{{ result.cifs_server_details.nas_server.UnityNasServer.id }}"
+ state: "present"
+
+ - name: Delete CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cifs_server_id: "{{ result.cifs_server_details.id }}"
+ state: "absent"
+
+ - name: Create CIFS server belonging to Active Directory
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_id: "nas_52"
+ cifs_server_name: "test_cifs_server"
+ domain: "test.lab.domain.com"
+ domain_username: "{{ domain_username }}"
+ domain_password: "{{ domain_password }}"
+ state: "present"
+
+ - name: Create CIFS server belonging to Active Directory - Idempotency
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_id: "nas_52"
+ cifs_server_name: "test_cifs_server"
+ domain: "test.lab.domain.com"
+ domain_username: "{{ domain_username }}"
+ domain_password: "{{ domain_password }}"
+ state: "present"
+
+ - name: Delete CIFS server with check mode
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cifs_server_name: "test_cifs_server"
+ unjoin_cifs_server_account: true
+ domain_username: "{{ domain_username }}"
+ domain_password: "{{ domain_password }}"
+ state: "absent"
+ check_mode: true
+
+ - name: Delete CIFS server
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cifs_server_name: "test_cifs_server"
+ unjoin_cifs_server_account: true
+ domain_username: "{{ domain_username }}"
+ domain_password: "{{ domain_password }}"
+ state: "absent"
+
+ - name: Delete CIFS server - Idempotency
+ dellemc.unity.cifsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cifs_server_name: "test_cifs_server"
+ unjoin_cifs_server_account: true
+ domain_username: "{{ domain_username }}"
+ domain_password: "{{ domain_password }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/consistencygroup.yml b/ansible_collections/dellemc/unity/playbooks/modules/consistencygroup.yml
new file mode 100644
index 000000000..f183304ba
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/consistencygroup.yml
@@ -0,0 +1,259 @@
+---
+- name: Consistency Group Operations
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '***'
+ cg_name: 'Ansible_CG_Testing'
+ new_cg_name: 'Ansible_CG_Rename_Testing'
+ description: "Ansible demo"
+ snap_schedule1: "Ansible_Test"
+ snap_schedule2: "CG_test_schedule"
+ tiering_policy1: "LOWEST"
+ state_present: "present"
+ state_absent: "absent"
+ vol_state_present: "present-in-group"
+ vol_state_absent: "absent-in-group"
+ mapping_state_present: "mapped"
+ mapping_state_absent: "unmapped"
+
+ tasks:
+ - name: Create consistency group with volume and host access
+ register: result_cg
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ cg_name: "{{ cg_name }}"
+ description: "{{ description }}"
+ snap_schedule: "{{ snap_schedule1 }}"
+ volumes:
+ - vol_name: "Test_cg_vol-1"
+ vol_state: "{{ vol_state_present }}"
+ hosts:
+ - host_name: "10.*.*.*"
+ - host_id: "Host_511"
+ mapping_state: "{{ mapping_state_present }}"
+ state: "{{ state_present }}"
+
+ - name: Set cg_id
+ ansible.builtin.set_fact:
+ cg_id: "{{ result_cg.consistency_group_details.id }}"
+
+ - name: Get details of consistency group using id
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_id: "{{ cg_id }}"
+ state: "{{ state_present }}"
+
+ - name: Add volumes to consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_id: "{{ cg_id }}"
+ volumes:
+ - vol_name: "Test_cg_vol-2"
+ - vol_id: "sv_8984"
+ vol_state: "{{ vol_state_present }}"
+ state: "{{ state_present }}"
+
+ - name: Add volumes to consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_id: "{{ cg_id }}"
+ volumes:
+ - vol_name: "Test_cg_vol-2"
+ - vol_id: "sv_8984"
+ vol_state: "{{ vol_state_present }}"
+ state: "{{ state_present }}"
+
+ - name: Rename consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ cg_name }}"
+ new_cg_name: "{{ new_cg_name }}"
+ state: "{{ state_present }}"
+
+ - name: Rename consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ new_cg_name: "{{ new_cg_name }}"
+ state: "{{ state_present }}"
+
+ - name: Modify consistency group details
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ snap_schedule: "{{ snap_schedule2 }}"
+ tiering_policy: "{{ tiering_policy1 }}"
+ state: "{{ state_present }}"
+
+ - name: Modify consistency group details - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ snap_schedule: "{{ snap_schedule2 }}"
+ tiering_policy: "{{ tiering_policy1 }}"
+ state: "{{ state_present }}"
+
+ - name: Map host to consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ hosts:
+ - host_name: "10.*.*.*"
+ mapping_state: "{{ mapping_state_present }}"
+ state: "{{ state_present }}"
+
+ - name: Map host to consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ hosts:
+ - host_name: "10.*.*.*"
+ mapping_state: "{{ mapping_state_present }}"
+ state: "{{ state_present }}"
+
+ - name: Unmap host from consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ hosts:
+ - host_name: "10.*.*.*"
+ - host_id: "Host_511"
+ mapping_state: "{{ mapping_state_absent }}"
+ state: "{{ state_present }}"
+
+ - name: Unmap host from consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ hosts:
+ - host_name: "10.*.*.*"
+ - host_id: "Host_511"
+ mapping_state: "{{ mapping_state_absent }}"
+ state: "{{ state_present }}"
+
+ - name: Remove volumes from consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ volumes:
+ - vol_name: "Test_cg_vol-1"
+ - vol_name: "Test_cg_vol-2"
+ - vol_id: "sv_8984"
+ vol_state: "{{ vol_state_absent }}"
+ state: "{{ state_present }}"
+
+ - name: Remove volumes from consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ volumes:
+ - vol_name: "Test_cg_vol-1"
+ - vol_name: "Test_cg_vol-2"
+ - vol_id: "sv_8984"
+ vol_state: "{{ vol_state_absent }}"
+ state: "{{ state_present }}"
+
+ - name: Delete consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "{{ new_cg_name }}"
+ state: "{{ state_absent }}"
+
+ - name: Enable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "dis_repl_ans_source"
+ replication_params:
+ destination_cg_name: "destination_dis_repl_ans_source"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.*.*.*'
+ remote_system_verifycert: false
+ remote_system_username: 'user'
+ remote_system_password: '**'
+ destination_pool_name: "Extreme_Perf_tier"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication for consistency group
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "dis_repl_ans_source"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication for consistency group - Idempotency
+ dellemc.unity.consistencygroup:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ cg_name: "dis_repl_ans_source"
+ replication_state: "disable"
+ state: "present"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/filesystem.yml b/ansible_collections/dellemc/unity/playbooks/modules/filesystem.yml
new file mode 100644
index 000000000..36757103f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/filesystem.yml
@@ -0,0 +1,294 @@
+---
+- name: FileSystem Operations
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ filesystem_name: "FS_Ansible"
+ nas_server_name: "lgla**"
+ size: 10
+ pool_name: "Ansible_Unity_SP_2"
+ cap_gb: "GB"
+ description: "Ansible demo"
+ snap_schedule_name_1: "SS9_empty_DesRet_SS"
+ snap_schedule_name_2: "Ansible_vol_snap_schedule1"
+ pool_id: "pool_2"
+ tiering_policy: "LOWEST"
+ is_thin: true
+ data_reduction: true
+ state_present: "present"
+ state_absent: "absent"
+ state_mapped: "mapped"
+ state_unmapped: "unmapped"
+
+ tasks:
+ - name: Create FileSystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ pool_name: "{{ pool_name }}"
+ size: "{{ size }}"
+ description: "{{ description }}"
+ snap_schedule_name: "{{ snap_schedule_name_2 }}"
+ state: "{{ state_present }}"
+ register: fs_result
+
+ - name: Create FileSystem - idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ pool_name: "{{ pool_name }}"
+ size: "{{ size }}"
+ description: "{{ description }}"
+ snap_schedule_name: "{{ snap_schedule_name_2 }}"
+ state: "{{ state_present }}"
+
+ - name: Set filesystem_id
+ ansible.builtin.set_fact:
+ filesystem_id: "{{ fs_result.filesystem_details.id }}"
+
+ - name: Create FileSystem with quota configuration
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "SP_FS_test_quota_config_1"
+ nas_server_name: "{{ nas_server_name }}"
+ pool_id: "{{ pool_id }}"
+ size: "{{ size }}"
+ description: "{{ description }}"
+ quota_config:
+ default_hard_limit: 10
+ grace_period_unit: "minutes"
+ is_user_quota_enabled: true
+ grace_period: 100
+ state: "{{ state_present }}"
+
+ - name: Create FileSystem with quota configuration - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "SP_FS_test_quota_config_1"
+ nas_server_name: "{{ nas_server_name }}"
+ pool_id: "{{ pool_id }}"
+ size: "{{ size }}"
+ description: "{{ description }}"
+ quota_config:
+ default_hard_limit: 10
+ grace_period_unit: "minutes"
+ is_user_quota_enabled: true
+ grace_period: 100
+ state: "{{ state_present }}"
+
+ - name: Expand FileSystem Size
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ size: 15
+ state: "present"
+
+ - name: Expand FileSystem Size - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ size: 15
+ state: "present"
+
+ - name: Modify FileSystem smb_properties
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ smb_properties:
+ is_smb_op_locks_enabled: true
+ smb_notify_on_change_dir_depth: 5
+ is_smb_notify_on_access_enabled: true
+ state: "present"
+
+ - name: Modify FileSystem smb_properties - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ smb_properties:
+ is_smb_op_locks_enabled: true
+ smb_notify_on_change_dir_depth: 5
+ is_smb_notify_on_access_enabled: true
+ state: "present"
+
+ - name: Modify FileSystem snap schedule
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ snap_schedule_name: "{{ snap_schedule_name_1 }}"
+ state: "{{ state_present }}"
+
+ - name: Modify FileSystem snap schedule - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ snap_schedule_name: "{{ snap_schedule_name_1 }}"
+ state: "{{ state_present }}"
+
+ - name: Remove snap-schedule from filesystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ snap_schedule_name: ""
+ state: "{{ state_present }}"
+
+ - name: Get details of FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ state: "present"
+
+ - name: Delete a FileSystem using id
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ state: "absent"
+
+ - name: Delete a FileSystem using id - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ state: "absent"
+
+ - name: Enable replication for filesystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "test_fs_rrepl_repl"
+ nas_server_name: "test_nas1"
+ replication_params:
+ replication_name: "local_repl_new"
+ replication_mode: "asynchronous"
+ replication_type: "remote"
+ rpo: 20
+ remote_system:
+ remote_system_host: '10.*.*.*'
+ remote_system_verifycert: false
+ remote_system_username: 'user'
+ remote_system_password: '**'
+ destination_pool_name: "test_pool"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for filesystem - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "test_fs_rrepl_repl"
+ nas_server_name: "test_nas1"
+ replication_params:
+ replication_name: "local_repl_new"
+ replication_mode: "asynchronous"
+ replication_type: "remote"
+ rpo: 20
+ remote_system:
+ remote_system_host: '10.*.*.*'
+ remote_system_verifycert: false
+ remote_system_username: 'user'
+ remote_system_password: '**'
+ destination_pool_name: "test_pool"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication for filesystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "test_fs_rrepl_repl"
+ nas_server_name: "test_nas1"
+ replication_params:
+ replication_name: "local_repl_new"
+ new_replication_name: "local_repl_234"
+ replication_mode: "asynchronous"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication for filesystem
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "test_fs_rrepl_repl"
+ nas_server_name: "test_nas1"
+ replication_params:
+ replication_name: "local_repl_234"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication for filesystem - Idempotency
+ dellemc.unity.filesystem:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "test_fs_rrepl_repl"
+ nas_server_name: "test_nas1"
+ replication_state: "disable"
+ state: "present"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/filesystem_snapshot.yml b/ansible_collections/dellemc/unity/playbooks/modules/filesystem_snapshot.yml
new file mode 100644
index 000000000..16ce88db2
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/filesystem_snapshot.yml
@@ -0,0 +1,111 @@
+---
+- name: Filesystem Snapshot Module Operations in Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ snapshot_name: "11_ansible_test_FS_snap"
+ snapshot_name_1: "11_ansible_test_FS_snap_1"
+ filesystem_name_1: "11_ansible_test_FS"
+ nas_server_name_1: "lgla**"
+ nas_server_name_2: "lgla**"
+ description: "Created using playbook"
+ new_description: "Description updated using playbook"
+ expiry_time: "04/15/2021 2:30"
+ new_expiry_time: "04/15/2021 5:30"
+ fs_access_type_1: "Checkpoint"
+ fs_access_type_2: "Protocol"
+
+ tasks:
+ - name: Create Filesystem Snapshot
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ snapshot_name_1 }}"
+ filesystem_name: "{{ filesystem_name_1 }}"
+ nas_server_name: "{{ nas_server_name_1 }}"
+ description: "{{ description }}"
+ auto_delete: true
+ fs_access_type: "{{ fs_access_type_1 }}"
+ state: "present"
+ register: result
+
+ - name: Set snapshot_id
+ ansible.builtin.set_fact:
+ snapshot_id: "{{ result.dellemc.unity.filesystem_snapshot_details.id }}"
+
+ - name: Create Filesystem Snapshot - Idempotency
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ snapshot_name_1 }}"
+ filesystem_name: "{{ filesystem_name_1 }}"
+ nas_server_name: "{{ nas_server_name_1 }}"
+ description: "{{ description }}"
+ auto_delete: true
+ fs_access_type: "{{ fs_access_type_1 }}"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ snapshot_name_1 }}"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details using ID
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ state: "present"
+
+ - name: Update Filesystem Snapshot attributes
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ snapshot_name_1 }}"
+ description: "{{ new_description }}"
+ auto_delete: false
+ expiry_time: "{{ new_expiry_time }}"
+ state: "present"
+
+ - name: Get Filesystem Snapshot Details
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ snapshot_name_1 }}"
+ state: "present"
+
+ - name: Delete Filesystem Snapshot using Name
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ snapshot_name_1 }}"
+ state: "absent"
+
+ - name: Delete Filesystem Snapshot using ID - Idempotency
+ dellemc.unity.filesystem_snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ snapshot_id }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/host.yml b/ansible_collections/dellemc/unity/playbooks/modules/host.yml
new file mode 100644
index 000000000..04b28c8e5
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/host.yml
@@ -0,0 +1,180 @@
+---
+- name: Host Module Operations on Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ host_name_1: "ansible-test-host-1"
+ host_name_2: "ansible-test-host-2"
+ host_name_3: "ansible-test-host"
+ new_host_name_1: "ansible-test-host-3"
+ new_host_name_2: "ansible-test-host-4"
+
+ tasks:
+ - name: Create empty Host "{{ host_name_1 }}"
+ register: result
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_1 }}"
+ host_os: "Linux"
+ description: "ansible-test-host-1"
+ state: "present"
+
+ - name: Set host_id
+ ansible.builtin.set_fact:
+ host_id_1: "{{ result.host_details.id }}"
+
+ - name: Create empty Host "{{ host_name_3 }}"
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_3 }}"
+ host_os: "Linux"
+ description: "ansible-test-host"
+ state: "present"
+
+ - name: Create Host with Initiators "{{ host_name_2 }}"
+ register: result
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_2 }}"
+ host_os: "Linux"
+ description: "ansible-test-host-2"
+ initiators:
+ - "20:00:00:90:FA:13:82:34:10:00:00:90:FA:13:82:34"
+ - "20:00:00:90:FA:13:81:8C:10:00:00:90:FA:13:81:8C"
+ initiator_state: "present-in-host"
+ state: "present"
+
+ - name: Set host_id
+ ansible.builtin.set_fact:
+ host_id_2: "{{ result.host_details.id }}"
+
+ - name: Get Host details using host_name.
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_2 }}"
+ state: "present"
+
+ - name: Get Host details using host_id.
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_id: "{{ host_id_2 }}"
+ state: "present"
+
+ - name: Modify Host using host_name
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_1 }}"
+ host_os: "Mac OS"
+ description: "ansible-test-host-1"
+ state: "present"
+
+ - name: Modify Host using host_id
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_id: "{{ host_id_2 }}"
+ new_host_name: "{{ new_host_name_1 }}"
+ host_os: "Mac OS"
+ description: "ansible-test-host-3"
+ state: "present"
+
+ - name: Add Initiators to Host.
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_1 }}"
+ initiators:
+ - "20:00:00:90:FA:13:82:35:10:00:00:90:FA:13:82:35"
+ - "iqn.11-05.com.test:f14a6cef331b"
+ initiator_state: "present-in-host"
+ state: "present"
+
+ - name: Modify multiple attributes of Host.
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_1 }}"
+ new_host_name: "{{ new_host_name_2 }}"
+ host_os: "Linux"
+ description: "ansible-test-host-4"
+ initiators:
+ - "iqn.11-05.com.test:24514718452e"
+ initiator_state: "present-in-host"
+ state: "present"
+
+ - name: Delete Host {{ new_host_name_2 }}
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ new_host_name_2 }}"
+ state: "absent"
+
+ - name: Delete Host {{ host_name_3 }}
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name_3 }}"
+ state: "absent"
+
+ - name: Delete Host {{ new_host_name_1 }}
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ new_host_name_1 }}"
+ state: "absent"
+
+ - name: Add network address to Host.
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name }}"
+ network_address: "192.*.*.*"
+ network_address_state: "present-in-host"
+ state: "present"
+
+ - name: Delete network address from Host.
+ dellemc.unity.host:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ host_name: "{{ host_name }}"
+ network_address: "192.*.*.*"
+ network_address_state: "absent-in-host"
+ state: "present"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/info.yml b/ansible_collections/dellemc/unity/playbooks/modules/info.yml
new file mode 100644
index 000000000..e2c9b3162
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/info.yml
@@ -0,0 +1,220 @@
+---
+- name: Info Module Operations on Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+
+ tasks:
+ - name: Get detailed list of Unity entities.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - host
+ - fc_initiator
+ - iscsi_initiator
+ - cg
+ - storage_pool
+ - vol
+ - snapshot_schedule
+ - nas_server
+ - file_system
+ - snapshot
+ - nfs_export
+ - smb_share
+ - user_quota
+ - tree_quota
+ - disk_group
+
+ - name: Get information of Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+
+ - name: Get list of Unity hosts.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - host
+
+ - name: Get list of FC initiators on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - fc_initiator
+
+ - name: Get list of ISCSI initiators on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - iscsi_initiator
+
+ - name: Get list of consistency groups on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - cg
+
+ - name: Get list of storage pools on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - storage_pool
+
+ - name: Get list of volumes on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - vol
+
+ - name: Get list of snapshot schedules on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - snapshot_schedule
+
+ - name: Get list of NAS Servers on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - nas_server
+
+ - name: Get list of File Systems on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - file_system
+
+ - name: Get list of Snapshots on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - snapshot
+
+ - name: Get list of NFS exports on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - nfs_export
+
+ - name: Get list of SMB shares on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - smb_share
+
+ - name: Get list of user quota on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - user_quota
+
+ - name: Get list of quota tree on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - tree_quota
+
+ - name: Get details of Disk Group.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - disk_group
+
+ - name: Get list of NFS Servers on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - nfs_server
+
+ - name: Get list of CIFS Servers on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - cifs_server
+
+ - name: Get list of ethernet ports on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - ethernet_port
+
+ - name: Get list of file interfaces on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - file_interface
+
+ - name: Get list of replication sessions on Unity array.
+ dellemc.unity.info:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ gather_subset:
+ - replication_session
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/interface.yml b/ansible_collections/dellemc/unity/playbooks/modules/interface.yml
new file mode 100644
index 000000000..3095a3edc
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/interface.yml
@@ -0,0 +1,91 @@
+---
+- name: Interface Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+
+ tasks:
+ - name: Add Interface as Backup to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "BACKUP"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Add Interface as Backup to NAS Server - Idempotency
+ dellemc.unity.interface:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "BACKUP"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Get Interface details
+ dellemc.unity.interface:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "present"
+
+ - name: Add Interface as Production to NAS Server
+ dellemc.unity.interface:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "PRODUCTION"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Add Interface as Production to NAS Server - Idempotency
+ dellemc.unity.interface:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ ethernet_port_name: "SP A 4-Port Card Ethernet Port 0"
+ role: "PRODUCTION"
+ interface_ip: "xx.xx.xx.xx"
+ netmask: "xx.xx.xx.xx"
+ gateway: "xx.xx.xx.xx"
+ vlan_id: 324
+ state: "present"
+
+ - name: Delete Interface
+ dellemc.unity.interface:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ interface_ip: "xx.xx.xx.xx"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/nasserver.yml b/ansible_collections/dellemc/unity/playbooks/modules/nasserver.yml
new file mode 100644
index 000000000..72f9b7daf
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/nasserver.yml
@@ -0,0 +1,173 @@
+---
+- name: NAS Server Module Operations in Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ nas_server_name: "lglad073"
+ nas_server_new_name: "new_lglad073"
+ tasks:
+ - name: Get NAS Server Details using Name
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "{{ nas_server_name }}"
+ state: "present"
+
+ - name: Rename NAS Server to new Name
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "{{ nas_server_name }}"
+ nas_server_new_name: "{{ nas_server_new_name }}"
+ state: "present"
+
+ - name: Rename NAS Server to Old Name
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "{{ nas_server_new_name }}"
+ nas_server_new_name: "{{ nas_server_name }}"
+ state: "present"
+
+ - name: Modify Details of NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "{{ nas_server_name }}"
+ current_unix_directory_service: "NONE"
+ is_packet_reflect_enabled: true
+ state: "present"
+
+ - name: Enable replication for NAS Server on Local System
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_id: "nas_10"
+ replication_reuse_resource: false
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "local"
+ destination_pool_name: "Pool_Ansible_Neo_DND"
+ destination_sp: "SPA"
+ is_backup: true
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: false
+ replication_params:
+ replication_name: "test_replication"
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ remote_system:
+ remote_system_host: '10.*.*.*'
+ remote_system_verifycert: false
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ destination_sp: "SPA"
+ is_backup: true
+ replication_state: "enable"
+ state: "present"
+
+ - name: Enable replication for NAS Server on Remote System in existing NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ replication_reuse_resource: true
+ replication_params:
+ destination_nas_server_name: "destination_nas"
+ replication_mode: "asynchronous"
+ rpo: 60
+ replication_type: "remote"
+ replication_name: "test_replication"
+ remote_system:
+ remote_system_host: '10.*.*.*'
+ remote_system_verifycert: false
+ remote_system_username: 'test1'
+ remote_system_password: 'test1!'
+ destination_pool_name: "fastVP_pool"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication for NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_repl_rename"
+ new_replication_name: "test_replication"
+ rpo: 20
+ replication_mode: "asynchronous"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Modify replication for NAS Server - Idempotency
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_replication"
+ new_replication_name: "test_repl_rename"
+ rpo: 20
+ replication_mode: "asynchronous"
+ replication_state: "enable"
+ state: "present"
+
+ - name: Disable replication for NAS Server
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
+
+ - name: Disable replication for NAS Server - Idempotency
+ dellemc.unity.nasserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ replication_params:
+ replication_name: "test_replication"
+ replication_state: "disable"
+ state: "present"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/nfs.yml b/ansible_collections/dellemc/unity/playbooks/modules/nfs.yml
new file mode 100644
index 000000000..f0bb48b11
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/nfs.yml
@@ -0,0 +1,551 @@
+---
+- name: NFS Share Module Operations on Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ nfs_export_name_from_fs: 'ansible_nfs'
+ nfs_export_name_from_snap: 'ansible_snap_nfs'
+ filesystem_id: 'fs_377'
+ filesystem_name: 'ansible_fs'
+ nas_server_id: 'nas_3'
+ nas_server_name: 'lglad071'
+ snapshot_id: '171798692329'
+ snapshot_name: 'ansible_fs_snap'
+ description: 'Ansible Unity Module'
+ anonymous_gid: 4294967290
+ anonymous_uid: 4294967290
+ host_id1: 'Host_12'
+ host_id2: 'Host_14'
+ host_id3: 'Host_31'
+ host_id4: 'Host_63'
+ host_name1: 'testlgl3'
+ host_name2: 'testlgl2'
+ host_name3: 'testlgl1'
+ host_name4: 'test.lss.com'
+ ip_address1: '10.*.*.*/24'
+ ip_address2: 'fdfe:9042:c53d:0:250:56ff:fea2:5143'
+
+ tasks:
+ - name: Create nfs share from filesystem
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ path: '/'
+ filesystem_id: "{{ filesystem_id }}"
+ description: "{{ description }}"
+ min_security: "SYS"
+ default_access: "READ_ONLY"
+ state: "present"
+ register: result
+
+ - name: Set nfs share ID created from filesystem
+ ansible.builtin.set_fact:
+ nfs_export_id_from_fs: "{{ result['nfs_share_details']['id'] }}"
+
+ - name: Create nfs share from filesystem - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ path: '/'
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_id: "{{ nas_server_id }}"
+ state: "present"
+ register: result
+
+ - name: Modify nfs share(fs)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_id: "{{ nas_server_id }}"
+ description: ""
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: "{{ anonymous_gid }}"
+ anonymous_uid: "{{ anonymous_uid }}"
+ state: "present"
+ register: result
+
+ - name: Modify nfs share(fs) - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_id: "{{ nas_server_id }}"
+ description: ""
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: "{{ anonymous_gid }}"
+ anonymous_uid: "{{ anonymous_uid }}"
+ state: "present"
+ register: result
+
+ - name: Create nfs share from snapshot
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ path: '/'
+ snapshot_name: "{{ snapshot_name }}"
+ description: "{{ description }}"
+ min_security: "SYS"
+ default_access: "READ_ONLY"
+ state: "present"
+ register: result
+
+ - name: Set nfs share ID created from snapshot
+ ansible.builtin.set_fact:
+ nfs_export_id_from_snap: "{{ result['nfs_share_details']['id'] }}"
+
+ - name: Create nfs share from snapshot - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ path: '/'
+ snapshot_id: "{{ snapshot_id }}"
+ state: "present"
+ register: result
+
+ - name: Modify nfs share(snapshot)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_id: "{{ snapshot_id }}"
+ description: ''
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: "{{ anonymous_gid }}"
+ anonymous_uid: "{{ anonymous_uid }}"
+ state: "present"
+ register: result
+
+ - name: Modify nfs share(snapshot) - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_id: "{{ snapshot_id }}"
+ description: ''
+ default_access: "READ_ONLY_ROOT"
+ anonymous_gid: "{{ anonymous_gid }}"
+ anonymous_uid: "{{ anonymous_uid }}"
+ state: "present"
+ register: result
+
+ - name: Add host in nfs share(fs)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name1 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name1 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Add host in nfs share(fs) - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name1 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name1 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Add more hosts in nfs share(fs)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ - host_name: "{{ host_name1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ - host_id: "{{ host_id3 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name2 }}"
+ - host_name: "{{ host_name3 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name4 }}"
+ - host_id: "{{ host_id4 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ - ip_address: "{{ ip_address2 }}"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Remove host in nfs share(fs)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ - host_name: "{{ host_name1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ - host_id: "{{ host_id3 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name2 }}"
+ - host_name: "{{ host_name3 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name4 }}"
+ - host_id: "{{ host_id4 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ - ip_address: "{{ ip_address2 }}"
+ host_state: "absent-in-export"
+ state: "present"
+ register: result
+
+ - name: Remove host in nfs share(fs) - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ - host_name: "{{ host_name1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ - host_id: "{{ host_id3 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name2 }}"
+ - host_name: "{{ host_name3 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name4 }}"
+ - host_id: "{{ host_id4 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ - ip_address: "{{ ip_address2 }}"
+ host_state: "absent-in-export"
+ state: "present"
+ register: result
+
+ - name: Add host in nfs share(snapshot)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_name: "{{ snapshot_name }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name1 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name2 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Add host in nfs share(snapshot) - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_name: "{{ snapshot_name }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name1 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name2 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Add more hosts in nfs share(snapshot)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_name: "{{ snapshot_name }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ - host_name: "{{ host_name1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ - host_id: "{{ host_id3 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name2 }}"
+ - host_name: "{{ host_name3 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name4 }}"
+ - host_id: "{{ host_id4 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ - ip_address: "{{ ip_address2 }}"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Remove host in nfs share(snapshot)
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_name: "{{ snapshot_name }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ - host_name: "{{ host_name1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ - host_id: "{{ host_id3 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name2 }}"
+ - host_name: "{{ host_name3 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name4 }}"
+ - host_id: "{{ host_id4 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ - ip_address: "{{ ip_address2 }}"
+ host_state: "absent-in-export"
+ state: "present"
+ register: result
+
+ - name: Remove host in nfs share(snapshot) - idempotency
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_name: "{{ snapshot_name }}"
+ adv_host_mgmt_enabled: true
+ no_access_hosts:
+ - host_id: "{{ host_id1 }}"
+ - host_name: "{{ host_name1 }}"
+ read_only_hosts:
+ - host_id: "{{ host_id2 }}"
+ - host_id: "{{ host_id3 }}"
+ read_only_root_hosts:
+ - host_name: "{{ host_name2 }}"
+ - host_name: "{{ host_name3 }}"
+ read_write_hosts:
+ - host_name: "{{ host_name4 }}"
+ - host_id: "{{ host_id4 }}"
+ read_write_root_hosts:
+ - ip_address: "{{ ip_address1 }}"
+ - ip_address: "{{ ip_address2 }}"
+ host_state: "absent-in-export"
+ state: "present"
+ register: result
+
+ - name: Get nfs details by nfs ID
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_id: "{{ nfs_export_id_from_fs }}"
+ state: "present"
+ register: result
+
+ - name: Get nfs details by nfs name and filesystem ID
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ state: "present"
+ register: result
+
+ - name: Get nfs details by nfs name, filesystem name & nas server ID
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_id: "{{ nas_server_id }}"
+ state: "present"
+ register: result
+
+ - name: Get nfs details by nfs name, filesystem name & nas server name
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ state: "present"
+ register: result
+
+ - name: Get nfs details by snapshot name
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_name: "{{ snapshot_name }}"
+ state: "present"
+ register: result
+
+ - name: Get nfs details by snapshot ID
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_snap }}"
+ snapshot_id: "{{ snapshot_id }}"
+ state: "present"
+ register: result
+
+ - name: Delete nfs share by nfs ID
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_id: "{{ nfs_export_id_from_snap }}"
+ state: "absent"
+ register: result
+
+ - name: Delete nfs share by nfs name
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "{{ nfs_export_name_from_fs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ state: "absent"
+ register: result
+
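+ # With adv_host_mgmt_enabled set to false, the host lists below take unregistered access
+ # entries (domain, netgroup, subnet or IP address strings) instead of the registered
+ # host_id/host_name references used in the adv_host_mgmt_enabled: true examples above.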
+ - name: Add hosts in nfs share (fs) with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "csishare-csivol-346b60e518"
+ filesystem_id: "fs_14267"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "10.*.*.*/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.*.*.8"
+ host_state: "present-in-export"
+ state: "present"
+ register: result
+
+ - name: Remove hosts from nfs share (fs) with adv_host_mgmt_enabled as false
+ dellemc.unity.nfs:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nfs_export_name: "csishare-csivol-346b60e518"
+ filesystem_id: "fs_14267"
+ adv_host_mgmt_enabled: false
+ no_access_hosts:
+ - domain: "google.com"
+ read_only_hosts:
+ - netgroup: "netgroup_admin"
+ read_only_root_hosts:
+ - host_name: "host5"
+ read_write_hosts:
+ - subnet: "10.*.*.*/255.255.255.0"
+ read_write_root_hosts:
+ - ip_address: "10.*.*.*"
+ host_state: "absent-in-export"
+ state: "present"
+ register: result
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/nfsserver.yml b/ansible_collections/dellemc/unity/playbooks/modules/nfsserver.yml
new file mode 100644
index 000000000..83d0d7b32
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/nfsserver.yml
@@ -0,0 +1,104 @@
+---
+- name: NFS Server Operations
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+
+ tasks:
+ - name: Create NFS server with KDC type as Windows
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: true
+ kerberos_domain_controller_type: "WINDOWS"
+ kerberos_domain_controller_username: "user"
+ kerberos_domain_controller_password: "**"
+ is_extended_credentials_enabled: true
+ nfs_v4_enabled: true
+ state: "present"
+
+ - name: Create NFS server with KDC type as Windows - Idempotency
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: true
+ kerberos_domain_controller_type: "WINDOWS"
+ kerberos_domain_controller_username: "user"
+ kerberos_domain_controller_password: "**"
+ is_extended_credentials_enabled: true
+ nfs_v4_enabled: true
+ state: "present"
+
+ - name: Get NFS server details
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ state: "present"
+
+ - name: Delete NFS server
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ kerberos_domain_controller_username: "user"
+ kerberos_domain_controller_password: "**"
+ remove_spn_from_kerberos: true
+ state: "absent"
+
+ - name: Create NFS server with KDC type as Unix
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: true
+ kerberos_domain_controller_type: "UNIX"
+ is_extended_credentials_enabled: true
+ nfs_v4_enabled: true
+ state: "present"
+
+ - name: Create NFS server with KDC type as Unix - Idempotency
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ host_name: "dummy_nas23"
+ is_secure_enabled: true
+ kerberos_domain_controller_type: "UNIX"
+ is_extended_credentials_enabled: true
+ nfs_v4_enabled: true
+ state: "present"
+
+ - name: Delete NFS server
+ dellemc.unity.nfsserver:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ nas_server_name: "dummy_nas"
+ kerberos_domain_controller_username: "user"
+ kerberos_domain_controller_password: "**"
+ remove_spn_from_kerberos: true
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/replication_session.yml b/ansible_collections/dellemc/unity/playbooks/modules/replication_session.yml
new file mode 100644
index 000000000..484e29a90
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/replication_session.yml
@@ -0,0 +1,133 @@
+---
+- name: Replication Session Operations
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'admin'
+ password: '**'
+ session_id: "103079215195_APM00213404195_0000_103079215215_APM00213404195_0000"
+ session_name: "test-session"
+
+ tasks:
+ - name: Get replication session details by session name
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+
+ - name: Get replication session details by session ID
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_id: "{{ session_id }}"
+
+ - name: Pause replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ pause: true
+
+ - name: Pause replication session -- Idempotency
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ pause: true
+
+ - name: Resume replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ force_full_copy: true
+ pause: false
+
+ - name: Resume replication session -- Idempotency
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ force_full_copy: true
+ pause: false
+
+ - name: Sync replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_id: "{{ session_id }}"
+ sync: true
+
+ - name: Failover replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ failover_with_sync: true
+ force: true
+
+ - name: Failover replication session -- Idempotency
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ failover_with_sync: true
+ force: true
+
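+ # The failback example below targets the destination system's Unisphere host
+ # (dest_unispherehost), which is not defined in the vars above and must be supplied
+ # when the playbook is run.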
+ - name: Failback replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{ dest_unispherhost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ failback: true
+ force_full_copy: true
+
+ - name: Failback replication session -- Idempotency
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "{{ session_name }}"
+ failback: true
+ force_full_copy: true
+
+ - name: Delete replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "test123"
+ state: "absent"
+
+ - name: Delete replication session -- Idempotency
+ dellemc.unity.replication_session:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ session_name: "test123"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/smbshare.yml b/ansible_collections/dellemc/unity/playbooks/modules/smbshare.yml
new file mode 100644
index 000000000..91de71d0c
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/smbshare.yml
@@ -0,0 +1,229 @@
+---
+- name: SMB Share Module Operations in Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ filesystem_name: "ansible_smb_share_fs"
+ snapshot_name: "ansible_smb_share_snap"
+ nas_server_name: "lglad071"
+ description: "Share is created using playbook"
+ new_description: "modified the description of share using playbook"
+ fs_share_name: "ansible_share_2"
+ snap_share_name: "ansible_share_21"
+ tasks:
+ - name: Create an SMB Share for Filesystem
+ register: smb_result
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ path: "/"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ umask: "000"
+ description: "Added description of filesystem SMB share during creation"
+ offline_availability: "PROGRAMS"
+ is_abe_enabled: true
+ is_branch_cache_enabled: true
+ is_continuous_availability_enabled: true
+ is_encryption_enabled: true
+ state: "present"
+
+ - name: Set Filesystem SMB Share ID
+ ansible.builtin.set_fact:
+ fs_share_id: "{{ smb_result.smb_share_details.id }}"
+
+ - name: Create an SMB Share for Filesystem Idempotency
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ path: "/"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ umask: "000"
+ description: "Added description of filesystem SMB share during creation"
+ offline_availability: "PROGRAMS"
+ is_abe_enabled: true
+ is_branch_cache_enabled: true
+ is_continuous_availability_enabled: true
+ is_encryption_enabled: true
+ state: "present"
+
+ - name: Get SMB Share Details using Name
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ filesystem_id: "fs_65"
+ state: "present"
+
+ - name: Update the details of the SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ filesystem_id: "fs_65"
+ umask: "777"
+ description: "updated description of filesystem SMB share"
+ offline_availability: "DOCUMENTS"
+ is_abe_enabled: false
+ is_branch_cache_enabled: false
+ is_continuous_availability_enabled: false
+ is_encryption_enabled: false
+ state: "present"
+
+ - name: Update the details of the SMB share Idempotency
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ filesystem_id: "fs_65"
+ umask: "777"
+ description: "updated description of filesystem SMB share"
+ offline_availability: "DOCUMENTS"
+ is_abe_enabled: false
+ is_branch_cache_enabled: false
+ is_continuous_availability_enabled: false
+ is_encryption_enabled: false
+ state: "present"
+
+ - name: Delete SMB share for Filesystem
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ filesystem_id: "fs_65"
+ state: "absent"
+
+ - name: Delete SMB share for Filesystem Idempotency
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ fs_share_name }}"
+ filesystem_id: "fs_65"
+ state: "absent"
+
+ # Snapshot name and ID are both unique across the Unity array, so a NAS server is not
+ # required to uniquely identify a snapshot.
+ - name: Create an SMB Share for Snapshot
+ register: smb_result
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ snap_share_name }}"
+ path: "/"
+ snapshot_name: "{{ snapshot_name }}"
+ umask: "000"
+ description: "Added description of Snapshot SMB share during creation"
+ offline_availability: "PROGRAMS"
+ is_abe_enabled: true
+ is_branch_cache_enabled: true
+ is_continuous_availability_enabled: true
+ is_encryption_enabled: true
+ state: "present"
+
+ - name: Set Snapshot SMB Share ID
+ ansible.builtin.set_fact:
+ snap_share_id: "{{ smb_result.smb_share_details.id }}"
+
+ - name: Create an SMB Share for Snapshot Idempotency
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ snap_share_name }}"
+ path: "/"
+ snapshot_name: "{{ snapshot_name }}"
+ umask: "000"
+ description: "Added description of Snapshot SMB share during creation"
+ offline_availability: "PROGRAMS"
+ is_abe_enabled: true
+ is_branch_cache_enabled: true
+ is_continuous_availability_enabled: true
+ is_encryption_enabled: true
+ state: "present"
+
+ - name: Get SMB Share Details using Name
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ snap_share_name }}"
+ snapshot_name: "{{ snapshot_name }}"
+ state: "present"
+
+ - name: Update the details of the SMB share
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ snap_share_name }}"
+ snapshot_name: "{{ snapshot_name }}"
+ umask: "777"
+ description: "updated description of snapshot's SMB share"
+ offline_availability: "DOCUMENTS"
+ is_abe_enabled: false
+ is_branch_cache_enabled: false
+ is_continuous_availability_enabled: false
+ is_encryption_enabled: false
+ state: "present"
+
+ - name: Update the details of the SMB share Idempotency
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ snap_share_name }}"
+ snapshot_name: "{{ snapshot_name }}"
+ umask: "777"
+ description: "updated description of snapshot's SMB share"
+ offline_availability: "DOCUMENTS"
+ is_abe_enabled: false
+ is_branch_cache_enabled: false
+ is_continuous_availability_enabled: false
+ is_encryption_enabled: false
+ state: "present"
+
+ - name: Delete SMB share for Filesystem Snapshot
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_id: "{{ snap_share_id }}"
+ state: "absent"
+
+ - name: Delete SMB share for Filesystem Snapshot Idempotency
+ dellemc.unity.smbshare:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ share_name: "{{ snap_share_name }}"
+ snapshot_name: "{{ snapshot_name }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/snapshot.yml b/ansible_collections/dellemc/unity/playbooks/modules/snapshot.yml
new file mode 100644
index 000000000..164106f7f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/snapshot.yml
@@ -0,0 +1,194 @@
+---
+- name: Snapshot Module Operations in Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'admin'
+ password: 'Password123!'
+ # port is referenced by the tasks below; 443 is the Unisphere REST API default
+ port: 443
+ cg_snapshot_name: "ansible_snap_cg_1_1"
+ vol_snapshot_name: "ansible_snap_lun_4_2"
+ vol_name: "ansible_snap_lun_4"
+ cg_name: "ansible_snap_cg_1"
+ description: "Created using playbook"
+ new_description: "modified description using playbook"
+ host_name: "ansible_snap_host"
+ expiry_time: "04/15/2021 2:30"
+ new_expiry_time: "04/10/2021 2:30"
+ new_snapshot_name: "new_ansible_snap_lun_4_2"
+
+ tasks:
+ - name: Create a Snapshot for a CG
+ register: result
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ cg_name: "{{ cg_name }}"
+ snapshot_name: "{{ cg_snapshot_name }}"
+ description: "{{ description }}"
+ auto_delete: false
+ state: "present"
+
+ - name: Set CG snapshot id
+ ansible.builtin.set_fact:
+ cg_snapshot_id: "{{ result.snapshot_details.id }}"
+
+ - name: Create a Snapshot for a CG Idempotency
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ cg_name: "{{ cg_name }}"
+ snapshot_name: "{{ cg_snapshot_name }}"
+ description: "{{ description }}"
+ auto_delete: false
+ state: "present"
+
+ - name: Create a Snapshot for a LUN with Host attached.
+ register: result
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ vol_name: "{{ vol_name }}"
+ snapshot_name: "{{ vol_snapshot_name }}"
+ expiry_time: "{{ expiry_time }}"
+ description: "{{ description }}"
+ host_name: "{{ host_name }}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Set volume snapshot id
+ ansible.builtin.set_fact:
+ vol_snapshot_id: "{{ result.snapshot_details.id }}"
+
+ - name: Create a Snapshot for a LUN with Host attached Idempotency.
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ vol_name: "{{ vol_name }}"
+ snapshot_name: "{{ vol_snapshot_name }}"
+ expiry_time: "{{ expiry_time }}"
+ description: "{{ description }}"
+ host_name: "{{ host_name }}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Unmap a host for a Snapshot using Id
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ snapshot_id: "{{ vol_snapshot_id }}"
+ host_name: "{{ host_name }}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Unmap a host for a Snapshot Idempotency case
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ snapshot_name: "{{ vol_snapshot_name }}"
+ host_name: "{{ host_name }}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Map snapshot to a host using Id
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ port: "{{ port }}"
+ snapshot_id: "{{ vol_snapshot_id }}"
+ host_name: "{{ host_name }}"
+ host_state: "mapped"
+ state: "present"
+
+ - name: Get Snapshot Details using Id
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ cg_snapshot_id }}"
+ state: "present"
+
+ - name: Update attributes of a Snapshot for a LUN using Id
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ vol_snapshot_id }}"
+ new_snapshot_name: "{{ new_snapshot_name }}"
+ expiry_time: "{{ new_expiry_time }}"
+ description: "{{ new_description }}"
+ host_name: "{{ host_name }}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Update attributes of a Snapshot for a LUN Idempotency case
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ new_snapshot_name }}"
+ expiry_time: "{{ new_expiry_time }}"
+ description: "{{ new_description }}"
+ host_name: "{{ host_name }}"
+ host_state: "unmapped"
+ state: "present"
+
+ - name: Delete Snapshot of CG.
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ cg_snapshot_name }}"
+ state: "absent"
+
+ - name: Delete Snapshot of CG using Id Idempotency case.
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_id: "{{ cg_snapshot_id }}"
+ state: "absent"
+
+ - name: Delete Snapshot of volume.
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ new_snapshot_name }}"
+ state: "absent"
+
+ - name: Delete Snapshot of volume Idempotency.
+ dellemc.unity.snapshot:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ snapshot_name: "{{ new_snapshot_name }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/snapshotschedule.yml b/ansible_collections/dellemc/unity/playbooks/modules/snapshotschedule.yml
new file mode 100644
index 000000000..f2859a06a
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/snapshotschedule.yml
@@ -0,0 +1,203 @@
+---
+- name: Snapshot schedule operations on Unity
+ hosts: localhost
+ connection: local
+ gather_facts: false
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'admin'
+ password: 'Password123!'
+ state_present: 'present'
+ state_absent: 'absent'
+
+ tasks:
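+ # retention_unit selects the unit for desired_retention; see the every_n_days task
+ # below, which retains snapshots for 16 days.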
+ - name: Create snapshot schedule (Rule Type - every_n_hours)
+ register: result
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_N_Hours_Testing"
+ type: "every_n_hours"
+ interval: 6
+ desired_retention: 24
+ state: "{{ state_present }}"
+
+ - name: Set id
+ ansible.builtin.set_fact:
+ id: "{{ result.snapshot_schedule_details.id }}"
+
+ - name: Create snapshot schedule (Rule Type - every_n_hours) - Idempotency
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_N_Hours_Testing"
+ type: "every_n_hours"
+ interval: 6
+ desired_retention: 24
+ state: "{{ state_present }}"
+
+ - name: Create snapshot schedule (Rule Type - every_day)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ hours_of_day:
+ - 8
+ - 14
+ auto_delete: true
+ state: "{{ state_present }}"
+
+ - name: Create snapshot schedule (Rule Type - every_n_days)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_N_Day_Testing"
+ type: "every_n_days"
+ day_interval: 2
+ desired_retention: 16
+ retention_unit: "days"
+ state: "{{ state_present }}"
+
+ - name: Create snapshot schedule (Rule Type - every_week)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Week_Testing"
+ type: "every_week"
+ days_of_week:
+ - MONDAY
+ - FRIDAY
+ hour: 12
+ minute: 30
+ desired_retention: 200
+ state: "{{ state_present }}"
+
+ - name: Create snapshot schedule (Rule Type - every_month)
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Month_Testing"
+ type: "every_month"
+ day_of_month: 17
+ auto_delete: true
+ state: "{{ state_present }}"
+
+ - name: Get snapshot schedule details using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_N_Hours_Testing"
+ state: "{{ state_present }}"
+
+ - name: Get snapshot schedule details using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ id: "{{ id }}"
+ state: "{{ state_present }}"
+
+ - name: Modify snapshot schedule details using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ id: "{{ id }}"
+ type: "every_n_hours"
+ interval: 8
+ state: "{{ state_present }}"
+
+ - name: Modify snapshot schedule details using id - Idempotency
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ id: "{{ id }}"
+ type: "every_n_hours"
+ interval: 8
+ state: "{{ state_present }}"
+
+ - name: Modify snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Day_Testing"
+ type: "every_day"
+ desired_retention: 200
+ auto_delete: false
+ state: "{{ state_present }}"
+
+ - name: Delete snapshot schedule using id
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ id: "{{ id }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Day_Testing"
+ state: "{{ state_absent }}"
+
+ - name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_N_Day_Testing"
+ state: "{{ state_absent }}"
+
+ - name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Week_Testing"
+ state: "{{ state_absent }}"
+
+ - name: Delete snapshot schedule using name
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Month_Testing"
+ state: "{{ state_absent }}"
+
+ - name: Delete snapshot schedule using name - Idempotency
+ dellemc.unity.snapshotschedule:
+ unispherehost: "{{ unispherehost }}"
+ validate_certs: "{{ validate_certs }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ name: "Ansible_Every_Month_Testing"
+ state: "{{ state_absent }}"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/storagepool.yml b/ansible_collections/dellemc/unity/playbooks/modules/storagepool.yml
new file mode 100644
index 000000000..a53328edc
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/storagepool.yml
@@ -0,0 +1,185 @@
+---
+- name: Storage pool Module Operations on Unity
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ pool_name: "test_DND"
+ new_pool_name: "new_pool2"
+ fast_vp_enabled: "enabled"
+ fast_cache_enabled: "enabled"
+ pool_description: "updated"
+ disk_group_id: "dg_16"
+ disk_num: 2
+ raid_type: "RAID10"
+ stripe_width: "BEST_FIT"
+ alert_threshold: 50
+ is_harvest_enabled: true
+ pool_harvest_high_threshold: 59
+ pool_harvest_low_threshold: 40
+ is_snap_harvest_enabled: true
+ snap_harvest_high_threshold: 80
+ snap_harvest_low_threshold: 60
+ pool_type: "DYNAMIC"
+
+ tasks:
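+ # raid_groups describes the disk layout used when a pool is created: the disk group ID,
+ # the number of disks, the RAID type and the stripe width.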
+ - name: Create a Storage pool Test_Pool
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "Test_Pool"
+ pool_description: "test pool"
+ raid_groups:
+ disk_group_id: "dg_36"
+ disk_num: 3
+ raid_type: "RAID10"
+ stripe_width: "BEST_FIT"
+ alert_threshold: 50
+ is_harvest_enabled: true
+ pool_harvest_high_threshold: 60
+ pool_harvest_low_threshold: 40
+ is_snap_harvest_enabled: true
+ snap_harvest_high_threshold: 70
+ snap_harvest_low_threshold: 50
+ fast_vp: "enabled"
+ fast_cache: "enabled"
+ pool_type: "DYNAMIC"
+ state: "present"
+
+ - name: Get the details of Storage pool by name
+ register: result
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ pool_name }}"
+ state: "present"
+
+ - name: Set storage pool id
+ ansible.builtin.set_fact:
+ pool_id: "{{ result.storage_pool_details.id }}"
+
+ - name: Get the details of Storage pool by pool id
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_id: "{{ pool_id }}"
+ state: "present"
+
+ - name: Rename Storage pool by name
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ pool_name }}"
+ new_pool_name: "{{ new_pool_name }}"
+ state: "present"
+
+ - name: Update Storage pool description
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ new_pool_name }}"
+ pool_description: "{{ pool_description }}"
+ state: "present"
+
+ - name: Update Storage pool description - Idempotent
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ new_pool_name }}"
+ pool_description: "{{ pool_description }}"
+ state: "present"
+
+ - name: Update fast_vp by pool name
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ new_pool_name }}"
+ fast_vp: "{{ fast_vp_enabled }}"
+ state: "present"
+
+ - name: Update fast_cache by pool name
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ new_pool_name }}"
+ fast_cache: "{{ fast_cache_enabled }}"
+ state: "present"
+
+ - name: Rename Storage pool by name
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "{{ new_pool_name }}"
+ new_pool_name: "{{ pool_name }}"
+ state: "present"
+
+ - name: Create a Storage pool
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "test_pool"
+ pool_description: "Unity test pool."
+ raid_groups:
+ disk_group_id: "{{ disk_group_id }}"
+ disk_num: "{{ disk_num }}"
+ raid_type: "{{ raid_type }}"
+ stripe_width: "{{ stripe_width }}"
+ alert_threshold: "{{ alert_threshold }}"
+ is_harvest_enabled: "{{ is_harvest_enabled }}"
+ pool_harvest_high_threshold: "{{ pool_harvest_high_threshold }}"
+ pool_harvest_low_threshold: "{{ pool_harvest_low_threshold }}"
+ is_snap_harvest_enabled: "{{ is_snap_harvest_enabled }}"
+ snap_harvest_high_threshold: "{{ snap_harvest_high_threshold }}"
+ snap_harvest_low_threshold: "{{ snap_harvest_low_threshold }}"
+ fast_vp: "{{ fast_vp_enabled }}"
+ fast_cache: "{{ fast_cache_enabled }}"
+ pool_type: "DYNAMIC"
+ state: "present"
+
+ - name: Create a Storage pool - Idempotency
+ dellemc.unity.storagepool:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ pool_name: "test_pool"
+ pool_description: "Unity test pool."
+ raid_groups:
+ disk_group_id: "{{ disk_group_id }}"
+ disk_num: "{{ disk_num }}"
+ raid_type: "{{ raid_type }}"
+ stripe_width: "{{ stripe_width }}"
+ alert_threshold: "{{ alert_threshold }}"
+ is_harvest_enabled: "{{ is_harvest_enabled }}"
+ pool_harvest_high_threshold: "{{ pool_harvest_high_threshold }}"
+ pool_harvest_low_threshold: "{{ pool_harvest_low_threshold }}"
+ is_snap_harvest_enabled: "{{ is_snap_harvest_enabled }}"
+ snap_harvest_high_threshold: "{{ snap_harvest_high_threshold }}"
+ snap_harvest_low_threshold: "{{ snap_harvest_low_threshold }}"
+ fast_vp: "{{ fast_vp_enabled }}"
+ fast_cache: "{{ fast_cache_enabled }}"
+ pool_type: "DYNAMIC"
+ state: "present"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/tree_quota.yml b/ansible_collections/dellemc/unity/playbooks/modules/tree_quota.yml
new file mode 100644
index 000000000..1665ea118
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/tree_quota.yml
@@ -0,0 +1,138 @@
+---
+- name: Tree Quota Operations
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ filesystem_name: "SP_Filesystem_test"
+ filesystem_id: "fs_2279"
+ nas_server_name: "lglad068"
+ path: "/sample_quota"
+ soft_limit: 2
+ state_present: "present"
+ state_absent: "absent"
+
+ tasks:
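+ # hard_limit and soft_limit are sized in cap_unit (TB in these examples).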
+ - name: Create quota tree of filesystem
+ register: result
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ hard_limit: 2
+ cap_unit: "TB"
+ description: "Sample quota tree"
+ state: "{{ state_present }}"
+
+ - name: Set tree_quota_id
+ ansible.builtin.set_fact:
+ tree_quota_id: "{{ result.get_tree_quota_details.id }}"
+
+ - name: Create quota tree of filesystem -- Idempotency
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ hard_limit: 2
+ cap_unit: "TB"
+ description: "Sample quota tree"
+ state: "{{ state_present }}"
+
+ - name: Get quota tree details by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ state: "{{ state_present }}"
+
+ - name: Get quota tree details by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ tree_quota_id: "{{ tree_quota_id }}"
+ state: "{{ state_present }}"
+
+ - name: Modify quota tree of filesystem by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ soft_limit: "{{ soft_limit }}"
+ cap_unit: "TB"
+ description: "Sample quota tree modified"
+ state: "{{ state_present }}"
+
+ - name: Modify quota tree of filesystem -- Idempotency
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ soft_limit: "{{ soft_limit }}"
+ cap_unit: "TB"
+ description: "Sample quota tree modified"
+ state: "{{ state_present }}"
+
+ - name: Modify quota tree of filesystem by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ tree_quota_id: "{{ tree_quota_id }}"
+ soft_limit: "{{ soft_limit }}"
+ cap_unit: "TB"
+ description: "Sample quota tree modified"
+ state: "{{ state_present }}"
+
+ - name: Delete quota tree of filesystem by quota tree path
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete quota tree of filesystem -- Idempotency
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ path: "{{ path }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete quota tree of filesystem by quota tree id
+ dellemc.unity.tree_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ tree_quota_id: "treequota_171798700679_1"
+ state: "{{ state_absent }}"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/user_quota.yml b/ansible_collections/dellemc/unity/playbooks/modules/user_quota.yml
new file mode 100644
index 000000000..350cf4f62
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/user_quota.yml
@@ -0,0 +1,255 @@
+---
+- name: User Quota Operations
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'admin'
+ password: 'Password123!'
+ filesystem_name: "SP_Filesystem_test"
+ filesystem_id: "fs_2279"
+ nas_server_name: "lglad068"
+ user_name: "test2"
+ uid: "2"
+ tree_quota_id: "treequota_171798701972_1"
+ path: "/sample"
+ state_present: "present"
+ state_absent: "absent"
+
+ tasks:
+ - name: Create user quota for a filesystem
+ register: result_user_quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "1"
+ cap_unit: "TB"
+ hard_limit: "2"
+ state: "{{ state_present }}"
+
+ - name: Set user_quota_id
+ ansible.builtin.set_fact:
+ user_quota_id: "{{ result_user_quota.get_user_quota_details.id }}"
+
+ - name: Create user quota for a filesystem -- Idempotency
+ register: result_user_quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "1"
+ cap_unit: "TB"
+ hard_limit: "2"
+ state: "{{ state_present }}"
+
+
+ - name: Get user quota details from user_name
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ user_name: "{{ user_name }}"
+ state: "{{ state_present }}"
+
+ - name: Modify user quota through user_quota_id
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ soft_limit: "900"
+ hard_limit: "1000"
+ user_quota_id: "{{ user_quota_id }}"
+ state: "{{ state_present }}"
+
+
+ - name: Modify user quota through user_quota_id -- Idempotency
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ soft_limit: "900"
+ hard_limit: "1000"
+ user_quota_id: "{{ user_quota_id }}"
+ state: "{{ state_present }}"
+
+ - name: Modify user quota details from uid
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ uid: "{{ uid }}"
+ soft_limit: "1"
+ cap_unit: "TB"
+ state: "{{ state_present }}"
+
+ - name: Modify user quota details from uid -- Idempotency
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ uid: "{{ uid }}"
+ soft_limit: "1"
+ cap_unit: "TB"
+ state: "{{ state_present }}"
+
+ - name: Delete user quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ user_name: "{{ user_name }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete user quota -- Idempotency
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_id: "{{ filesystem_id }}"
+ user_name: "{{ user_name }}"
+ state: "{{ state_absent }}"
+
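+ # User quotas can also be scoped to a quota tree, identified either by its path or by
+ # tree_quota_id, as shown in the tasks below.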
+ - name: Create user quota for a quota tree
+ register: result_user_quota
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ path: "{{ path }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "1"
+ cap_unit: "TB"
+ hard_limit: "2"
+ state: "{{ state_present }}"
+
+ - name: Set user_quota_id
+ ansible.builtin.set_fact:
+ user_quota_id: "{{ result_user_quota.get_user_quota_details.id }}"
+
+ - name: Create user quota for a quota tree -- Idempotency
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ path: "{{ path }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "1"
+ cap_unit: "TB"
+ hard_limit: "2"
+ state: "{{ state_present }}"
+
+ - name: Modify user quota for a quota tree by path
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ path: "{{ path }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "2"
+ cap_unit: "TB"
+ hard_limit: "2"
+ state: "{{ state_present }}"
+
+ - name: Modify user quota for a quota tree by quota tree id
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ tree_quota_id: "{{ tree_quota_id }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "800"
+ hard_limit: "900"
+ state: "{{ state_present }}"
+
+ - name: Modify user quota for a quota tree by quota tree id -- Idempotency
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ tree_quota_id: "{{ tree_quota_id }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ soft_limit: "800"
+ hard_limit: "900"
+ state: "{{ state_present }}"
+
+ - name: Delete user quota for a quota tree by quota tree id
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ tree_quota_id: "{{ tree_quota_id }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete user quota for a quota tree by path
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ path: "{{ path }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ state: "{{ state_absent }}"
+
+ - name: Delete user quota for a quota tree by path -- Idempotency
+ dellemc.unity.user_quota:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ filesystem_name: "{{ filesystem_name }}"
+ nas_server_name: "{{ nas_server_name }}"
+ path: "{{ path }}"
+ user_type: "Unix"
+ user_name: "{{ user_name }}"
+ state: "{{ state_absent }}"
diff --git a/ansible_collections/dellemc/unity/playbooks/modules/volume.yml b/ansible_collections/dellemc/unity/playbooks/modules/volume.yml
new file mode 100644
index 000000000..45f62c01f
--- /dev/null
+++ b/ansible_collections/dellemc/unity/playbooks/modules/volume.yml
@@ -0,0 +1,241 @@
+---
+- name: Volume Operations
+ hosts: localhost
+ connection: local
+ vars:
+ unispherehost: '10.*.*.*'
+ validate_certs: false
+ username: 'user'
+ password: '**'
+ vol_name: "Ansible_vol1_test"
+ new_vol_name: "New_Ansible_vol1_test"
+ size: 2
+ pool_name: "Ansible_Unity_SP_2"
+ cap_gb: "GB"
+ description: "Ansible demo"
+ snap_schedule: "Ansible_vol_snap_schedule1"
+ io_limit_policy: "Ansible_IO_limit1"
+ tiering_policy: "LOWEST"
+ is_thin: true
+ compression: true
+ sp: "SPA"
+ host_name: "10.*.*.*"
+ host_id: "Host_929"
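+ # vol_hosts lists multiple hosts to map in a single task; each entry may use host_name
+ # or host_id and carries its own hlu.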
+ vol_hosts:
+ - host_name: "10.*.*.*"
+ hlu: 1
+ - host_id: "Host_929"
+ hlu: 3
+ hlu: 2
+ state_present: "present"
+ state_absent: "absent"
+ state_mapped: "mapped"
+ state_unmapped: "unmapped"
+
+ tasks:
+ - name: Create Volume
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ description: "{{ description }}"
+ pool_name: "{{ pool_name }}"
+ size: "{{ size }}"
+ cap_unit: "{{ cap_gb }}"
+ tiering_policy: "{{ tiering_policy }}"
+ is_thin: true
+ compression: true
+ advanced_dedup: true
+ state: "{{ state_present }}"
+ register: vol_result
+
+ - name: Create Volume - Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ description: "{{ description }}"
+ pool_name: "{{ pool_name }}"
+ size: "{{ size }}"
+ cap_unit: "{{ cap_gb }}"
+ tiering_policy: "{{ tiering_policy }}"
+ is_thin: true
+ compression: true
+ advanced_dedup: true
+ state: "{{ state_present }}"
+
+ - name: Set vol_id
+ ansible.builtin.set_fact:
+ vol_id: "{{ vol_result.volume_details.id }}"
+
+ - name: Expand Volume by volume id
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ size: 5
+ cap_unit: "{{ cap_gb }}"
+ state: "{{ state_present }}"
+
+ - name: Modify Volume attributes
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ tiering_policy: "AUTOTIER"
+ snap_schedule: "{{ snap_schedule }}"
+ io_limit_policy: "{{ io_limit_policy }}"
+ is_thin: true
+ compression: true
+ advanced_dedup: true
+ state: "{{ state_present }}"
+
+ - name: Modify Volume attributes - Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ tiering_policy: "AUTOTIER"
+ snap_schedule: "{{ snap_schedule }}"
+ io_limit_policy: "{{ io_limit_policy }}"
+ is_thin: true
+ compression: true
+ advanced_dedup: true
+ state: "{{ state_present }}"
+
+ - name: Remove snap_schedule from a Volume
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_name: "{{ vol_name }}"
+ snap_schedule: ""
+ state: "{{ state_present }}"
+
+ - name: Map Host by host_name to Volume
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ host_name: "{{ host_name }}"
+ hlu: "{{ hlu }}"
+ mapping_state: "{{ state_mapped }}"
+ state: "{{ state_present }}"
+
+ - name: Map Host by host_name to Volume- Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ host_name: "{{ host_name }}"
+ hlu: 2
+ mapping_state: "{{ state_mapped }}"
+ state: "{{ state_present }}"
+
+ - name: Unmap Host by host_name from Volume
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ host_name: "{{ host_name }}"
+ mapping_state: "{{ state_unmapped }}"
+ state: "{{ state_present }}"
+
+ - name: Unmap Host by host_name from Volume -Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ host_name: "{{ host_name }}"
+ mapping_state: "{{ state_unmapped }}"
+ state: "{{ state_present }}"
+
+ - name: Map Multiple Hosts to a Volume
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ hosts: "{{ vol_hosts }}"
+ mapping_state: "{{ state_mapped }}"
+ state: "{{ state_present }}"
+
+ - name: Map Multiple Hosts to a Volume - Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ hosts: "{{ vol_hosts }}"
+ mapping_state: "{{ state_mapped }}"
+ state: "{{ state_present }}"
+
+ - name: Unmap Hosts from Volume
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ hosts: "{{ vol_hosts }}"
+ mapping_state: "{{ state_unmapped }}"
+ state: "{{ state_present }}"
+
+ - name: Unmap Hosts from Volume - Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ hosts: "{{ vol_hosts }}"
+ mapping_state: "{{ state_unmapped }}"
+ state: "{{ state_present }}"
+
+ - name: Get details of volume using id
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ state: "present"
+
+ - name: Delete a volume using id
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ state: "absent"
+
+ - name: Delete a volume using id -Idempotency
+ dellemc.unity.volume:
+ unispherehost: "{{ unispherehost }}"
+ username: "{{ username }}"
+ password: "{{ password }}"
+ validate_certs: "{{ validate_certs }}"
+ vol_id: "{{ vol_id }}"
+ state: "absent"
diff --git a/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py b/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py
index 1ebc7f40f..0df468567 100644
--- a/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py
+++ b/ansible_collections/dellemc/unity/plugins/doc_fragments/unity.py
@@ -44,7 +44,7 @@ class ModuleDocFragment(object):
default: 443
requirements:
- A Dell Unity Storage device version 5.1 or later.
- - Ansible-core 2.12 or later.
+ - Ansible-core 2.13 or later.
- Python 3.9, 3.10 or 3.11.
- Storops Python SDK 1.2.11.
notes:
diff --git a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py
index c44b2bcee..e8fa0374c 100644
--- a/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py
+++ b/ansible_collections/dellemc/unity/plugins/module_utils/storage/dell/utils.py
@@ -25,44 +25,44 @@ except ImportError:
try:
from storops import UnitySystem
- from storops.unity.client import UnityClient
+ from storops.unity.client import UnityClient # noqa # pylint: disable=unused-import
from storops.unity.resource import host, cg, snap_schedule, snap, \
- cifs_share, nas_server
- from storops.unity.resource.lun import UnityLun
- from storops.unity.resource.pool import UnityPool, UnityPoolList, RaidGroupParameter
+ cifs_share, nas_server # noqa # pylint: disable=unused-import
+ from storops.unity.resource.lun import UnityLun # noqa # pylint: disable=unused-import
+ from storops.unity.resource.pool import UnityPool, UnityPoolList, RaidGroupParameter # noqa # pylint: disable=unused-import
from storops.unity.resource.filesystem import UnityFileSystem, \
- UnityFileSystemList
- from storops.unity.resource.nas_server import UnityNasServer
+ UnityFileSystemList # noqa # pylint: disable=unused-import
+ from storops.unity.resource.nas_server import UnityNasServer # noqa # pylint: disable=unused-import
from storops.unity.resource.nfs_share import UnityNfsShare, \
- UnityNfsShareList
+ UnityNfsShareList # noqa # pylint: disable=unused-import
from storops.unity.resource.snap_schedule import UnitySnapScheduleList, \
- UnitySnapSchedule
- from storops.unity.resource.replication_session import UnityReplicationSession
+ UnitySnapSchedule # noqa # pylint: disable=unused-import
+ from storops.unity.resource.replication_session import UnityReplicationSession # noqa # pylint: disable=unused-import
from storops.unity.enums import HostInitiatorTypeEnum, \
- TieringPolicyEnum, ScheduleTypeEnum, DayOfWeekEnum, NodeEnum, \
- HostLUNAccessEnum, HostTypeEnum, AccessPolicyEnum, \
- FilesystemTypeEnum, FSSupportedProtocolEnum, FSFormatEnum, \
- NFSTypeEnum, NFSShareDefaultAccessEnum, NFSShareSecurityEnum, \
- FilesystemSnapAccessTypeEnum, FSLockingPolicyEnum, \
- CifsShareOfflineAvailabilityEnum, NasServerUnixDirectoryServiceEnum, \
- KdcTypeEnum, NodeEnum, FileInterfaceRoleEnum
+ TieringPolicyEnum, ScheduleTypeEnum, DayOfWeekEnum, NodeEnum # noqa # pylint: disable=unused-import
+ from storops.unity.enums import HostLUNAccessEnum, HostTypeEnum, AccessPolicyEnum, \
+ FilesystemTypeEnum, FSSupportedProtocolEnum, FSFormatEnum # noqa # pylint: disable=unused-import
+ from storops.unity.enums import NFSTypeEnum, NFSShareDefaultAccessEnum, NFSShareSecurityEnum, \
+ FilesystemSnapAccessTypeEnum, FSLockingPolicyEnum # noqa # pylint: disable=unused-import
+ from storops.unity.enums import CifsShareOfflineAvailabilityEnum, NasServerUnixDirectoryServiceEnum, \
+ KdcTypeEnum, NodeEnum, FileInterfaceRoleEnum, ReplicationOpStatusEnum # noqa # pylint: disable=unused-import
from storops.exception import UnityResourceNotFoundError, \
- StoropsConnectTimeoutError, UnityNfsShareNameExistedError
- from storops.connection.exceptions import HttpError, HTTPClientError
+ StoropsConnectTimeoutError, UnityNfsShareNameExistedError # noqa # pylint: disable=unused-import
+ from storops.connection.exceptions import HttpError, HTTPClientError # noqa # pylint: disable=unused-import
from storops.unity.resource.user_quota import UnityUserQuota, \
- UnityUserQuotaList
+ UnityUserQuotaList # noqa # pylint: disable=unused-import
from storops.unity.resource.tree_quota import UnityTreeQuota, \
- UnityTreeQuotaList
+ UnityTreeQuotaList # noqa # pylint: disable=unused-import
from storops.unity.resource.quota_config import UnityQuotaConfig, \
- UnityQuotaConfigList
- from storops.unity.resource.storage_resource import UnityStorageResource
+ UnityQuotaConfigList # noqa # pylint: disable=unused-import
+ from storops.unity.resource.storage_resource import UnityStorageResource # noqa # pylint: disable=unused-import
from storops.unity.enums import QuotaPolicyEnum, RaidTypeEnum, \
- RaidStripeWidthEnum, StoragePoolTypeEnum
+ RaidStripeWidthEnum, StoragePoolTypeEnum # noqa # pylint: disable=unused-import
from storops.unity.resource.disk import UnityDisk, \
- UnityDiskList, UnityDiskGroup, UnityDiskGroupList
- from storops.unity.resource.cifs_server import UnityCifsServer
- from storops.unity.resource.nfs_server import UnityNfsServer
- from storops.unity.resource.interface import UnityFileInterface
+ UnityDiskList, UnityDiskGroup, UnityDiskGroupList # noqa # pylint: disable=unused-import
+ from storops.unity.resource.cifs_server import UnityCifsServer # noqa # pylint: disable=unused-import
+ from storops.unity.resource.nfs_server import UnityNfsServer # noqa # pylint: disable=unused-import
+ from storops.unity.resource.interface import UnityFileInterface # noqa # pylint: disable=unused-import
HAS_UNITY_SDK, STOROPS_IMP_ERR = True, None
except ImportError:
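
Note: the restructured imports above keep every storops symbol behind the single try/except in utils.py and tag the re-exports with "# noqa # pylint: disable=unused-import", so the collection's modules import SDK names from utils instead of from storops directly. A minimal, self-contained sketch of that optional-import guard follows; the fail-fast helper is only an illustration of the behaviour modules rely on via utils.ensure_required_libs, not the collection's actual implementation.

try:
    from storops import UnitySystem  # noqa # pylint: disable=unused-import
    HAS_UNITY_SDK, STOROPS_IMP_ERR = True, None
except ImportError as import_error:
    # Remember why the import failed so callers can surface a readable error
    # instead of a traceback at module load time.
    HAS_UNITY_SDK, STOROPS_IMP_ERR = False, str(import_error)

def ensure_required_libs(module):
    """Illustrative fail-fast check: abort the Ansible module when storops is missing."""
    if not HAS_UNITY_SDK:
        module.fail_json(msg="storops SDK could not be imported: %s" % STOROPS_IMP_ERR)
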
diff --git a/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
index d40c4f11d..0225eb381 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/cifsserver.py
@@ -124,7 +124,7 @@ EXAMPLES = r'''
password: "{{password}}"
validate_certs: "{{validate_certs}}"
cifs_server_id: "cifs_37"
- unjoin_cifs_server_account: True
+ unjoin_cifs_server_account: true
domain_username: "domain_username"
domain_password: "domain_password"
state: "absent"
@@ -277,7 +277,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import
LOG = utils.get_logger('cifsserver')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class CIFSServer(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py b/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
index 14e4de506..e0d6a6c06 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/consistencygroup.py
@@ -321,7 +321,7 @@ EXAMPLES = r"""
replication_type: "remote"
remote_system:
remote_system_host: '10.1.2.3'
- remote_system_verifycert: False
+ remote_system_verifycert: false
remote_system_username: 'username'
remote_system_password: 'password'
destination_pool_name: "pool_test_1"
@@ -494,7 +494,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('consistencygroup',
log_devel=logging.INFO)
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class ConsistencyGroup(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
index b10f85386..95cffeec6 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem.py
@@ -330,7 +330,7 @@ EXAMPLES = r"""
grace_period: 8
grace_period_unit: "days"
default_soft_limit: 10
- is_user_quota_enabled: False
+ is_user_quota_enabled: false
state: "present"
- name: Expand FileSystem size
@@ -364,9 +364,9 @@ EXAMPLES = r"""
filesystem_name: "ansible_test_fs"
nas_server_name: "lglap761"
smb_properties:
- is_smb_op_locks_enabled: True
+ is_smb_op_locks_enabled: true
smb_notify_on_change_dir_depth: 5
- is_smb_notify_on_access_enabled: True
+ is_smb_notify_on_access_enabled: true
state: "present"
- name: Modify FileSystem Snap Schedule
@@ -411,7 +411,7 @@ EXAMPLES = r"""
rpo: 60
remote_system:
remote_system_host: '0.1.2.3'
- remote_system_verifycert: False
+ remote_system_verifycert: false
remote_system_username: 'username'
remote_system_password: 'password'
destination_pool_name: "pool_test_1"
@@ -689,7 +689,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('filesystem')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class Filesystem(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
index 35e536a47..a82fbe89b 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/filesystem_snapshot.py
@@ -122,7 +122,7 @@ EXAMPLES = r'''
filesystem_name: "ansible_test_FS"
nas_server_name: "lglad069"
description: "Created using playbook"
- auto_delete: True
+ auto_delete: true
fs_access_type: "Protocol"
state: "present"
@@ -166,7 +166,7 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
snapshot_name: "ansible_test_FS_snap"
description: "Description updated"
- auto_delete: False
+ auto_delete: false
expiry_time: "04/15/2021 5:30"
state: "present"
@@ -304,7 +304,7 @@ from datetime import datetime
LOG = utils.get_logger('filesystem_snapshot')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class FilesystemSnapshot(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/host.py b/ansible_collections/dellemc/unity/plugins/modules/host.py
index 21a5fbae1..fcc13dd9a 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/host.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/host.py
@@ -347,7 +347,7 @@ import ipaddress
LOG = utils.get_logger('host')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class Host(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/info.py b/ansible_collections/dellemc/unity/plugins/modules/info.py
index e89d86335..641074286 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/info.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/info.py
@@ -38,6 +38,7 @@ description:
Get list of CIFS Servers in Unity array.
Get list of Ethernet ports in Unity array.
Get list of File interfaces used in Unity array.
+ Get list of Replication sessions in Unity array.
extends_documentation_fragment:
- dellemc.unity.unity
@@ -54,7 +55,7 @@ options:
for which information is required.
choices: [host, fc_initiator, iscsi_initiator, cg, storage_pool, vol,
snapshot_schedule, nas_server, file_system, snapshot, nfs_export,
- smb_share, user_quota, tree_quota, disk_group, nfs_server, cifs_server, ethernet_port, file_interface]
+ smb_share, user_quota, tree_quota, disk_group, nfs_server, cifs_server, ethernet_port, file_interface, replication_session]
type: list
elements: str
@@ -89,6 +90,7 @@ EXAMPLES = r'''
- cifs_server
- ethernet_port
- file_interface
+ - replication_session
- name: Get information of Unity array
dellemc.unity.info:
@@ -267,6 +269,15 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
gather_subset:
- file_interface
+
+ - name: Get list of replication sessions on Unity array
+ dellemc.unity.info:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ gather_subset:
+ - replication_session
'''
RETURN = r'''
@@ -1213,9 +1224,77 @@ File_interfaces:
"name": "3_APMXXXXXXXXXX"
}
]
+Replication_sessions:
+ description: Details of the Replication sessions.
+ returned: When Replication sessions exist.
+ type: list
+ contains:
+ id:
+ description: The ID of the Replication session.
+ type: str
+ name:
+ description: The name of the Replication session.
+ type: str
+ sample: [
+ {
+ "current_transfer_est_remain_time": 0,
+ "daily_snap_replication_policy": null,
+ "dst_resource_id": "nas_8",
+ "dst_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253398547,
+ "id": "APM00213404195:if_181"
+ }
+ },
+ "dst_spb_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253424144,
+ "id": "APM00213404195:if_180"
+ }
+ },
+ "dst_status": "ReplicationSessionStatusEnum.OK",
+ "existed": true,
+ "hash": 8771259012271,
+ "health": {
+ "UnityHealth": {
+ "hash": 8771253424168
+ }
+ },
+ "hourly_snap_replication_policy": null,
+ "id": "103079215114_APM00213404195_0000_103079215274_APM00213404194_0000",
+ "last_sync_time": "2023-04-18 10:35:25+00:00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.DESTINATION",
+ "max_time_out_of_sync": 0,
+ "members": null,
+ "name": "rep_sess_nas",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK",
+ "remote_system": {
+ "UnityRemoteSystem": {
+ "hash": 8771253380142
+ }
+ },
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.NASSERVER",
+ "src_resource_id": "nas_213",
+ "src_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253475010,
+ "id": "APM00213404194:if_195"
+ }
+ },
+ "src_spb_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253374169,
+ "id": "APM00213404194:if_194"
+ }
+ },
+ "src_status": "ReplicationSessionStatusEnum.OK",
+ "status": "ReplicationOpStatusEnum.ACTIVE",
+ "sync_progress": 0,
+ "sync_state": "ReplicationSessionSyncStateEnum.IN_SYNC"
+ },
+ ]
'''
-from re import sub
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
@@ -1223,7 +1302,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('info')
SUCCESSFULL_LISTED_MSG = 'Successfully listed.'
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class Info(object):
@@ -1549,6 +1628,20 @@ class Info(object):
LOG.error(msg)
self.module.fail_json(msg=msg)
+ def get_replication_session_list(self):
+ """Get the list of replication sessions on a given Unity storage system"""
+
+ try:
+ LOG.info("Getting replication sessions list")
+ replication_sessions = self.unity.get_replication_session()
+ return result_list(replication_sessions)
+
+ except Exception as e:
+ msg = 'Get replication session list from unity array failed with' \
+ ' error %s' % (str(e))
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
def perform_module_operation(self):
""" Perform different actions on Info based on user parameter
chosen in playbook """
@@ -1575,6 +1668,7 @@ class Info(object):
cifs_server = []
ethernet_port = []
file_interface = []
+ replication_session = []
subset = self.module.params['gather_subset']
if subset is not None:
@@ -1616,6 +1710,8 @@ class Info(object):
ethernet_port = self.get_ethernet_port_list()
if 'file_interface' in subset:
file_interface = self.get_file_interface_list()
+ if 'replication_session' in subset:
+ replication_session = self.get_replication_session_list()
self.module.exit_json(
Array_Details=array_details,
@@ -1637,7 +1733,8 @@ class Info(object):
NFS_Servers=nfs_server,
CIFS_Servers=cifs_server,
Ethernet_ports=ethernet_port,
- File_interfaces=file_interface
+ File_interfaces=file_interface,
+ Replication_sessions=replication_session
)
@@ -1770,7 +1867,7 @@ def get_info_parameters():
'file_system', 'snapshot',
'nfs_export', 'smb_share',
'user_quota', 'tree_quota', 'disk_group', 'nfs_server', 'cifs_server',
- 'ethernet_port', 'file_interface']))
+ 'ethernet_port', 'file_interface', 'replication_session']))
def main():
diff --git a/ansible_collections/dellemc/unity/plugins/modules/interface.py b/ansible_collections/dellemc/unity/plugins/modules/interface.py
index 95ddfd26a..2523f940e 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/interface.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/interface.py
@@ -227,7 +227,7 @@ from ipaddress import ip_network
LOG = utils.get_logger('interface')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class Interface(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nasserver.py b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
index 713125cc2..925cc932e 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/nasserver.py
@@ -225,15 +225,15 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
nas_server_name: "{{nas_server_name}}"
nas_server_new_name: "updated_sample_nas_server"
- is_replication_destination: False
- is_backup_only: False
- is_multiprotocol_enabled: True
- allow_unmapped_user: True
+ is_replication_destination: false
+ is_backup_only: false
+ is_multiprotocol_enabled: true
+ allow_unmapped_user: true
default_unix_user: "default_unix_sample_user"
default_windows_user: "default_windows_sample_user"
- enable_windows_to_unix_username_mapping: True
+ enable_windows_to_unix_username_mapping: true
current_unix_directory_service: "LDAP"
- is_packet_reflect_enabled: True
+ is_packet_reflect_enabled: true
state: "present"
- name: Enable replication for NAS Server on Local System
@@ -243,7 +243,7 @@ EXAMPLES = r'''
password: "{{password}}"
validate_certs: "{{validate_certs}}"
nas_server_id: "nas_10"
- replication_reuse_resource: False
+ replication_reuse_resource: false
replication_params:
replication_name: "test_replication"
destination_nas_server_name: "destination_nas"
@@ -252,7 +252,7 @@ EXAMPLES = r'''
replication_type: "local"
destination_pool_name: "Pool_Ansible_Neo_DND"
destination_sp: "SPA"
- is_backup: True
+ is_backup: true
replication_state: "enable"
state: "present"
@@ -263,7 +263,7 @@ EXAMPLES = r'''
password: "{{password}}"
validate_certs: "{{validate_certs}}"
nas_server_name: "dummy_nas"
- replication_reuse_resource: False
+ replication_reuse_resource: false
replication_params:
replication_name: "test_replication"
destination_nas_server_name: "destination_nas"
@@ -272,12 +272,12 @@ EXAMPLES = r'''
replication_type: "remote"
remote_system:
remote_system_host: '10.10.10.10'
- remote_system_verifycert: False
+ remote_system_verifycert: false
remote_system_username: 'test1'
remote_system_password: 'test1!'
destination_pool_name: "fastVP_pool"
destination_sp: "SPA"
- is_backup: True
+ is_backup: true
replication_state: "enable"
state: "present"
@@ -288,7 +288,7 @@ EXAMPLES = r'''
password: "{{password}}"
validate_certs: "{{validate_certs}}"
nas_server_name: "dummy_nas"
- replication_reuse_resource: True
+ replication_reuse_resource: true
replication_params:
destination_nas_server_name: "destination_nas"
replication_mode: "asynchronous"
@@ -297,7 +297,7 @@ EXAMPLES = r'''
replication_name: "test_replication"
remote_system:
remote_system_host: '10.10.10.10'
- remote_system_verifycert: False
+ remote_system_verifycert: false
remote_system_username: 'test1'
remote_system_password: 'test1!'
destination_pool_name: "fastVP_pool"
@@ -347,7 +347,7 @@ changed:
description: Whether or not the resource has changed.
returned: always
type: bool
- sample: True
+ sample: true
nas_server_details:
description: The NAS server details.
type: dict
@@ -388,7 +388,7 @@ nas_server_details:
type: bool
is_replication_destination:
description: If the NAS server is a replication destination
- then True.
+ then true.
type: bool
is_windows_to_unix_username_mapping_enabled:
description: Indicates whether a Unix to/from Windows user name
@@ -482,7 +482,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
LOG = utils.get_logger('nasserver')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class NASServer(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfs.py b/ansible_collections/dellemc/unity/plugins/modules/nfs.py
index e6223066b..473e40b2a 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/nfs.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/nfs.py
@@ -568,7 +568,7 @@ nfs_share_details:
'root_access_hosts': None,
'snap': None,
'type': 'NFSTypeEnum.NFS_SHARE',
- 'existed': True,
+ 'existed': true,
'nas_server': {
'UnityNasServer': {
'id': 'nas_id_1',
@@ -607,7 +607,7 @@ HOST_DICT = dict(type='list', required=False, elements='dict',
HOST_STATE_LIST = ['present-in-export', 'absent-in-export']
STATE_LIST = ['present', 'absent']
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class NFS(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
index e492e3af0..30d2c787f 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/nfsserver.py
@@ -92,12 +92,12 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
nas_server_name: "dummy_nas"
host_name: "dummy_nas23"
- is_secure_enabled: True
+ is_secure_enabled: true
kerberos_domain_controller_type: "WINDOWS"
kerberos_domain_controller_username: "administrator"
kerberos_domain_controller_password: "Password123!"
- is_extended_credentials_enabled: True
- nfs_v4_enabled: True
+ is_extended_credentials_enabled: true
+ nfs_v4_enabled: true
state: "present"
- name: Create NFS server with kdctype as Unix
@@ -108,10 +108,10 @@ EXAMPLES = r'''
validate_certs: "{{validate_certs}}"
nas_server_name: "dummy_nas"
host_name: "dummy_nas23"
- is_secure_enabled: True
+ is_secure_enabled: true
kerberos_domain_controller_type: "UNIX"
- is_extended_credentials_enabled: True
- nfs_v4_enabled: True
+ is_extended_credentials_enabled: true
+ nfs_v4_enabled: true
state: "present"
- name: Get NFS server details
@@ -132,7 +132,7 @@ EXAMPLES = r'''
nas_server_name: "dummy_nas"
kerberos_domain_controller_username: "administrator"
kerberos_domain_controller_password: "Password123!"
- unjoin_server_account: False
+ unjoin_server_account: false
state: "absent"
'''
@@ -209,7 +209,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('nfsserver')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class NFSServer(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/replication_session.py b/ansible_collections/dellemc/unity/plugins/modules/replication_session.py
new file mode 100644
index 000000000..20907d50d
--- /dev/null
+++ b/ansible_collections/dellemc/unity/plugins/modules/replication_session.py
@@ -0,0 +1,551 @@
+#!/usr/bin/python
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Ansible module for managing replication session on Unity"""
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = r"""
+
+module: replication_session
+version_added: '1.7.0'
+short_description: Manage replication session on Unity storage system
+description:
+- Managing a replication session on a Unity storage system includes getting details of,
+ pausing, resuming, syncing, failing over, failing back, and deleting the replication session.
+
+extends_documentation_fragment:
+ - dellemc.unity.unity
+
+author:
+- Jennifer John (@Jennifer-John) <ansible.team@dell.com>
+
+options:
+ session_id:
+ description:
+ - ID of replication session.
+ type: str
+ session_name:
+ description:
+ - Name of replication session.
+ type: str
+ pause:
+ description:
+ - Pause or resume replication session.
+ type: bool
+ sync:
+ description:
+ - Sync a replication session.
+ type: bool
+ failover_with_sync:
+ description:
+ - If C(true), sync the source and destination resources before failing over the asynchronous
+ replication session, or keep them in sync after failing over the synchronous
+ replication session.
+ - If C(false), fail over the replication session.
+ type: bool
+ failback:
+ description:
+ - Failback a replication session.
+ type: bool
+ force_full_copy:
+ description:
+ - Indicates whether to sync back all data from the destination SP to the source
+ SP during the failback session. Needed during a resume operation when the replication
+ session goes out of sync due to a fault.
+ type: bool
+ force:
+ description:
+ - Skip pre-checks on file system(s) replication sessions of a NAS server when a
+ replication failover is issued from the source NAS server.
+ type: bool
+ state:
+ description:
+ - State variable to determine whether replication session will exist or not.
+ choices: ['absent', 'present']
+ default: present
+ type: str
+
+notes:
+ - The I(check_mode) is supported.
+"""
+
+EXAMPLES = r"""
+- name: Get replication session details
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+
+- name: Get replication session details based on session_id
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_id: "103079215114_APM00213404195_0000_103079215274_APM00213404194_0000"
+
+- name: Pause a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ pause: true
+
+- name: Resume a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ pause: false
+ force_full_copy: true
+
+- name: Sync a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ sync: true
+
+- name: Failover with sync a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ failover_with_sync: true
+ force: true
+
+- name: Failover a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ failover_with_sync: false
+
+- name: Failback a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ failback: true
+ force_full_copy: true
+
+- name: Delete a replication session
+ dellemc.unity.replication_session:
+ unispherehost: "{{unispherehost}}"
+ username: "{{username}}"
+ password: "{{password}}"
+ validate_certs: "{{validate_certs}}"
+ session_name: "fs_replication"
+ state: "absent"
+"""
+
+RETURN = r'''
+
+changed:
+ description: Whether or not the resource has changed.
+ returned: always
+ type: bool
+ sample: true
+
+replication_session_details:
+ description: Details of the replication session.
+ returned: When replication session exists.
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the replicationSession instance.
+ type: str
+ name:
+ description: User-specified replication session name.
+ type: str
+ replicationResourceType:
+ description: Replication resource type of replication session endpoints.
+ type: str
+ status:
+ description: Replication status of the replication session.
+ type: str
+ remoteSystem:
+ description: Specifies the remote system to use as the destination for the replication session.
+ type: dict
+ contains:
+ UnityRemoteSystem:
+ description: Information about remote storage system.
+ type: dict
+ contains:
+ id:
+ description: Unique identifier of the remote system instance.
+ type: str
+ serialNumber:
+ description: Serial number of the remote system.
+ type: str
+ maxTimeOutOfSync:
+ description: Maximum time to wait before the system syncs the source and destination resources.
+ type: int
+ srcStatus:
+ description: Status of the source end of the session.
+ type: str
+ networkStatus:
+ description: Status of the network connection used by the replication session.
+ type: str
+ dstStatus:
+ description: Status of the destination end of the replication session.
+ type: str
+ lastSyncTime:
+ description: Date and time of the last replication synchronization.
+ type: str
+ syncState:
+ description: Synchronization state between source and destination resource of the replication session.
+ type: str
+ syncProgress:
+ description: Synchronization completion percentage between source and destination resources of the replication session.
+ type: int
+ dstResourceId:
+ description: Identifier of the destination resource.
+ type: str
+ currentTransferEstRemainTime:
+ description: Estimated time left for the replication synchronization to complete.
+ type: int
+ sample: {
+ "current_transfer_est_remain_time": 0,
+ "daily_snap_replication_policy": null,
+ "dst_resource_id": "nas_8",
+ "dst_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253398547,
+ "id": "APM00213404195:if_181"
+ }
+ },
+ "dst_spb_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253424144,
+ "id": "APM00213404195:if_180"
+ }
+ },
+ "dst_status": "ReplicationSessionStatusEnum.OK",
+ "existed": true,
+ "hash": 8771259012271,
+ "health": {
+ "UnityHealth": {
+ "hash": 8771253424168
+ }
+ },
+ "hourly_snap_replication_policy": null,
+ "id": "103079215114_APM00213404195_0000_103079215274_APM00213404194_0000",
+ "last_sync_time": "2023-04-18 10:35:25+00:00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.DESTINATION",
+ "max_time_out_of_sync": 0,
+ "members": null,
+ "name": "rep_sess_nas",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK",
+ "remote_system": {
+ "UnityRemoteSystem": {
+ "hash": 8771253380142
+ }
+ },
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.NASSERVER",
+ "src_resource_id": "nas_213",
+ "src_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253475010,
+ "id": "APM00213404194:if_195"
+ }
+ },
+ "src_spb_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253374169,
+ "id": "APM00213404194:if_194"
+ }
+ },
+ "src_status": "ReplicationSessionStatusEnum.OK",
+ "status": "ReplicationOpStatusEnum.ACTIVE",
+ "sync_progress": 0,
+ "sync_state": "ReplicationSessionSyncStateEnum.IN_SYNC"
+ }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
+ import utils
+
+LOG = utils.get_logger('replication_session')
+
+application_type = "Ansible/1.7.0"
+
+
+class ReplicationSession(object):
+
+ """Class with replication session operations"""
+
+ def __init__(self):
+ """Define all parameters required by this module"""
+ self.module_params = utils.get_unity_management_host_parameters()
+ self.module_params.update(get_replication_session_parameters())
+
+ mutually_exclusive = [['session_id', 'session_name']]
+
+ required_one_of = [['session_id', 'session_name']]
+
+ # initialize the Ansible module
+ self.module = AnsibleModule(
+ argument_spec=self.module_params,
+ supports_check_mode=True,
+ mutually_exclusive=mutually_exclusive,
+ required_one_of=required_one_of)
+ utils.ensure_required_libs(self.module)
+ self.result = dict(
+ changed=False,
+ replication_session_details={}
+ )
+
+ self.unity_conn = utils.get_unity_unisphere_connection(
+ self.module.params, application_type)
+ LOG.info('Check Mode Flag %s', self.module.check_mode)
+
+ def get_replication_session(self, id=None, name=None):
+ """Get the details of a replication session.
+ :param id: The id of the replication session
+ :param name: The name of the replication session
+ :return: instance of the replication session if it exists.
+ """
+
+ id_or_name = id if id else name
+ errormsg = f"Retrieving details of replication session {id_or_name} failed with error"
+
+ try:
+ obj_replication_session = self.unity_conn.get_replication_session(name=name, _id=id)
+
+ LOG.info("Successfully retrieved the replication session object %s ", obj_replication_session)
+ if obj_replication_session.existed:
+ return obj_replication_session
+ except utils.HttpError as e:
+ if e.http_status == 401:
+ self.module.fail_json(msg=f"Incorrect username or password {str(e)}")
+ else:
+ msg = f"{errormsg} {str(e)}"
+ self.module.fail_json(msg=msg)
+ except utils.UnityResourceNotFoundError as e:
+ msg = f"{errormsg} {str(e)}"
+ LOG.error(msg)
+ return None
+ except Exception as e:
+ msg = f"{errormsg} {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def pause(self, session_obj):
+ """Pause the replication session.
+ :param session_obj: Replication session object
+ :return: True if pause is successful.
+ """
+ try:
+ LOG.info("Pause replication session %s", session_obj.name)
+ if session_obj.status.name != utils.ReplicationOpStatusEnum.PAUSED.name:
+ if not self.module.check_mode:
+ session_obj.pause()
+ return True
+ except Exception as e:
+ msg = f"Pause replication session {session_obj.name} failed with error {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def resume(self, session_obj, force_full_copy):
+ """Resume the replication session.
+ :param session_obj: Replication session object
+ :param force_full_copy: needed when replication session goes out of sync due to a fault.
+ :return: True if resume is successful.
+ """
+ try:
+ LOG.info("Resume replication session %s", session_obj.name)
+ if session_obj.status.name in (utils.ReplicationOpStatusEnum.PAUSED.name,
+ utils.ReplicationOpStatusEnum.FAILED_OVER.name,
+ utils.ReplicationOpStatusEnum.FAILED_OVER_WITH_SYNC.name):
+ if not self.module.check_mode:
+ session_obj.resume(force_full_copy=force_full_copy)
+ return True
+ except Exception as e:
+ msg = f"Resume replication session {session_obj.name} failed with error {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def sync(self, session_obj):
+ """Sync the replication session.
+ :param session_obj: Replication session object
+ :return: True if sync is successful.
+ """
+ try:
+ LOG.info("Sync replication session %s", session_obj.name)
+ if not self.module.check_mode:
+ session_obj.sync()
+ return True
+ except Exception as e:
+ msg = f"Sync replication session {session_obj.name} failed with error {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def failover(self, session_obj, sync_failover, force):
+ """Failover the replication session.
+ :param session_obj: Replication session object
+ :param sync_failover: To sync the source and destination resources
+ :param force: Skip pre-checks on file system(s) replication sessions of a NAS server
+ :return: True if failover is successful.
+ """
+ try:
+ LOG.info("Failover replication session %s", session_obj.name)
+ if (sync_failover and session_obj.status.name != utils.ReplicationOpStatusEnum.FAILED_OVER_WITH_SYNC.name) or \
+ (not sync_failover and session_obj.status.name != utils.ReplicationOpStatusEnum.FAILED_OVER.name):
+ if not self.module.check_mode:
+ session_obj.failover(sync=sync_failover, force=force)
+ return True
+ except Exception as e:
+ msg = f"Failover replication session {session_obj.name} failed with error {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def failback(self, session_obj, force_full_copy):
+ """Failback the replication session.
+ :param session_obj: Replication session object
+ :param force_full_copy: needed when replication session goes out of sync due to a fault.
+ :return: True if failback is successful.
+ """
+ try:
+ LOG.info("Failback replication session %s", session_obj.name)
+ if session_obj.status.name in (utils.ReplicationOpStatusEnum.FAILED_OVER.name,
+ utils.ReplicationOpStatusEnum.FAILED_OVER_WITH_SYNC.name,
+ utils.ReplicationOpStatusEnum.PAUSED.name):
+ if not self.module.check_mode:
+ session_obj.failback(force_full_copy=force_full_copy)
+ return True
+ except Exception as e:
+ msg = f"Failback replication session {session_obj.name} failed with error {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+ def delete(self, session_obj):
+ """Delete the replication session.
+ :param session_obj: Replication session object
+ :return: True if delete is successful.
+ """
+ try:
+ LOG.info("Delete replication session %s", session_obj.name)
+ if not self.module.check_mode:
+ session_obj.delete()
+ return True
+ except Exception as e:
+ msg = f"Deleting replication session {session_obj.name} failed with error {str(e)}"
+ LOG.error(msg)
+ self.module.fail_json(msg=msg)
+
+
+def get_replication_session_parameters():
+ """This method provide parameters required for the ansible replication session
+ module on Unity"""
+ return dict(
+ session_id=dict(type='str'), session_name=dict(type='str'),
+ pause=dict(type='bool'), sync=dict(type='bool'),
+ force=dict(type='bool'), failover_with_sync=dict(type='bool'),
+ failback=dict(type='bool'), force_full_copy=dict(type='bool'),
+ state=dict(type='str', choices=['present', 'absent'], default='present')
+ )
+
+
+class ReplicationSessionFailoverHandler():
+ def handle(self, session_object, session_params, replication_session_obj):
+ if replication_session_obj and session_params['state'] == 'present' and session_params['failover_with_sync'] is not None:
+ session_object.result['changed'] = \
+ session_object.failover(replication_session_obj, session_params['failover_with_sync'], session_params['force']) or False
+ if session_object.result['changed']:
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ ReplicationSessionFailbackHandler().handle(session_object, session_params, replication_session_obj)
+
+
+class ReplicationSessionFailbackHandler():
+ def handle(self, session_object, session_params, replication_session_obj):
+ if replication_session_obj and session_params['state'] == 'present' and session_params['failback']:
+ session_object.result['changed'] = \
+ session_object.failback(replication_session_obj, session_params['force_full_copy']) or False
+ if session_object.result['changed']:
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ ReplicationSessionDeleteHandler().handle(session_object, session_params, replication_session_obj)
+
+
+class ReplicationSessionSyncHandler():
+ def handle(self, session_object, session_params, replication_session_obj):
+ if replication_session_obj and session_params['state'] == 'present' and session_params['sync']:
+ session_object.result['changed'] = session_object.sync(replication_session_obj)
+ if session_object.result['changed']:
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ ReplicationSessionFailoverHandler().handle(session_object, session_params, replication_session_obj)
+
+
+class ReplicationSessionDeleteHandler():
+ def handle(self, session_object, session_params, replication_session_obj):
+ if replication_session_obj and session_params['state'] == 'absent':
+ session_object.result['changed'] = session_object.delete(replication_session_obj)
+ if session_object.result['changed']:
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ ReplicationSessionExitHandler().handle(session_object, replication_session_obj)
+
+
+class ReplicationSessionExitHandler():
+ def handle(self, session_object, replication_session_obj):
+ if replication_session_obj:
+ session_object.result['replication_session_details'] = replication_session_obj._get_properties()
+ session_object.module.exit_json(**session_object.result)
+
+
+class ReplicationSessionResumeHandler():
+ def handle(self, session_object, session_params, replication_session_obj):
+ if replication_session_obj and session_params['state'] == 'present' and session_params['pause'] is False:
+ session_object.result['changed'] = \
+ session_object.resume(replication_session_obj, session_params['force_full_copy']) or False
+ if session_object.result['changed']:
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ ReplicationSessionSyncHandler().handle(session_object, session_params, replication_session_obj)
+
+
+class ReplicationSessionPauseHandler():
+ def handle(self, session_object, session_params, replication_session_obj):
+ if replication_session_obj and session_params['state'] == 'present' and session_params['pause']:
+ session_object.result['changed'] = \
+ session_object.pause(replication_session_obj) or False
+ if session_object.result['changed']:
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ ReplicationSessionResumeHandler().handle(session_object, session_params, replication_session_obj)
+
+
+class ReplicationSessionHandler():
+ def handle(self, session_object, session_params):
+ replication_session_obj = session_object.get_replication_session(session_params['session_id'], session_params['session_name'])
+ if session_params['state'] == 'present' and not replication_session_obj:
+ session_object.module.fail_json(msg=f"Replication session {session_params['session_id'] or session_params['session_name']} is invalid.")
+ ReplicationSessionPauseHandler().handle(session_object, session_params, replication_session_obj)
+
+
+def main():
+ """ Create Unity replication session object and perform action on it
+ based on user input from playbook"""
+ obj = ReplicationSession()
+ ReplicationSessionHandler().handle(obj, obj.module.params)
+
+
+if __name__ == '__main__':
+ main()
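
Note: the handler classes in the new module are declared in roughly the reverse of their execution order, which makes the control flow harder to follow in the diff. ReplicationSessionHandler validates the session, then Pause, Resume, Sync, Failover, Failback and Delete each act when their parameter applies, re-fetch the session if something changed, and hand off to the next handler until ExitHandler returns the result. Below is a standalone sketch of that chain-of-responsibility shape with only three illustrative stages; the names and stand-in operations are not the collection's API.

# Condensed, runnable sketch of the handler-chain pattern used by replication_session.py.
class ExitHandler:
    def handle(self, result, params, session):
        result["replication_session_details"] = session or {}
        return result

class DeleteHandler:
    def handle(self, result, params, session):
        if session and params["state"] == "absent":
            result["changed"] = True   # stand-in for session_obj.delete()
            session = None             # a deleted session is re-fetched as missing
        return ExitHandler().handle(result, params, session)

class PauseHandler:
    def handle(self, result, params, session):
        if session and params["state"] == "present" and params.get("pause"):
            if session["status"] != "PAUSED":
                session = dict(session, status="PAUSED")  # stand-in for session_obj.pause()
                result["changed"] = True
        return DeleteHandler().handle(result, params, session)

if __name__ == "__main__":
    start = {"name": "rep_sess_nas", "status": "OK"}
    print(PauseHandler().handle({"changed": False}, {"state": "present", "pause": True}, start))
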
diff --git a/ansible_collections/dellemc/unity/plugins/modules/smbshare.py b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
index 58bc8c709..d8b78a7d9 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/smbshare.py
@@ -150,11 +150,11 @@ EXAMPLES = r'''
nas_server_id: "NAS_11"
path: "/sample_fs"
description: "Sample SMB share created"
- is_abe_enabled: True
- is_branch_cache_enabled: True
+ is_abe_enabled: true
+ is_branch_cache_enabled: true
offline_availability: "DOCUMENTS"
- is_continuous_availability_enabled: True
- is_encryption_enabled: True
+ is_continuous_availability_enabled: true
+ is_encryption_enabled: true
umask: "777"
state: "present"
- name: Modify Attributes of SMB share for a filesystem
@@ -166,11 +166,11 @@ EXAMPLES = r'''
share_name: "sample_smb_share"
nas_server_name: "sample_nas_server"
description: "Sample SMB share attributes updated"
- is_abe_enabled: False
- is_branch_cache_enabled: False
+ is_abe_enabled: false
+ is_branch_cache_enabled: false
offline_availability: "MANUAL"
- is_continuous_availability_enabled: "False"
- is_encryption_enabled: "False"
+ is_continuous_availability_enabled: "false"
+ is_encryption_enabled: "false"
umask: "022"
state: "present"
- name: Create SMB share for a snapshot
@@ -184,10 +184,10 @@ EXAMPLES = r'''
nas_server_id: "NAS_11"
path: "/sample_snapshot"
description: "Sample SMB share created for snapshot"
- is_abe_enabled: True
- is_branch_cache_enabled: True
- is_continuous_availability_enabled: True
- is_encryption_enabled: True
+ is_abe_enabled: true
+ is_branch_cache_enabled: true
+ is_continuous_availability_enabled: true
+ is_encryption_enabled: true
umask: "777"
state: "present"
- name: Modify Attributes of SMB share for a snapshot
@@ -199,11 +199,11 @@ EXAMPLES = r'''
share_name: "sample_snap_smb_share"
snapshot_name: "sample_snapshot"
description: "Sample SMB share attributes updated for snapshot"
- is_abe_enabled: False
- is_branch_cache_enabled: False
+ is_abe_enabled: false
+ is_branch_cache_enabled: false
offline_availability: "MANUAL"
- is_continuous_availability_enabled: "False"
- is_encryption_enabled: "False"
+ is_continuous_availability_enabled: "false"
+ is_encryption_enabled: "false"
umask: "022"
state: "present"
- name: Get details of SMB share
@@ -229,7 +229,7 @@ changed:
description: Whether or not the resource has changed.
returned: always
type: bool
- sample: True
+ sample: true
smb_share_details:
description: The SMB share details.
type: dict
@@ -325,7 +325,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('smbshare')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class SMBShare(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshot.py b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
index c8aba1846..5660e3c5c 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/snapshot.py
@@ -122,7 +122,7 @@ EXAMPLES = r'''
cg_name: "{{cg_name}}"
snapshot_name: "{{cg_snapshot_name}}"
description: "{{description}}"
- auto_delete: False
+ auto_delete: false
state: "present"
- name: Create a Snapshot for a volume with Host attached
@@ -257,7 +257,6 @@ snapshot_details:
}
'''
-import logging
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
@@ -265,7 +264,7 @@ from datetime import datetime
LOG = utils.get_logger('snapshot')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class Snapshot(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
index aba5524cd..1d6e6ec6c 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/snapshotschedule.py
@@ -144,7 +144,7 @@ EXAMPLES = r"""
hours_of_day:
- 8
- 14
- auto_delete: True
+ auto_delete: true
state: "{{state_present}}"
- name: Create snapshot schedule (Rule Type - every_n_days)
@@ -185,7 +185,7 @@ EXAMPLES = r"""
name: "Ansible_Every_Month_Testing"
type: "every_month"
day_of_month: 17
- auto_delete: True
+ auto_delete: true
state: "{{state_present}}"
- name: Get snapshot schedule details using name
@@ -226,7 +226,7 @@ EXAMPLES = r"""
name: "Ansible_Every_Day_Testing"
type: "every_day"
desired_retention: 200
- auto_delete: False
+ auto_delete: false
state: "{{state_present}}"
- name: Delete snapshot schedule using id
@@ -253,7 +253,7 @@ changed:
description: Whether or not the resource has changed.
returned: always
type: bool
- sample: True
+ sample: true
snapshot_schedule_details:
description: Details of the snapshot schedule.
@@ -385,14 +385,13 @@ snapshot_schedule_details:
}
"""
-import logging
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
LOG = utils.get_logger('snapshotschedule')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class SnapshotSchedule(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/storagepool.py b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
index ddb7eef65..6438e9c6a 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/storagepool.py
@@ -217,10 +217,10 @@ EXAMPLES = r'''
raid_type : "RAID10"
stripe_width : "BEST_FIT"
alert_threshold : 50
- is_harvest_enabled : True
+ is_harvest_enabled : true
pool_harvest_high_threshold : 60
pool_harvest_low_threshold : 40
- is_snap_harvest_enabled : True
+ is_snap_harvest_enabled : true
snap_harvest_high_threshold : 70
snap_harvest_low_threshold : 50
fast_vp: "enabled"
@@ -235,7 +235,7 @@ RETURN = r'''
description: Whether or not the storage pool has changed.
returned: always
type: bool
- sample: True
+ sample: true
storage_pool_details:
description: The storage pool details.
@@ -464,11 +464,10 @@ RETURN = r'''
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
-import logging
LOG = utils.get_logger('storagepool')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class StoragePool(object):
@@ -499,7 +498,9 @@ class StoragePool(object):
details = api_response._get_properties()
is_fast_vp_enabled = api_response._get_property_from_raw(
- 'pool_fast_vp').is_schedule_enabled
+ 'pool_fast_vp')
+ if is_fast_vp_enabled:
+ is_fast_vp_enabled = is_fast_vp_enabled.is_schedule_enabled
details['is_fast_vp_enabled'] = is_fast_vp_enabled
details['size_free_with_unit'] = utils.\
diff --git a/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
index 063834b45..b066a01fa 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/tree_quota.py
@@ -199,7 +199,7 @@ changed:
description: Whether or not the resource has changed.
returned: always
type: bool
- sample: True
+ sample: true
get_tree_quota_details:
description: Details of the quota tree.
@@ -283,7 +283,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('tree_quota')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class QuotaTree(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/user_quota.py b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
index d9116c3a5..06413aa53 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/user_quota.py
@@ -294,7 +294,7 @@ changed:
description: Whether or not the resource has changed.
returned: always
type: bool
- sample: True
+ sample: true
get_user_quota_details:
description: Details of the user quota.
@@ -427,7 +427,7 @@ from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
LOG = utils.get_logger('user_quota')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
class UserQuota(object):
diff --git a/ansible_collections/dellemc/unity/plugins/modules/volume.py b/ansible_collections/dellemc/unity/plugins/modules/volume.py
index 82bcb0174..81790ea24 100644
--- a/ansible_collections/dellemc/unity/plugins/modules/volume.py
+++ b/ansible_collections/dellemc/unity/plugins/modules/volume.py
@@ -177,9 +177,9 @@ EXAMPLES = r"""
pool_name: "{{pool}}"
size: 2
cap_unit: "{{cap_GB}}"
- is_thin: True
- compression: True
- advanced_dedup: True
+ is_thin: true
+ compression: true
+ advanced_dedup: true
state: "{{state_present}}"
- name: Expand Volume by volume id
@@ -240,9 +240,9 @@ EXAMPLES = r"""
vol_name: "{{vol_name}}"
new_vol_name: "{{new_vol_name}}"
tiering_policy: "AUTOTIER"
- compression: True
- is_thin: True
- advanced_dedup: True
+ compression: true
+ is_thin: true
+ advanced_dedup: true
state: "{{state_present}}"
- name: Delete Volume by vol name
@@ -270,7 +270,7 @@ changed:
description: Whether or not the resource has changed.
returned: always
type: bool
- sample: True
+ sample: true
volume_details:
description: Details of the volume.
@@ -392,11 +392,10 @@ volume_details:
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
-import logging
LOG = utils.get_logger('volume')
-application_type = "Ansible/1.6.0"
+application_type = "Ansible/1.7.1"
def is_none_or_empty_string(param):
diff --git a/ansible_collections/dellemc/unity/requirements.txt b/ansible_collections/dellemc/unity/requirements.txt
index 2325e97fb..847cc465d 100644
--- a/ansible_collections/dellemc/unity/requirements.txt
+++ b/ansible_collections/dellemc/unity/requirements.txt
@@ -1,3 +1,2 @@
-urllib3
+urllib3>=1.26.7
storops>=1.2.11
-setuptools
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt
deleted file mode 100644
index f78c82922..000000000
--- a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.12.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-plugins/modules/nfs.py compile-2.6
-plugins/modules/nfs.py import-2.6
-plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license
-plugins/modules/filesystem.py validate-modules:missing-gplv3-license
-plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license
-plugins/modules/info.py validate-modules:missing-gplv3-license
-plugins/modules/host.py validate-modules:missing-gplv3-license
-plugins/modules/nasserver.py validate-modules:missing-gplv3-license
-plugins/modules/nfs.py validate-modules:missing-gplv3-license
-plugins/modules/smbshare.py validate-modules:missing-gplv3-license
-plugins/modules/snapshot.py validate-modules:missing-gplv3-license
-plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license
-plugins/modules/storagepool.py validate-modules:missing-gplv3-license
-plugins/modules/tree_quota.py validate-modules:missing-gplv3-license
-plugins/modules/user_quota.py validate-modules:missing-gplv3-license
-plugins/modules/volume.py validate-modules:missing-gplv3-license
-plugins/modules/cifsserver.py validate-modules:missing-gplv3-license
-plugins/modules/nfsserver.py validate-modules:missing-gplv3-license
-plugins/modules/host.py import-2.6
-plugins/modules/host.py import-2.7
-plugins/modules/interface.py import-2.6
-plugins/modules/interface.py import-2.7
-plugins/modules/nfs.py import-2.7
-plugins/modules/nfs.py import-3.5
-plugins/modules/nfs.py compile-2.7
-plugins/modules/nfs.py compile-3.5
-plugins/modules/filesystem.py import-2.6
-plugins/modules/filesystem.py compile-2.6
-plugins/modules/filesystem.py compile-2.7
-plugins/modules/filesystem.py compile-3.5
-plugins/modules/filesystem.py import-2.7
-plugins/modules/filesystem.py import-3.5
-plugins/modules/interface.py validate-modules:missing-gplv3-license \ No newline at end of file
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt
index a175e9976..a7aa13146 100644
--- a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt
+++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.13.txt
@@ -25,3 +25,8 @@ plugins/modules/filesystem.py compile-3.5
plugins/modules/filesystem.py import-2.7
plugins/modules/filesystem.py import-3.5
plugins/modules/interface.py validate-modules:missing-gplv3-license
+plugins/modules/replication_session.py validate-modules:missing-gplv3-license
+plugins/modules/replication_session.py import-2.7
+plugins/modules/replication_session.py import-3.5
+plugins/modules/replication_session.py compile-2.7
+plugins/modules/replication_session.py compile-3.5
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt
index a175e9976..a7aa13146 100644
--- a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt
+++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.14.txt
@@ -25,3 +25,8 @@ plugins/modules/filesystem.py compile-3.5
plugins/modules/filesystem.py import-2.7
plugins/modules/filesystem.py import-3.5
plugins/modules/interface.py validate-modules:missing-gplv3-license
+plugins/modules/replication_session.py validate-modules:missing-gplv3-license
+plugins/modules/replication_session.py import-2.7
+plugins/modules/replication_session.py import-3.5
+plugins/modules/replication_session.py compile-2.7
+plugins/modules/replication_session.py compile-3.5
diff --git a/ansible_collections/dellemc/unity/tests/sanity/ignore-2.15.txt b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.15.txt
new file mode 100644
index 000000000..a7aa13146
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,32 @@
+plugins/modules/consistencygroup.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem.py validate-modules:missing-gplv3-license
+plugins/modules/filesystem_snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/info.py validate-modules:missing-gplv3-license
+plugins/modules/host.py validate-modules:missing-gplv3-license
+plugins/modules/nasserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfs.py validate-modules:missing-gplv3-license
+plugins/modules/smbshare.py validate-modules:missing-gplv3-license
+plugins/modules/snapshot.py validate-modules:missing-gplv3-license
+plugins/modules/snapshotschedule.py validate-modules:missing-gplv3-license
+plugins/modules/storagepool.py validate-modules:missing-gplv3-license
+plugins/modules/tree_quota.py validate-modules:missing-gplv3-license
+plugins/modules/user_quota.py validate-modules:missing-gplv3-license
+plugins/modules/volume.py validate-modules:missing-gplv3-license
+plugins/modules/cifsserver.py validate-modules:missing-gplv3-license
+plugins/modules/nfsserver.py validate-modules:missing-gplv3-license
+plugins/modules/host.py import-2.7
+plugins/modules/interface.py import-2.7
+plugins/modules/nfs.py import-2.7
+plugins/modules/nfs.py import-3.5
+plugins/modules/nfs.py compile-2.7
+plugins/modules/nfs.py compile-3.5
+plugins/modules/filesystem.py compile-2.7
+plugins/modules/filesystem.py compile-3.5
+plugins/modules/filesystem.py import-2.7
+plugins/modules/filesystem.py import-3.5
+plugins/modules/interface.py validate-modules:missing-gplv3-license
+plugins/modules/replication_session.py validate-modules:missing-gplv3-license
+plugins/modules/replication_session.py import-2.7
+plugins/modules/replication_session.py import-3.5
+plugins/modules/replication_session.py compile-2.7
+plugins/modules/replication_session.py compile-3.5
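Note: each line in these sanity ignore files pairs a source path with the ansible-test sanity check being waived for that file (for example, "plugins/modules/replication_session.py import-2.7" skips the Python 2.7 import test for that module). The added entries extend the existing waivers to the new replication_session module, and the new ignore-2.15.txt carries the same list forward for ansible-core 2.15 sanity runs.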
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py
index 427d530fa..d3f0e90a1 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_cifsserver_api.py
@@ -5,13 +5,9 @@
"""Mock Api response for Unit tests of CIFS server module on Unity"""
from __future__ import (absolute_import, division, print_function)
-from unittest.mock import MagicMock
__metaclass__ = type
-from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
- import MockSDKObject
-
class MockCIFSServerApi:
CIFS_SERVER_MODULE_ARGS = {
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_info_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_info_api.py
new file mode 100644
index 000000000..80ae035a1
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_info_api.py
@@ -0,0 +1,58 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of Info module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+from mock.mock import Mock
+__metaclass__ = type
+
+
+class MockInfoApi:
+ @staticmethod
+ def get_replication_sessions_response(status="ReplicationSessionStatusEnum.OK"):
+ return [Mock(**{
+ "current_transfer_est_remain_time": 0,
+ "daily_snap_replication_policy": None,
+ "dst_resource_id": "nas_8",
+ "dst_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253398547,
+ "id": "APM00213404195:if_181"
+ }
+ },
+ "dst_status": status,
+ "existed": True,
+ "hash": 8771259012271,
+ "health": {
+ "UnityHealth": {
+ "hash": 8771253424168
+ }
+ },
+ "hourly_snap_replication_policy": None,
+ "id": "103079215114_APM00213404195_0000_103079215274_APM00213404194_0000",
+ "last_sync_time": "2023-04-18 10:35:25+00:00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.DESTINATION",
+ "max_time_out_of_sync": 0,
+ "members": None,
+ "name": "rep_session",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK",
+ "remote_system": {
+ "UnityRemoteSystem": {
+ "hash": 8771253380142
+ }
+ },
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.NASSERVER",
+ "src_resource_id": "nas_213",
+ "src_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253475010,
+ "id": "APM00213404194:if_195"
+ }
+ },
+ "src_status": status,
+ "status": status,
+ "sync_progress": 0,
+ "sync_state": "ReplicationSessionSyncStateEnum.IN_SYNC"}
+ )]
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py
index 6bd53ea9b..046ff2c41 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_interface_api.py
@@ -5,7 +5,6 @@
"""Mock Api response for Unit tests of interface on Unity"""
from __future__ import (absolute_import, division, print_function)
-from unittest.mock import MagicMock
__metaclass__ = type
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py
index 1254f0035..9c4eadc12 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_nfsserver_api.py
@@ -5,7 +5,6 @@
"""Mock Api response for Unit tests of NFS server module on Unity"""
from __future__ import (absolute_import, division, print_function)
-from unittest.mock import MagicMock
__metaclass__ = type
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_replication_session_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_replication_session_api.py
new file mode 100644
index 000000000..050084316
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_replication_session_api.py
@@ -0,0 +1,112 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of Replication session module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
+ import MockSDKObject
+
+__metaclass__ = type
+
+
+class MockReplicationSessionApi:
+ MODULE_ARGS = {
+ 'session_id': None,
+ 'session_name': None,
+ 'pause': None,
+ 'failback': None,
+ 'sync': None,
+ 'failover_with_sync': None,
+ 'force_full_copy': None,
+ 'force': None,
+ 'state': 'present'
+ }
+
+ @staticmethod
+ def get_replication_session_details(status="ACTIVE"):
+ return {
+ "current_transfer_est_remain_time": 0,
+ "daily_snap_replication_policy": None,
+ "dst_resource_id": "nas_8",
+ "dst_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253398547,
+ "id": "APM00213404195:if_181"
+ }
+ },
+ "dst_status": "ReplicationSessionStatusEnum.OK",
+ "existed": True,
+ "hash": 8771259012271,
+ "health": {
+ "UnityHealth": {
+ "hash": 8771253424168
+ }
+ },
+ "hourly_snap_replication_policy": None,
+ "id": "103079215114_APM00213404195_0000_103079215274_APM00213404194_0000",
+ "last_sync_time": "2023-04-18 10:35:25+00:00",
+ "local_role": "ReplicationSessionReplicationRoleEnum.DESTINATION",
+ "max_time_out_of_sync": 0,
+ "members": None,
+ "name": "rep_session",
+ "network_status": "ReplicationSessionNetworkStatusEnum.OK",
+ "remote_system": {
+ "UnityRemoteSystem": {
+ "hash": 8771253380142
+ }
+ },
+ "replication_resource_type": "ReplicationEndpointResourceTypeEnum.NASSERVER",
+ "src_resource_id": "nas_213",
+ "src_spa_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253475010,
+ "id": "APM00213404194:if_195"
+ }
+ },
+ "src_spb_interface": {
+ "UnityRemoteInterface": {
+ "hash": 8771253374169,
+ "id": "APM00213404194:if_194"
+ }
+ },
+ "src_status": "ReplicationSessionStatusEnum.OK",
+ "status": Status(status),
+ "sync_progress": 0,
+ "sync_state": "ReplicationSessionSyncStateEnum.IN_SYNC"
+ }
+
+
+class Status:
+ name = "ACTIVE"
+
+ def __init__(self, status):
+ self.name = status
+
+
+class MockReplicationSessionObject(MockSDKObject):
+ pause_session = False
+ resume_session = False
+ failover_session = False
+ failback_session = False
+ sync_session = False
+ delete_session = False
+
+ def pause(self):
+ self.pause_session = True
+
+ def resume(self, force_full_copy=None):
+ self.resume_session = True
+
+ def failover(self, sync=None, force=None):
+ self.failover_session = True
+
+ def failback(self, force_full_copy=None):
+ self.failback_session = True
+
+ def sync(self):
+ self.sync_session = True
+
+ def delete(self):
+ self.delete_session = True
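For readers following the new unit tests later in this patch: MockReplicationSessionObject never talks to Unisphere; each operation simply flips a boolean flag, which the tests then assert on. A minimal usage sketch under that assumption (the local variable name is illustrative only):

    session = MockReplicationSessionObject(
        MockReplicationSessionApi.get_replication_session_details())
    session.pause()                       # what the module code would invoke
    assert session.pause_session is True  # what the test checks afterwards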
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py
index 82097a338..ddf5c407e 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/module_utils/mock_volume_api.py
@@ -8,8 +8,6 @@ from __future__ import absolute_import, division, print_function
__metaclass__ = type
-from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response import MockSDKObject
-
class MockVolumeApi:
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py
index e28c2e935..a1143a771 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_cifsserver.py
@@ -14,8 +14,6 @@ from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_cifs
import MockCIFSServerApi
from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
import MockSDKObject
-from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
- import HttpError as http_error, MockApiException
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
utils.get_logger = MagicMock()
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py
index de94c38d3..028a2fa4e 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_host.py
@@ -15,7 +15,7 @@ from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_host
from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
import MockSDKObject
from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
- import HttpError as http_error, MockApiException
+ import HttpError as http_error
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell \
import utils
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_info.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_info.py
new file mode 100644
index 000000000..a36567660
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_info.py
@@ -0,0 +1,54 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of Info module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_info_api \
+ import MockInfoApi
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import HttpError as http_error
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.unity.plugins.modules.info import Info
+
+
+class TestInfo():
+
+ get_module_args = {"gather_subset": None, "state": "present"}
+
+ @pytest.fixture
+ def info_module_mock(self):
+ info_module_mock = Info()
+ info_module_mock.unity = MagicMock()
+ return info_module_mock
+
+ def test_get_replication_session_details(self, info_module_mock):
+ self.get_module_args.update({'gather_subset': 'replication_session'})
+ info_module_mock.module.params = self.get_module_args
+ info_module_mock.unity.get_replication_session = \
+ MagicMock(return_value=MockInfoApi.get_replication_sessions_response())
+ info_module_mock.perform_module_operation()
+ assert info_module_mock.module.exit_json.call_args[1]['Replication_sessions'] is not None
+
+ def test_get_replication_session_details_throws_exception(self, info_module_mock):
+ self.get_module_args.update({'gather_subset': 'replication_session'})
+ info_module_mock.module.params = self.get_module_args
+ utils.HttpError = http_error
+ info_module_mock.unity.get_replication_session = \
+ MagicMock(side_effect=http_error)
+ info_module_mock.perform_module_operation()
+ assert "Get replication session list from unity array failed with error" in \
+ info_module_mock.module.fail_json.call_args[1]['msg']
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py
index c2a680487..f7900eeb1 100644
--- a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_nfsserver.py
@@ -12,8 +12,6 @@ import pytest
from mock.mock import MagicMock
from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_nfsserver_api \
import MockNFSServerApi
-from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_sdk_response \
- import MockSDKObject
from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
import HttpError as http_error, MockApiException
from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
diff --git a/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_replication_session.py b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_replication_session.py
new file mode 100644
index 000000000..9af1800e6
--- /dev/null
+++ b/ansible_collections/dellemc/unity/tests/unit/plugins/modules/test_replication_session.py
@@ -0,0 +1,178 @@
+# Copyright: (c) 2023, Dell Technologies
+
+# Apache License version 2.0 (see MODULE-LICENSE or http://www.apache.org/licenses/LICENSE-2.0.txt)
+
+"""Mock Api response for Unit tests of Replication session module on Unity"""
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+import pytest
+from mock.mock import MagicMock
+from enum import Enum
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_replication_session_api \
+ import MockReplicationSessionApi, MockReplicationSessionObject
+from ansible_collections.dellemc.unity.tests.unit.plugins.module_utils.mock_api_exception \
+ import HttpError as http_error
+from ansible_collections.dellemc.unity.plugins.module_utils.storage.dell import utils
+
+utils.get_logger = MagicMock()
+utils.get_unity_management_host_parameters = MagicMock()
+utils.ensure_required_libs = MagicMock()
+utils.get_unity_unisphere_connection = MagicMock()
+from ansible.module_utils import basic
+basic.AnsibleModule = MagicMock()
+from ansible_collections.dellemc.unity.plugins.modules.replication_session import ReplicationSession, ReplicationSessionHandler
+
+
+class ReplicationOpStatusEnum(Enum):
+ FAILED_OVER_WITH_SYNC = (0x8400, 'Failed_Over_with_Sync')
+ FAILED_OVER = (0x8401, 'Failed_Over')
+ PAUSED = (0x8403, 'Paused')
+
+
+class TestReplicationSession():
+
+ get_module_args = MockReplicationSessionApi.MODULE_ARGS
+ session_name = "rep_session"
+ FAILED_WITH_ERROR = " failed with error"
+ replication_session_obj = MockReplicationSessionObject(MockReplicationSessionApi.get_replication_session_details())
+
+ @pytest.fixture
+ def replication_session_module_mock(self):
+ setattr(utils, 'ReplicationOpStatusEnum', ReplicationOpStatusEnum)
+ replication_session_module_mock = ReplicationSession()
+ replication_session_module_mock.unity_conn = MagicMock()
+ replication_session_module_mock.module.check_mode = False
+ return replication_session_module_mock
+
+ def test_get_replication_session_details(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1'})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert replication_session_module_mock.module.exit_json.call_args[1]['changed'] is False
+
+ def test_get_replication_session_details_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1'})
+ replication_session_module_mock.module.params = self.get_module_args
+ utils.HttpError = http_error
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(side_effect=http_error)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "is invalid" in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_pause_replication_session(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'pause': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert self.replication_session_obj.pause_session is True
+
+ def test_pause_replication_session_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'pause': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ self.replication_session_obj.pause = MagicMock(side_effect=Exception)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "Pause replication session " + self.session_name + self.FAILED_WITH_ERROR in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_resume_replication_session(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'pause': False})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_obj = MockReplicationSessionObject(MockReplicationSessionApi.get_replication_session_details("PAUSED"))
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert replication_session_obj.resume_session is True
+
+ def test_resume_replication_session_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'pause': False})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_obj = MockReplicationSessionObject(MockReplicationSessionApi.get_replication_session_details("PAUSED"))
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=replication_session_obj)
+ replication_session_obj.resume = MagicMock(side_effect=Exception)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "Resume replication session " + self.session_name + self.FAILED_WITH_ERROR in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_failover_replication_session(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'failover_with_sync': True, 'force': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert self.replication_session_obj.failover_session is True
+
+ def test_failover_replication_session_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'failover_with_sync': True, 'force': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ self.replication_session_obj.failover = MagicMock(side_effect=Exception)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "Failover replication session " + self.session_name + self.FAILED_WITH_ERROR in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_failback_replication_session_details(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'failback': True, 'force_full_copy': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_obj = MockReplicationSessionObject(MockReplicationSessionApi.get_replication_session_details("FAILED_OVER"))
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert replication_session_obj.failback_session is True
+
+ def test_failback_replication_session_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'failback': True, 'force_full_copy': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_obj = MockReplicationSessionObject(MockReplicationSessionApi.get_replication_session_details("FAILED_OVER"))
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=replication_session_obj)
+ replication_session_obj.failback = MagicMock(side_effect=Exception)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "Failback replication session " + self.session_name + self.FAILED_WITH_ERROR in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_sync_replication_session(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'sync': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert self.replication_session_obj.sync_session is True
+
+ def test_sync_replication_session_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'sync': True})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ self.replication_session_obj.sync = MagicMock(side_effect=Exception)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "Sync replication session " + self.session_name + self.FAILED_WITH_ERROR in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']
+
+ def test_delete_replication_session(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'state': 'absent'})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert self.replication_session_obj.delete_session is True
+
+ def test_delete_replication_session_throws_exception(self, replication_session_module_mock):
+ self.get_module_args.update({'session_name': 'session1', 'state': 'absent'})
+ replication_session_module_mock.module.params = self.get_module_args
+ replication_session_module_mock.unity_conn.get_replication_session = \
+ MagicMock(return_value=self.replication_session_obj)
+ self.replication_session_obj.delete = MagicMock(side_effect=Exception)
+ ReplicationSessionHandler().handle(replication_session_module_mock, replication_session_module_mock.module.params)
+ assert "Deleting replication session " + self.session_name + self.FAILED_WITH_ERROR in \
+ replication_session_module_mock.module.fail_json.call_args[1]['msg']